diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index bcec3768f398dc..b1f61ed6fcf908 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -35,6 +35,7 @@
 /_*
 /build
 /buildtools
+/gypfiles/.gold_plugin
 /gypfiles/win_toolchain.json
 /hydrogen.cfg
 /obj
@@ -47,8 +48,6 @@
 /test/benchmarks/data
 /test/fuzzer/wasm_corpus
 /test/fuzzer/wasm_corpus.tar.gz
-/test/fuzzer/wasm_asmjs_corpus
-/test/fuzzer/wasm_asmjs_corpus.tar.gz
 /test/mozilla/data
 /test/promises-aplus/promises-tests
 /test/promises-aplus/promises-tests.tar.gz
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 07cf31914431e0..048702701c4211 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -20,7 +20,7 @@ Imagination Technologies, LLC <*@imgtec.com>
 Loongson Technology Corporation Limited <*@loongson.cn>
 Code Aurora Forum <*@codeaurora.org>
 Home Jinni Inc. <*@homejinni.com>
-IBM Inc. <*@*.ibm.com>
+IBM Inc. <*@*ibm.com>
 Samsung <*@*.samsung.com>
 Joyent, Inc <*@joyent.com>
 RT-RK Computer Based System <*@rt-rk.com>
@@ -126,6 +126,7 @@ Victor Costan
 Vlad Burlik
 Vladimir Krivosheev
 Vladimir Shutoff
+Wiktor Garbacz
 Yu Yin
 Zac Hansen
 Zhongping Wang
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index b60425df45b469..494ba22f2934d6 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -24,9 +24,6 @@ declare_args() {
   # Sets -DV8_ENABLE_FUTURE.
   v8_enable_future = false
 
-  # Sets -DV8_DISABLE_TURBO.
-  v8_disable_turbo = false
-
   # Sets -DVERIFY_HEAP.
   v8_enable_verify_heap = ""
 
@@ -82,6 +79,10 @@ declare_args() {
   # Sets -dV8_CONCURRENT_MARKING
   v8_enable_concurrent_marking = false
 
+  # Build the snapshot with unwinding information for perf.
+  # Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
+  v8_perf_prof_unwinding_info = false
+
   # With post mortem support enabled, metadata is embedded into libv8 that
   # describes various parameters of the VM for use by debuggers. See
   # tools/gen-postmortem-metadata.py for details.
@@ -111,9 +112,13 @@ declare_args() {
   v8_experimental_extra_library_files =
       [ "//test/cctest/test-experimental-extra.js" ]
 
-  v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
-                       v8_current_cpu == "x87") && (is_linux || is_mac)) ||
-                     (v8_current_cpu == "ppc64" && is_linux)
+  v8_enable_gdbjit =
+      ((v8_current_cpu == "x86" || v8_current_cpu == "x64") &&
+       (is_linux || is_mac)) || (v8_current_cpu == "ppc64" && is_linux)
+
+  # Temporary flag to allow embedders to update their microtasks scopes
+  # while rolling in a new version of V8.
+  v8_check_microtasks_scopes_consistency = ""
 }
 
 # Derived defaults.
@@ -132,6 +137,9 @@ if (v8_enable_trace_maps == "") {
 if (v8_enable_v8_checks == "") {
   v8_enable_v8_checks = is_debug
 }
+if (v8_check_microtasks_scopes_consistency == "") {
+  v8_check_microtasks_scopes_consistency = is_debug || dcheck_always_on
+}
 
 # Specifies if the target build is a simulator build.
Comparing target cpu # with v8 target cpu to not affect simulator builds for making cross-compile @@ -219,9 +227,6 @@ config("features") { if (v8_enable_future) { defines += [ "V8_ENABLE_FUTURE" ] } - if (v8_disable_turbo) { - defines += [ "V8_DISABLE_TURBO" ] - } if (v8_enable_gdbjit) { defines += [ "ENABLE_GDB_JIT_INTERFACE" ] } @@ -263,6 +268,9 @@ config("features") { } if (v8_use_snapshot) { defines += [ "V8_USE_SNAPSHOT" ] + if (v8_perf_prof_unwinding_info) { + defines += [ "V8_USE_SNAPSHOT_WITH_UNWINDING_INFO" ] + } } if (v8_use_external_startup_data) { defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ] @@ -270,6 +278,9 @@ config("features") { if (v8_enable_concurrent_marking) { defines += [ "V8_CONCURRENT_MARKING" ] } + if (v8_check_microtasks_scopes_consistency) { + defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ] + } } config("toolchain") { @@ -320,8 +331,7 @@ config("toolchain") { defines += [ "_MIPS_TARGET_SIMULATOR" ] } - # TODO(jochen): Add support for mips. - if (v8_current_cpu == "mipsel") { + if (v8_current_cpu == "mipsel" || v8_current_cpu == "mips") { defines += [ "V8_TARGET_ARCH_MIPS" ] if (v8_can_use_fpu_instructions) { defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] @@ -355,15 +365,17 @@ config("toolchain") { # TODO(jochen): Add support for mips_arch_variant rx and loongson. } - # TODO(jochen): Add support for mips64. - if (v8_current_cpu == "mips64el") { + if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") { defines += [ "V8_TARGET_ARCH_MIPS64" ] if (v8_can_use_fpu_instructions) { defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] } - # TODO(jochen): Add support for big endian host byteorder. - defines += [ "V8_TARGET_ARCH_MIPS64_LE" ] + if (host_byteorder == "little") { + defines += [ "V8_TARGET_ARCH_MIPS64_LE" ] + } else if (host_byteorder == "big") { + defines += [ "V8_TARGET_ARCH_MIPS64_BE" ] + } if (v8_use_mips_abi_hardfloat) { defines += [ "__mips_hard_float=1", @@ -496,6 +508,19 @@ config("toolchain") { } } +# Configs for code coverage with gcov. Separate configs for cflags and ldflags +# to selectively influde cflags in non-test targets only. 
+config("v8_gcov_coverage_cflags") { + cflags = [ + "-fprofile-arcs", + "-ftest-coverage", + ] +} + +config("v8_gcov_coverage_ldflags") { + ldflags = [ "-fprofile-arcs" ] +} + ############################################################################### # Actions # @@ -523,8 +548,6 @@ action("js2c") { "src/js/typedarray.js", "src/js/collection.js", "src/js/weak-collection.js", - "src/js/collection-iterator.js", - "src/js/promise.js", "src/js/messages.js", "src/js/templates.js", "src/js/spread.js", @@ -703,6 +726,12 @@ action("postmortem-metadata") { "src/objects-inl.h", "src/objects/map.h", "src/objects/map-inl.h", + "src/objects/script.h", + "src/objects/script-inl.h", + "src/objects/shared-function-info.h", + "src/objects/shared-function-info-inl.h", + "src/objects/string.h", + "src/objects/string-inl.h", ] outputs = [ @@ -750,6 +779,10 @@ action("run_mksnapshot") { ] } + if (v8_perf_prof_unwinding_info) { + args += [ "--perf-prof-unwinding-info" ] + } + if (v8_use_external_startup_data) { outputs += [ "$root_out_dir/snapshot_blob.bin" ] args += [ @@ -769,6 +802,7 @@ action("v8_dump_build_config") { outputs = [ "$root_out_dir/v8_build_config.json", ] + is_gcov_coverage = v8_code_coverage && !is_clang args = [ rebase_path("$root_out_dir/v8_build_config.json", root_build_dir), "current_cpu=\"$current_cpu\"", @@ -777,6 +811,7 @@ action("v8_dump_build_config") { "is_cfi=$is_cfi", "is_component_build=$is_component_build", "is_debug=$is_debug", + "is_gcov_coverage=$is_gcov_coverage", "is_msan=$is_msan", "is_tsan=$is_tsan", "target_cpu=\"$target_cpu\"", @@ -907,12 +942,16 @@ v8_source_set("v8_builtins_generators") { "src/builtins/builtins-async-iterator-gen.cc", "src/builtins/builtins-boolean-gen.cc", "src/builtins/builtins-call-gen.cc", + "src/builtins/builtins-call-gen.h", + "src/builtins/builtins-collections-gen.cc", "src/builtins/builtins-console-gen.cc", "src/builtins/builtins-constructor-gen.cc", "src/builtins/builtins-constructor-gen.h", "src/builtins/builtins-constructor.h", "src/builtins/builtins-conversion-gen.cc", + "src/builtins/builtins-conversion-gen.h", "src/builtins/builtins-date-gen.cc", + "src/builtins/builtins-debug-gen.cc", "src/builtins/builtins-forin-gen.cc", "src/builtins/builtins-forin-gen.h", "src/builtins/builtins-function-gen.cc", @@ -923,11 +962,14 @@ v8_source_set("v8_builtins_generators") { "src/builtins/builtins-internal-gen.cc", "src/builtins/builtins-interpreter-gen.cc", "src/builtins/builtins-intl-gen.cc", + "src/builtins/builtins-iterator-gen.cc", + "src/builtins/builtins-iterator-gen.h", "src/builtins/builtins-math-gen.cc", "src/builtins/builtins-number-gen.cc", "src/builtins/builtins-object-gen.cc", "src/builtins/builtins-promise-gen.cc", "src/builtins/builtins-promise-gen.h", + "src/builtins/builtins-proxy-gen.cc", "src/builtins/builtins-regexp-gen.cc", "src/builtins/builtins-regexp-gen.h", "src/builtins/builtins-sharedarraybuffer-gen.cc", @@ -994,11 +1036,6 @@ v8_source_set("v8_builtins_generators") { ### gcmole(arch:s390) ### "src/builtins/s390/builtins-s390.cc", ] - } else if (v8_current_cpu == "x87") { - sources += [ - ### gcmole(arch:x87) ### - "src/builtins/x87/builtins-x87.cc", - ] } if (!v8_enable_i18n_support) { @@ -1053,6 +1090,9 @@ v8_header_set("v8_headers") { v8_source_set("v8_base") { visibility = [ ":*" ] # Only targets in this file can depend on this. + # Split static libraries on windows into two. 
+ split_count = 2 + sources = [ "//base/trace_event/common/trace_event_common.h", @@ -1070,7 +1110,6 @@ v8_source_set("v8_base") { "src/accessors.h", "src/address-map.cc", "src/address-map.h", - "src/allocation-site-scopes.cc", "src/allocation-site-scopes.h", "src/allocation.cc", "src/allocation.h", @@ -1105,10 +1144,8 @@ v8_source_set("v8_base") { "src/ast/ast-function-literal-id-reindexer.h", "src/ast/ast-numbering.cc", "src/ast/ast-numbering.h", + "src/ast/ast-source-ranges.h", "src/ast/ast-traversal-visitor.h", - "src/ast/ast-type-bounds.h", - "src/ast/ast-types.cc", - "src/ast/ast-types.h", "src/ast/ast-value-factory.cc", "src/ast/ast-value-factory.h", "src/ast/ast.cc", @@ -1145,11 +1182,11 @@ v8_source_set("v8_base") { "src/builtins/builtins-boolean.cc", "src/builtins/builtins-call.cc", "src/builtins/builtins-callsite.cc", + "src/builtins/builtins-collections.cc", "src/builtins/builtins-console.cc", "src/builtins/builtins-constructor.h", "src/builtins/builtins-dataview.cc", "src/builtins/builtins-date.cc", - "src/builtins/builtins-debug.cc", "src/builtins/builtins-definitions.h", "src/builtins/builtins-descriptors.h", "src/builtins/builtins-error.cc", @@ -1158,11 +1195,12 @@ v8_source_set("v8_base") { "src/builtins/builtins-internal.cc", "src/builtins/builtins-interpreter.cc", "src/builtins/builtins-intl.cc", + "src/builtins/builtins-intl.h", "src/builtins/builtins-json.cc", "src/builtins/builtins-math.cc", "src/builtins/builtins-number.cc", "src/builtins/builtins-object.cc", - "src/builtins/builtins-proxy.cc", + "src/builtins/builtins-promise.cc", "src/builtins/builtins-reflect.cc", "src/builtins/builtins-regexp.cc", "src/builtins/builtins-sharedarraybuffer.cc", @@ -1186,7 +1224,6 @@ v8_source_set("v8_base") { "src/code-factory.h", "src/code-stub-assembler.cc", "src/code-stub-assembler.h", - "src/code-stubs-hydrogen.cc", "src/code-stubs-utils.h", "src/code-stubs.cc", "src/code-stubs.h", @@ -1232,6 +1269,8 @@ v8_source_set("v8_base") { "src/compiler/bytecode-liveness-map.cc", "src/compiler/bytecode-liveness-map.h", "src/compiler/c-linkage.cc", + "src/compiler/check-elimination.cc", + "src/compiler/check-elimination.h", "src/compiler/checkpoint-elimination.cc", "src/compiler/checkpoint-elimination.h", "src/compiler/code-assembler.cc", @@ -1324,8 +1363,6 @@ v8_source_set("v8_base") { "src/compiler/linkage.h", "src/compiler/live-range-separator.cc", "src/compiler/live-range-separator.h", - "src/compiler/liveness-analyzer.cc", - "src/compiler/liveness-analyzer.h", "src/compiler/load-elimination.cc", "src/compiler/load-elimination.h", "src/compiler/loop-analysis.cc", @@ -1369,6 +1406,8 @@ v8_source_set("v8_base") { "src/compiler/pipeline-statistics.h", "src/compiler/pipeline.cc", "src/compiler/pipeline.h", + "src/compiler/property-access-builder.cc", + "src/compiler/property-access-builder.h", "src/compiler/raw-machine-assembler.cc", "src/compiler/raw-machine-assembler.h", "src/compiler/redundancy-elimination.cc", @@ -1397,8 +1436,6 @@ v8_source_set("v8_base") { "src/compiler/state-values-utils.h", "src/compiler/store-store-elimination.cc", "src/compiler/store-store-elimination.h", - "src/compiler/tail-call-optimization.cc", - "src/compiler/tail-call-optimization.h", "src/compiler/type-cache.cc", "src/compiler/type-cache.h", "src/compiler/typed-optimization.cc", @@ -1426,67 +1463,6 @@ v8_source_set("v8_base") { "src/counters-inl.h", "src/counters.cc", "src/counters.h", - "src/crankshaft/compilation-phase.cc", - "src/crankshaft/compilation-phase.h", - 
"src/crankshaft/hydrogen-alias-analysis.h", - "src/crankshaft/hydrogen-bce.cc", - "src/crankshaft/hydrogen-bce.h", - "src/crankshaft/hydrogen-canonicalize.cc", - "src/crankshaft/hydrogen-canonicalize.h", - "src/crankshaft/hydrogen-check-elimination.cc", - "src/crankshaft/hydrogen-check-elimination.h", - "src/crankshaft/hydrogen-dce.cc", - "src/crankshaft/hydrogen-dce.h", - "src/crankshaft/hydrogen-dehoist.cc", - "src/crankshaft/hydrogen-dehoist.h", - "src/crankshaft/hydrogen-environment-liveness.cc", - "src/crankshaft/hydrogen-environment-liveness.h", - "src/crankshaft/hydrogen-escape-analysis.cc", - "src/crankshaft/hydrogen-escape-analysis.h", - "src/crankshaft/hydrogen-flow-engine.h", - "src/crankshaft/hydrogen-gvn.cc", - "src/crankshaft/hydrogen-gvn.h", - "src/crankshaft/hydrogen-infer-representation.cc", - "src/crankshaft/hydrogen-infer-representation.h", - "src/crankshaft/hydrogen-infer-types.cc", - "src/crankshaft/hydrogen-infer-types.h", - "src/crankshaft/hydrogen-instructions.cc", - "src/crankshaft/hydrogen-instructions.h", - "src/crankshaft/hydrogen-load-elimination.cc", - "src/crankshaft/hydrogen-load-elimination.h", - "src/crankshaft/hydrogen-mark-unreachable.cc", - "src/crankshaft/hydrogen-mark-unreachable.h", - "src/crankshaft/hydrogen-osr.cc", - "src/crankshaft/hydrogen-osr.h", - "src/crankshaft/hydrogen-range-analysis.cc", - "src/crankshaft/hydrogen-range-analysis.h", - "src/crankshaft/hydrogen-redundant-phi.cc", - "src/crankshaft/hydrogen-redundant-phi.h", - "src/crankshaft/hydrogen-removable-simulates.cc", - "src/crankshaft/hydrogen-removable-simulates.h", - "src/crankshaft/hydrogen-representation-changes.cc", - "src/crankshaft/hydrogen-representation-changes.h", - "src/crankshaft/hydrogen-sce.cc", - "src/crankshaft/hydrogen-sce.h", - "src/crankshaft/hydrogen-store-elimination.cc", - "src/crankshaft/hydrogen-store-elimination.h", - "src/crankshaft/hydrogen-types.cc", - "src/crankshaft/hydrogen-types.h", - "src/crankshaft/hydrogen-uint32-analysis.cc", - "src/crankshaft/hydrogen-uint32-analysis.h", - "src/crankshaft/hydrogen.cc", - "src/crankshaft/hydrogen.h", - "src/crankshaft/lithium-allocator-inl.h", - "src/crankshaft/lithium-allocator.cc", - "src/crankshaft/lithium-allocator.h", - "src/crankshaft/lithium-codegen.cc", - "src/crankshaft/lithium-codegen.h", - "src/crankshaft/lithium-inl.h", - "src/crankshaft/lithium.cc", - "src/crankshaft/lithium.h", - "src/crankshaft/typing.cc", - "src/crankshaft/typing.h", - "src/crankshaft/unique.h", "src/date.cc", "src/date.h", "src/dateparser-inl.h", @@ -1518,7 +1494,6 @@ v8_source_set("v8_base") { "src/double.h", "src/dtoa.cc", "src/dtoa.h", - "src/effects.h", "src/eh-frame.cc", "src/eh-frame.h", "src/elements-kind.cc", @@ -1560,6 +1535,7 @@ v8_source_set("v8_base") { "src/flag-definitions.h", "src/flags.cc", "src/flags.h", + "src/float.h", "src/frames-inl.h", "src/frames.cc", "src/frames.h", @@ -1581,7 +1557,6 @@ v8_source_set("v8_base") { "src/heap/array-buffer-tracker.h", "src/heap/code-stats.cc", "src/heap/code-stats.h", - "src/heap/concurrent-marking-deque.h", "src/heap/concurrent-marking.cc", "src/heap/concurrent-marking.h", "src/heap/embedder-tracing.cc", @@ -1599,9 +1574,11 @@ v8_source_set("v8_base") { "src/heap/incremental-marking.cc", "src/heap/incremental-marking.h", "src/heap/item-parallel-job.h", + "src/heap/local-allocator.h", "src/heap/mark-compact-inl.h", "src/heap/mark-compact.cc", "src/heap/mark-compact.h", + "src/heap/marking.cc", "src/heap/marking.h", "src/heap/memory-reducer.cc", "src/heap/memory-reducer.h", @@ 
-1610,7 +1587,6 @@ v8_source_set("v8_base") { "src/heap/objects-visiting-inl.h", "src/heap/objects-visiting.cc", "src/heap/objects-visiting.h", - "src/heap/page-parallel-job.h", "src/heap/remembered-set.h", "src/heap/scavenge-job.cc", "src/heap/scavenge-job.h", @@ -1625,7 +1601,7 @@ v8_source_set("v8_base") { "src/heap/spaces.h", "src/heap/store-buffer.cc", "src/heap/store-buffer.h", - "src/heap/workstealing-marking-deque.h", + "src/heap/worklist.h", "src/ic/access-compiler-data.h", "src/ic/access-compiler.cc", "src/ic/access-compiler.h", @@ -1650,6 +1626,7 @@ v8_source_set("v8_base") { "src/identity-map.h", "src/interface-descriptors.cc", "src/interface-descriptors.h", + "src/interpreter/block-coverage-builder.h", "src/interpreter/bytecode-array-accessor.cc", "src/interpreter/bytecode-array-accessor.h", "src/interpreter/bytecode-array-builder.cc", @@ -1740,10 +1717,15 @@ v8_source_set("v8_base") { "src/objects-printer.cc", "src/objects.cc", "src/objects.h", + "src/objects/arguments-inl.h", + "src/objects/arguments.h", "src/objects/code-cache-inl.h", "src/objects/code-cache.h", "src/objects/compilation-cache-inl.h", "src/objects/compilation-cache.h", + "src/objects/debug-objects-inl.h", + "src/objects/debug-objects.cc", + "src/objects/debug-objects.h", "src/objects/descriptor-array.h", "src/objects/dictionary.h", "src/objects/frame-array-inl.h", @@ -1757,12 +1739,20 @@ v8_source_set("v8_base") { "src/objects/map-inl.h", "src/objects/map.h", "src/objects/module-info.h", + "src/objects/name-inl.h", + "src/objects/name.h", "src/objects/object-macros-undef.h", "src/objects/object-macros.h", "src/objects/regexp-match-info.h", "src/objects/scope-info.cc", "src/objects/scope-info.h", + "src/objects/script-inl.h", + "src/objects/script.h", + "src/objects/shared-function-info-inl.h", + "src/objects/shared-function-info.h", + "src/objects/string-inl.h", "src/objects/string-table.h", + "src/objects/string.h", "src/ostreams.cc", "src/ostreams.h", "src/parsing/duplicate-finder.h", @@ -1948,8 +1938,6 @@ v8_source_set("v8_base") { "src/trap-handler/trap-handler.h", "src/type-hints.cc", "src/type-hints.h", - "src/type-info.cc", - "src/type-info.h", "src/unicode-cache-inl.h", "src/unicode-cache.h", "src/unicode-decoder.cc", @@ -1976,6 +1964,8 @@ v8_source_set("v8_base") { "src/visitors.h", "src/vm-state-inl.h", "src/vm-state.h", + "src/wasm/compilation-manager.cc", + "src/wasm/compilation-manager.h", "src/wasm/decoder.h", "src/wasm/function-body-decoder-impl.h", "src/wasm/function-body-decoder.cc", @@ -1983,6 +1973,8 @@ v8_source_set("v8_base") { "src/wasm/leb-helper.h", "src/wasm/local-decl-encoder.cc", "src/wasm/local-decl-encoder.h", + "src/wasm/module-compiler.cc", + "src/wasm/module-compiler.h", "src/wasm/module-decoder.cc", "src/wasm/module-decoder.h", "src/wasm/signature-map.cc", @@ -2011,6 +2003,7 @@ v8_source_set("v8_base") { "src/wasm/wasm-result.h", "src/wasm/wasm-text.cc", "src/wasm/wasm-text.h", + "src/wasm/wasm-value.h", "src/zone/accounting-allocator.cc", "src/zone/accounting-allocator.h", "src/zone/zone-allocator.h", @@ -2030,12 +2023,6 @@ v8_source_set("v8_base") { "src/compiler/ia32/instruction-codes-ia32.h", "src/compiler/ia32/instruction-scheduler-ia32.cc", "src/compiler/ia32/instruction-selector-ia32.cc", - "src/crankshaft/ia32/lithium-codegen-ia32.cc", - "src/crankshaft/ia32/lithium-codegen-ia32.h", - "src/crankshaft/ia32/lithium-gap-resolver-ia32.cc", - "src/crankshaft/ia32/lithium-gap-resolver-ia32.h", - "src/crankshaft/ia32/lithium-ia32.cc", - "src/crankshaft/ia32/lithium-ia32.h", 
"src/debug/ia32/debug-ia32.cc", "src/full-codegen/ia32/full-codegen-ia32.cc", "src/ia32/assembler-ia32-inl.h", @@ -2070,12 +2057,6 @@ v8_source_set("v8_base") { "src/compiler/x64/instruction-selector-x64.cc", "src/compiler/x64/unwinding-info-writer-x64.cc", "src/compiler/x64/unwinding-info-writer-x64.h", - "src/crankshaft/x64/lithium-codegen-x64.cc", - "src/crankshaft/x64/lithium-codegen-x64.h", - "src/crankshaft/x64/lithium-gap-resolver-x64.cc", - "src/crankshaft/x64/lithium-gap-resolver-x64.h", - "src/crankshaft/x64/lithium-x64.cc", - "src/crankshaft/x64/lithium-x64.h", "src/debug/x64/debug-x64.cc", "src/full-codegen/x64/full-codegen-x64.cc", "src/ic/x64/access-compiler-x64.cc", @@ -2136,12 +2117,6 @@ v8_source_set("v8_base") { "src/compiler/arm/instruction-selector-arm.cc", "src/compiler/arm/unwinding-info-writer-arm.cc", "src/compiler/arm/unwinding-info-writer-arm.h", - "src/crankshaft/arm/lithium-arm.cc", - "src/crankshaft/arm/lithium-arm.h", - "src/crankshaft/arm/lithium-codegen-arm.cc", - "src/crankshaft/arm/lithium-codegen-arm.h", - "src/crankshaft/arm/lithium-gap-resolver-arm.cc", - "src/crankshaft/arm/lithium-gap-resolver-arm.h", "src/debug/arm/debug-arm.cc", "src/full-codegen/arm/full-codegen-arm.cc", "src/ic/arm/access-compiler-arm.cc", @@ -2181,6 +2156,7 @@ v8_source_set("v8_base") { "src/arm64/macro-assembler-arm64.h", "src/arm64/simulator-arm64.cc", "src/arm64/simulator-arm64.h", + "src/arm64/simulator-logic-arm64.cc", "src/arm64/utils-arm64.cc", "src/arm64/utils-arm64.h", "src/compiler/arm64/code-generator-arm64.cc", @@ -2189,15 +2165,6 @@ v8_source_set("v8_base") { "src/compiler/arm64/instruction-selector-arm64.cc", "src/compiler/arm64/unwinding-info-writer-arm64.cc", "src/compiler/arm64/unwinding-info-writer-arm64.h", - "src/crankshaft/arm64/delayed-masm-arm64-inl.h", - "src/crankshaft/arm64/delayed-masm-arm64.cc", - "src/crankshaft/arm64/delayed-masm-arm64.h", - "src/crankshaft/arm64/lithium-arm64.cc", - "src/crankshaft/arm64/lithium-arm64.h", - "src/crankshaft/arm64/lithium-codegen-arm64.cc", - "src/crankshaft/arm64/lithium-codegen-arm64.h", - "src/crankshaft/arm64/lithium-gap-resolver-arm64.cc", - "src/crankshaft/arm64/lithium-gap-resolver-arm64.h", "src/debug/arm64/debug-arm64.cc", "src/full-codegen/arm64/full-codegen-arm64.cc", "src/ic/arm64/access-compiler-arm64.cc", @@ -2212,12 +2179,6 @@ v8_source_set("v8_base") { "src/compiler/mips/instruction-codes-mips.h", "src/compiler/mips/instruction-scheduler-mips.cc", "src/compiler/mips/instruction-selector-mips.cc", - "src/crankshaft/mips/lithium-codegen-mips.cc", - "src/crankshaft/mips/lithium-codegen-mips.h", - "src/crankshaft/mips/lithium-gap-resolver-mips.cc", - "src/crankshaft/mips/lithium-gap-resolver-mips.h", - "src/crankshaft/mips/lithium-mips.cc", - "src/crankshaft/mips/lithium-mips.h", "src/debug/mips/debug-mips.cc", "src/full-codegen/mips/full-codegen-mips.cc", "src/ic/mips/access-compiler-mips.cc", @@ -2251,12 +2212,6 @@ v8_source_set("v8_base") { "src/compiler/mips64/instruction-codes-mips64.h", "src/compiler/mips64/instruction-scheduler-mips64.cc", "src/compiler/mips64/instruction-selector-mips64.cc", - "src/crankshaft/mips64/lithium-codegen-mips64.cc", - "src/crankshaft/mips64/lithium-codegen-mips64.h", - "src/crankshaft/mips64/lithium-gap-resolver-mips64.cc", - "src/crankshaft/mips64/lithium-gap-resolver-mips64.h", - "src/crankshaft/mips64/lithium-mips64.cc", - "src/crankshaft/mips64/lithium-mips64.h", "src/debug/mips64/debug-mips64.cc", "src/full-codegen/mips64/full-codegen-mips64.cc", 
"src/ic/mips64/access-compiler-mips64.cc", @@ -2290,12 +2245,6 @@ v8_source_set("v8_base") { "src/compiler/ppc/instruction-codes-ppc.h", "src/compiler/ppc/instruction-scheduler-ppc.cc", "src/compiler/ppc/instruction-selector-ppc.cc", - "src/crankshaft/ppc/lithium-codegen-ppc.cc", - "src/crankshaft/ppc/lithium-codegen-ppc.h", - "src/crankshaft/ppc/lithium-gap-resolver-ppc.cc", - "src/crankshaft/ppc/lithium-gap-resolver-ppc.h", - "src/crankshaft/ppc/lithium-ppc.cc", - "src/crankshaft/ppc/lithium-ppc.h", "src/debug/ppc/debug-ppc.cc", "src/full-codegen/ppc/full-codegen-ppc.cc", "src/ic/ppc/access-compiler-ppc.cc", @@ -2329,12 +2278,6 @@ v8_source_set("v8_base") { "src/compiler/s390/instruction-codes-s390.h", "src/compiler/s390/instruction-scheduler-s390.cc", "src/compiler/s390/instruction-selector-s390.cc", - "src/crankshaft/s390/lithium-codegen-s390.cc", - "src/crankshaft/s390/lithium-codegen-s390.h", - "src/crankshaft/s390/lithium-gap-resolver-s390.cc", - "src/crankshaft/s390/lithium-gap-resolver-s390.h", - "src/crankshaft/s390/lithium-s390.cc", - "src/crankshaft/s390/lithium-s390.h", "src/debug/s390/debug-s390.cc", "src/full-codegen/s390/full-codegen-s390.cc", "src/ic/s390/access-compiler-s390.cc", @@ -2362,43 +2305,6 @@ v8_source_set("v8_base") { "src/s390/simulator-s390.cc", "src/s390/simulator-s390.h", ] - } else if (v8_current_cpu == "x87") { - sources += [ ### gcmole(arch:x87) ### - "src/compiler/x87/code-generator-x87.cc", - "src/compiler/x87/instruction-codes-x87.h", - "src/compiler/x87/instruction-scheduler-x87.cc", - "src/compiler/x87/instruction-selector-x87.cc", - "src/crankshaft/x87/lithium-codegen-x87.cc", - "src/crankshaft/x87/lithium-codegen-x87.h", - "src/crankshaft/x87/lithium-gap-resolver-x87.cc", - "src/crankshaft/x87/lithium-gap-resolver-x87.h", - "src/crankshaft/x87/lithium-x87.cc", - "src/crankshaft/x87/lithium-x87.h", - "src/debug/x87/debug-x87.cc", - "src/full-codegen/x87/full-codegen-x87.cc", - "src/ic/x87/access-compiler-x87.cc", - "src/ic/x87/handler-compiler-x87.cc", - "src/ic/x87/ic-x87.cc", - "src/regexp/x87/regexp-macro-assembler-x87.cc", - "src/regexp/x87/regexp-macro-assembler-x87.h", - "src/x87/assembler-x87-inl.h", - "src/x87/assembler-x87.cc", - "src/x87/assembler-x87.h", - "src/x87/code-stubs-x87.cc", - "src/x87/code-stubs-x87.h", - "src/x87/codegen-x87.cc", - "src/x87/codegen-x87.h", - "src/x87/cpu-x87.cc", - "src/x87/deoptimizer-x87.cc", - "src/x87/disasm-x87.cc", - "src/x87/frames-x87.cc", - "src/x87/frames-x87.h", - "src/x87/interface-descriptors-x87.cc", - "src/x87/macro-assembler-x87.cc", - "src/x87/macro-assembler-x87.h", - "src/x87/simulator-x87.cc", - "src/x87/simulator-x87.h", - ] } configs = [ ":internal_config" ] @@ -2421,6 +2327,8 @@ v8_source_set("v8_base") { } else { sources -= [ "src/builtins/builtins-intl.cc", + "src/builtins/builtins-intl.h", + "src/char-predicates.cc", "src/intl.cc", "src/intl.h", "src/objects/intl-objects.cc", @@ -2473,6 +2381,7 @@ v8_component("v8_libbase") { "src/base/macros.h", "src/base/once.cc", "src/base/once.h", + "src/base/optional.h", "src/base/platform/condition-variable.cc", "src/base/platform/condition-variable.h", "src/base/platform/elapsed-timer.h", @@ -2490,6 +2399,7 @@ v8_component("v8_libbase") { "src/base/safe_math_impl.h", "src/base/sys-info.cc", "src/base/sys-info.h", + "src/base/template-utils.h", "src/base/timezone-cache.h", "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", @@ -2557,6 +2467,11 @@ v8_component("v8_libbase") { 
"src/base/platform/platform-linux.cc", ] } + } else if (is_fuchsia) { + sources += [ + "src/base/debug/stack_trace_fuchsia.cc", + "src/base/platform/platform-fuchsia.cc", + ] } else if (is_mac) { sources += [ "src/base/debug/stack_trace_posix.cc", @@ -2737,7 +2652,7 @@ group("v8_fuzzers") { ":v8_simple_json_fuzzer", ":v8_simple_parser_fuzzer", ":v8_simple_regexp_fuzzer", - ":v8_simple_wasm_asmjs_fuzzer", + ":v8_simple_wasm_async_fuzzer", ":v8_simple_wasm_call_fuzzer", ":v8_simple_wasm_code_fuzzer", ":v8_simple_wasm_compile_fuzzer", @@ -2758,10 +2673,6 @@ if (is_component_build) { "src/v8dll-main.cc", ] - deps = [ - ":v8_dump_build_config", - ] - public_deps = [ ":v8_base", ":v8_maybe_snapshot", @@ -2779,10 +2690,6 @@ if (is_component_build) { "src/v8dll-main.cc", ] - deps = [ - ":v8_dump_build_config", - ] - public_deps = [ ":v8_base", ":v8_maybe_snapshot", @@ -2798,10 +2705,6 @@ if (is_component_build) { } } else { group("v8") { - deps = [ - ":v8_dump_build_config", - ] - public_deps = [ ":v8_base", ":v8_maybe_snapshot", @@ -2813,10 +2716,6 @@ if (is_component_build) { group("v8_for_testing") { testonly = true - deps = [ - ":v8_dump_build_config", - ] - public_deps = [ ":v8_base", ":v8_maybe_snapshot", @@ -3069,9 +2968,9 @@ v8_source_set("wasm_fuzzer") { v8_fuzzer("wasm_fuzzer") { } -v8_source_set("wasm_asmjs_fuzzer") { +v8_source_set("wasm_async_fuzzer") { sources = [ - "test/fuzzer/wasm-asmjs.cc", + "test/fuzzer/wasm-async.cc", ] deps = [ @@ -3086,7 +2985,7 @@ v8_source_set("wasm_asmjs_fuzzer") { ] } -v8_fuzzer("wasm_asmjs_fuzzer") { +v8_fuzzer("wasm_async_fuzzer") { } v8_source_set("wasm_code_fuzzer") { diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 7ee1b37e798c5c..f3e2941fddd5aa 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,2833 @@ +2017-07-18: Version 6.1.534 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.533 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.532 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.531 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.530 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.529 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.528 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.527 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.526 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.525 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.524 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.523 + + Performance and stability improvements on all platforms. + + +2017-07-18: Version 6.1.522 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.521 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.520 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.519 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.518 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.517 + + Performance and stability improvements on all platforms. 
+ + +2017-07-17: Version 6.1.516 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.515 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.514 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.513 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.512 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.511 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.510 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.509 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.508 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.507 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.506 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.505 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.504 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.503 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.502 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.501 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.500 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.499 + + Performance and stability improvements on all platforms. + + +2017-07-17: Version 6.1.498 + + Performance and stability improvements on all platforms. + + +2017-07-16: Version 6.1.497 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.496 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.495 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.494 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.493 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.492 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.491 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.490 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.489 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.488 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.487 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.486 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.485 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.484 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.483 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.482 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.481 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.480 + + Performance and stability improvements on all platforms. 
+ + +2017-07-14: Version 6.1.479 + + Performance and stability improvements on all platforms. + + +2017-07-14: Version 6.1.478 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.477 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.476 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.475 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.474 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.473 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.472 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.471 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.470 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.469 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.468 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.467 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.466 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.465 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.464 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.463 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.462 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.461 + + Performance and stability improvements on all platforms. + + +2017-07-13: Version 6.1.460 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.459 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.458 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.457 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.456 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.455 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.454 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.453 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.452 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.451 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.450 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.449 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.448 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.447 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.446 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.445 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.444 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.443 + + Performance and stability improvements on all platforms. 
+ + +2017-07-12: Version 6.1.442 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.441 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.440 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.439 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.438 + + Performance and stability improvements on all platforms. + + +2017-07-12: Version 6.1.437 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.436 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.435 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.434 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.433 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.432 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.431 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.430 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.429 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.428 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.427 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.426 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.425 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.424 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.423 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.422 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.421 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.420 + + Performance and stability improvements on all platforms. + + +2017-07-11: Version 6.1.419 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.418 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.417 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.416 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.415 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.414 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.413 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.412 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.411 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.410 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.409 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.408 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.407 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.406 + + Performance and stability improvements on all platforms. 
+ + +2017-07-10: Version 6.1.405 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.404 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.403 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.402 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.401 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.400 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.399 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.398 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.397 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.396 + + Performance and stability improvements on all platforms. + + +2017-07-10: Version 6.1.395 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.394 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.393 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.392 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.391 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.390 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.389 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.388 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.387 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.386 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.385 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.384 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.383 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.382 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.381 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.380 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.379 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.378 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.377 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.376 + + Performance and stability improvements on all platforms. + + +2017-07-06: Version 6.1.375 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.374 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.373 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.372 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.371 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.370 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.369 + + Performance and stability improvements on all platforms. 
+ + +2017-07-05: Version 6.1.368 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.367 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.366 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.365 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.364 + + Performance and stability improvements on all platforms. + + +2017-07-05: Version 6.1.363 + + Performance and stability improvements on all platforms. + + +2017-07-04: Version 6.1.362 + + Performance and stability improvements on all platforms. + + +2017-07-04: Version 6.1.361 + + Performance and stability improvements on all platforms. + + +2017-07-04: Version 6.1.360 + + Performance and stability improvements on all platforms. + + +2017-07-04: Version 6.1.359 + + Performance and stability improvements on all platforms. + + +2017-07-04: Version 6.1.358 + + Performance and stability improvements on all platforms. + + +2017-07-04: Version 6.1.357 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.356 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.355 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.354 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.353 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.352 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.351 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.350 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.349 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.348 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.347 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.346 + + Performance and stability improvements on all platforms. + + +2017-07-03: Version 6.1.345 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.344 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.343 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.342 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.341 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.340 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.339 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.338 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.337 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.336 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.335 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.334 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.333 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.332 + + Performance and stability improvements on all platforms. 
+ + +2017-06-30: Version 6.1.331 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.330 + + Performance and stability improvements on all platforms. + + +2017-06-30: Version 6.1.329 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.328 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.327 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.326 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.325 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.324 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.323 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.322 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.321 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.320 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.319 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.318 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.317 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.316 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.315 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.314 + + Performance and stability improvements on all platforms. + + +2017-06-29: Version 6.1.313 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.312 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.311 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.310 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.309 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.308 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.307 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.306 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.305 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.304 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.303 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.302 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.301 + + Performance and stability improvements on all platforms. + + +2017-06-28: Version 6.1.300 + + Performance and stability improvements on all platforms. + + +2017-06-27: Version 6.1.299 + + Performance and stability improvements on all platforms. + + +2017-06-27: Version 6.1.298 + + Performance and stability improvements on all platforms. + + +2017-06-27: Version 6.1.297 + + Performance and stability improvements on all platforms. + + +2017-06-27: Version 6.1.296 + + Performance and stability improvements on all platforms. + + +2017-06-27: Version 6.1.295 + + Performance and stability improvements on all platforms. 
+ + +2017-06-27: Version 6.1.294 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.293 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.292 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.291 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.290 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.289 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.288 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.287 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.286 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.285 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.284 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.283 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.282 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.281 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.280 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.279 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.278 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.277 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.276 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.275 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.274 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.273 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.272 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.271 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.270 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.269 + + Performance and stability improvements on all platforms. + + +2017-06-26: Version 6.1.268 + + Performance and stability improvements on all platforms. + + +2017-06-24: Version 6.1.267 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.266 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.265 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.264 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.263 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.262 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.261 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.260 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.259 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.258 + + Performance and stability improvements on all platforms. 
+ + +2017-06-23: Version 6.1.257 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.256 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.255 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.254 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.253 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.252 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.251 + + Performance and stability improvements on all platforms. + + +2017-06-23: Version 6.1.250 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.249 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.248 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.247 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.246 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.245 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.244 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.243 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.242 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.241 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.240 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.239 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.238 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.237 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.236 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.235 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.234 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.233 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.232 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.231 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.230 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.229 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.228 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.227 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.226 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.225 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.224 + + Performance and stability improvements on all platforms. + + +2017-06-22: Version 6.1.223 + + Performance and stability improvements on all platforms. + + +2017-06-21: Version 6.1.222 + + Performance and stability improvements on all platforms. + + +2017-06-21: Version 6.1.221 + + Performance and stability improvements on all platforms. 
+ + +2017-06-21: Version 6.1.220 + + Performance and stability improvements on all platforms. + + +2017-06-21: Version 6.1.219 + + Performance and stability improvements on all platforms. + + +2017-06-21: Version 6.1.218 + + Performance and stability improvements on all platforms. + + +2017-06-21: Version 6.1.217 + + Performance and stability improvements on all platforms. + + +2017-06-21: Version 6.1.216 + + Performance and stability improvements on all platforms. + + +2017-06-21: Version 6.1.215 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.214 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.213 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.212 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.211 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.210 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.209 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.208 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.207 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.206 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.205 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.204 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.203 + + Performance and stability improvements on all platforms. + + +2017-06-20: Version 6.1.202 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.201 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.200 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.199 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.198 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.197 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.196 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.195 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.194 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.193 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.192 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.191 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.190 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.189 + + Performance and stability improvements on all platforms. + + +2017-06-16: Version 6.1.188 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.187 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.186 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.185 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.184 + + Performance and stability improvements on all platforms. 
+ + +2017-06-15: Version 6.1.183 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.182 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.181 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.180 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.179 + + Performance and stability improvements on all platforms. + + +2017-06-15: Version 6.1.178 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.177 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.176 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.175 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.174 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.173 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.172 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.171 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.170 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.169 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.168 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.167 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.166 + + Performance and stability improvements on all platforms. + + +2017-06-14: Version 6.1.165 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.164 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.163 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.162 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.161 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.160 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.159 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.158 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.157 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.156 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.155 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.154 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.153 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.152 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.151 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.150 + + Performance and stability improvements on all platforms. + + +2017-06-13: Version 6.1.149 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.148 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.147 + + Performance and stability improvements on all platforms. 
+ + +2017-06-12: Version 6.1.146 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.145 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.144 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.143 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.142 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.141 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.140 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.139 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.138 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.137 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.136 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.135 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.134 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.133 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.132 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.131 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.130 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.129 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.128 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.127 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.126 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.125 + + Performance and stability improvements on all platforms. + + +2017-06-12: Version 6.1.124 + + Performance and stability improvements on all platforms. + + +2017-06-11: Version 6.1.123 + + Performance and stability improvements on all platforms. + + +2017-06-11: Version 6.1.122 + + Performance and stability improvements on all platforms. + + +2017-06-11: Version 6.1.121 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.120 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.119 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.118 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.117 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.116 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.115 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.114 + + Performance and stability improvements on all platforms. + + +2017-06-09: Version 6.1.113 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.112 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.111 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.110 + + Performance and stability improvements on all platforms. 
+ + +2017-06-08: Version 6.1.109 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.108 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.107 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.106 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.105 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.104 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.103 + + Performance and stability improvements on all platforms. + + +2017-06-08: Version 6.1.102 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.101 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.100 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.99 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.98 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.97 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.96 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.95 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.94 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.93 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.92 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.91 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.90 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.89 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.88 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.87 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.86 + + Performance and stability improvements on all platforms. + + +2017-06-07: Version 6.1.85 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.84 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.83 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.82 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.81 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.80 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.79 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.78 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.77 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.76 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.75 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.74 + + Performance and stability improvements on all platforms. + + +2017-06-06: Version 6.1.73 + + Performance and stability improvements on all platforms. + + +2017-06-05: Version 6.1.72 + + Performance and stability improvements on all platforms. 
+ + +2017-06-05: Version 6.1.71 + + Performance and stability improvements on all platforms. + + +2017-06-05: Version 6.1.70 + + Performance and stability improvements on all platforms. + + +2017-06-05: Version 6.1.69 + + Performance and stability improvements on all platforms. + + +2017-06-05: Version 6.1.68 + + Performance and stability improvements on all platforms. + + +2017-06-05: Version 6.1.67 + + Performance and stability improvements on all platforms. + + +2017-06-05: Version 6.1.66 + + Performance and stability improvements on all platforms. + + +2017-06-04: Version 6.1.65 + + Performance and stability improvements on all platforms. + + +2017-06-03: Version 6.1.64 + + Performance and stability improvements on all platforms. + + +2017-06-02: Version 6.1.63 + + Performance and stability improvements on all platforms. + + +2017-06-02: Version 6.1.62 + + Performance and stability improvements on all platforms. + + +2017-06-02: Version 6.1.61 + + Performance and stability improvements on all platforms. + + +2017-06-02: Version 6.1.60 + + Performance and stability improvements on all platforms. + + +2017-06-02: Version 6.1.59 + + Performance and stability improvements on all platforms. + + +2017-06-02: Version 6.1.58 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.57 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.56 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.55 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.54 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.53 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.52 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.51 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.50 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.49 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.48 + + Performance and stability improvements on all platforms. + + +2017-06-01: Version 6.1.47 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.46 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.45 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.44 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.43 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.42 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.41 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.40 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.39 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.38 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.37 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.36 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.35 + + Performance and stability improvements on all platforms. + + +2017-05-31: Version 6.1.34 + + Performance and stability improvements on all platforms. 
+ + +2017-05-30: Version 6.1.33 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.32 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.31 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.30 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.29 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.28 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.27 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.26 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.25 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.24 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.23 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.22 + + Performance and stability improvements on all platforms. + + +2017-05-30: Version 6.1.21 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.20 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.19 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.18 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.17 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.16 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.15 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.14 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.13 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.12 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.11 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.10 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.9 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.8 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.7 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.6 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.5 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.4 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.3 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.2 + + Performance and stability improvements on all platforms. + + +2017-05-29: Version 6.1.1 + + Performance and stability improvements on all platforms. + + +2017-05-24: Version 6.0.318 + + Performance and stability improvements on all platforms. + + +2017-05-24: Version 6.0.317 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.316 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.315 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.314 + + Performance and stability improvements on all platforms. 
+ + +2017-05-23: Version 6.0.313 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.312 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.311 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.310 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.309 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.308 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.307 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.306 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.305 + + Performance and stability improvements on all platforms. + + +2017-05-23: Version 6.0.304 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.303 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.302 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.301 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.300 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.299 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.298 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.297 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.296 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.295 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.294 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.293 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.292 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.291 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.290 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.289 + + Performance and stability improvements on all platforms. + + +2017-05-22: Version 6.0.288 + + Performance and stability improvements on all platforms. + + +2017-05-21: Version 6.0.287 + + Performance and stability improvements on all platforms. + + 2017-05-20: Version 6.0.286 Performance and stability improvements on all platforms. 
diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 1a55e663c6b17a..7752da2f40f817 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -8,23 +8,23 @@ vars = { deps = { "v8/build": - Var("chromium_url") + "/chromium/src/build.git" + "@" + "1caf3a69f3b0379c9fef2493aa1b3cda96e17d7b", + Var("chromium_url") + "/chromium/src/build.git" + "@" + "1808a907ce42f13b224c263e9843d718fc6d9c39", "v8/tools/gyp": Var("chromium_url") + "/external/gyp.git" + "@" + "eb296f67da078ec01f5e3a9ea9cdc6d26d680161", "v8/third_party/icu": - Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c844075aa0f1758d04f9192825f1b1e7e607992e", + Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "dfa798fe694702b43a3debc3290761f22b1acaf8", "v8/third_party/instrumented_libraries": Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "644afd349826cb68204226a16c38bde13abe9c3c", "v8/buildtools": - Var("chromium_url") + "/chromium/buildtools.git" + "@" + "98f00fa10dbad2cdbb2e297a66c3d6d5bc3994f3", + Var("chromium_url") + "/chromium/buildtools.git" + "@" + "5ad14542a6a74dd914f067b948c5d3e8d170396b", "v8/base/trace_event/common": - Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "39a3450531fc73432e963db8668695d2e8f13053", + Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "65d1d42a5df6c0a563a6fdfa58a135679185e5d9", "v8/third_party/jinja2": Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "d34383206fa42d52faa10bb9931d6d538f3a57e0", "v8/third_party/markupsafe": Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "8f45f5cfa0009d2a70589bcda0349b8cb2b72783", "v8/tools/swarming_client": - Var('chromium_url') + '/external/swarming.client.git' + '@' + "a941a089ff1000403078b74cb628eb430f07d271", + Var('chromium_url') + '/external/swarming.client.git' + '@' + "a56c2b39ca23bdf41458421a7f825ddbf3f43f28", "v8/testing/gtest": Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87", "v8/testing/gmock": @@ -34,21 +34,21 @@ deps = { "v8/test/mozilla/data": Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be", "v8/test/test262/data": - Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "230f9fc5688ce76bfaa99aba5f680a159eaac9e2", + Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "1b911a8f8abf4cb63882cfbe72dcd4c82bb8ad91", "v8/test/test262/harness": Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd", "v8/tools/clang": - Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "05f306039aa5029fa88768690e5c512097419f9d", + Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "844603c1fcd47f578931b3ccd583e19f816a3842", "v8/test/wasm-js": - Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "07fd6430f879d36928d179a62d9bdeed82286065", + Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "aadd3a340c78e53078a7bb6c17cc30f105c2960c", } deps_os = { "android": { "v8/third_party/android_tools": - Var("chromium_url") + "/android_tools.git" + "@" + "cb6bc21107001e2f2eeee2707b482b2b755baf51", + Var("chromium_url") + "/android_tools.git" + "@" + "e9d4018e149d50172ed462a7c21137aa915940ec", "v8/third_party/catapult": - Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "08a6e0ac161db7309d8f9cad0ccd38e0b1fd41e0", + 
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "44b022b2a09508ec025ae76a26308e89deb2cf69", }, } @@ -262,13 +262,6 @@ hooks = [ 'v8/third_party/binutils/download.py', ], }, - { - # Pull gold plugin if needed or requested via GYP_DEFINES. - # Note: This must run before the clang update. - 'name': 'gold_plugin', - 'pattern': '.', - 'action': ['python', 'v8/gypfiles/download_gold_plugin.py'], - }, { # Pull clang if needed or requested via GYP_DEFINES. # Note: On Win, this should run after win_toolchain, as it may use it. diff --git a/deps/v8/Makefile b/deps/v8/Makefile index ed5b3a7fabc325..b381918355602d 100644 --- a/deps/v8/Makefile +++ b/deps/v8/Makefile @@ -255,14 +255,13 @@ endif # Architectures and modes to be compiled. Consider these to be internal # variables, don't override them (use the targets instead). -ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 s390 \ - s390x -ARCHES32 = ia32 arm mips mipsel x87 ppc s390 +ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el ppc ppc64 s390 s390x +ARCHES32 = ia32 arm mips mipsel ppc s390 DEFAULT_ARCHES = ia32 x64 arm MODES = release debug optdebug DEFAULT_MODES = release debug ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \ - android_mipsel android_x87 + android_mipsel # List of files that trigger Makefile regeneration: GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \ @@ -272,9 +271,7 @@ GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \ test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \ test/unittests/unittests.gyp src/v8.gyp \ tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \ - buildtools/third_party/libc++abi/libc++abi.gyp \ - buildtools/third_party/libc++/libc++.gyp samples/samples.gyp \ - src/third_party/vtune/v8vtune.gyp src/d8.gyp + samples/samples.gyp src/third_party/vtune/v8vtune.gyp src/d8.gyp # If vtunejit=on, the v8vtune.gyp will be appended. ifeq ($(vtunejit), on) diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index 4a2dcdf74cddb6..dd96fa6b5fe586 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -35,3 +35,6 @@ ulan@chromium.org verwaest@chromium.org vogelheim@chromium.org yangguo@chromium.org + +# TEAM: v8-dev@googlegroups.com +# COMPONENT: Blink>JavaScript diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index 7d7faec69614be..2d79ae682ce16f 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -31,6 +31,7 @@ for more details about the presubmit API built into gcl. """ +import json import re import sys @@ -277,6 +278,7 @@ def _CommonChecks(input_api, output_api): results.extend( _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api)) results.extend(_CheckMissingFiles(input_api, output_api)) + results.extend(_CheckJSONFiles(input_api, output_api)) return results @@ -316,6 +318,25 @@ def _CheckCommitMessageBugEntry(input_api, output_api): return [output_api.PresubmitError(r) for r in results] +def _CheckJSONFiles(input_api, output_api): + def FilterFile(affected_file): + return input_api.FilterSourceFile( + affected_file, + white_list=(r'.+\.json',)) + + results = [] + for f in input_api.AffectedFiles( + file_filter=FilterFile, include_deletes=False): + with open(f.LocalPath()) as j: + try: + json.load(j) + except Exception as e: + results.append( + 'JSON validation failed for %s. 
Error:\n%s' % (f.LocalPath(), e)) + + return [output_api.PresubmitError(r) for r in results] + + def CheckChangeOnUpload(input_api, output_api): results = [] results.extend(_CommonChecks(input_api, output_api)) @@ -332,3 +353,19 @@ def CheckChangeOnCommit(input_api, output_api): input_api, output_api, json_url='http://v8-status.appspot.com/current?format=json')) return results + +def PostUploadHook(cl, change, output_api): + """git cl upload will call this hook after the issue is created/modified. + + This hook adds a noi18n bot if the patch affects Intl. + """ + def affects_intl(f): + return 'intl' in f.LocalPath() or 'test262' in f.LocalPath() + if not change.AffectedFiles(file_filter=affects_intl): + return [] + return output_api.EnsureCQIncludeTrybotsAreAdded( + cl, + [ + 'master.tryserver.v8:v8_linux_noi18n_rel_ng' + ], + 'Automatically added noi18n trybots to run tests on CQ.') diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index 76d3039250ed89..bdc450d56824cd 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -359,6 +359,12 @@ TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val) +#define TRACE_EVENT_MARK_WITH_TIMESTAMP2( \ + category_group, name, timestamp, arg1_name, arg1_val, arg2_name, arg2_val) \ + INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \ + TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \ + TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val) + #define TRACE_EVENT_COPY_MARK(category_group, name) \ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \ TRACE_EVENT_FLAG_COPY) diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni index 8dcaf3a29d78f1..b656fce61a1aa2 100644 --- a/deps/v8/build_overrides/build.gni +++ b/deps/v8/build_overrides/build.gni @@ -2,9 +2,6 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -mac_sdk_min_build_override = "10.10" -mac_deployment_target_build_override = "10.7" - # Variable that can be used to support multiple build scenarios, like having # Chromium specific targets in a client project's GN file etc. build_with_chromium = false diff --git a/deps/v8/codereview.settings b/deps/v8/codereview.settings index bff4e38ba51885..b7a5a972b018bb 100644 --- a/deps/v8/codereview.settings +++ b/deps/v8/codereview.settings @@ -4,3 +4,4 @@ CODE_REVIEW_SERVER: https://codereview.chromium.org CC_LIST: v8-reviews@googlegroups.com VIEW_VC: https://chromium.googlesource.com/v8/v8/+/ STATUS: http://v8-status.appspot.com/status +RUN_POST_UPLOAD_HOOK: True diff --git a/deps/v8/gni/isolate.gni b/deps/v8/gni/isolate.gni index 90bc8c5d7fa4b9..82dc8cf3fbc005 100644 --- a/deps/v8/gni/isolate.gni +++ b/deps/v8/gni/isolate.gni @@ -56,16 +56,16 @@ template("v8_isolate_run") { } # Translate gn to gyp variables. 
+ if (v8_code_coverage) { + coverage = "1" + } else { + coverage = "0" + } if (is_asan) { asan = "1" } else { asan = "0" } - if (is_lsan) { - lsan = "1" - } else { - lsan = "0" - } if (is_msan) { msan = "1" } else { @@ -158,15 +158,13 @@ template("v8_isolate_run") { "--config-variable", "is_gn=1", "--config-variable", - "lsan=$lsan", - "--config-variable", "msan=$msan", "--config-variable", "tsan=$tsan", "--config-variable", - "coverage=0", + "coverage=$coverage", "--config-variable", - "sanitizer_coverage=0", + "sanitizer_coverage=$sanitizer_coverage_flags", "--config-variable", "component=$component", "--config-variable", diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 33f85f989b73e6..9a2bb3dff4ffa6 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -4,8 +4,13 @@ import("//build/config/sanitizers/sanitizers.gni") import("//build/config/v8_target_cpu.gni") +import("//build/split_static_library.gni") declare_args() { + # Set flags for tracking code coverage. Uses gcov with gcc and sanitizer + # coverage with clang. + v8_code_coverage = false + # Includes files needed for correctness fuzzing. v8_correctness_fuzzer = false @@ -84,6 +89,13 @@ if (is_debug && !v8_optimized_debug) { } } +if (v8_code_coverage && !is_clang) { + v8_add_configs += [ + v8_path_prefix + ":v8_gcov_coverage_cflags", + v8_path_prefix + ":v8_gcov_coverage_ldflags", + ] +} + if (is_posix && v8_enable_backtrace) { v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ] v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ] @@ -91,20 +103,19 @@ if (is_posix && v8_enable_backtrace) { # All templates should be kept in sync. template("v8_source_set") { - if (defined(v8_static_library) && v8_static_library) { - static_library(target_name) { - forward_variables_from(invoker, "*", [ "configs" ]) - configs += invoker.configs - configs -= v8_remove_configs - configs += v8_add_configs - } + if (defined(invoker.split_count) && invoker.split_count > 1 && + defined(v8_static_library) && v8_static_library && is_win) { + link_target_type = "split_static_library" + } else if (defined(v8_static_library) && v8_static_library) { + link_target_type = "static_library" } else { - source_set(target_name) { - forward_variables_from(invoker, "*", [ "configs" ]) - configs += invoker.configs - configs -= v8_remove_configs - configs += v8_add_configs - } + link_target_type = "source_set" + } + target(link_target_type, target_name) { + forward_variables_from(invoker, "*", [ "configs" ]) + configs += invoker.configs + configs -= v8_remove_configs + configs += v8_add_configs } } @@ -135,6 +146,19 @@ template("v8_executable") { # For enabling ASLR. ldflags = [ "-pie" ] } + if (defined(testonly) && testonly && v8_code_coverage) { + # Only add code coverage cflags for non-test files for performance + # reasons. + if (is_clang) { + configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ] + configs += [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ] + } else { + configs -= [ v8_path_prefix + ":v8_gcov_coverage_cflags" ] + } + } + deps += [ + v8_path_prefix + ":v8_dump_build_config", + ] } } diff --git a/deps/v8/gypfiles/download_gold_plugin.py b/deps/v8/gypfiles/download_gold_plugin.py deleted file mode 100755 index b8131fd449d7e2..00000000000000 --- a/deps/v8/gypfiles/download_gold_plugin.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015 the V8 project authors. All rights reserved. -# Copyright 2015 The Chromium Authors. All rights reserved. 
-# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Script to download LLVM gold plugin from google storage.""" - -import json -import os -import re -import platform -import shutil -import subprocess -import sys -import zipfile - -# Bail out on windows and cygwin. -if "win" in platform.system().lower(): - # Python 2.7.6 hangs at the second path.insert command on windows. Works - # with python 2.7.8. - print "Gold plugin download not supported on windows." - sys.exit(0) - -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir)) -sys.path.insert(0, os.path.join(CHROME_SRC, 'tools')) - -import find_depot_tools - -DEPOT_PATH = find_depot_tools.add_depot_tools_to_path() -GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py') - -LLVM_BUILD_PATH = os.path.join(CHROME_SRC, 'third_party', 'llvm-build', - 'Release+Asserts') -CLANG_UPDATE_PY = os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts', - 'update.py') -CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip() - -CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64' - -GOLD_PLUGIN_PATH = os.path.join(LLVM_BUILD_PATH, 'lib', 'LLVMgold.so') - -sys.path.insert(0, os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts')) - -import update - -def main(): - if not re.search(r'cfi_vptr=1', os.environ.get('GYP_DEFINES', '')): - # Bailout if this is not a cfi build. - print 'Skipping gold plugin download for non-cfi build.' - return 0 - if (os.path.exists(GOLD_PLUGIN_PATH) and - update.ReadStampFile().strip() == update.PACKAGE_VERSION): - # Bailout if clang is up-to-date. This requires the script to be run before - # the clang update step! I.e. afterwards clang would always be up-to-date. - print 'Skipping gold plugin download. File present and clang up to date.' - return 0 - - # Make sure this works on empty checkouts (i.e. clang not downloaded yet). - if not os.path.exists(LLVM_BUILD_PATH): - os.makedirs(LLVM_BUILD_PATH) - - targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION - remote_path = '%s/%s' % (CLANG_BUCKET, targz_name) - - os.chdir(LLVM_BUILD_PATH) - - # TODO(pcc): Fix gsutil.py cp url file < /dev/null 2>&0 - # (currently aborts with exit code 1, - # https://github.com/GoogleCloudPlatform/gsutil/issues/289) or change the - # stdin->stderr redirect in update.py to do something else (crbug.com/494442). - subprocess.check_call(['python', GSUTIL_PATH, - 'cp', remote_path, targz_name], - stderr=open('/dev/null', 'w')) - subprocess.check_call(['tar', 'xzf', targz_name]) - os.remove(targz_name) - return 0 - -if __name__ == '__main__': - sys.exit(main()) diff --git a/deps/v8/gypfiles/features.gypi b/deps/v8/gypfiles/features.gypi index b38735e162ec8e..0eeec2466ebc51 100644 --- a/deps/v8/gypfiles/features.gypi +++ b/deps/v8/gypfiles/features.gypi @@ -73,6 +73,9 @@ # Enable/disable JavaScript API accessors. 'v8_js_accessors%': 0, + + # Temporary flag to allow embedders to update their microtasks scopes. 
+ 'v8_check_microtasks_scopes_consistency%': 'false', }, 'target_defaults': { 'conditions': [ @@ -118,12 +121,15 @@ ['dcheck_always_on!=0', { 'defines': ['DEBUG',], }], + ['v8_check_microtasks_scopes_consistency=="true"', { + 'defines': ['V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY',], + }], ], # conditions 'configurations': { 'DebugBaseCommon': { 'abstract': 1, 'variables': { - 'v8_enable_handle_zapping%': 0, + 'v8_enable_handle_zapping%': 1, }, 'conditions': [ ['v8_enable_handle_zapping==1', { @@ -133,7 +139,7 @@ }, # Debug 'Release': { 'variables': { - 'v8_enable_handle_zapping%': 1, + 'v8_enable_handle_zapping%': 0, }, 'conditions': [ ['v8_enable_handle_zapping==1', { diff --git a/deps/v8/gypfiles/isolate.gypi b/deps/v8/gypfiles/isolate.gypi index af3b3ae5c8926e..11b05705307625 100644 --- a/deps/v8/gypfiles/isolate.gypi +++ b/deps/v8/gypfiles/isolate.gypi @@ -75,7 +75,6 @@ '--config-variable', 'has_valgrind=<(has_valgrind)', '--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)', '--config-variable', 'is_gn=0', - '--config-variable', 'lsan=<(lsan)', '--config-variable', 'msan=<(msan)', '--config-variable', 'tsan=<(tsan)', '--config-variable', 'coverage=<(coverage)', diff --git a/deps/v8/gypfiles/standalone.gypi b/deps/v8/gypfiles/standalone.gypi index 4c805bf6432ff3..a30373be6129e1 100644 --- a/deps/v8/gypfiles/standalone.gypi +++ b/deps/v8/gypfiles/standalone.gypi @@ -43,6 +43,7 @@ 'v8_enable_i18n_support%': 1, 'v8_deprecation_warnings': 1, 'v8_imminent_deprecation_warnings': 1, + 'v8_check_microtasks_scopes_consistency': 'true', 'msvs_multi_core_compile%': '1', 'mac_deployment_target%': '10.7', 'release_extra_cflags%': '', @@ -135,8 +136,6 @@ 'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts', 'make_clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts', - 'use_lto%': 0, - # Control Flow Integrity for virtual calls and casts. # See http://clang.llvm.org/docs/ControlFlowIntegrity.html 'cfi_vptr%': 0, @@ -201,7 +200,6 @@ 'use_prebuilt_instrumented_libraries%': '<(use_prebuilt_instrumented_libraries)', 'use_custom_libcxx%': '<(use_custom_libcxx)', 'linux_use_bundled_gold%': '<(linux_use_bundled_gold)', - 'use_lto%': '<(use_lto)', 'cfi_vptr%': '<(cfi_vptr)', 'cfi_diag%': '<(cfi_diag)', 'cfi_blacklist%': '<(cfi_blacklist)', @@ -264,14 +262,14 @@ # goma doesn't support PDB yet. 'fastbuild%': 1, }], - ['((v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \ + ['((v8_target_arch=="ia32" or v8_target_arch=="x64") and \ (OS=="linux" or OS=="mac")) or (v8_target_arch=="ppc64" and OS=="linux")', { 'v8_enable_gdbjit%': 1, }, { 'v8_enable_gdbjit%': 0, }], ['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \ - (v8_target_arch!="x87" and v8_target_arch!="x32")', { + v8_target_arch!="x32"', { 'clang%': 1, }, { 'clang%': 0, @@ -292,9 +290,6 @@ # the C++ standard library is used. 'use_custom_libcxx%': 1, }], - ['cfi_vptr==1', { - 'use_lto%': 1, - }], ['OS=="android"', { # Location of Android NDK. 'variables': { @@ -678,15 +673,11 @@ }], ], }], - ['linux_use_bundled_gold==1 and not (clang==0 and use_lto==1)', { + ['linux_use_bundled_gold==1', { # Put our binutils, which contains gold in the search path. We pass # the path to gold to the compiler. gyp leaves unspecified what the # cwd is when running the compiler, so the normal gyp path-munging # fails us. This hack gets the right path. 
- # - # Disabled when using GCC LTO because GCC also uses the -B search - # path at link time to find "as", and our bundled "as" can only - # target x86. 'ldflags': [ # Note, Chromium allows ia32 host arch as well, we limit this to # x64 in v8. @@ -696,12 +687,15 @@ ['sysroot!="" and clang==1', { 'target_conditions': [ ['_toolset=="target"', { + 'variables': { + 'ld_paths': ['JavaScript>API diff --git a/deps/v8/include/PRESUBMIT.py b/deps/v8/include/PRESUBMIT.py new file mode 100644 index 00000000000000..386f2e5006186f --- /dev/null +++ b/deps/v8/include/PRESUBMIT.py @@ -0,0 +1,29 @@ +# Copyright 2017 the V8 project authors. All rights reserved.') +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Presubmit script for //v8/include + +See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts +for more details about the presubmit API built into depot_tools. +""" + +import os + + +def PostUploadHook(cl, change, output_api): + """git cl upload will call this hook after the issue is created/modified. + + This hook adds extra try bots to the CL description in order to run layout + tests in addition to CQ try bots. + """ + def header_filter(f): + return '.h' in os.path.split(f.LocalPath())[1] + if not change.AffectedFiles(file_filter=header_filter): + return [] + return output_api.EnsureCQIncludeTrybotsAreAdded( + cl, + [ + 'master.tryserver.chromium.linux:linux_chromium_rel_ng' + ], + 'Automatically added layout test trybots to run tests on CQ.') diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index db9369f649a9e5..2e88feb401e7e1 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 6 -#define V8_MINOR_VERSION 0 -#define V8_BUILD_NUMBER 287 -#define V8_PATCH_LEVEL 53 +#define V8_MINOR_VERSION 1 +#define V8_BUILD_NUMBER 534 +#define V8_PATCH_LEVEL 36 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 1b3117ef34c67d..e37c549cb46b81 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -869,8 +869,6 @@ class V8_EXPORT HandleScope { HandleScope(const HandleScope&) = delete; void operator=(const HandleScope&) = delete; - void* operator new(size_t size); - void operator delete(void*, size_t); protected: V8_INLINE HandleScope() {} @@ -881,6 +879,13 @@ class V8_EXPORT HandleScope { internal::Object* value); private: + // Declaring operator new and delete as deleted is not spec compliant. + // Therefore declare them private instead to disable dynamic alloc + void* operator new(size_t size); + void* operator new[](size_t size); + void operator delete(void*, size_t); + void operator delete[](void*, size_t); + // Uses heap_object to obtain the current Isolate. static internal::Object** CreateHandle(internal::HeapObject* heap_object, internal::Object* value); @@ -921,10 +926,15 @@ class V8_EXPORT EscapableHandleScope : public HandleScope { EscapableHandleScope(const EscapableHandleScope&) = delete; void operator=(const EscapableHandleScope&) = delete; + + private: + // Declaring operator new and delete as deleted is not spec compliant. 
+ // Therefore declare them private instead to disable dynamic alloc void* operator new(size_t size); + void* operator new[](size_t size); void operator delete(void*, size_t); + void operator delete[](void*, size_t); - private: internal::Object** Escape(internal::Object** escape_value); internal::Object** escape_slot_; }; @@ -941,10 +951,15 @@ class V8_EXPORT SealHandleScope { SealHandleScope(const SealHandleScope&) = delete; void operator=(const SealHandleScope&) = delete; + + private: + // Declaring operator new and delete as deleted is not spec compliant. + // Therefore declare them private instead to disable dynamic alloc void* operator new(size_t size); + void* operator new[](size_t size); void operator delete(void*, size_t); + void operator delete[](void*, size_t); - private: internal::Isolate* const isolate_; internal::Object** prev_limit_; int prev_sealed_level_; @@ -1016,9 +1031,6 @@ class ScriptOrigin { V8_INLINE Local ResourceName() const; V8_INLINE Local ResourceLineOffset() const; V8_INLINE Local ResourceColumnOffset() const; - /** - * Returns true for embedder's debugger scripts - */ V8_INLINE Local ScriptID() const; V8_INLINE Local SourceMapUrl() const; V8_INLINE ScriptOriginOptions Options() const { return options_; } @@ -1032,7 +1044,6 @@ class ScriptOrigin { Local source_map_url_; }; - /** * A compiled JavaScript script, not yet tied to a Context. */ @@ -1064,6 +1075,22 @@ class V8_EXPORT UnboundScript { static const int kNoScriptId = 0; }; +/** + * A location in JavaScript source. + */ +class V8_EXPORT Location { + public: + int GetLineNumber() { return line_number_; } + int GetColumnNumber() { return column_number_; } + + Location(int line_number, int column_number) + : line_number_(line_number), column_number_(column_number) {} + + private: + int line_number_; + int column_number_; +}; + /** * This is an unfinished experimental feature, and is only exposed * here for internal testing purposes. DO NOT USE. @@ -1072,6 +1099,28 @@ class V8_EXPORT UnboundScript { */ class V8_EXPORT Module { public: + /** + * The different states a module can be in. + */ + enum Status { + kUninstantiated, + kInstantiating, + kInstantiated, + kEvaluating, + kEvaluated, + kErrored + }; + + /** + * Returns the module's current status. + */ + Status GetStatus() const; + + /** + * For a module in kErrored status, this returns the corresponding exception. + */ + Local GetException() const; + /** * Returns the number of modules requested by this module. */ @@ -1083,6 +1132,12 @@ class V8_EXPORT Module { */ Local GetModuleRequest(int i) const; + /** + * Returns the source location (line number and column number) of the ith + * module specifier's first occurrence in this module. + */ + Location GetModuleRequestLocation(int i) const; + /** * Returns the identity hash for this object. */ @@ -1095,40 +1150,29 @@ class V8_EXPORT Module { /** * ModuleDeclarationInstantiation * - * Returns false if an exception occurred during instantiation. (In the case - * where the callback throws an exception, that exception is propagated.) + * Returns an empty Maybe if an exception occurred during + * instantiation. (In the case where the callback throws an exception, that + * exception is propagated.) 
*/ - V8_WARN_UNUSED_RESULT bool Instantiate(Local context, - ResolveCallback callback); + V8_DEPRECATED("Use Maybe version", + bool Instantiate(Local context, + ResolveCallback callback)); + V8_WARN_UNUSED_RESULT Maybe InstantiateModule(Local context, + ResolveCallback callback); /** * ModuleEvaluation * * Returns the completion value. + * TODO(neis): Be more precise or say nothing. */ V8_WARN_UNUSED_RESULT MaybeLocal Evaluate(Local context); -}; - -/** - * This is an unfinished experimental feature, and is only exposed - * here for internal testing purposes. DO NOT USE. - * - * A compiled JavaScript module. - */ -class V8_EXPORT DynamicImportResult { - public: - /** - * Resolves the promise with the namespace object of the given - * module. - */ - V8_WARN_UNUSED_RESULT bool FinishDynamicImportSuccess(Local context, - Local module); /** - * Rejects the promise with the given exception. + * Returns the namespace object of this module. The module must have + * been successfully instantiated before and must not be errored. */ - V8_WARN_UNUSED_RESULT bool FinishDynamicImportFailure(Local context, - Local exception); + Local GetModuleNamespace(); }; /** @@ -3053,12 +3097,9 @@ class V8_EXPORT Object : public Value { // // Note also that this only works for named properties. V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty", - bool ForceSet(Local key, Local value, - PropertyAttribute attribs = None)); - V8_DEPRECATE_SOON("Use CreateDataProperty / DefineOwnProperty", - Maybe ForceSet(Local context, - Local key, Local value, - PropertyAttribute attribs = None)); + Maybe ForceSet(Local context, Local key, + Local value, + PropertyAttribute attribs = None)); V8_DEPRECATE_SOON("Use maybe version", Local Get(Local key)); V8_WARN_UNUSED_RESULT MaybeLocal Get(Local context, @@ -4251,7 +4292,18 @@ class V8_EXPORT ArrayBuffer : public Object { */ class V8_EXPORT Contents { // NOLINT public: - Contents() : data_(NULL), byte_length_(0) {} + Contents() + : data_(nullptr), + byte_length_(0), + allocation_base_(nullptr), + allocation_length_(0), + allocation_mode_(Allocator::AllocationMode::kNormal) {} + + void* AllocationBase() const { return allocation_base_; } + size_t AllocationLength() const { return allocation_length_; } + Allocator::AllocationMode AllocationMode() const { + return allocation_mode_; + } void* Data() const { return data_; } size_t ByteLength() const { return byte_length_; } @@ -4259,6 +4311,9 @@ class V8_EXPORT ArrayBuffer : public Object { private: void* data_; size_t byte_length_; + void* allocation_base_; + size_t allocation_length_; + Allocator::AllocationMode allocation_mode_; friend class ArrayBuffer; }; @@ -4607,7 +4662,18 @@ class V8_EXPORT SharedArrayBuffer : public Object { */ class V8_EXPORT Contents { // NOLINT public: - Contents() : data_(NULL), byte_length_(0) {} + Contents() + : data_(nullptr), + byte_length_(0), + allocation_base_(nullptr), + allocation_length_(0), + allocation_mode_(ArrayBuffer::Allocator::AllocationMode::kNormal) {} + + void* AllocationBase() const { return allocation_base_; } + size_t AllocationLength() const { return allocation_length_; } + ArrayBuffer::Allocator::AllocationMode AllocationMode() const { + return allocation_mode_; + } void* Data() const { return data_; } size_t ByteLength() const { return byte_length_; } @@ -4615,6 +4681,9 @@ class V8_EXPORT SharedArrayBuffer : public Object { private: void* data_; size_t byte_length_; + void* allocation_base_; + size_t allocation_length_; + ArrayBuffer::Allocator::AllocationMode 
allocation_mode_; friend class SharedArrayBuffer; }; @@ -4861,6 +4930,7 @@ class V8_EXPORT External : public Value { F(ArrayProto_forEach, array_for_each_iterator) \ F(ArrayProto_keys, array_keys_iterator) \ F(ArrayProto_values, array_values_iterator) \ + F(ErrorPrototype, initial_error_prototype) \ F(IteratorPrototype, initial_iterator_prototype) enum Intrinsic { @@ -5925,6 +5995,8 @@ V8_INLINE Local False(Isolate* isolate); * * The arguments for set_max_semi_space_size, set_max_old_space_size, * set_max_executable_size, set_code_range_size specify limits in MB. + * + * The argument for set_max_semi_space_size_in_kb is in KB. */ class V8_EXPORT ResourceConstraints { public: @@ -5942,10 +6014,28 @@ class V8_EXPORT ResourceConstraints { void ConfigureDefaults(uint64_t physical_memory, uint64_t virtual_memory_limit); - int max_semi_space_size() const { return max_semi_space_size_; } - void set_max_semi_space_size(int limit_in_mb) { - max_semi_space_size_ = limit_in_mb; + // Returns the max semi-space size in MB. + V8_DEPRECATE_SOON("Use max_semi_space_size_in_kb()", + int max_semi_space_size()) { + return static_cast(max_semi_space_size_in_kb_ / 1024); } + + // Sets the max semi-space size in MB. + V8_DEPRECATE_SOON("Use set_max_semi_space_size_in_kb(size_t limit_in_kb)", + void set_max_semi_space_size(int limit_in_mb)) { + max_semi_space_size_in_kb_ = limit_in_mb * 1024; + } + + // Returns the max semi-space size in KB. + size_t max_semi_space_size_in_kb() const { + return max_semi_space_size_in_kb_; + } + + // Sets the max semi-space size in KB. + void set_max_semi_space_size_in_kb(size_t limit_in_kb) { + max_semi_space_size_in_kb_ = limit_in_kb; + } + int max_old_space_size() const { return max_old_space_size_; } void set_max_old_space_size(int limit_in_mb) { max_old_space_size_ = limit_in_mb; @@ -5971,7 +6061,10 @@ class V8_EXPORT ResourceConstraints { } private: - int max_semi_space_size_; + // max_semi_space_size_ is in KB + size_t max_semi_space_size_in_kb_; + + // The remaining limits are in MB int max_old_space_size_; int max_executable_size_; uint32_t* stack_limit_; @@ -6059,21 +6152,23 @@ typedef void (*DeprecatedCallCompletedCallback)(); /** * HostImportDynamicallyCallback is called when we require the * embedder to load a module. This is used as part of the dynamic - * import syntax. The behavior of this callback is not specified in - * EcmaScript. + * import syntax. * * The referrer is the name of the file which calls the dynamic * import. The referrer can be used to resolve the module location. * * The specifier is the name of the module that should be imported. * - * The DynamicImportResult object is used to signal success or failure - * by calling it's respective methods. + * The embedder must compile, instantiate, evaluate the Module, and + * obtain it's namespace object. * + * The Promise returned from this function is forwarded to userland + * JavaScript. The embedder must resolve this promise with the module + * namespace object. In case of an exception, the embedder must reject + * this promise with the exception. */ -typedef void (*HostImportModuleDynamicallyCallback)( - Isolate* isolate, Local referrer, Local specifier, - Local result); +typedef MaybeLocal (*HostImportModuleDynamicallyCallback)( + Local context, Local referrer, Local specifier); /** * PromiseHook with type kInit is called when a new promise is @@ -6196,11 +6291,18 @@ typedef void (*FailedAccessCheckCallback)(Local target, * Callback to check if code generation from strings is allowed. 
See * Context::AllowCodeGenerationFromStrings. */ -typedef bool (*AllowCodeGenerationFromStringsCallback)(Local context); +typedef bool (*DeprecatedAllowCodeGenerationFromStringsCallback)( + Local context); +typedef bool (*AllowCodeGenerationFromStringsCallback)(Local context, + Local source); -// --- WASM compilation callbacks --- +// --- WebAssembly compilation callbacks --- typedef bool (*ExtensionCallback)(const FunctionCallbackInfo&); +// --- Callback for APIs defined on v8-supported objects, but implemented +// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming --- +typedef void (*ApiImplementationCallback)(const FunctionCallbackInfo&); + // --- Garbage Collection Callbacks --- /** @@ -6624,8 +6726,7 @@ class V8_EXPORT Isolate { add_histogram_sample_callback(nullptr), array_buffer_allocator(nullptr), external_references(nullptr), - allow_atomics_wait(true), - host_import_module_dynamically_callback_(nullptr) {} + allow_atomics_wait(true) {} /** * The optional entry_hook allows the host application to provide the @@ -6688,16 +6789,6 @@ class V8_EXPORT Isolate { * this isolate. This can also be configured via SetAllowAtomicsWait. */ bool allow_atomics_wait; - - /** - * This is an unfinished experimental feature, and is only exposed - * here for internal testing purposes. DO NOT USE. - * - * This specifies the callback called by the upcoming dynamic - * import() language feature to load modules. - */ - HostImportModuleDynamicallyCallback - host_import_module_dynamically_callback_; }; @@ -6836,6 +6927,7 @@ class V8_EXPORT Isolate { kAssigmentExpressionLHSIsCallInSloppy = 36, kAssigmentExpressionLHSIsCallInStrict = 37, kPromiseConstructorReturnedUndefined = 38, + kConstructorNonUndefinedPrimitiveReturn = 39, // If you add new values here, you'll also need to update Chromium's: // UseCounter.h, V8PerIsolateData.cpp, histograms.xml @@ -6888,6 +6980,16 @@ class V8_EXPORT Isolate { void SetAbortOnUncaughtExceptionCallback( AbortOnUncaughtExceptionCallback callback); + /** + * This is an unfinished experimental feature, and is only exposed + * here for internal testing purposes. DO NOT USE. + * + * This specifies the callback called by the upcoming dynamic + * import() language feature to load modules. + */ + void SetHostImportModuleDynamicallyCallback( + HostImportModuleDynamicallyCallback callback); + /** * Optional notification that the system is running low on memory. * V8 uses these notifications to guide heuristics. @@ -7085,6 +7187,12 @@ class V8_EXPORT Isolate { */ Local GetEnteredOrMicrotaskContext(); + /** + * Returns the Context that corresponds to the Incumbent realm in HTML spec. + * https://html.spec.whatwg.org/multipage/webappapis.html#incumbent + */ + Local GetIncumbentContext(); + /** * Schedules an exception to be thrown when returning to JavaScript. When an * exception has been scheduled it is illegal to invoke any JavaScript @@ -7137,6 +7245,17 @@ class V8_EXPORT Isolate { */ void RemoveGCEpilogueCallback(GCCallback callback); + typedef size_t (*GetExternallyAllocatedMemoryInBytesCallback)(); + + /** + * Set the callback that tells V8 how much memory is currently allocated + * externally of the V8 heap. Ideally this memory is somehow connected to V8 + * objects and may get freed-up when the corresponding V8 objects get + * collected by a V8 garbage collection. 
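The callback documented above lets the embedder report all of its externally held memory in one place. A small sketch of wiring it up; the g_external_bytes counter is an assumption for illustration, and only the Isolate calls come from this patch:

    static size_t g_external_bytes = 0;  // hypothetical embedder-side counter

    static size_t GetEmbedderExternalMemory() { return g_external_bytes; }

    void InstallExternalMemoryHook(v8::Isolate* isolate) {
      isolate->SetGetExternallyAllocatedMemoryInBytesCallback(
          GetEmbedderExternalMemory);
      // Per-allocation accounting still goes through
      // Isolate::AdjustAmountOfExternalAllocatedMemory(), which this patch
      // extends to run memory-pressure checks when the external delta since the
      // last mark-compact grows large.
    }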
+ */ + void SetGetExternallyAllocatedMemoryInBytesCallback( + GetExternallyAllocatedMemoryInBytesCallback callback); + /** * Forcefully terminate the current thread of JavaScript execution * in the given isolate. @@ -7452,14 +7571,18 @@ class V8_EXPORT Isolate { */ void SetAllowCodeGenerationFromStringsCallback( AllowCodeGenerationFromStringsCallback callback); + V8_DEPRECATED("Use callback with source parameter.", + void SetAllowCodeGenerationFromStringsCallback( + DeprecatedAllowCodeGenerationFromStringsCallback callback)); /** - * Embedder over{ride|load} injection points for wasm APIs. + * Embedder over{ride|load} injection points for wasm APIs. The expectation + * is that the embedder sets them at most once. */ void SetWasmModuleCallback(ExtensionCallback callback); - void SetWasmCompileCallback(ExtensionCallback callback); void SetWasmInstanceCallback(ExtensionCallback callback); - void SetWasmInstantiateCallback(ExtensionCallback callback); + + void SetWasmCompileStreamingCallback(ApiImplementationCallback callback); /** * Check if V8 is dead and therefore unusable. This is the case after @@ -7556,14 +7679,19 @@ class V8_EXPORT Isolate { ~Isolate() = delete; Isolate(const Isolate&) = delete; Isolate& operator=(const Isolate&) = delete; + // Deleting operator new and delete here is allowed as ctor and dtor is also + // deleted. void* operator new(size_t size) = delete; + void* operator new[](size_t size) = delete; void operator delete(void*, size_t) = delete; + void operator delete[](void*, size_t) = delete; private: template friend class PersistentValueMapBase; void ReportExternalAllocationLimitReached(); + void CheckMemoryPressure(); }; class V8_EXPORT StartupData { @@ -7611,8 +7739,9 @@ class V8_EXPORT V8 { * strings should be allowed. */ V8_INLINE static V8_DEPRECATED( - "Use isolate version", void SetAllowCodeGenerationFromStringsCallback( - AllowCodeGenerationFromStringsCallback that)); + "Use isolate version", + void SetAllowCodeGenerationFromStringsCallback( + DeprecatedAllowCodeGenerationFromStringsCallback that)); /** * Check if V8 is dead and therefore unusable. This is the case after @@ -7923,7 +8052,7 @@ class V8_EXPORT V8 { */ static void ShutdownPlatform(); -#if V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID +#if V8_OS_POSIX /** * Give the V8 signal handler a chance to handle a fault. * @@ -7944,7 +8073,7 @@ class V8_EXPORT V8 { * points to a ucontext_t structure. */ static bool TryHandleSignal(int signal_number, void* info, void* context); -#endif // V8_OS_LINUX +#endif // V8_OS_POSIX /** * Enable the default signal handler rather than using one provided by the @@ -8283,10 +8412,15 @@ class V8_EXPORT TryCatch { TryCatch(const TryCatch&) = delete; void operator=(const TryCatch&) = delete; + + private: + // Declaring operator new and delete as deleted is not spec compliant. + // Therefore declare them private instead to disable dynamic alloc void* operator new(size_t size); + void* operator new[](size_t size); void operator delete(void*, size_t); + void operator delete[](void*, size_t); - private: void ResetInternal(); internal::Isolate* isolate_; @@ -8539,6 +8673,27 @@ class V8_EXPORT Context { Local context_; }; + /** + * Stack-allocated class to support the backup incumbent settings object + * stack. + * https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack + */ + class BackupIncumbentScope { + public: + /** + * |backup_incumbent_context| is pushed onto the backup incumbent settings + * object stack. 
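Context::BackupIncumbentScope, declared just above, maintains the backup incumbent settings object stack from the HTML spec. A hedged sketch of how an embedder might use it together with the new Isolate::GetIncumbentContext() when calling back into script on behalf of a particular realm (the contexts and callback are illustrative):

    void CallOnBehalfOf(v8::Local<v8::Context> incumbent_context,
                        v8::Local<v8::Context> target_context,
                        v8::Local<v8::Function> callback) {
      // While this scope is alive and no other JavaScript is on the stack,
      // Isolate::GetIncumbentContext() falls back to incumbent_context.
      v8::Context::BackupIncumbentScope incumbent_scope(incumbent_context);
      v8::MaybeLocal<v8::Value> result =
          callback->Call(target_context, target_context->Global(), 0, nullptr);
      (void)result;  // error handling elided in this sketch
    }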
+ */ + explicit BackupIncumbentScope(Local backup_incumbent_context); + ~BackupIncumbentScope(); + + private: + friend class internal::Isolate; + + Local backup_incumbent_context_; + const BackupIncumbentScope* prev_ = nullptr; + }; + private: friend class Value; friend class Script; @@ -8786,6 +8941,8 @@ class Internals { static const int kExternalMemoryOffset = 4 * kApiPointerSize; static const int kExternalMemoryLimitOffset = kExternalMemoryOffset + kApiInt64Size; + static const int kExternalMemoryAtLastMarkCompactOffset = + kExternalMemoryLimitOffset + kApiInt64Size; static const int kIsolateRootsOffset = kExternalMemoryLimitOffset + kApiInt64Size + kApiInt64Size + kApiPointerSize + kApiPointerSize; @@ -10004,13 +10161,32 @@ uint32_t Isolate::GetNumberOfDataSlots() { int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( int64_t change_in_bytes) { typedef internal::Internals I; + const int64_t kMemoryReducerActivationLimit = 32 * 1024 * 1024; int64_t* external_memory = reinterpret_cast( reinterpret_cast(this) + I::kExternalMemoryOffset); - const int64_t external_memory_limit = *reinterpret_cast( + int64_t* external_memory_limit = reinterpret_cast( reinterpret_cast(this) + I::kExternalMemoryLimitOffset); + int64_t* external_memory_at_last_mc = + reinterpret_cast(reinterpret_cast(this) + + I::kExternalMemoryAtLastMarkCompactOffset); const int64_t amount = *external_memory + change_in_bytes; + *external_memory = amount; - if (change_in_bytes > 0 && amount > external_memory_limit) { + + int64_t allocation_diff_since_last_mc = + *external_memory_at_last_mc - *external_memory; + allocation_diff_since_last_mc = allocation_diff_since_last_mc < 0 + ? -allocation_diff_since_last_mc + : allocation_diff_since_last_mc; + if (allocation_diff_since_last_mc > kMemoryReducerActivationLimit) { + CheckMemoryPressure(); + } + + if (change_in_bytes < 0) { + *external_memory_limit += change_in_bytes; + } + + if (change_in_bytes > 0 && amount > *external_memory_limit) { ReportExternalAllocationLimitReached(); } return *external_memory; @@ -10040,11 +10216,11 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) { #endif } - void V8::SetAllowCodeGenerationFromStringsCallback( - AllowCodeGenerationFromStringsCallback callback) { + DeprecatedAllowCodeGenerationFromStringsCallback callback) { Isolate* isolate = Isolate::GetCurrent(); - isolate->SetAllowCodeGenerationFromStringsCallback(callback); + isolate->SetAllowCodeGenerationFromStringsCallback( + reinterpret_cast(callback)); } diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 964949c24c3293..69cf3b78c8b2c8 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -61,6 +61,7 @@ // V8_OS_CYGWIN - Cygwin // V8_OS_DRAGONFLYBSD - DragonFlyBSD // V8_OS_FREEBSD - FreeBSD +// V8_OS_FUCHSIA - Fuchsia // V8_OS_LINUX - Linux // V8_OS_MACOSX - Mac OS X // V8_OS_NETBSD - NetBSD @@ -95,6 +96,9 @@ # define V8_OS_BSD 1 # define V8_OS_FREEBSD 1 # define V8_OS_POSIX 1 +#elif defined(__Fuchsia__) +# define V8_OS_FUCHSIA 1 +# define V8_OS_POSIX 1 #elif defined(__DragonFly__) # define V8_OS_BSD 1 # define V8_OS_DRAGONFLYBSD 1 @@ -169,7 +173,6 @@ // supported // V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported // V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported -// V8_HAS_ATTRIBUTE_NORETURN - __attribute__((noreturn)) supported // V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported // V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported // 
V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result)) @@ -209,7 +212,6 @@ # define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline)) # define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated)) # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline)) -# define V8_HAS_ATTRIBUTE_NORETURN (__has_attribute(noreturn)) # define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused)) # define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility)) # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ @@ -250,7 +252,6 @@ # define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0)) # define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0)) # define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0)) -# define V8_HAS_ATTRIBUTE_NORETURN (V8_GNUC_PREREQ(2, 5, 0)) # define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0)) # define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0)) # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ @@ -311,18 +312,6 @@ #endif -// A macro used to tell the compiler that a particular function never returns. -// Use like: -// V8_NORETURN void MyAbort() { abort(); } -#if V8_HAS_ATTRIBUTE_NORETURN -# define V8_NORETURN __attribute__((noreturn)) -#elif V8_HAS_DECLSPEC_NORETURN -# define V8_NORETURN __declspec(noreturn) -#else -# define V8_NORETURN /* NOT SUPPORTED */ -#endif - - // A macro (V8_DEPRECATED) to mark classes or functions as deprecated. #if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE #define V8_DEPRECATED(message, declarator) \ diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 1a8247fc2b71af..adb436219a9463 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -9,6 +9,9 @@ # Bots are ordered by appearance on waterfall. 'masters': { 'developer_default': { + 'android.arm.debug': 'default_debug_android_arm', + 'android.arm.optdebug': 'default_optdebug_android_arm', + 'android.arm.release': 'default_release_android_arm', 'arm.debug': 'default_debug_arm', 'arm.optdebug': 'default_optdebug_arm', 'arm.release': 'default_release_arm', @@ -42,12 +45,12 @@ }, 'client.dart.fyi': { - 'v8-linux-release': 'gyp_release_x86_disassembler', - 'v8-win-release': 'gyp_release_x86_disassembler', - 'v8-mac-release': 'gyp_release_x86_disassembler', + 'v8-linux-release': 'gn_release_x86_disassembler', + 'v8-win-release': 'gn_release_x86_disassembler', + 'v8-mac-release': 'gn_release_x86_disassembler', }, 'client.dynamorio': { - 'linux-v8-dr': 'gyp_release_x64', + 'linux-v8-dr': 'gn_release_x64', }, 'client.v8': { # Linux. @@ -60,6 +63,7 @@ 'V8 Linux - verify csa': 'gn_release_x86_verify_csa', # Linux64. 'V8 Linux64 - builder': 'gn_release_x64_valgrind', + 'V8 Linux64 - concurrent marking - builder': 'gn_release_x64_concurrent_marking', 'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind', 'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom', 'V8 Linux64 - internal snapshot': 'gn_release_x64_internal', @@ -87,8 +91,23 @@ 'V8 Linux64 TSAN - concurrent marking': 'gn_release_x64_tsan_concurrent_marking', 'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan', - # Clusterfuzz. + # Misc. + 'V8 Linux gcc 4.8': 'gn_release_x86_gcc', + 'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc', + # FYI. 
+ 'V8 Linux - swarming staging': 'gn_release_x64', + 'V8 Linux64 - cfi': 'gn_release_x64_cfi', + 'V8 Linux64 UBSanVptr': 'gn_release_x64_ubsan_vptr', + 'V8 Linux - vtunejit': 'gn_debug_x86_vtunejit', + 'V8 Linux64 - gcov coverage': 'gn_release_x64_gcc_coverage', + 'V8 Linux - predictable': 'gn_release_x86_predictable', + 'V8 Linux - full debug': 'gn_full_debug_x86', + 'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp', + 'V8 Random Deopt Fuzzer - debug': 'gn_debug_x86', + }, + 'client.v8.clusterfuzz': { 'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer', + 'V8 Linux64 - debug builder': 'gn_debug_x64', 'V8 Linux64 ASAN no inline - release builder': 'gn_release_x64_asan_symbolized_edge_verify_heap', 'V8 Linux64 ASAN - debug builder': 'gn_debug_x64_asan_edge', @@ -98,23 +117,12 @@ 'gn_debug_simulate_arm_asan_edge', 'V8 Linux ASAN mipsel - debug builder': 'gn_debug_simulate_mipsel_asan_edge', - # Misc. - 'V8 Linux gcc 4.8': 'gn_release_x86_gcc', - 'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc', - # FYI. - 'V8 Linux - swarming staging': 'gn_release_x64', - # TODO(machenbach): Figure out if symbolized is still needed. The - # original config also specified -O1, which we dropped because chromium - # doesn't have it (anymore). - 'V8 Linux64 - cfi': 'gyp_release_x64_cfi_symbolized', - 'V8 Linux - vtunejit': 'gn_debug_x86_vtunejit', - 'V8 Linux64 - gcov coverage': 'gyp_release_x64_gcc_coverage', - 'V8 Linux - predictable': 'gn_release_x86_predictable', - 'V8 Linux - full debug': 'gyp_full_debug_x86', - 'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp', - 'V8 Random Deopt Fuzzer - debug': 'gyp_debug_x86', + 'V8 Linux64 CFI - release builder': 'gn_release_x64_cfi_clusterfuzz', + 'V8 Linux MSAN no origins': + 'gn_release_simulate_arm64_msan_no_origins_edge', + 'V8 Linux MSAN chained origins': + 'gn_release_simulate_arm64_msan_edge', }, - 'client.v8.ports': { # Arm. 'V8 Arm - builder': 'gn_release_arm', @@ -139,8 +147,6 @@ # S390. 'V8 Linux - s390 - sim': 'gyp_release_simulate_s390', 'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x', - # X87. 
- 'V8 Linux - x87 - nosnap - debug builder': 'gyp_debug_simulate_x87', }, 'client.v8.branches': { 'V8 Linux - beta branch': 'gn_release_x86', @@ -189,10 +195,11 @@ 'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols', 'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols', 'v8_linux64_sanitizer_coverage_rel': - 'gyp_release_x64_asan_minimal_symbols_coverage', + 'gn_release_x64_asan_minimal_symbols_coverage', 'v8_linux64_tsan_rel': 'gn_release_x64_tsan_minimal_symbols', 'v8_linux64_tsan_concurrent_marking_rel_ng': 'gn_release_x64_tsan_concurrent_marking_minimal_symbols', + 'v8_linux64_ubsan_rel_ng': 'gn_release_x64_ubsan_vptr_minimal_symbols', 'v8_win_dbg': 'gn_debug_x86_trybot', 'v8_win_compile_dbg': 'gn_debug_x86_trybot', 'v8_win_rel_ng': 'gn_release_x86_trybot', @@ -231,6 +238,14 @@ 'gn', 'debug', 'simulate_arm', 'v8_enable_slow_dchecks'], 'default_release_arm': [ 'gn', 'release', 'simulate_arm'], + 'default_debug_android_arm': [ + 'gn', 'debug', 'arm', 'android', 'crosscompile', + 'v8_enable_slow_dchecks', 'v8_full_debug'], + 'default_optdebug_android_arm': [ + 'gn', 'debug', 'arm', 'android', 'crosscompile', + 'v8_enable_slow_dchecks' ], + 'default_release_android_arm': [ + 'gn', 'release', 'arm', 'android', 'crosscompile'], 'default_debug_arm64': [ 'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks', 'v8_full_debug'], @@ -321,6 +336,10 @@ 'gn_release_simulate_arm64_msan_minimal_symbols': [ 'gn', 'release_bot', 'simulate_arm64', 'msan', 'minimal_symbols', 'swarming'], + 'gn_release_simulate_arm64_msan_edge': [ + 'gn', 'release_bot', 'simulate_arm64', 'edge', 'msan'], + 'gn_release_simulate_arm64_msan_no_origins_edge': [ + 'gn', 'release_bot', 'simulate_arm64', 'edge', 'msan_no_origins'], 'gn_release_simulate_arm64_trybot': [ 'gn', 'release_trybot', 'simulate_arm64', 'swarming'], 'gn_release_simulate_mipsel': [ @@ -330,7 +349,8 @@ # GN debug configs for arm. 'gn_debug_arm': [ - 'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'], + 'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming', + 'no_custom_libcxx'], # GN release configs for arm. 
'gn_release_arm': [ @@ -350,15 +370,26 @@ 'gn_release_x64_asan_minimal_symbols': [ 'gn', 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols', 'swarming'], + 'gn_release_x64_asan_minimal_symbols_coverage': [ + 'gn', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan', + 'minimal_symbols', 'swarming'], 'gn_release_x64_asan_no_lsan': [ 'gn', 'release_bot', 'x64', 'asan', 'swarming'], 'gn_release_x64_asan_symbolized_edge_verify_heap': [ 'gn', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized', 'v8_verify_heap'], + 'gn_release_x64_cfi': [ + 'gn', 'release_bot', 'x64', 'cfi', 'swarming'], + 'gn_release_x64_cfi_clusterfuzz': [ + 'gn', 'release_bot', 'x64', 'cfi_clusterfuzz'], 'gn_release_x64_clang': [ 'gn', 'release_bot', 'x64', 'clang', 'swarming'], + 'gn_release_x64_concurrent_marking': [ + 'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'swarming'], 'gn_release_x64_correctness_fuzzer' : [ 'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'], + 'gn_release_x64_gcc_coverage': [ + 'gn', 'release_bot', 'x64', 'coverage', 'gcc', 'no_custom_libcxx'], 'gn_release_x64_internal': [ 'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'], 'gn_release_x64_minimal_symbols': [ @@ -375,10 +406,16 @@ 'minimal_symbols', 'swarming'], 'gn_release_x64_tsan_minimal_symbols': [ 'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'], + 'gn_release_x64_ubsan_vptr': [ + 'gn', 'release_bot', 'x64', 'ubsan_vptr'], + 'gn_release_x64_ubsan_vptr_minimal_symbols': [ + 'gn', 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols'], 'gn_release_x64_valgrind': [ - 'gn', 'release_bot', 'x64', 'swarming', 'valgrind'], + 'gn', 'release_bot', 'x64', 'swarming', 'valgrind', + 'no_custom_libcxx'], 'gn_release_x64_valgrind_trybot': [ - 'gn', 'release_trybot', 'x64', 'swarming', 'valgrind'], + 'gn', 'release_trybot', 'x64', 'swarming', 'valgrind', + 'no_custom_libcxx'], 'gn_release_x64_verify_csa': [ 'gn', 'release_bot', 'x64', 'swarming', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_csa'], @@ -391,13 +428,14 @@ 'gn_debug_x64_custom': [ 'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'], 'gn_debug_x64_gcc': [ - 'gn', 'debug_bot', 'x64', 'gcc'], + 'gn', 'debug_bot', 'x64', 'gcc', 'no_custom_libcxx'], 'gn_debug_x64_minimal_symbols': [ 'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'], 'gn_debug_x64_trybot': [ 'gn', 'debug_trybot', 'x64', 'swarming'], 'gn_debug_x64_valgrind': [ - 'gn', 'debug_bot', 'x64', 'swarming', 'valgrind'], + 'gn', 'debug_bot', 'x64', 'swarming', 'valgrind', + 'no_custom_libcxx'], # GN debug configs for x86. 'gn_debug_x86': [ @@ -414,14 +452,20 @@ 'gn', 'debug_trybot', 'x86', 'swarming'], 'gn_debug_x86_vtunejit': [ 'gn', 'debug_bot', 'x86', 'v8_enable_vtunejit'], + 'gn_full_debug_x86': [ + 'gn', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks', + 'v8_full_debug'], # GN release configs for x86. 
'gn_release_x86': [ 'gn', 'release_bot', 'x86', 'swarming'], + 'gn_release_x86_disassembler': [ + 'gn', 'release_bot', 'x86', 'v8_enable_disassembler'], 'gn_release_x86_gcc': [ - 'gn', 'release_bot', 'x86', 'gcc'], + 'gn', 'release_bot', 'x86', 'gcc', 'no_custom_libcxx'], 'gn_release_x86_gcc_minimal_symbols': [ - 'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols'], + 'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols', + 'no_custom_libcxx'], 'gn_release_x86_gcmole': [ 'gn', 'release_bot', 'x86', 'gcmole', 'swarming'], 'gn_release_x86_gcmole_trybot': [ @@ -449,17 +493,6 @@ 'gn', 'release_bot', 'x86', 'swarming', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_csa'], - # Gyp debug configs for simulators. - 'gyp_debug_simulate_x87': [ - 'gyp', 'debug_bot_static', 'simulate_x87', 'swarming'], - - # Gyp debug configs for x86. - 'gyp_debug_x86': [ - 'gyp', 'debug_bot', 'x86', 'swarming'], - 'gyp_full_debug_x86': [ - 'gyp', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks', - 'v8_full_debug'], - # Gyp release configs for mips. 'gyp_release_mips_no_snap_no_i18n': [ 'gyp', 'release', 'mips', 'crosscompile', 'static', 'v8_no_i18n', @@ -478,17 +511,6 @@ # Gyp release configs for x64. 'gyp_release_x64': [ 'gyp', 'release_bot', 'x64', 'swarming'], - 'gyp_release_x64_asan_minimal_symbols_coverage': [ - 'gyp', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan', - 'minimal_symbols', 'swarming'], - 'gyp_release_x64_cfi_symbolized': [ - 'gyp', 'release_bot', 'x64', 'cfi', 'swarming', 'symbolized'], - 'gyp_release_x64_gcc_coverage': [ - 'gyp', 'release_bot', 'x64', 'coverage', 'gcc'], - - # Gyp release configs for x86. - 'gyp_release_x86_disassembler': [ - 'gyp', 'release_bot', 'x86', 'v8_enable_disassembler'], }, 'mixins': { @@ -518,7 +540,14 @@ }, 'cfi': { - 'gn_args': 'is_cfi=true use_cfi_diag=true', + 'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true ' + 'use_cfi_recover=false'), + 'gyp_defines': 'cfi_vptr=1 cfi_diag=1', + }, + + 'cfi_clusterfuzz': { + 'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true ' + 'use_cfi_recover=true'), 'gyp_defines': 'cfi_vptr=1 cfi_diag=1', }, @@ -528,7 +557,7 @@ }, 'coverage': { - # TODO(machenbach): Add this to gn. + 'gn_args': 'v8_code_coverage=true', 'gyp_defines': 'coverage=1', }, @@ -552,12 +581,6 @@ 'v8_optimized_debug'], }, - 'debug_bot_static': { - 'mixins': [ - 'debug', 'static', 'goma', 'v8_enable_slow_dchecks', - 'v8_optimized_debug'], - }, - 'debug_trybot': { 'mixins': ['debug_bot', 'minimal_symbols'], }, @@ -611,8 +634,16 @@ 'msan': { 'gn_args': ('is_msan=true msan_track_origins=2 ' 'use_prebuilt_instrumented_libraries=true'), - 'gyp_defines': ('clang=1 msan=1 msan_track_origins=2 ' - 'use_prebuilt_instrumented_libraries=1'), + }, + + 'msan_no_origins': { + 'gn_args': ('is_msan=true msan_track_origins=0 ' + 'use_prebuilt_instrumented_libraries=true'), + }, + + # TODO(machenbach): Remove when http://crbug.com/738814 is resolved. 
+ 'no_custom_libcxx': { + 'gn_args': 'use_custom_libcxx=false', }, 'release': { @@ -673,11 +704,6 @@ 'gyp_defines': 'target_arch=x64 v8_target_arch=s390x', }, - 'simulate_x87': { - 'gn_args': 'target_cpu="x86" v8_target_cpu="x87"', - 'gyp_defines': 'target_arch=ia32 v8_target_arch=x87', - }, - 'static': { 'gn_args': 'is_component_build=false', 'gyp_defines': 'component=static_library', @@ -700,6 +726,13 @@ 'gyp_defines': 'clang=1 tsan=1', }, + 'ubsan_vptr': { + # TODO(krasin): Remove is_ubsan_no_recover=true when + # https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use + # ubsan_vptr instead. + 'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=true', + }, + 'valgrind': { 'gn_args': 'v8_has_valgrind=true', 'gyp_defines': 'has_valgrind=1', diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/snapshot_toolchain.gni index 893bdc589f95ca..80cd1bd390c1dc 100644 --- a/deps/v8/snapshot_toolchain.gni +++ b/deps/v8/snapshot_toolchain.gni @@ -76,9 +76,11 @@ if (v8_snapshot_toolchain == "") { if (v8_current_cpu == "x64" || v8_current_cpu == "x86") { _cpus = v8_current_cpu - } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") { + } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" || + v8_current_cpu == "mips64") { _cpus = "x64_v8_${v8_current_cpu}" - } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") { + } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" || + v8_current_cpu == "mips") { _cpus = "x86_v8_${v8_current_cpu}" } else { # This branch should not be reached; leave _cpus blank so the assert diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS index 8bbbab6ecb7287..83a275c80f46e3 100644 --- a/deps/v8/src/OWNERS +++ b/deps/v8/src/OWNERS @@ -3,3 +3,5 @@ per-file intl.*=mnita@google.com per-file intl.*=jshin@chromium.org per-file typing-asm.*=aseemgarg@chromium.org per-file typing-asm.*=bradnelson@chromium.org + +# COMPONENT: Blink>JavaScript>Runtime diff --git a/deps/v8/src/PRESUBMIT.py b/deps/v8/src/PRESUBMIT.py new file mode 100644 index 00000000000000..d928a6068919fc --- /dev/null +++ b/deps/v8/src/PRESUBMIT.py @@ -0,0 +1,29 @@ +# Copyright 2017 the V8 project authors. All rights reserved.') +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Presubmit script for //v8/src + +See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts +for more details about the presubmit API built into depot_tools. +""" + +import os + + +def PostUploadHook(cl, change, output_api): + """git cl upload will call this hook after the issue is created/modified. + + This hook adds extra try bots to the CL description in order to run layout + tests in addition to CQ try bots. + """ + def is_api_cc(f): + return 'api.cc' == os.path.split(f.LocalPath())[1] + if not change.AffectedFiles(file_filter=is_api_cc): + return [] + return output_api.EnsureCQIncludeTrybotsAreAdded( + cl, + [ + 'master.tryserver.chromium.linux:linux_chromium_rel_ng' + ], + 'Automatically added layout test trybots to run tests on CQ.') diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 98f780d5895dbd..32ee1b61e32287 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -649,11 +649,7 @@ void Accessors::ScriptEvalFromFunctionNameGetter( Handle shared( SharedFunctionInfo::cast(script->eval_from_shared())); // Find the name of the function calling eval. 
- if (!shared->name()->IsUndefined(isolate)) { - result = Handle(shared->name(), isolate); - } else { - result = Handle(shared->inferred_name(), isolate); - } + result = Handle(shared->name(), isolate); } info.GetReturnValue().Set(Utils::ToLocal(result)); } diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc index 79f8e62d5454f2..4b0d02958872c9 100644 --- a/deps/v8/src/address-map.cc +++ b/deps/v8/src/address-map.cc @@ -20,6 +20,8 @@ RootIndexMap::RootIndexMap(Isolate* isolate) { if (!root->IsHeapObject()) continue; // Omit root entries that can be written after initialization. They must // not be referenced through the root list in the snapshot. + // Since we map the raw address of an root item to its root list index, the + // raw address must be constant, i.e. the object must be immovable. if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) { HeapObject* heap_object = HeapObject::cast(root); Maybe maybe_index = map_->Get(heap_object); diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc deleted file mode 100644 index 6b9fd03a21913e..00000000000000 --- a/deps/v8/src/allocation-site-scopes.cc +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/allocation-site-scopes.h" -#include "src/factory.h" -#include "src/isolate.h" -#include "src/objects-inl.h" - -namespace v8 { -namespace internal { - - -Handle AllocationSiteCreationContext::EnterNewScope() { - Handle scope_site; - if (top().is_null()) { - // We are creating the top level AllocationSite as opposed to a nested - // AllocationSite. - InitializeTraversal(isolate()->factory()->NewAllocationSite()); - scope_site = Handle(*top(), isolate()); - if (FLAG_trace_creation_allocation_sites) { - PrintF("*** Creating top level AllocationSite %p\n", - static_cast(*scope_site)); - } - } else { - DCHECK(!current().is_null()); - scope_site = isolate()->factory()->NewAllocationSite(); - if (FLAG_trace_creation_allocation_sites) { - PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n", - static_cast(*top()), - static_cast(*current()), - static_cast(*scope_site)); - } - current()->set_nested_site(*scope_site); - update_current_site(*scope_site); - } - DCHECK(!scope_site.is_null()); - return scope_site; -} - - -void AllocationSiteCreationContext::ExitScope( - Handle scope_site, - Handle object) { - if (!object.is_null()) { - bool top_level = !scope_site.is_null() && - top().is_identical_to(scope_site); - - scope_site->set_transition_info(*object); - if (FLAG_trace_creation_allocation_sites) { - if (top_level) { - PrintF("*** Setting AllocationSite %p transition_info %p\n", - static_cast(*scope_site), - static_cast(*object)); - } else { - PrintF("Setting AllocationSite (%p, %p) transition_info %p\n", - static_cast(*top()), - static_cast(*scope_site), - static_cast(*object)); - } - } - } -} - - -bool AllocationSiteUsageContext::ShouldCreateMemento(Handle object) { - if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) { - if (FLAG_allocation_site_pretenuring || - AllocationSite::GetMode(object->GetElementsKind()) == - TRACK_ALLOCATION_SITE) { - if (FLAG_trace_creation_allocation_sites) { - PrintF("*** Creating Memento for %s %p\n", - object->IsJSArray() ? 
"JSArray" : "JSObject", - static_cast(*object)); - } - return true; - } - } - return false; -} - -} // namespace internal -} // namespace v8 diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h index da2b9dc45c2ff5..60614c5e01cd12 100644 --- a/deps/v8/src/allocation-site-scopes.h +++ b/deps/v8/src/allocation-site-scopes.h @@ -7,11 +7,11 @@ #include "src/handles.h" #include "src/objects.h" +#include "src/objects/map.h" namespace v8 { namespace internal { - // AllocationSiteContext is the base class for walking and copying a nested // boilerplate with AllocationSite and AllocationMemento support. class AllocationSiteContext { @@ -34,6 +34,8 @@ class AllocationSiteContext { void InitializeTraversal(Handle site) { top_ = site; + // {current_} is updated in place to not create unnecessary Handles, hence + // we initially need a separate handle. current_ = Handle::New(*top_, isolate()); } @@ -44,18 +46,6 @@ class AllocationSiteContext { }; -// AllocationSiteCreationContext aids in the creation of AllocationSites to -// accompany object literals. -class AllocationSiteCreationContext : public AllocationSiteContext { - public: - explicit AllocationSiteCreationContext(Isolate* isolate) - : AllocationSiteContext(isolate) { } - - Handle EnterNewScope(); - void ExitScope(Handle site, Handle object); -}; - - // AllocationSiteUsageContext aids in the creation of AllocationMementos placed // behind some/all components of a copied object literal. class AllocationSiteUsageContext : public AllocationSiteContext { @@ -82,10 +72,26 @@ class AllocationSiteUsageContext : public AllocationSiteContext { Handle object) { // This assert ensures that we are pointing at the right sub-object in a // recursive walk of a nested literal. - DCHECK(object.is_null() || *object == scope_site->transition_info()); + DCHECK(object.is_null() || *object == scope_site->boilerplate()); + } + + bool ShouldCreateMemento(Handle object) { + if (activated_ && + AllocationSite::CanTrack(object->map()->instance_type())) { + if (FLAG_allocation_site_pretenuring || + AllocationSite::ShouldTrack(object->GetElementsKind())) { + if (FLAG_trace_creation_allocation_sites) { + PrintF("*** Creating Memento for %s %p\n", + object->IsJSArray() ? 
"JSArray" : "JSObject", + static_cast(*object)); + } + return true; + } + } + return false; } - bool ShouldCreateMemento(Handle object); + static const bool kCopying = true; private: Handle top_site_; diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc index fde01f6447b41f..0a39a796bc3644 100644 --- a/deps/v8/src/allocation.cc +++ b/deps/v8/src/allocation.cc @@ -53,7 +53,7 @@ char* StrNDup(const char* str, int n) { void* AlignedAlloc(size_t size, size_t alignment) { DCHECK_LE(V8_ALIGNOF(void*), alignment); - DCHECK(base::bits::IsPowerOfTwo64(alignment)); + DCHECK(base::bits::IsPowerOfTwo(alignment)); void* ptr; #if V8_OS_WIN ptr = _aligned_malloc(size, alignment); diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc index ef51f950a5ecb8..8a649534f827a3 100644 --- a/deps/v8/src/api-natives.cc +++ b/deps/v8/src/api-natives.cc @@ -39,15 +39,16 @@ MaybeHandle InstantiateObject(Isolate* isolate, bool is_hidden_prototype, bool is_prototype); -MaybeHandle InstantiateFunction(Isolate* isolate, - Handle data, - Handle name = Handle()); +MaybeHandle InstantiateFunction( + Isolate* isolate, Handle data, + MaybeHandle maybe_name = MaybeHandle()); -MaybeHandle Instantiate(Isolate* isolate, Handle data, - Handle name = Handle()) { +MaybeHandle Instantiate( + Isolate* isolate, Handle data, + MaybeHandle maybe_name = MaybeHandle()) { if (data->IsFunctionTemplateInfo()) { - return InstantiateFunction(isolate, - Handle::cast(data), name); + return InstantiateFunction( + isolate, Handle::cast(data), maybe_name); } else if (data->IsObjectTemplateInfo()) { return InstantiateObject(isolate, Handle::cast(data), Handle(), false, false); @@ -250,7 +251,7 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, DCHECK_EQ(kData, details.kind()); v8::Intrinsic intrinsic = - static_cast(Smi::cast(properties->get(i++))->value()); + static_cast(Smi::ToInt(properties->get(i++))); auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate); RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name, @@ -311,7 +312,7 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number, Handle cache = isolate->slow_template_instantiations_cache(); auto new_cache = - UnseededNumberDictionary::AtNumberPut(cache, serial_number, object); + UnseededNumberDictionary::Set(cache, serial_number, object); if (*new_cache != *cache) { isolate->native_context()->set_slow_template_instantiations_cache( *new_cache); @@ -333,14 +334,9 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number, Handle cache = isolate->slow_template_instantiations_cache(); int entry = cache->FindEntry(serial_number); - DCHECK(entry != UnseededNumberDictionary::kNotFound); - Handle result = - UnseededNumberDictionary::DeleteProperty(cache, entry); - USE(result); - DCHECK(result->IsTrue(isolate)); - auto new_cache = UnseededNumberDictionary::Shrink(cache, entry); - isolate->native_context()->set_slow_template_instantiations_cache( - *new_cache); + DCHECK_NE(UnseededNumberDictionary::kNotFound, entry); + cache = UnseededNumberDictionary::DeleteEntry(cache, entry); + isolate->native_context()->set_slow_template_instantiations_cache(*cache); } } @@ -361,7 +357,7 @@ MaybeHandle InstantiateObject(Isolate* isolate, bool is_hidden_prototype, bool is_prototype) { Handle constructor; - int serial_number = Smi::cast(info->serial_number())->value(); + int serial_number = Smi::ToInt(info->serial_number()); if (!new_target.is_null()) { if (IsSimpleInstantiation(isolate, *info, *new_target)) { constructor = 
Handle::cast(new_target); @@ -402,7 +398,7 @@ MaybeHandle InstantiateObject(Isolate* isolate, ASSIGN_RETURN_ON_EXCEPTION(isolate, object, JSObject::New(constructor, new_target), JSObject); - if (is_prototype) JSObject::OptimizeAsPrototype(object, FAST_PROTOTYPE); + if (is_prototype) JSObject::OptimizeAsPrototype(object); ASSIGN_RETURN_ON_EXCEPTION( isolate, result, @@ -450,8 +446,8 @@ MaybeHandle GetInstancePrototype(Isolate* isolate, MaybeHandle InstantiateFunction(Isolate* isolate, Handle data, - Handle name) { - int serial_number = Smi::cast(data->serial_number())->value(); + MaybeHandle maybe_name) { + int serial_number = Smi::ToInt(data->serial_number()); if (serial_number) { Handle result; if (ProbeInstantiationsCache(isolate, serial_number, @@ -492,10 +488,7 @@ MaybeHandle InstantiateFunction(Isolate* isolate, } } Handle function = ApiNatives::CreateApiFunction( - isolate, data, prototype, ApiNatives::JavaScriptObjectType); - if (!name.is_null() && name->IsString()) { - function->shared()->set_name(*name); - } + isolate, data, prototype, ApiNatives::JavaScriptObjectType, maybe_name); if (serial_number) { // Cache the function. CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited, @@ -538,10 +531,10 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle templ, } // namespace MaybeHandle ApiNatives::InstantiateFunction( - Handle data) { + Handle data, MaybeHandle maybe_name) { Isolate* isolate = data->GetIsolate(); InvokeScope invoke_scope(isolate); - return ::v8::internal::InstantiateFunction(isolate, data); + return ::v8::internal::InstantiateFunction(isolate, data, maybe_name); } MaybeHandle ApiNatives::InstantiateObject( @@ -562,7 +555,7 @@ MaybeHandle ApiNatives::InstantiateRemoteObject( Handle object_map = isolate->factory()->NewMap( JS_SPECIAL_API_OBJECT_TYPE, JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize, - FAST_HOLEY_SMI_ELEMENTS); + HOLEY_SMI_ELEMENTS); object_map->SetConstructor(*constructor); object_map->set_is_access_check_needed(true); @@ -575,7 +568,7 @@ MaybeHandle ApiNatives::InstantiateRemoteObject( void ApiNatives::AddDataProperty(Isolate* isolate, Handle info, Handle name, Handle value, PropertyAttributes attributes) { - PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell); + PropertyDetails details(kData, attributes, PropertyCellType::kNoCell); auto details_handle = handle(details.AsSmi(), isolate); Handle data[] = {name, details_handle, value}; AddPropertyToPropertyList(isolate, info, arraysize(data), data); @@ -587,7 +580,7 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle info, PropertyAttributes attributes) { auto value = handle(Smi::FromInt(intrinsic), isolate); auto intrinsic_marker = isolate->factory()->true_value(); - PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell); + PropertyDetails details(kData, attributes, PropertyCellType::kNoCell); auto details_handle = handle(details.AsSmi(), isolate); Handle data[] = {name, intrinsic_marker, details_handle, value}; AddPropertyToPropertyList(isolate, info, arraysize(data), data); @@ -600,7 +593,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate, Handle getter, Handle setter, PropertyAttributes attributes) { - PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kNoCell); + PropertyDetails details(kAccessor, attributes, PropertyCellType::kNoCell); auto details_handle = handle(details.AsSmi(), isolate); Handle data[] = {name, details_handle, getter, setter}; AddPropertyToPropertyList(isolate, 
info, arraysize(data), data); @@ -621,12 +614,16 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate, info->set_property_accessors(*list); } - Handle ApiNatives::CreateApiFunction( Isolate* isolate, Handle obj, - Handle prototype, ApiInstanceType instance_type) { + Handle prototype, ApiInstanceType instance_type, + MaybeHandle maybe_name) { Handle shared = - FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj); + FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj, + maybe_name); + // To simplify things, API functions always have shared name. + DCHECK(shared->has_shared_name()); + Handle result = isolate->factory()->NewFunctionFromSharedFunctionInfo( shared, isolate->native_context()); @@ -695,7 +692,7 @@ Handle ApiNatives::CreateApiFunction( } Handle map = - isolate->factory()->NewMap(type, instance_size, FAST_HOLEY_SMI_ELEMENTS); + isolate->factory()->NewMap(type, instance_size, HOLEY_SMI_ELEMENTS); JSFunction::SetInitialMap(result, map, Handle::cast(prototype)); // Mark as undetectable if needed. diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h index 74d3788fd123d1..455be0dd06b480 100644 --- a/deps/v8/src/api-natives.h +++ b/deps/v8/src/api-natives.h @@ -20,7 +20,8 @@ class ApiNatives { static const int kInitialFunctionCacheSize = 256; MUST_USE_RESULT static MaybeHandle InstantiateFunction( - Handle data); + Handle data, + MaybeHandle maybe_name = MaybeHandle()); MUST_USE_RESULT static MaybeHandle InstantiateObject( Handle data, @@ -35,10 +36,10 @@ class ApiNatives { GlobalProxyType }; - static Handle CreateApiFunction(Isolate* isolate, - Handle obj, - Handle prototype, - ApiInstanceType instance_type); + static Handle CreateApiFunction( + Isolate* isolate, Handle obj, + Handle prototype, ApiInstanceType instance_type, + MaybeHandle maybe_name = MaybeHandle()); static void AddDataProperty(Isolate* isolate, Handle info, Handle name, Handle value, diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 818dfa1e22e69a..09da3ac8835266 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -80,6 +80,26 @@ namespace v8 { +/* + * Most API methods should use one of the three macros: + * + * ENTER_V8, ENTER_V8_NO_SCRIPT, ENTER_V8_NO_SCRIPT_NO_EXCEPTION. + * + * The latter two assume that no script is executed, and no exceptions are + * scheduled in addition (respectively). Creating a pending exception and + * removing it before returning is ok. + * + * Exceptions should be handled either by invoking one of the + * RETURN_ON_FAILED_EXECUTION* macros. + * + * Don't use macros with DO_NOT_USE in their name. + * + * TODO(jochen): Document debugger specific macros. + * TODO(jochen): Document LOG_API and other RuntimeCallStats macros. + * TODO(jochen): All API methods should invoke one of the ENTER_V8* macros. + * TODO(jochen): Remove calls form API methods to DO_NOT_USE macros. 
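The new api.cc comment above prescribes which ENTER_V8* macro an API method should use; Script::Run and Module::InstantiateModule below are converted to this shape. As an illustration of the convention only (the class, method, and DoTheWork names are placeholders, not real RuntimeCallStats entries):

    MaybeLocal<Value> SomeClass::SomeMethod(Local<Context> context) {
      auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
      ENTER_V8(isolate, context, SomeClass, SomeMethod, MaybeLocal<Value>(),
               InternalEscapableScope);
      Local<Value> result;
      has_pending_exception = !DoTheWork(isolate).ToLocal(&result);  // placeholder
      RETURN_ON_FAILED_EXECUTION(Value);
      return handle_scope.Escape(result);  // handle_scope is declared by the macro
    }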
+ */ + #define LOG_API(isolate, class_name, function_name) \ i::RuntimeCallTimerScope _runtime_timer( \ isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \ @@ -87,16 +107,16 @@ namespace v8 { #define ENTER_V8_DO_NOT_USE(isolate) i::VMState __state__((isolate)) -#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, \ - function_name, bailout_value, \ - HandleScopeClass, do_callback) \ - if (IsExecutionTerminatingCheck(isolate)) { \ - return bailout_value; \ - } \ - HandleScopeClass handle_scope(isolate); \ - CallDepthScope call_depth_scope(isolate, context); \ - LOG_API(isolate, class_name, function_name); \ - ENTER_V8_DO_NOT_USE(isolate); \ +#define ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, \ + function_name, bailout_value, \ + HandleScopeClass, do_callback) \ + if (IsExecutionTerminatingCheck(isolate)) { \ + return bailout_value; \ + } \ + HandleScopeClass handle_scope(isolate); \ + CallDepthScope call_depth_scope(isolate, context); \ + LOG_API(isolate, class_name, function_name); \ + i::VMState __state__((isolate)); \ bool has_pending_exception = false #define PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, T) \ @@ -105,7 +125,7 @@ namespace v8 { } \ InternalEscapableScope handle_scope(isolate); \ CallDepthScope call_depth_scope(isolate, v8::Local()); \ - ENTER_V8_DO_NOT_USE(isolate); \ + i::VMState __state__((isolate)); \ bool has_pending_exception = false #define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \ @@ -114,45 +134,26 @@ namespace v8 { auto isolate = context.IsEmpty() \ ? i::Isolate::Current() \ : reinterpret_cast(context->GetIsolate()); \ - PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \ - bailout_value, HandleScopeClass, do_callback); - -#define PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( \ - category, name, context, class_name, function_name, bailout_value, \ - HandleScopeClass, do_callback) \ - auto isolate = context.IsEmpty() \ - ? 
i::Isolate::Current() \ - : reinterpret_cast(context->GetIsolate()); \ - TRACE_EVENT_CALL_STATS_SCOPED(isolate, category, name); \ - PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \ - bailout_value, HandleScopeClass, do_callback); - -#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, class_name, function_name, \ - T) \ - PREPARE_FOR_EXECUTION_GENERIC(isolate, Local(), class_name, \ - function_name, MaybeLocal(), \ - InternalEscapableScope, false); + ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \ + bailout_value, HandleScopeClass, do_callback); #define PREPARE_FOR_EXECUTION(context, class_name, function_name, T) \ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \ MaybeLocal(), InternalEscapableScope, \ false) -#define PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, class_name, \ - function_name, T) \ - PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \ - MaybeLocal(), InternalEscapableScope, \ - true) - -#define PREPARE_FOR_EXECUTION_PRIMITIVE(context, class_name, function_name, T) \ - PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \ - Nothing(), i::HandleScope, false) - -#define PREPARE_FOR_EXECUTION_BOOL(context, class_name, function_name) \ - PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \ - false, i::HandleScope, false) +#define ENTER_V8(isolate, context, class_name, function_name, bailout_value, \ + HandleScopeClass) \ + ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \ + bailout_value, HandleScopeClass, true) #ifdef DEBUG +#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \ + bailout_value, HandleScopeClass) \ + ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \ + bailout_value, HandleScopeClass, false); \ + i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate)) + #define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \ i::VMState __state__((isolate)); \ i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate)); \ @@ -162,6 +163,11 @@ namespace v8 { i::VMState __state__((isolate)); \ i::DisallowExceptions __no_exceptions__((isolate)) #else +#define ENTER_V8_NO_SCRIPT(isolate, context, class_name, function_name, \ + bailout_value, HandleScopeClass) \ + ENTER_V8_HELPER_DO_NOT_USE(isolate, context, class_name, function_name, \ + bailout_value, HandleScopeClass, false) + #define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \ i::VMState __state__((isolate)); @@ -169,24 +175,19 @@ namespace v8 { i::VMState __state__((isolate)); #endif // DEBUG -#define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \ - do { \ - if (has_pending_exception) { \ - call_depth_scope.Escape(); \ - return value; \ - } \ +#define EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, value) \ + do { \ + if (has_pending_exception) { \ + call_depth_scope.Escape(); \ + return value; \ + } \ } while (false) - #define RETURN_ON_FAILED_EXECUTION(T) \ - EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, MaybeLocal()) - + EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, MaybeLocal()) #define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \ - EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, Nothing()) - -#define RETURN_ON_FAILED_EXECUTION_BOOL() \ - EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false) + EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing()) #define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \ return maybe_local.FromMaybe(Local()); @@ -208,8 +209,8 @@ class InternalEscapableScope : public v8::EscapableHandleScope { : 
v8::EscapableHandleScope(reinterpret_cast(isolate)) {} }; - -#ifdef DEBUG +// TODO(jochen): This should be #ifdef DEBUG +#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY void CheckMicrotasksScopesConsistency(i::Isolate* isolate) { auto handle_scope_implementer = isolate->handle_scope_implementer(); if (handle_scope_implementer->microtasks_policy() == @@ -248,7 +249,8 @@ class CallDepthScope { } if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth(); if (do_callback) isolate_->FireCallCompletedCallback(); -#ifdef DEBUG +// TODO(jochen): This should be #ifdef DEBUG +#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY if (do_callback) CheckMicrotasksScopesConsistency(isolate_); #endif } @@ -475,7 +477,8 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { virtual void Free(void* data, size_t) { free(data); } virtual void* Reserve(size_t length) { - return base::VirtualMemory::ReserveRegion(length); + return base::VirtualMemory::ReserveRegion(length, + base::OS::GetRandomMmapAddr()); } virtual void Free(void* data, size_t length, @@ -875,7 +878,7 @@ Extension::Extension(const char* name, } ResourceConstraints::ResourceConstraints() - : max_semi_space_size_(0), + : max_semi_space_size_in_kb_(0), max_old_space_size_(0), stack_limit_(NULL), code_range_size_(0), @@ -883,38 +886,11 @@ ResourceConstraints::ResourceConstraints() void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, uint64_t virtual_memory_limit) { -#if V8_OS_ANDROID - // Android has higher physical memory requirements before raising the maximum - // heap size limits since it has no swap space. - const uint64_t low_limit = 512ul * i::MB; - const uint64_t medium_limit = 1ul * i::GB; - const uint64_t high_limit = 2ul * i::GB; -#else - const uint64_t low_limit = 512ul * i::MB; - const uint64_t medium_limit = 768ul * i::MB; - const uint64_t high_limit = 1ul * i::GB; -#endif - - if (physical_memory <= low_limit) { - set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice); - set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice); - set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSizeLowMemoryDevice); - } else if (physical_memory <= medium_limit) { - set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice); - set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice); - set_max_zone_pool_size( - i::AccountingAllocator::kMaxPoolSizeMediumMemoryDevice); - } else if (physical_memory <= high_limit) { - set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice); - set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice); - set_max_zone_pool_size( - i::AccountingAllocator::kMaxPoolSizeHighMemoryDevice); - } else { - set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice); - set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice); - set_max_zone_pool_size( - i::AccountingAllocator::kMaxPoolSizeHugeMemoryDevice); - } + set_max_semi_space_size_in_kb( + i::Heap::ComputeMaxSemiSpaceSize(physical_memory)); + set_max_old_space_size( + static_cast(i::Heap::ComputeMaxOldGenerationSize(physical_memory))); + set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize); if (virtual_memory_limit > 0 && i::kRequiresCodeRange) { // Reserve no more than 1/8 of the memory for the code range, but at most @@ -925,10 +901,9 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, } } - void SetResourceConstraints(i::Isolate* isolate, const ResourceConstraints& constraints) { - int semi_space_size = 
constraints.max_semi_space_size(); + size_t semi_space_size = constraints.max_semi_space_size_in_kb(); int old_space_size = constraints.max_old_space_size(); size_t code_range_size = constraints.code_range_size(); size_t max_pool_size = constraints.max_zone_pool_size(); @@ -1068,8 +1043,9 @@ HandleScope::~HandleScope() { } void* HandleScope::operator new(size_t) { base::OS::Abort(); } - +void* HandleScope::operator new[](size_t) { base::OS::Abort(); } void HandleScope::operator delete(void*, size_t) { base::OS::Abort(); } +void HandleScope::operator delete[](void*, size_t) { base::OS::Abort(); } int HandleScope::NumberOfHandles(Isolate* isolate) { return i::HandleScope::NumberOfHandles( @@ -1109,8 +1085,11 @@ i::Object** EscapableHandleScope::Escape(i::Object** escape_value) { } void* EscapableHandleScope::operator new(size_t) { base::OS::Abort(); } - +void* EscapableHandleScope::operator new[](size_t) { base::OS::Abort(); } void EscapableHandleScope::operator delete(void*, size_t) { base::OS::Abort(); } +void EscapableHandleScope::operator delete[](void*, size_t) { + base::OS::Abort(); +} SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(reinterpret_cast(isolate)) { @@ -1131,8 +1110,9 @@ SealHandleScope::~SealHandleScope() { } void* SealHandleScope::operator new(size_t) { base::OS::Abort(); } - +void* SealHandleScope::operator new[](size_t) { base::OS::Abort(); } void SealHandleScope::operator delete(void*, size_t) { base::OS::Abort(); } +void SealHandleScope::operator delete[](void*, size_t) { base::OS::Abort(); } void Context::Enter() { i::Handle env = Utils::OpenHandle(this); @@ -1144,7 +1124,6 @@ void Context::Enter() { isolate->set_context(*env); } - void Context::Exit() { i::Handle env = Utils::OpenHandle(this); i::Isolate* isolate = env->GetIsolate(); @@ -1159,6 +1138,22 @@ void Context::Exit() { isolate->set_context(impl->RestoreContext()); } +Context::BackupIncumbentScope::BackupIncumbentScope( + Local backup_incumbent_context) + : backup_incumbent_context_(backup_incumbent_context) { + DCHECK(!backup_incumbent_context_.IsEmpty()); + + i::Handle env = Utils::OpenHandle(*backup_incumbent_context_); + i::Isolate* isolate = env->GetIsolate(); + prev_ = isolate->top_backup_incumbent_scope(); + isolate->set_top_backup_incumbent_scope(this); +} + +Context::BackupIncumbentScope::~BackupIncumbentScope() { + i::Handle env = Utils::OpenHandle(*backup_incumbent_context_); + i::Isolate* isolate = env->GetIsolate(); + isolate->set_top_backup_incumbent_scope(prev_); +} static void* DecodeSmiToAligned(i::Object* value, const char* location) { Utils::ApiCheck(value->IsSmi(), location, "Not a Smi"); @@ -1366,8 +1361,9 @@ static Local FunctionTemplateNew( obj->set_undetectable(false); obj->set_needs_access_check(false); obj->set_accept_any_receiver(true); - if (!signature.IsEmpty()) + if (!signature.IsEmpty()) { obj->set_signature(*Utils::OpenHandle(*signature)); + } obj->set_cached_property_name( cached_property_name.IsEmpty() ? 
isolate->heap()->the_hole_value() @@ -2062,9 +2058,10 @@ Local UnboundScript::GetSourceMappingURL() { MaybeLocal Script::Run(Local context) { - PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( - "v8", "V8.Execute", context, Script, Run, MaybeLocal(), - InternalEscapableScope, true); + auto isolate = reinterpret_cast(context->GetIsolate()); + TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); + ENTER_V8(isolate, context, Script, Run, MaybeLocal(), + InternalEscapableScope); i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true); i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy()); i::TimerEventScope timer_scope(isolate); @@ -2096,37 +2093,33 @@ Local Script::GetUnboundScript() { i::Handle(i::JSFunction::cast(*obj)->shared())); } -bool DynamicImportResult::FinishDynamicImportSuccess(Local context, - Local module) { - PREPARE_FOR_EXECUTION_BOOL(context, Module, FinishDynamicImportSuccess); - auto promise = Utils::OpenHandle(this); - i::Handle module_obj = Utils::OpenHandle(*module); - i::Handle module_namespace = - i::Module::GetModuleNamespace(module_obj); - i::Handle argv[] = {promise, module_namespace}; - has_pending_exception = - i::Execution::Call(isolate, isolate->promise_resolve(), - isolate->factory()->undefined_value(), arraysize(argv), - argv) - .is_null(); - RETURN_ON_FAILED_EXECUTION_BOOL(); - return true; -} -bool DynamicImportResult::FinishDynamicImportFailure(Local context, - Local exception) { - PREPARE_FOR_EXECUTION_BOOL(context, Module, FinishDynamicImportFailure); - auto promise = Utils::OpenHandle(this); - // We pass true to trigger the debugger's on exception handler. - i::Handle argv[] = {promise, Utils::OpenHandle(*exception), - isolate->factory()->ToBoolean(true)}; - has_pending_exception = - i::Execution::Call(isolate, isolate->promise_internal_reject(), - isolate->factory()->undefined_value(), arraysize(argv), - argv) - .is_null(); - RETURN_ON_FAILED_EXECUTION_BOOL(); - return true; +Module::Status Module::GetStatus() const { + i::Handle self = Utils::OpenHandle(this); + switch (self->status()) { + case i::Module::kUninstantiated: + case i::Module::kPreInstantiating: + return kUninstantiated; + case i::Module::kInstantiating: + return kInstantiating; + case i::Module::kInstantiated: + return kInstantiated; + case i::Module::kEvaluating: + return kEvaluating; + case i::Module::kEvaluated: + return kEvaluated; + case i::Module::kErrored: + return kErrored; + } + UNREACHABLE(); +} + +Local Module::GetException() const { + Utils::ApiCheck(GetStatus() == kErrored, "v8::Module::GetException", + "Module status must be kErrored"); + i::Handle self = Utils::OpenHandle(this); + i::Isolate* isolate = self->GetIsolate(); + return ToApiHandle(i::handle(self->GetException(), isolate)); } int Module::GetModuleRequestsLength() const { @@ -2144,28 +2137,63 @@ Local Module::GetModuleRequest(int i) const { return ToApiHandle(i::handle(module_requests->get(i), isolate)); } +Location Module::GetModuleRequestLocation(int i) const { + CHECK_GE(i, 0); + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + i::HandleScope scope(isolate); + i::Handle self = Utils::OpenHandle(this); + i::Handle module_request_positions( + self->info()->module_request_positions(), isolate); + CHECK_LT(i, module_request_positions->length()); + int position = i::Smi::ToInt(module_request_positions->get(i)); + i::Handle script(self->script(), isolate); + i::Script::PositionInfo info; + i::Script::GetPositionInfo(script, position, &info, 
i::Script::WITH_OFFSET); + return v8::Location(info.line, info.column); +} + +Local Module::GetModuleNamespace() { + Utils::ApiCheck( + GetStatus() != kErrored && GetStatus() >= kInstantiated, + "v8::Module::GetModuleNamespace", + "GetModuleNamespace should be used on a successfully instantiated" + "module. The current module has not been instantiated or has errored"); + i::Handle self = Utils::OpenHandle(this); + i::Handle module_namespace = + i::Module::GetModuleNamespace(self); + return ToApiHandle(module_namespace); +} + int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); } bool Module::Instantiate(Local context, Module::ResolveCallback callback) { - PREPARE_FOR_EXECUTION_BOOL(context, Module, Instantiate); + return InstantiateModule(context, callback).FromMaybe(false); +} + +Maybe Module::InstantiateModule(Local context, + Module::ResolveCallback callback) { + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Module, InstantiateModule, + Nothing(), i::HandleScope); has_pending_exception = !i::Module::Instantiate(Utils::OpenHandle(this), context, callback); - RETURN_ON_FAILED_EXECUTION_BOOL(); - return true; + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return Just(true); } MaybeLocal Module::Evaluate(Local context) { - PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( - "v8", "V8.Execute", context, Module, Evaluate, MaybeLocal(), - InternalEscapableScope, true); + auto isolate = reinterpret_cast(context->GetIsolate()); + TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); + ENTER_V8(isolate, context, Module, Evaluate, MaybeLocal(), + InternalEscapableScope); i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true); i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy()); i::TimerEventScope timer_scope(isolate); i::Handle self = Utils::OpenHandle(this); // It's an API error to call Evaluate before Instantiate. - CHECK(self->instantiated()); + CHECK_GE(self->status(), i::Module::kInstantiated); Local result; has_pending_exception = !ToLocal(i::Module::Evaluate(self), &result); @@ -2175,10 +2203,11 @@ MaybeLocal Module::Evaluate(Local context) { MaybeLocal ScriptCompiler::CompileUnboundInternal( Isolate* v8_isolate, Source* source, CompileOptions options) { - i::Isolate* isolate = reinterpret_cast(v8_isolate); - PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, ScriptCompiler, CompileUnbound, - UnboundScript); + auto isolate = reinterpret_cast(v8_isolate); TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler"); + ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler, + CompileUnbound, MaybeLocal(), + InternalEscapableScope); // Don't try to produce any kind of cache when the debugger is loaded. 
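Note on the module hunks above: the DynamicImportResult plumbing is dropped in favour of a richer v8::Module surface (GetStatus, GetException, GetModuleRequestLocation, GetModuleNamespace) plus a Maybe-returning InstantiateModule. A minimal embedder-side sketch of the status-driven flow follows; it assumes a live context and an already compiled module, and the ResolveToNothing callback is a hypothetical placeholder, not part of this patch.

    #include "v8.h"  // sketch assumes the public API as of this revision

    // Hypothetical resolver: this sketch has no nested import requests.
    static v8::MaybeLocal<v8::Module> ResolveToNothing(
        v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
        v8::Local<v8::Module> referrer) {
      return v8::MaybeLocal<v8::Module>();
    }

    static void RunModule(v8::Local<v8::Context> context,
                          v8::Local<v8::Module> module) {
      // InstantiateModule reports failure through Maybe<bool>; the old
      // bool-returning Instantiate() stays only as a thin wrapper around it.
      if (module->InstantiateModule(context, ResolveToNothing)
              .FromMaybe(false)) {
        v8::MaybeLocal<v8::Value> result = module->Evaluate(context);
        (void)result;  // empty when evaluation threw
      }
      // GetStatus()/GetException() expose the status machine added here.
      if (module->GetStatus() == v8::Module::kErrored) {
        v8::Local<v8::Value> exception = module->GetException();
        (void)exception;
      }
    }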
if (isolate->debug()->is_loaded() && @@ -2354,14 +2383,9 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( Function); TRACE_EVENT0("v8", "V8.ScriptCompiler"); i::Handle source_string; - int parameters_end_pos = i::kNoSourcePosition; auto factory = isolate->factory(); if (arguments_count) { - if (i::FLAG_harmony_function_tostring) { - source_string = factory->NewStringFromStaticChars("(function anonymous("); - } else { - source_string = factory->NewStringFromStaticChars("(function("); - } + source_string = factory->NewStringFromStaticChars("(function("); for (size_t i = 0; i < arguments_count; ++i) { IsIdentifierHelper helper; if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) { @@ -2380,25 +2404,12 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( RETURN_ON_FAILED_EXECUTION(Function); } i::Handle brackets; - if (i::FLAG_harmony_function_tostring) { - // Append linefeed and signal that text beyond the linefeed is not part of - // the formal parameters. - brackets = factory->NewStringFromStaticChars("\n) {\n"); - parameters_end_pos = source_string->length() + 1; - } else { - brackets = factory->NewStringFromStaticChars("){"); - } + brackets = factory->NewStringFromStaticChars("){"); has_pending_exception = !factory->NewConsString(source_string, brackets) .ToHandle(&source_string); RETURN_ON_FAILED_EXECUTION(Function); } else { - if (i::FLAG_harmony_function_tostring) { - source_string = - factory->NewStringFromStaticChars("(function anonymous(\n) {\n"); - parameters_end_pos = source_string->length() - 4; - } else { - source_string = factory->NewStringFromStaticChars("(function(){"); - } + source_string = factory->NewStringFromStaticChars("(function(){"); } int scope_position = source_string->length(); @@ -2448,7 +2459,7 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( has_pending_exception = !i::Compiler::GetFunctionFromEval( source_string, outer_info, context, i::SLOPPY, - i::ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos, + i::ONLY_SINGLE_FUNCTION_LITERAL, i::kNoSourcePosition, eval_scope_position, eval_position, line_offset, column_offset - scope_position, name_obj, source->resource_options) .ToHandle(&fun); @@ -2665,8 +2676,9 @@ v8::TryCatch::~TryCatch() { } void* v8::TryCatch::operator new(size_t) { base::OS::Abort(); } - +void* v8::TryCatch::operator new[](size_t) { base::OS::Abort(); } void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); } +void v8::TryCatch::operator delete[](void*, size_t) { base::OS::Abort(); } bool v8::TryCatch::HasCaught() const { return !reinterpret_cast(exception_)->IsTheHole(isolate_); @@ -2994,7 +3006,7 @@ Local StackTrace::AsArray() { frames->set(i, *frame_obj); } return Utils::ToLocal(isolate->factory()->NewJSArrayWithElements( - frames, i::FAST_ELEMENTS, frame_count)); + frames, i::PACKED_ELEMENTS, frame_count)); } @@ -3168,8 +3180,7 @@ bool NativeWeakMap::Delete(Local v8_key) { // --- J S O N --- MaybeLocal JSON::Parse(Isolate* v8_isolate, Local json_string) { - auto isolate = reinterpret_cast(v8_isolate); - PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, JSON, Parse, Value); + PREPARE_FOR_EXECUTION(v8_isolate->GetCurrentContext(), JSON, Parse, Value); i::Handle string = Utils::OpenHandle(*json_string); i::Handle source = i::String::Flatten(string); i::Handle undefined = isolate->factory()->undefined_value(); @@ -3282,7 +3293,9 @@ void ValueSerializer::SetTreatArrayBufferViewsAsHostObjects(bool mode) { Maybe ValueSerializer::WriteValue(Local context, Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, 
ValueSerializer, WriteValue, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, ValueSerializer, WriteValue, Nothing(), + i::HandleScope); i::Handle object = Utils::OpenHandle(*value); Maybe result = private_->serializer.WriteObject(object); has_pending_exception = result.IsNothing(); @@ -3373,7 +3386,9 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate, const uint8_t* data, ValueDeserializer::~ValueDeserializer() { delete private_; } Maybe ValueDeserializer::ReadHeader(Local context) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueDeserializer, ReadHeader, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, ValueDeserializer, ReadHeader, + Nothing(), i::HandleScope); // We could have aborted during the constructor. // If so, ReadHeader is where we report it. @@ -3626,7 +3641,7 @@ bool Value::IsInt32() const { bool Value::IsUint32() const { i::Handle obj = Utils::OpenHandle(this); - if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0; + if (obj->IsSmi()) return i::Smi::ToInt(*obj) >= 0; if (obj->IsNumber()) { double value = obj->Number(); return !i::IsMinusZero(value) && @@ -4067,7 +4082,9 @@ bool Value::BooleanValue() const { Maybe Value::NumberValue(Local context) const { auto obj = Utils::OpenHandle(this); if (obj->IsNumber()) return Just(obj->Number()); - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, NumberValue, double); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Value, NumberValue, Nothing(), + i::HandleScope); i::Handle num; has_pending_exception = !i::Object::ToNumber(obj).ToHandle(&num); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double); @@ -4088,7 +4105,9 @@ Maybe Value::IntegerValue(Local context) const { if (obj->IsNumber()) { return Just(NumberToInt64(*obj)); } - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, IntegerValue, int64_t); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Value, IntegerValue, Nothing(), + i::HandleScope); i::Handle num; has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t); @@ -4100,7 +4119,7 @@ int64_t Value::IntegerValue() const { auto obj = Utils::OpenHandle(this); if (obj->IsNumber()) { if (obj->IsSmi()) { - return i::Smi::cast(*obj)->value(); + return i::Smi::ToInt(*obj); } else { return static_cast(obj->Number()); } @@ -4112,11 +4131,13 @@ int64_t Value::IntegerValue() const { Maybe Value::Int32Value(Local context) const { auto obj = Utils::OpenHandle(this); if (obj->IsNumber()) return Just(NumberToInt32(*obj)); - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Int32Value, int32_t); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Value, Int32Value, Nothing(), + i::HandleScope); i::Handle num; has_pending_exception = !i::Object::ToInt32(isolate, obj).ToHandle(&num); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int32_t); - return Just(num->IsSmi() ? i::Smi::cast(*num)->value() + return Just(num->IsSmi() ? 
i::Smi::ToInt(*num) : static_cast(num->Number())); } @@ -4131,11 +4152,13 @@ int32_t Value::Int32Value() const { Maybe Value::Uint32Value(Local context) const { auto obj = Utils::OpenHandle(this); if (obj->IsNumber()) return Just(NumberToUint32(*obj)); - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Uint32Value, uint32_t); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Value, Uint32Value, Nothing(), + i::HandleScope); i::Handle num; has_pending_exception = !i::Object::ToUint32(isolate, obj).ToHandle(&num); RETURN_ON_FAILED_EXECUTION_PRIMITIVE(uint32_t); - return Just(num->IsSmi() ? static_cast(i::Smi::cast(*num)->value()) + return Just(num->IsSmi() ? static_cast(i::Smi::ToInt(*num)) : static_cast(num->Number())); } @@ -4150,7 +4173,7 @@ uint32_t Value::Uint32Value() const { MaybeLocal Value::ToArrayIndex(Local context) const { auto self = Utils::OpenHandle(this); if (self->IsSmi()) { - if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self); + if (i::Smi::ToInt(*self) >= 0) return Utils::Uint32ToLocal(self); return Local(); } PREPARE_FOR_EXECUTION(context, Object, ToArrayIndex, Uint32); @@ -4176,7 +4199,7 @@ MaybeLocal Value::ToArrayIndex(Local context) const { Local Value::ToArrayIndex() const { auto self = Utils::OpenHandle(this); if (self->IsSmi()) { - if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self); + if (i::Smi::ToInt(*self) >= 0) return Utils::Uint32ToLocal(self); return Local(); } auto context = ContextFromHeapObject(self); @@ -4228,7 +4251,9 @@ Local Value::TypeOf(v8::Isolate* external_isolate) { Maybe Value::InstanceOf(v8::Local context, v8::Local object) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Value, InstanceOf, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Value, InstanceOf, Nothing(), + i::HandleScope); auto left = Utils::OpenHandle(this); auto right = Utils::OpenHandle(*object); i::Handle result; @@ -4240,7 +4265,8 @@ Maybe Value::InstanceOf(v8::Local context, Maybe v8::Object::Set(v8::Local context, v8::Local key, v8::Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, Set, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); auto key_obj = Utils::OpenHandle(*key); auto value_obj = Utils::OpenHandle(*value); @@ -4260,7 +4286,8 @@ bool v8::Object::Set(v8::Local key, v8::Local value) { Maybe v8::Object::Set(v8::Local context, uint32_t index, v8::Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, Set, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); auto value_obj = Utils::OpenHandle(*value); has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj, @@ -4279,7 +4306,9 @@ bool v8::Object::Set(uint32_t index, v8::Local value) { Maybe v8::Object::CreateDataProperty(v8::Local context, v8::Local key, v8::Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing(), + i::HandleScope); i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); @@ -4297,7 +4326,9 @@ Maybe v8::Object::CreateDataProperty(v8::Local context, Maybe 
v8::Object::CreateDataProperty(v8::Local context, uint32_t index, v8::Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing(), + i::HandleScope); i::Handle self = Utils::OpenHandle(this); i::Handle value_obj = Utils::OpenHandle(*value); @@ -4406,7 +4437,9 @@ Maybe v8::Object::DefineOwnProperty(v8::Local context, v8::Local key, v8::Local value, v8::PropertyAttribute attributes) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineOwnProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing(), + i::HandleScope); i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); @@ -4426,7 +4459,9 @@ Maybe v8::Object::DefineOwnProperty(v8::Local context, Maybe v8::Object::DefineProperty(v8::Local context, v8::Local key, PropertyDescriptor& descriptor) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, DefineOwnProperty, Nothing(), + i::HandleScope); i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); @@ -4455,7 +4490,9 @@ static i::MaybeHandle DefineObjectProperty( Maybe v8::Object::ForceSet(v8::Local context, v8::Local key, v8::Local value, v8::PropertyAttribute attribs) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, ForceSet, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, ForceSet, Nothing(), + i::HandleScope); auto self = i::Handle::cast(Utils::OpenHandle(this)); auto key_obj = Utils::OpenHandle(*key); auto value_obj = Utils::OpenHandle(*value); @@ -4468,27 +4505,11 @@ Maybe v8::Object::ForceSet(v8::Local context, } -bool v8::Object::ForceSet(v8::Local key, v8::Local value, - v8::PropertyAttribute attribs) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - PREPARE_FOR_EXECUTION_GENERIC(isolate, Local(), Object, ForceSet, - false, i::HandleScope, false); - i::Handle self = - i::Handle::cast(Utils::OpenHandle(this)); - i::Handle key_obj = Utils::OpenHandle(*key); - i::Handle value_obj = Utils::OpenHandle(*value); - has_pending_exception = - DefineObjectProperty(self, key_obj, value_obj, - static_cast(attribs)) - .is_null(); - EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false); - return true; -} - - Maybe v8::Object::SetPrivate(Local context, Local key, Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrivate, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, SetPrivate, Nothing(), + i::HandleScope); auto self = Utils::OpenHandle(this); auto key_obj = Utils::OpenHandle(reinterpret_cast(*key)); auto value_obj = Utils::OpenHandle(*value); @@ -4556,8 +4577,9 @@ MaybeLocal v8::Object::GetPrivate(Local context, Maybe v8::Object::GetPropertyAttributes( Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, GetPropertyAttributes, - PropertyAttribute); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, GetPropertyAttributes, + Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); auto key_obj = Utils::OpenHandle(*key); if (!key_obj->IsName()) { @@ -4615,7 +4637,9 @@ Local 
v8::Object::GetPrototype() { Maybe v8::Object::SetPrototype(Local context, Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrototype, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, SetPrototype, Nothing(), + i::HandleScope); auto self = Utils::OpenHandle(this); auto value_obj = Utils::OpenHandle(*value); // We do not allow exceptions thrown while setting the prototype @@ -4726,7 +4750,9 @@ Local v8::Object::GetConstructorName() { Maybe v8::Object::SetIntegrityLevel(Local context, IntegrityLevel level) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetIntegrityLevel, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, SetIntegrityLevel, Nothing(), + i::HandleScope); auto self = Utils::OpenHandle(this); i::JSReceiver::IntegrityLevel i_level = level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED; @@ -4738,7 +4764,8 @@ Maybe v8::Object::SetIntegrityLevel(Local context, } Maybe v8::Object::Delete(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Delete, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, Delete, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); auto key_obj = Utils::OpenHandle(*key); Maybe result = @@ -4762,7 +4789,8 @@ Maybe v8::Object::DeletePrivate(Local context, Maybe v8::Object::Has(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Get, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, Has, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); auto key_obj = Utils::OpenHandle(*key); Maybe maybe = Nothing(); @@ -4795,7 +4823,8 @@ Maybe v8::Object::HasPrivate(Local context, Local key) { Maybe v8::Object::Delete(Local context, uint32_t index) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DeleteProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, Delete, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); Maybe result = i::JSReceiver::DeleteElement(self, index); has_pending_exception = result.IsNothing(); @@ -4811,7 +4840,8 @@ bool v8::Object::Delete(uint32_t index) { Maybe v8::Object::Has(Local context, uint32_t index) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Get, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, Has, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); auto maybe = i::JSReceiver::HasElement(self, index); has_pending_exception = maybe.IsNothing(); @@ -4832,7 +4862,9 @@ static Maybe ObjectSetAccessor(Local context, Object* self, AccessControl settings, PropertyAttribute attributes, bool is_special_data_property) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetAccessor, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing(), + i::HandleScope); if (!Utils::OpenHandle(self)->IsJSObject()) return Just(false); i::Handle obj = i::Handle::cast(Utils::OpenHandle(self)); @@ -4916,7 +4948,9 @@ Maybe Object::SetNativeDataProperty(v8::Local context, Maybe v8::Object::HasOwnProperty(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, HasOwnProperty, Nothing(), + 
i::HandleScope); auto self = Utils::OpenHandle(this); auto key_val = Utils::OpenHandle(*key); auto result = i::JSReceiver::HasOwnProperty(self, key_val); @@ -4926,7 +4960,9 @@ Maybe v8::Object::HasOwnProperty(Local context, } Maybe v8::Object::HasOwnProperty(Local context, uint32_t index) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Object, HasOwnProperty, Nothing(), + i::HandleScope); auto self = Utils::OpenHandle(this); auto result = i::JSReceiver::HasOwnProperty(self, index); has_pending_exception = result.IsNothing(); @@ -4942,7 +4978,9 @@ bool v8::Object::HasOwnProperty(Local key) { Maybe v8::Object::HasRealNamedProperty(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedProperty, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, HasRealNamedProperty, + Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); if (!self->IsJSObject()) return Just(false); auto key_val = Utils::OpenHandle(*key); @@ -4962,8 +5000,9 @@ bool v8::Object::HasRealNamedProperty(Local key) { Maybe v8::Object::HasRealIndexedProperty(Local context, uint32_t index) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealIndexedProperty, - bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, HasRealIndexedProperty, + Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); if (!self->IsJSObject()) return Just(false); auto result = i::JSObject::HasRealElementProperty( @@ -4982,8 +5021,9 @@ bool v8::Object::HasRealIndexedProperty(uint32_t index) { Maybe v8::Object::HasRealNamedCallbackProperty(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedCallbackProperty, - bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, HasRealNamedCallbackProperty, + Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); if (!self->IsJSObject()) return Just(false); auto key_val = Utils::OpenHandle(*key); @@ -5048,9 +5088,10 @@ Local v8::Object::GetRealNamedPropertyInPrototypeChain( Maybe v8::Object::GetRealNamedPropertyAttributesInPrototypeChain( Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE( - context, Object, GetRealNamedPropertyAttributesInPrototypeChain, - PropertyAttribute); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, + GetRealNamedPropertyAttributesInPrototypeChain, + Nothing(), i::HandleScope); i::Handle self = Utils::OpenHandle(this); if (!self->IsJSObject()) return Nothing(); i::Handle key_obj = Utils::OpenHandle(*key); @@ -5101,8 +5142,9 @@ Local v8::Object::GetRealNamedProperty(Local key) { Maybe v8::Object::GetRealNamedPropertyAttributes( Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE( - context, Object, GetRealNamedPropertyAttributes, PropertyAttribute); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8_NO_SCRIPT(isolate, context, Object, GetRealNamedPropertyAttributes, + Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); auto key_obj = Utils::OpenHandle(*key); i::LookupIterator it = i::LookupIterator::PropertyOrElement( @@ -5163,9 +5205,10 @@ bool v8::Object::IsConstructor() { MaybeLocal Object::CallAsFunction(Local context, Local recv, int argc, Local argv[]) { - 
PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( - "v8", "V8.Execute", context, Object, CallAsFunction, MaybeLocal(), - InternalEscapableScope, true); + auto isolate = reinterpret_cast(context->GetIsolate()); + TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); + ENTER_V8(isolate, context, Object, CallAsFunction, MaybeLocal(), + InternalEscapableScope); i::TimerEventScope timer_scope(isolate); auto self = Utils::OpenHandle(this); auto recv_obj = Utils::OpenHandle(*recv); @@ -5190,9 +5233,10 @@ Local Object::CallAsFunction(v8::Local recv, int argc, MaybeLocal Object::CallAsConstructor(Local context, int argc, Local argv[]) { - PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( - "v8", "V8.Execute", context, Object, CallAsConstructor, - MaybeLocal(), InternalEscapableScope, true); + auto isolate = reinterpret_cast(context->GetIsolate()); + TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); + ENTER_V8(isolate, context, Object, CallAsConstructor, MaybeLocal(), + InternalEscapableScope); i::TimerEventScope timer_scope(isolate); auto self = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Object**)); @@ -5241,9 +5285,10 @@ Local Function::NewInstance() const { MaybeLocal Function::NewInstance(Local context, int argc, v8::Local argv[]) const { - PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( - "v8", "V8.Execute", context, Function, NewInstance, MaybeLocal(), - InternalEscapableScope, true); + auto isolate = reinterpret_cast(context->GetIsolate()); + TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); + ENTER_V8(isolate, context, Function, NewInstance, MaybeLocal(), + InternalEscapableScope); i::TimerEventScope timer_scope(isolate); auto self = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Local) == sizeof(i::Object**)); @@ -5266,9 +5311,10 @@ Local Function::NewInstance(int argc, MaybeLocal Function::Call(Local context, v8::Local recv, int argc, v8::Local argv[]) { - PREPARE_FOR_EXECUTION_WITH_CONTEXT_IN_RUNTIME_CALL_STATS_SCOPE( - "v8", "V8.Execute", context, Function, Call, MaybeLocal(), - InternalEscapableScope, true); + auto isolate = reinterpret_cast(context->GetIsolate()); + TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); + ENTER_V8(isolate, context, Function, Call, MaybeLocal(), + InternalEscapableScope); i::TimerEventScope timer_scope(isolate); auto self = Utils::OpenHandle(this); i::Handle recv_obj = Utils::OpenHandle(*recv); @@ -5743,7 +5789,6 @@ class Utf8LengthHelper : public i::AllStatic { } } UNREACHABLE(); - return 0; } static inline int Calculate(i::ConsString* current) { @@ -6160,7 +6205,7 @@ bool Boolean::Value() const { int64_t Integer::Value() const { i::Handle obj = Utils::OpenHandle(this); if (obj->IsSmi()) { - return i::Smi::cast(*obj)->value(); + return i::Smi::ToInt(*obj); } else { return static_cast(obj->Number()); } @@ -6170,7 +6215,7 @@ int64_t Integer::Value() const { int32_t Int32::Value() const { i::Handle obj = Utils::OpenHandle(this); if (obj->IsSmi()) { - return i::Smi::cast(*obj)->value(); + return i::Smi::ToInt(*obj); } else { return static_cast(obj->Number()); } @@ -6180,7 +6225,7 @@ int32_t Int32::Value() const { uint32_t Uint32::Value() const { i::Handle obj = Utils::OpenHandle(this); if (obj->IsSmi()) { - return i::Smi::cast(*obj)->value(); + return i::Smi::ToInt(*obj); } else { return static_cast(obj->Number()); } @@ -6286,12 +6331,16 @@ bool v8::V8::Initialize() { return true; } -#if V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID +#if 
V8_OS_POSIX bool V8::TryHandleSignal(int signum, void* info, void* context) { +#if V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID return v8::internal::trap_handler::TryHandleSignal( signum, static_cast(info), static_cast(context)); +#else // V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID + return false; +#endif } -#endif // V8_OS_LINUX +#endif bool V8::RegisterDefaultSignalHandler() { return v8::internal::trap_handler::RegisterDefaultSignalHandler(); @@ -6500,6 +6549,11 @@ Local NewContext( v8::MaybeLocal global_object, size_t context_snapshot_index, v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) { i::Isolate* isolate = reinterpret_cast(external_isolate); + // TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't + // fail. + // Sanity-check that the isolate is initialized and usable. + CHECK(isolate->builtins()->Illegal()->IsCode()); + TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext"); LOG_API(isolate, Context, New); i::HandleScope scope(isolate); @@ -7155,8 +7209,7 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) { DCHECK_EQ(1, date_cache_version->length()); CHECK(date_cache_version->get(0)->IsSmi()); date_cache_version->set( - 0, - i::Smi::FromInt(i::Smi::cast(date_cache_version->get(0))->value() + 1)); + 0, i::Smi::FromInt(i::Smi::ToInt(date_cache_version->get(0)) + 1)); } @@ -7222,7 +7275,7 @@ uint32_t v8::Array::Length() const { i::Handle obj = Utils::OpenHandle(this); i::Object* length = obj->length(); if (length->IsSmi()) { - return i::Smi::cast(length)->value(); + return i::Smi::ToInt(length); } else { return static_cast(length->Number()); } @@ -7233,7 +7286,7 @@ MaybeLocal Array::CloneElementAt(Local context, uint32_t index) { PREPARE_FOR_EXECUTION(context, Array, CloneElementAt, Object); auto self = Utils::OpenHandle(this); - if (!self->HasFastObjectElements()) return Local(); + if (!self->HasObjectElements()) return Local(); i::FixedArray* elms = i::FixedArray::cast(self->elements()); i::Object* paragon = elms->get(index); if (!paragon->IsJSObject()) return Local(); @@ -7304,7 +7357,8 @@ MaybeLocal Map::Set(Local context, Local key, Maybe Map::Has(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Has, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Map, Has, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); i::Handle result; i::Handle argv[] = {Utils::OpenHandle(*key)}; @@ -7317,7 +7371,8 @@ Maybe Map::Has(Local context, Local key) { Maybe Map::Delete(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Delete, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Map, Delete, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); i::Handle result; i::Handle argv[] = {Utils::OpenHandle(*key)}; @@ -7329,13 +7384,20 @@ Maybe Map::Delete(Local context, Local key) { } namespace { + +enum class MapAsArrayKind { + kEntries = i::JS_MAP_KEY_VALUE_ITERATOR_TYPE, + kKeys = i::JS_MAP_KEY_ITERATOR_TYPE, + kValues = i::JS_MAP_VALUE_ITERATOR_TYPE +}; + i::Handle MapAsArray(i::Isolate* isolate, i::Object* table_obj, - int offset, int kind) { + int offset, MapAsArrayKind kind) { i::Factory* factory = isolate->factory(); i::Handle table(i::OrderedHashMap::cast(table_obj)); if (offset >= table->NumberOfElements()) return factory->NewJSArray(0); int length = (table->NumberOfElements() - offset) * - (kind == i::JSMapIterator::kKindEntries ? 
2 : 1); + (kind == MapAsArrayKind::kEntries ? 2 : 1); i::Handle result = factory->NewFixedArray(length); int result_index = 0; { @@ -7346,20 +7408,19 @@ i::Handle MapAsArray(i::Isolate* isolate, i::Object* table_obj, i::Object* key = table->KeyAt(i); if (key == the_hole) continue; if (offset-- > 0) continue; - if (kind == i::JSMapIterator::kKindEntries || - kind == i::JSMapIterator::kKindKeys) { + if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kKeys) { result->set(result_index++, key); } - if (kind == i::JSMapIterator::kKindEntries || - kind == i::JSMapIterator::kKindValues) { + if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kValues) { result->set(result_index++, table->ValueAt(i)); } } } DCHECK_EQ(result_index, result->length()); DCHECK_EQ(result_index, length); - return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length); + return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length); } + } // namespace Local Map::AsArray() const { @@ -7368,7 +7429,7 @@ Local Map::AsArray() const { LOG_API(isolate, Map, AsArray); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); return Utils::ToLocal( - MapAsArray(isolate, obj->table(), 0, i::JSMapIterator::kKindEntries)); + MapAsArray(isolate, obj->table(), 0, MapAsArrayKind::kEntries)); } @@ -7410,7 +7471,8 @@ MaybeLocal Set::Add(Local context, Local key) { Maybe Set::Has(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Has, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Set, Has, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); i::Handle result; i::Handle argv[] = {Utils::OpenHandle(*key)}; @@ -7423,7 +7485,8 @@ Maybe Set::Has(Local context, Local key) { Maybe Set::Delete(Local context, Local key) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Delete, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Set, Delete, Nothing(), i::HandleScope); auto self = Utils::OpenHandle(this); i::Handle result; i::Handle argv[] = {Utils::OpenHandle(*key)}; @@ -7456,7 +7519,7 @@ i::Handle SetAsArray(i::Isolate* isolate, i::Object* table_obj, } DCHECK_EQ(result_index, result->length()); DCHECK_EQ(result_index, length); - return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length); + return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length); } } // namespace @@ -7495,7 +7558,9 @@ Local Promise::Resolver::GetPromise() { Maybe Promise::Resolver::Resolve(Local context, Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Promise_Resolver, Resolve, Nothing(), + i::HandleScope); auto self = Utils::OpenHandle(this); i::Handle argv[] = {self, Utils::OpenHandle(*value)}; has_pending_exception = @@ -7516,7 +7581,9 @@ void Promise::Resolver::Resolve(Local value) { Maybe Promise::Resolver::Reject(Local context, Local value) { - PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool); + auto isolate = reinterpret_cast(context->GetIsolate()); + ENTER_V8(isolate, context, Promise_Resolver, Reject, Nothing(), + i::HandleScope); auto self = Utils::OpenHandle(this); // We pass true to trigger the debugger's on exception handler. 
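The Map, Set and Promise::Resolver entry points in this range keep their Maybe<bool> contract while moving from PREPARE_FOR_EXECUTION_PRIMITIVE to the ENTER_V8 form, so embedder code is unaffected. For reference, a small hedged sketch of consuming these Maybe-returning calls (the local names are illustrative only):

    #include "v8.h"

    static void UseMaybeReturningApis(v8::Local<v8::Context> context,
                                      v8::Local<v8::Map> map,
                                      v8::Local<v8::Promise::Resolver> resolver,
                                      v8::Local<v8::Value> key,
                                      v8::Local<v8::Value> value) {
      // A thrown exception surfaces as Nothing<bool>() rather than aborting.
      if (map->Has(context, key).FromMaybe(false)) {
        map->Delete(context, key).FromMaybe(false);
      }
      // Resolver::Resolve/Reject also answer through Maybe<bool>.
      if (resolver->Resolve(context, value).IsNothing()) {
        // Exception pending on the isolate; inspect it with v8::TryCatch.
      }
    }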
@@ -7648,10 +7715,10 @@ MaybeLocal Proxy::New(Local context, Local local_target, } Local WasmCompiledModule::GetWasmWireBytes() { - i::Handle obj = - i::Handle::cast(Utils::OpenHandle(this)); + i::Handle obj = + i::Handle::cast(Utils::OpenHandle(this)); i::Handle compiled_part = - i::handle(i::WasmCompiledModule::cast(obj->GetEmbedderField(0))); + i::handle(i::WasmCompiledModule::cast(obj->compiled_module())); i::Handle wire_bytes(compiled_part->module_bytes()); return Local::Cast(Utils::ToLocal(wire_bytes)); } @@ -7695,10 +7762,10 @@ MaybeLocal WasmCompiledModule::FromTransferrableModule( } WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() { - i::Handle obj = - i::Handle::cast(Utils::OpenHandle(this)); + i::Handle obj = + i::Handle::cast(Utils::OpenHandle(this)); i::Handle compiled_part = - i::handle(i::WasmCompiledModule::cast(obj->GetEmbedderField(0))); + i::handle(i::WasmCompiledModule::cast(obj->compiled_module())); std::unique_ptr script_data = i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(), @@ -7815,6 +7882,11 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() { i::Handle self = Utils::OpenHandle(this); size_t byte_length = static_cast(self->byte_length()->Number()); Contents contents; + contents.allocation_base_ = self->allocation_base(); + contents.allocation_length_ = self->allocation_length(); + contents.allocation_mode_ = self->has_guard_region() + ? Allocator::AllocationMode::kReservation + : Allocator::AllocationMode::kNormal; contents.data_ = self->backing_store(); contents.byte_length_ = byte_length; return contents; @@ -8023,6 +8095,12 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() { Contents contents; contents.data_ = self->backing_store(); contents.byte_length_ = byte_length; + // SharedArrayBuffers never have guard regions, so their allocation and data + // are equivalent. 
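The ArrayBuffer::GetContents() change above starts reporting the allocation base, allocation length and allocation mode so that buffers backed by a guard-region reservation can be released correctly; Externalize() returns the same Contents structure. The consuming side might look like the sketch below. It assumes the matching Contents accessors (AllocationBase, AllocationLength, AllocationMode) and the three-argument Allocator::Free overload in v8.h, so treat the exact spellings as an assumption rather than a guarantee.

    #include "v8.h"

    // Sketch: free an externalized backing store using the new metadata.
    static void ReleaseBackingStore(v8::Local<v8::ArrayBuffer> buffer,
                                    v8::ArrayBuffer::Allocator* allocator) {
      v8::ArrayBuffer::Contents contents = buffer->Externalize();
      using Mode = v8::ArrayBuffer::Allocator::AllocationMode;
      if (contents.AllocationMode() == Mode::kReservation) {
        // Guard-region buffers must be released over the whole reservation,
        // not just the usable [Data(), Data() + ByteLength()) range.
        allocator->Free(contents.AllocationBase(),
                        contents.AllocationLength(), Mode::kReservation);
      } else {
        allocator->Free(contents.Data(), contents.ByteLength());
      }
    }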
+ contents.allocation_base_ = self->backing_store(); + contents.allocation_length_ = byte_length; + contents.allocation_mode_ = + ArrayBufferAllocator::Allocator::AllocationMode::kNormal; return contents; } @@ -8179,6 +8257,11 @@ void Isolate::ReportExternalAllocationLimitReached() { heap->ReportExternalMemoryPressure(); } +void Isolate::CheckMemoryPressure() { + i::Heap* heap = reinterpret_cast(this)->heap(); + if (heap->gc_state() != i::Heap::NOT_IN_GC) return; + heap->CheckMemoryPressure(); +} HeapProfiler* Isolate::GetHeapProfiler() { i::HeapProfiler* heap_profiler = @@ -8239,6 +8322,12 @@ v8::Local Isolate::GetEnteredOrMicrotaskContext() { return Utils::ToLocal(i::Handle::cast(last)); } +v8::Local Isolate::GetIncumbentContext() { + i::Isolate* isolate = reinterpret_cast(this); + i::Handle context = isolate->GetIncumbentContext(); + return Utils::ToLocal(context); +} + v8::Local Isolate::ThrowException(v8::Local value) { i::Isolate* isolate = reinterpret_cast(this); ENTER_V8_DO_NOT_USE(isolate); @@ -8294,6 +8383,12 @@ void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) { isolate->heap()->SetEmbedderHeapTracer(tracer); } +void Isolate::SetGetExternallyAllocatedMemoryInBytesCallback( + GetExternallyAllocatedMemoryInBytesCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->heap()->SetGetExternallyAllocatedMemoryInBytesCallback(callback); +} + void Isolate::TerminateExecution() { i::Isolate* isolate = reinterpret_cast(this); isolate->stack_guard()->RequestTerminateExecution(); @@ -8392,16 +8487,17 @@ Isolate* IsolateNewImpl(internal::Isolate* isolate, isolate->set_api_external_references(params.external_references); isolate->set_allow_atomics_wait(params.allow_atomics_wait); - if (params.host_import_module_dynamically_callback_ != nullptr) { - isolate->SetHostImportModuleDynamicallyCallback( - params.host_import_module_dynamically_callback_); - } - SetResourceConstraints(isolate, params.constraints); // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this. 
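Among the isolate additions in this range (CheckMemoryPressure, GetIncumbentContext, the external-memory callback), SetGetExternallyAllocatedMemoryInBytesCallback lets the heap poll the embedder for how much off-heap memory is currently live. A minimal wiring sketch, where g_external_bytes stands in for whatever accounting the embedder already keeps:

    #include <atomic>
    #include "v8.h"

    // Hypothetical embedder-side counter of externally allocated bytes.
    static std::atomic<size_t> g_external_bytes{0};

    static size_t GetExternallyAllocatedBytes() {
      return g_external_bytes.load();
    }

    static void InstallExternalMemoryCallback(v8::Isolate* isolate) {
      // The heap consults this callback when weighing memory pressure.
      isolate->SetGetExternallyAllocatedMemoryInBytesCallback(
          GetExternallyAllocatedBytes);
    }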
Isolate::Scope isolate_scope(v8_isolate); if (params.entry_hook || !i::Snapshot::Initialize(isolate)) { + base::ElapsedTimer timer; + if (i::FLAG_profile_deserialization) timer.Start(); isolate->Init(NULL); + if (i::FLAG_profile_deserialization) { + double ms = timer.Elapsed().InMillisecondsF(); + i::PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms); + } } return v8_isolate; } @@ -8446,6 +8542,11 @@ void Isolate::SetAbortOnUncaughtExceptionCallback( isolate->SetAbortOnUncaughtExceptionCallback(callback); } +void Isolate::SetHostImportModuleDynamicallyCallback( + HostImportModuleDynamicallyCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->SetHostImportModuleDynamicallyCallback(callback); +} Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope( Isolate* isolate, @@ -8739,25 +8840,20 @@ void Isolate::SetUseCounterCallback(UseCounterCallback callback) { void Isolate::SetCounterFunction(CounterLookupCallback callback) { i::Isolate* isolate = reinterpret_cast(this); - isolate->stats_table()->SetCounterFunction(callback); - isolate->InitializeLoggingAndCounters(); - isolate->counters()->ResetCounters(); + isolate->counters()->ResetCounterFunction(callback); } void Isolate::SetCreateHistogramFunction(CreateHistogramCallback callback) { i::Isolate* isolate = reinterpret_cast(this); - isolate->stats_table()->SetCreateHistogramFunction(callback); - isolate->InitializeLoggingAndCounters(); - isolate->counters()->ResetHistograms(); - isolate->counters()->InitializeHistograms(); + isolate->counters()->ResetCreateHistogramFunction(callback); } void Isolate::SetAddHistogramSampleFunction( AddHistogramSampleCallback callback) { reinterpret_cast(this) - ->stats_table() + ->counters() ->SetAddHistogramSampleFunction(callback); } @@ -8893,6 +8989,13 @@ void Isolate::SetAllowCodeGenerationFromStringsCallback( isolate->set_allow_code_gen_callback(callback); } +void Isolate::SetAllowCodeGenerationFromStringsCallback( + DeprecatedAllowCodeGenerationFromStringsCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->set_allow_code_gen_callback( + reinterpret_cast(callback)); +} + #define CALLBACK_SETTER(ExternalName, Type, InternalName) \ void Isolate::Set##ExternalName(Type callback) { \ i::Isolate* isolate = reinterpret_cast(this); \ @@ -8900,10 +9003,10 @@ void Isolate::SetAllowCodeGenerationFromStringsCallback( } CALLBACK_SETTER(WasmModuleCallback, ExtensionCallback, wasm_module_callback) -CALLBACK_SETTER(WasmCompileCallback, ExtensionCallback, wasm_compile_callback) CALLBACK_SETTER(WasmInstanceCallback, ExtensionCallback, wasm_instance_callback) -CALLBACK_SETTER(WasmInstantiateCallback, ExtensionCallback, - wasm_instantiate_callback) + +CALLBACK_SETTER(WasmCompileStreamingCallback, ApiImplementationCallback, + wasm_compile_streaming_callback) bool Isolate::IsDead() { i::Isolate* isolate = reinterpret_cast(this); @@ -9212,14 +9315,9 @@ void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) { debug::SetLiveEditEnabled(isolate, enable); } -bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) { - i::Isolate* internal_isolate = reinterpret_cast(isolate); - return internal_isolate->is_tail_call_elimination_enabled(); -} +bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) { return false; } void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) { - i::Isolate* internal_isolate = reinterpret_cast(isolate); - internal_isolate->SetTailCallEliminationEnabled(enabled); } MaybeLocal 
Debug::GetInternalProperties(Isolate* v8_isolate, @@ -9233,7 +9331,7 @@ void debug::SetContextId(Local context, int id) { int debug::GetContextId(Local context) { i::Object* value = Utils::OpenHandle(*context)->debug_context_id(); - return (value->IsSmi()) ? i::Smi::cast(value)->value() : 0; + return (value->IsSmi()) ? i::Smi::ToInt(value) : 0; } Local debug::GetDebugContext(Isolate* isolate) { @@ -9413,7 +9511,7 @@ Maybe debug::Script::ContextId() const { i::HandleScope handle_scope(isolate); i::Handle script = Utils::OpenHandle(this); i::Object* value = script->context_data(); - if (value->IsSmi()) return Just(i::Smi::cast(value)->value()); + if (value->IsSmi()) return Just(i::Smi::ToInt(value)); return Nothing(); } @@ -9437,7 +9535,7 @@ bool debug::Script::IsModule() const { namespace { int GetSmiValue(i::Handle array, int index) { - return i::Smi::cast(array->get(index))->value(); + return i::Smi::ToInt(array->get(index)); } bool CompareBreakLocation(const i::BreakLocation& loc1, @@ -9573,10 +9671,10 @@ std::pair debug::WasmScript::GetFunctionRange( DCHECK_GT(compiled_module->module()->functions.size(), function_index); i::wasm::WasmFunction& func = compiled_module->module()->functions[function_index]; - DCHECK_GE(i::kMaxInt, func.code_start_offset); - DCHECK_GE(i::kMaxInt, func.code_end_offset); - return std::make_pair(static_cast(func.code_start_offset), - static_cast(func.code_end_offset)); + DCHECK_GE(i::kMaxInt, func.code.offset()); + DCHECK_GE(i::kMaxInt, func.code.end_offset()); + return std::make_pair(static_cast(func.code.offset()), + static_cast(func.code.end_offset())); } debug::WasmDisassembly debug::WasmScript::DisassembleFunction( @@ -9618,7 +9716,7 @@ void debug::GetLoadedScripts(v8::Isolate* v8_isolate, i::Isolate* isolate = reinterpret_cast(v8_isolate); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); // TODO(kozyatinskiy): remove this GC once tests are dealt with. 
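A large share of this commit is the mechanical move from i::Smi::cast(x)->value() to the new i::Smi::ToInt(x) helper, visible again here in debug::GetContextId, debug::Script::ContextId and GetSmiValue. For readers outside V8 internals, the self-contained sketch below shows the tagged small-integer idea the helper wraps; it uses a simplified tagging scheme, not V8's actual per-platform layout.

    #include <cstdint>

    // Simplified "Smi" tagging: the low bit is 0 for small integers, so the
    // payload lives in the upper bits and untagging is a single shift.
    constexpr int kSmiTagSize = 1;

    inline intptr_t IntToSmi(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }

    inline int32_t SmiToInt(intptr_t tagged) {
      // The moral equivalent of i::Smi::ToInt: strip the tag in one step
      // instead of the old cast-then-value() chain.
      return static_cast<int32_t>(tagged >> kSmiTagSize);
    }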
- isolate->heap()->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask, + isolate->heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask, i::GarbageCollectionReason::kDebugger); { i::DisallowHeapAllocation no_gc; @@ -9707,19 +9805,19 @@ v8::MaybeLocal debug::EntriesPreview(Isolate* v8_isolate, if (object->IsJSMapIterator()) { i::Handle iterator = i::Handle::cast(object); - int iterator_kind = i::Smi::cast(iterator->kind())->value(); - *is_key_value = iterator_kind == i::JSMapIterator::kKindEntries; + MapAsArrayKind const kind = + static_cast(iterator->map()->instance_type()); + *is_key_value = kind == MapAsArrayKind::kEntries; if (!iterator->HasMore()) return v8::Array::New(v8_isolate); return Utils::ToLocal(MapAsArray(isolate, iterator->table(), - i::Smi::cast(iterator->index())->value(), - iterator_kind)); + i::Smi::ToInt(iterator->index()), kind)); } if (object->IsJSSetIterator()) { i::Handle it = i::Handle::cast(object); *is_key_value = false; if (!it->HasMore()) return v8::Array::New(v8_isolate); return Utils::ToLocal( - SetAsArray(isolate, it->table(), i::Smi::cast(it->index())->value())); + SetAsArray(isolate, it->table(), i::Smi::ToInt(it->index()))); } return v8::MaybeLocal(); } @@ -9745,11 +9843,13 @@ Local debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) { case kObjectGetOwnPropertySymbols: name = i::Builtins::kObjectGetOwnPropertySymbols; break; + default: + UNREACHABLE(); } i::Handle call_code(isolate->builtins()->builtin(name)); i::Handle fun = isolate->factory()->NewFunctionWithoutPrototype( - isolate->factory()->empty_string(), call_code, false); + isolate->factory()->empty_string(), call_code, i::SLOPPY); fun->shared()->DontAdaptArguments(); return Utils::ToLocal(handle_scope.CloseAndEscape(fun)); } @@ -9774,6 +9874,18 @@ int debug::GetStackFrameId(v8::Local frame) { return Utils::OpenHandle(*frame)->id(); } +v8::Local debug::GetDetailedStackTrace( + Isolate* v8_isolate, v8::Local v8_error) { + i::Isolate* isolate = reinterpret_cast(v8_isolate); + i::Handle error = Utils::OpenHandle(*v8_error); + if (!error->IsJSObject()) { + return v8::Local(); + } + i::Handle stack_trace = + isolate->GetDetailedStackTrace(i::Handle::cast(error)); + return Utils::StackTraceToLocal(stack_trace); +} + MaybeLocal debug::GeneratorObject::Script() { i::Handle obj = Utils::OpenHandle(this); i::Object* maybe_script = obj->function()->shared()->script(); @@ -9826,6 +9938,10 @@ Local CpuProfileNode::GetFunctionName() const { } } +int debug::Coverage::BlockData::StartOffset() const { return block_->start; } +int debug::Coverage::BlockData::EndOffset() const { return block_->end; } +uint32_t debug::Coverage::BlockData::Count() const { return block_->count; } + int debug::Coverage::FunctionData::StartOffset() const { return function_->start; } @@ -9838,6 +9954,19 @@ MaybeLocal debug::Coverage::FunctionData::Name() const { return ToApiHandle(function_->name); } +size_t debug::Coverage::FunctionData::BlockCount() const { + return function_->blocks.size(); +} + +bool debug::Coverage::FunctionData::HasBlockCoverage() const { + return function_->has_block_coverage; +} + +debug::Coverage::BlockData debug::Coverage::FunctionData::GetBlockData( + size_t i) const { + return BlockData(&function_->blocks.at(i)); +} + Local debug::Coverage::ScriptData::GetScript() const { return ToApiHandle(script_->script); } diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 3b97e04fb288ab..e856a4408ce756 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -111,8 +111,7 @@ class RegisteredExtension 
{ V(NativeWeakMap, JSWeakMap) \ V(debug::GeneratorObject, JSGeneratorObject) \ V(debug::Script, Script) \ - V(Promise, JSPromise) \ - V(DynamicImportResult, JSPromise) + V(Promise, JSPromise) class Utils { public: @@ -186,8 +185,6 @@ class Utils { v8::internal::Handle obj); static inline Local PromiseToLocal( v8::internal::Handle obj); - static inline Local PromiseToDynamicImportResult( - v8::internal::Handle obj); static inline Local StackTraceToLocal( v8::internal::Handle obj); static inline Local StackFrameToLocal( @@ -320,7 +317,6 @@ MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature) MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature) MAKE_TO_LOCAL(MessageToLocal, Object, Message) MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise) -MAKE_TO_LOCAL(PromiseToDynamicImportResult, JSPromise, DynamicImportResult) MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace) MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame) MAKE_TO_LOCAL(NumberToLocal, Object, Number) diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index 1d91b20b2bd2b6..f3fcb8edb0508a 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -50,9 +50,7 @@ class Arguments BASE_EMBEDDED { return Handle(reinterpret_cast(value)); } - int smi_at(int index) { - return Smi::cast((*this)[index])->value(); - } + int smi_at(int index) { return Smi::ToInt((*this)[index]); } double number_at(int index) { return (*this)[index]->Number(); diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index b5a59bb4764694..52218cc8ce92b0 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -280,7 +280,7 @@ void RelocInfo::Visit(Heap* heap) { Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) { rm_ = no_reg; - imm32_ = immediate; + value_.immediate = immediate; rmode_ = rmode; } @@ -288,14 +288,14 @@ Operand Operand::Zero() { return Operand(static_cast(0)); } Operand::Operand(const ExternalReference& f) { rm_ = no_reg; - imm32_ = reinterpret_cast(f.address()); + value_.immediate = reinterpret_cast(f.address()); rmode_ = RelocInfo::EXTERNAL_REFERENCE; } Operand::Operand(Smi* value) { rm_ = no_reg; - imm32_ = reinterpret_cast(value); + value_.immediate = reinterpret_cast(value); rmode_ = RelocInfo::NONE32; } @@ -400,11 +400,7 @@ void Assembler::deserialization_set_target_internal_reference_at( bool Assembler::is_constant_pool_load(Address pc) { - if (CpuFeatures::IsSupported(ARMv7)) { - return !Assembler::IsMovW(Memory::int32_at(pc)); - } else { - return !Assembler::IsMovImmed(Memory::int32_at(pc)); - } + return IsLdrPcImmediateOffset(Memory::int32_at(pc)); } diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 6932e973796b2c..876af4e6191e8e 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -42,6 +42,7 @@ #include "src/assembler-inl.h" #include "src/base/bits.h" #include "src/base/cpu.h" +#include "src/code-stubs.h" #include "src/macro-assembler.h" #include "src/objects-inl.h" @@ -372,19 +373,10 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size, // Implementation of Operand and MemOperand // See assembler-arm-inl.h for inlined constructors -Operand::Operand(Handle handle) { - AllowDeferredHandleDereference using_raw_address; +Operand::Operand(Handle handle) { rm_ = no_reg; - // Verify all Objects referred by code are NOT in new space. 
- Object* obj = *handle; - if (obj->IsHeapObject()) { - imm32_ = reinterpret_cast(handle.location()); - rmode_ = RelocInfo::EMBEDDED_OBJECT; - } else { - // no relocation needed - imm32_ = reinterpret_cast(obj); - rmode_ = RelocInfo::NONE32; - } + value_.immediate = reinterpret_cast(handle.address()); + rmode_ = RelocInfo::EMBEDDED_OBJECT; } @@ -417,6 +409,21 @@ Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { rs_ = rs; } +Operand Operand::EmbeddedNumber(double value) { + int32_t smi; + if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi)); + Operand result(0, RelocInfo::EMBEDDED_OBJECT); + result.is_heap_object_request_ = true; + result.value_.heap_object_request = HeapObjectRequest(value); + return result; +} + +Operand Operand::EmbeddedCode(CodeStub* stub) { + Operand result(0, RelocInfo::CODE_TARGET); + result.is_heap_object_request_ = true; + result.value_.heap_object_request = HeapObjectRequest(stub); + return result; +} MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { rn_ = rn; @@ -488,6 +495,25 @@ void NeonMemOperand::SetAlignment(int align) { } } +void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { + for (auto& request : heap_object_requests_) { + Handle object; + switch (request.kind()) { + case HeapObjectRequest::kHeapNumber: + object = isolate->factory()->NewHeapNumber(request.heap_number(), + IMMUTABLE, TENURED); + break; + case HeapObjectRequest::kCodeStub: + request.code_stub()->set_isolate(isolate); + object = request.code_stub()->GetCode(); + break; + } + Address pc = buffer_ + request.offset(); + Memory::Address_at(constant_pool_entry_address(pc, 0 /* unused */)) = + object.address(); + } +} + // ----------------------------------------------------------------------------- // Specific instructions, constants, and masks. @@ -542,19 +568,19 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000; Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size) : AssemblerBase(isolate_data, buffer, buffer_size), - recorded_ast_id_(TypeFeedbackId::None()), pending_32_bit_constants_(), - pending_64_bit_constants_() { + pending_64_bit_constants_(), + scratch_register_list_(ip.bit()) { pending_32_bit_constants_.reserve(kMinNumPendingConstants); pending_64_bit_constants_.reserve(kMinNumPendingConstants); reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); next_buffer_check_ = 0; + code_target_sharing_blocked_nesting_ = 0; const_pool_blocked_nesting_ = 0; no_const_pool_before_ = 0; first_const_pool_32_use_ = -1; first_const_pool_64_use_ = -1; last_bound_pos_ = 0; - ClearRecordedAstId(); if (CpuFeatures::IsSupported(VFP32DREGS)) { // Register objects tend to be abstracted and survive between scopes, so // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make @@ -565,16 +591,19 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size) Assembler::~Assembler() { - DCHECK(const_pool_blocked_nesting_ == 0); + DCHECK_EQ(const_pool_blocked_nesting_, 0); + DCHECK_EQ(code_target_sharing_blocked_nesting_, 0); } - -void Assembler::GetCode(CodeDesc* desc) { +void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) { // Emit constant pool if necessary. int constant_pool_offset = 0; CheckConstPool(true, false); DCHECK(pending_32_bit_constants_.empty()); DCHECK(pending_64_bit_constants_.empty()); + + AllocateAndInstallRequestedHeapObjects(isolate); + // Set up code descriptor. 
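GetCode() now takes the Isolate and finishes by calling AllocateAndInstallRequestedHeapObjects(): operands built with Operand::EmbeddedNumber() or EmbeddedCode() only record a HeapObjectRequest plus the pc offset of their constant-pool slot during assembly, and the real heap objects are allocated and patched in once an Isolate is available. A stripped-down sketch of that deferred-patching pattern follows; the types here are illustrative stand-ins, not V8's own.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Illustrative stand-in for the assembler-side bookkeeping.
    struct PendingObjectRequest {
      double number;     // payload to materialize later
      size_t pc_offset;  // where the placeholder slot lives in the buffer
    };

    struct SketchAssembler {
      std::vector<uint8_t> buffer;
      std::vector<PendingObjectRequest> requests;

      // During assembly: emit a placeholder and remember what belongs there.
      void EmitEmbeddedNumber(double value) {
        requests.push_back({value, buffer.size()});
        buffer.insert(buffer.end(), sizeof(void*), 0);  // placeholder slot
      }

      // At GetCode() time, once an allocator/isolate exists: allocate the
      // objects and patch their addresses into the recorded slots.
      template <typename AllocateFn>
      void InstallRequestedObjects(AllocateFn allocate) {
        for (const PendingObjectRequest& request : requests) {
          void* object = allocate(request.number);
          std::memcpy(&buffer[request.pc_offset], &object, sizeof(object));
        }
      }
    };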
desc->buffer = buffer_; desc->buffer_size = buffer_size_; @@ -589,7 +618,7 @@ void Assembler::GetCode(CodeDesc* desc) { void Assembler::Align(int m) { - DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m)); + DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m)); DCHECK((pc_offset() & (kInstrSize - 1)) == 0); while ((pc_offset() & (m - 1)) != 0) { nop(); @@ -1033,15 +1062,14 @@ void Assembler::next(Label* L) { } } +namespace { // Low-level code emission routines depending on the addressing mode. // If this returns true then you have to use the rotate_imm and immed_8 // that it returns, because it may have already changed the instruction // to match them! -static bool fits_shifter(uint32_t imm32, - uint32_t* rotate_imm, - uint32_t* immed_8, - Instr* instr) { +bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8, + Instr* instr) { // imm32 must be unsigned. for (int rot = 0; rot < 16; rot++) { uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot); @@ -1055,7 +1083,7 @@ static bool fits_shifter(uint32_t imm32, // immediate fits, change the opcode. if (instr != NULL) { if ((*instr & kMovMvnMask) == kMovMvnPattern) { - if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { + if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) { *instr ^= kMovMvnFlip; return true; } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { @@ -1069,7 +1097,7 @@ static bool fits_shifter(uint32_t imm32, } } } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { - if (fits_shifter(-static_cast(imm32), rotate_imm, immed_8, NULL)) { + if (FitsShifter(-static_cast(imm32), rotate_imm, immed_8, NULL)) { *instr ^= kCmpCmnFlip; return true; } @@ -1077,13 +1105,13 @@ static bool fits_shifter(uint32_t imm32, Instr alu_insn = (*instr & kALUMask); if (alu_insn == ADD || alu_insn == SUB) { - if (fits_shifter(-static_cast(imm32), rotate_imm, immed_8, NULL)) { + if (FitsShifter(-static_cast(imm32), rotate_imm, immed_8, NULL)) { *instr ^= kAddSubFlip; return true; } } else if (alu_insn == AND || alu_insn == BIC) { - if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { + if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) { *instr ^= kAndBicFlip; return true; } @@ -1093,26 +1121,23 @@ static bool fits_shifter(uint32_t imm32, return false; } - // We have to use the temporary register for things that can be relocated even // if they can be encoded in the ARM's 12 bits of immediate-offset instruction // space. There is no guarantee that the relocated location can be similarly // encoded. -bool Operand::must_output_reloc_info(const Assembler* assembler) const { - if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { +bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) { + if (rmode == RelocInfo::EXTERNAL_REFERENCE) { if (assembler != NULL && assembler->predictable_code_size()) return true; return assembler->serializer_enabled(); - } else if (RelocInfo::IsNone(rmode_)) { + } else if (RelocInfo::IsNone(rmode)) { return false; } return true; } - -static bool use_mov_immediate_load(const Operand& x, - const Assembler* assembler) { +bool UseMovImmediateLoad(const Operand& x, const Assembler* assembler) { DCHECK(assembler != nullptr); - if (x.must_output_reloc_info(assembler)) { + if (x.MustOutputRelocInfo(assembler)) { // Prefer constant pool if data is likely to be patched. 
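fits_shifter becomes the file-local FitsShifter, but the encoding rule it tests is unchanged: an ARM data-processing immediate must be an 8-bit value rotated right by an even amount. A standalone restatement of that check, independent of the Assembler plumbing:

    #include <cstdint>

    // True if imm32 can be encoded as an ARM "operand 2" immediate:
    // an 8-bit constant rotated right by an even shift in [0, 30].
    inline bool FitsArmImmediate(uint32_t imm32) {
      for (int rot = 0; rot < 32; rot += 2) {
        // Rotating the value left by 'rot' undoes an encoding that rotates
        // the 8-bit field right by 'rot'.
        uint32_t rotated = (imm32 << rot) | (imm32 >> ((32 - rot) & 31));
        if (rotated <= 0xff) return true;
      }
      return false;
    }

Values that fail this test fall back to a movw/movt pair or a constant-pool load, which is exactly the choice UseMovImmediateLoad above makes.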
return false; } else { @@ -1121,21 +1146,27 @@ static bool use_mov_immediate_load(const Operand& x, } } +} // namespace -int Operand::instructions_required(const Assembler* assembler, - Instr instr) const { +bool Operand::MustOutputRelocInfo(const Assembler* assembler) const { + return v8::internal::MustOutputRelocInfo(rmode_, assembler); +} + +int Operand::InstructionsRequired(const Assembler* assembler, + Instr instr) const { DCHECK(assembler != nullptr); if (rm_.is_valid()) return 1; uint32_t dummy1, dummy2; - if (must_output_reloc_info(assembler) || - !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { + if (MustOutputRelocInfo(assembler) || + !FitsShifter(immediate(), &dummy1, &dummy2, &instr)) { // The immediate operand cannot be encoded as a shifter operand, or use of // constant pool is required. First account for the instructions required // for the constant pool or immediate load int instructions; - if (use_mov_immediate_load(*this, assembler)) { - // A movw / movt or mov / orr immediate load. - instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4; + if (UseMovImmediateLoad(*this, assembler)) { + DCHECK(CpuFeatures::IsSupported(ARMv7)); + // A movw / movt immediate load. + instructions = 2; } else { // A small constant pool load. instructions = 1; @@ -1154,22 +1185,18 @@ int Operand::instructions_required(const Assembler* assembler, } } - -void Assembler::move_32_bit_immediate(Register rd, - const Operand& x, - Condition cond) { - uint32_t imm32 = static_cast(x.imm32_); - if (x.must_output_reloc_info(this)) { - RecordRelocInfo(x.rmode_); - } - - if (use_mov_immediate_load(x, this)) { - // use_mov_immediate_load should return false when we need to output +void Assembler::Move32BitImmediate(Register rd, const Operand& x, + Condition cond) { + if (UseMovImmediateLoad(x, this)) { + // UseMovImmediateLoad should return false when we need to output // relocation info, since we prefer the constant pool for values that // can be patched. - DCHECK(!x.must_output_reloc_info(this)); - Register target = rd.code() == pc.code() ? ip : rd; + DCHECK(!x.MustOutputRelocInfo(this)); + UseScratchRegisterScope temps(this); + // Re-use the destination register as a scratch if possible. + Register target = !rd.is(pc) ? 
rd : temps.Acquire(); if (CpuFeatures::IsSupported(ARMv7)) { + uint32_t imm32 = static_cast(x.immediate()); CpuFeatureScope scope(this, ARMv7); movw(target, imm32 & 0xffff, cond); movt(target, imm32 >> 16, cond); @@ -1178,59 +1205,100 @@ void Assembler::move_32_bit_immediate(Register rd, mov(rd, target, LeaveCC, cond); } } else { - ConstantPoolEntry::Access access = - ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_); - DCHECK(access == ConstantPoolEntry::REGULAR); - USE(access); + int32_t immediate; + if (x.IsHeapObjectRequest()) { + RequestHeapObject(x.heap_object_request()); + immediate = 0; + } else { + immediate = x.immediate(); + } + ConstantPoolAddEntry(pc_offset(), x.rmode_, immediate); ldr(rd, MemOperand(pc, 0), cond); } } - -void Assembler::addrmod1(Instr instr, - Register rn, - Register rd, - const Operand& x) { +void Assembler::AddrMode1(Instr instr, Register rd, Register rn, + const Operand& x) { CheckBuffer(); + uint32_t opcode = instr & kOpCodeMask; + bool set_flags = (instr & S) != 0; + DCHECK((opcode == ADC) || (opcode == ADD) || (opcode == AND) || + (opcode == BIC) || (opcode == EOR) || (opcode == ORR) || + (opcode == RSB) || (opcode == RSC) || (opcode == SBC) || + (opcode == SUB) || (opcode == CMN) || (opcode == CMP) || + (opcode == TEQ) || (opcode == TST) || (opcode == MOV) || + (opcode == MVN)); + // For comparison instructions, rd is not defined. + DCHECK(rd.is_valid() || (opcode == CMN) || (opcode == CMP) || + (opcode == TEQ) || (opcode == TST)); + // For move instructions, rn is not defined. + DCHECK(rn.is_valid() || (opcode == MOV) || (opcode == MVN)); + DCHECK(rd.is_valid() || rn.is_valid()); DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0); - if (!x.rm_.is_valid()) { - // Immediate. - uint32_t rotate_imm; - uint32_t immed_8; - if (x.must_output_reloc_info(this) || - !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { + if (!AddrMode1TryEncodeOperand(&instr, x)) { + DCHECK(x.IsImmediate()); + // Upon failure to encode, the opcode should not have changed. + DCHECK(opcode == (instr & kOpCodeMask)); + Condition cond = Instruction::ConditionField(instr); + if ((opcode == MOV) && !set_flags) { + // Generate a sequence of mov instructions or a load from the constant + // pool only for a MOV instruction which does not set the flags. + DCHECK(!rn.is_valid()); + Move32BitImmediate(rd, x, cond); + } else { // The immediate operand cannot be encoded as a shifter operand, so load - // it first to register ip and change the original instruction to use ip. - // However, if the original instruction is a 'mov rd, x' (not setting the - // condition code), then replace it with a 'ldr rd, [pc]'. - CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed - Condition cond = Instruction::ConditionField(instr); - if ((instr & ~kCondMask) == 13*B21) { // mov, S not set - move_32_bit_immediate(rd, x, cond); - } else { - mov(ip, x, LeaveCC, cond); - addrmod1(instr, rn, rd, Operand(ip)); - } - return; + // it first to a scratch register and change the original instruction to + // use it. + UseScratchRegisterScope temps(this); + // Re-use the destination register if possible. + Register scratch = + (rd.is_valid() && !rd.is(rn) && !rd.is(pc)) ? rd : temps.Acquire(); + mov(scratch, x, LeaveCC, cond); + AddrMode1(instr, rd, rn, Operand(scratch)); } - instr |= I | rotate_imm*B8 | immed_8; - } else if (!x.rs_.is_valid()) { - // Immediate shift. - instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); + return; + } + if (!rd.is_valid()) { + // Emit a comparison instruction. 
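When UseMovImmediateLoad applies, the rewritten Move32BitImmediate always emits an ARMv7 movw/movt pair: movw zero-extends the low 16 bits into the register and movt rewrites bits 31..16. A tiny sketch of that split, independent of the assembler:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t imm32 = 0xDEADBEEFu;
      uint16_t movw_imm = imm32 & 0xffff;  // movw rd, #0xBEEF zero-extends into rd
      uint16_t movt_imm = imm32 >> 16;     // movt rd, #0xDEAD rewrites bits 31..16
      uint32_t rebuilt = (static_cast<uint32_t>(movt_imm) << 16) | movw_imm;
      std::printf("movw #0x%04X, movt #0x%04X -> 0x%08X\n",
                  movw_imm, movt_imm, rebuilt);
    }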
+ emit(instr | rn.code() * B16); + } else if (!rn.is_valid()) { + // Emit a move instruction. If the operand is a register-shifted register, + // then prevent the destination from being PC as this is unpredictable. + DCHECK(!x.IsRegisterShiftedRegister() || !rd.is(pc)); + emit(instr | rd.code() * B12); } else { - // Register shift. - DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); - instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); + emit(instr | rn.code() * B16 | rd.code() * B12); } - emit(instr | rn.code()*B16 | rd.code()*B12); if (rn.is(pc) || x.rm_.is(pc)) { // Block constant pool emission for one instruction after reading pc. BlockConstPoolFor(1); } } +bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) { + if (x.IsImmediate()) { + // Immediate. + uint32_t rotate_imm; + uint32_t immed_8; + if (x.MustOutputRelocInfo(this) || + !FitsShifter(x.immediate(), &rotate_imm, &immed_8, instr)) { + // Let the caller handle generating multiple instructions. + return false; + } + *instr |= I | rotate_imm * B8 | immed_8; + } else if (x.IsImmediateShiftedRegister()) { + *instr |= x.shift_imm_ * B7 | x.shift_op_ | x.rm_.code(); + } else { + DCHECK(x.IsRegisterShiftedRegister()); + // It is unpredictable to use the PC in this case. + DCHECK(!x.rm_.is(pc) && !x.rs_.is(pc)); + *instr |= x.rs_.code() * B8 | x.shift_op_ | B4 | x.rm_.code(); + } -void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { + return true; +} + +void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) { DCHECK((instr & ~(kCondMask | B | L)) == B26); int am = x.am_; if (!x.rm_.is_valid()) { @@ -1241,11 +1309,16 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { am ^= U; } if (!is_uint12(offset_12)) { - // Immediate offset cannot be encoded, load it first to register ip - // rn (and rd in a load) should never be ip, or will be trashed. - DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); - mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); - addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); + // Immediate offset cannot be encoded, load it first to a scratch + // register. + UseScratchRegisterScope temps(this); + // Allow re-using rd for load instructions if possible. + bool is_load = (instr & L) == L; + Register scratch = + (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire(); + mov(scratch, Operand(x.offset_), LeaveCC, + Instruction::ConditionField(instr)); + AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_)); return; } DCHECK(offset_12 >= 0); // no masking needed @@ -1261,11 +1334,11 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); } - -void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { +void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) { DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7)); DCHECK(x.rn_.is_valid()); int am = x.am_; + bool is_load = (instr & L) == L; if (!x.rm_.is_valid()) { // Immediate offset. int offset_8 = x.offset_; @@ -1274,22 +1347,29 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { am ^= U; } if (!is_uint8(offset_8)) { - // Immediate offset cannot be encoded, load it first to register ip - // rn (and rd in a load) should never be ip, or will be trashed. 
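AddrMode2 stores the magnitude of the offset in 12 bits plus a U (add/subtract) bit, so anything outside +/-4095 bytes is now materialized into a scratch register, re-using rd for loads when it differs from the base and from pc. A hedged mirror of that fit check; FitsAddrMode2 is illustrative, not a V8 function.

    #include <cstdint>
    #include <cstdio>

    // The encoding keeps |offset| in 12 bits plus a U bit for the sign, so
    // anything outside +/-4095 has to go via a scratch register.
    static bool FitsAddrMode2(int32_t offset, uint32_t* offset_12, bool* add) {
      *add = offset >= 0;
      uint32_t magnitude = *add ? static_cast<uint32_t>(offset)
                                : static_cast<uint32_t>(-int64_t(offset));
      if (magnitude > 0xfff) return false;  // is_uint12 failed: use a scratch reg
      *offset_12 = magnitude;
      return true;
    }

    int main() {
      uint32_t enc;
      bool add;
      std::printf("4095:  %d\n", FitsAddrMode2(4095, &enc, &add));   // 1: fits
      std::printf("-4096: %d\n", FitsAddrMode2(-4096, &enc, &add));  // 0: needs scratch
    }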
- DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); - mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); - addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); + // Immediate offset cannot be encoded, load it first to a scratch + // register. + UseScratchRegisterScope temps(this); + // Allow re-using rd for load instructions if possible. + Register scratch = + (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire(); + mov(scratch, Operand(x.offset_), LeaveCC, + Instruction::ConditionField(instr)); + AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_)); return; } DCHECK(offset_8 >= 0); // no masking needed instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); } else if (x.shift_imm_ != 0) { - // Scaled register offset not supported, load index first - // rn (and rd in a load) should never be ip, or will be trashed. - DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); - mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, + // Scaled register offsets are not supported, compute the offset seperately + // to a scratch register. + UseScratchRegisterScope temps(this); + // Allow re-using rd for load instructions if possible. + Register scratch = + (is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire(); + mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, Instruction::ConditionField(instr)); - addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); + AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_)); return; } else { // Register offset. @@ -1300,16 +1380,14 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); } - -void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { +void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) { DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27); DCHECK(rl != 0); DCHECK(!rn.is(pc)); emit(instr | rn.code()*B16 | rl); } - -void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { +void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) { // Unindexed addressing is not encoded by this function. DCHECK_EQ((B27 | B26), (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L))); @@ -1325,7 +1403,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback - // Post-indexed addressing requires W == 1; different than in addrmod2/3. + // Post-indexed addressing requires W == 1; different than in AddrMode2/3. 
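AddrMode3 (halfword, signed-byte and doubleword accesses) only carries an 8-bit immediate, split into a high nibble in bits 11..8 and a low nibble in bits 3..0, which is what (offset_8 >> 4)*B8 | (offset_8 & 0xf) encodes; larger offsets and scaled register offsets go through a scratch register instead. A small sketch of the nibble split, assuming B8 == 1 << 8 as in the assembler's bit constants:

    #include <cstdint>
    #include <cstdio>

    // Split an AddrMode3 immediate (0..255) into the instruction's two nibble
    // fields: imm4H lives in bits 11..8, imm4L in bits 3..0. Illustrative only.
    static uint32_t EncodeAddrMode3Offset(uint32_t offset_8) {
      const uint32_t B8 = 1u << 8;
      return (offset_8 >> 4) * B8 | (offset_8 & 0xf);
    }

    int main() {
      uint32_t bits = EncodeAddrMode3Offset(0xAB);
      std::printf("imm4H=0x%X imm4L=0x%X\n", (bits >> 8) & 0xf, bits & 0xf);  // A and B
    }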
if ((am & P) == 0) am |= W; @@ -1419,19 +1497,19 @@ void Assembler::blx(Label* L) { void Assembler::and_(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | AND | s, src1, dst, src2); + AddrMode1(cond | AND | s, dst, src1, src2); } void Assembler::eor(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | EOR | s, src1, dst, src2); + AddrMode1(cond | EOR | s, dst, src1, src2); } void Assembler::sub(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | SUB | s, src1, dst, src2); + AddrMode1(cond | SUB | s, dst, src1, src2); } void Assembler::sub(Register dst, Register src1, Register src2, SBit s, @@ -1441,13 +1519,13 @@ void Assembler::sub(Register dst, Register src1, Register src2, SBit s, void Assembler::rsb(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | RSB | s, src1, dst, src2); + AddrMode1(cond | RSB | s, dst, src1, src2); } void Assembler::add(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | ADD | s, src1, dst, src2); + AddrMode1(cond | ADD | s, dst, src1, src2); } void Assembler::add(Register dst, Register src1, Register src2, SBit s, @@ -1457,24 +1535,24 @@ void Assembler::add(Register dst, Register src1, Register src2, SBit s, void Assembler::adc(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | ADC | s, src1, dst, src2); + AddrMode1(cond | ADC | s, dst, src1, src2); } void Assembler::sbc(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | SBC | s, src1, dst, src2); + AddrMode1(cond | SBC | s, dst, src1, src2); } void Assembler::rsc(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | RSC | s, src1, dst, src2); + AddrMode1(cond | RSC | s, dst, src1, src2); } void Assembler::tst(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | TST | S, src1, r0, src2); + AddrMode1(cond | TST | S, no_reg, src1, src2); } void Assembler::tst(Register src1, Register src2, Condition cond) { @@ -1482,12 +1560,12 @@ void Assembler::tst(Register src1, Register src2, Condition cond) { } void Assembler::teq(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | TEQ | S, src1, r0, src2); + AddrMode1(cond | TEQ | S, no_reg, src1, src2); } void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | CMP | S, src1, r0, src2); + AddrMode1(cond | CMP | S, no_reg, src1, src2); } void Assembler::cmp(Register src1, Register src2, Condition cond) { @@ -1502,13 +1580,13 @@ void Assembler::cmp_raw_immediate( void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | CMN | S, src1, r0, src2); + AddrMode1(cond | CMN | S, no_reg, src1, src2); } void Assembler::orr(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | ORR | s, src1, dst, src2); + AddrMode1(cond | ORR | s, dst, src1, src2); } void Assembler::orr(Register dst, Register src1, Register src2, SBit s, @@ -1520,8 +1598,8 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { // Don't allow nop instructions in the form mov rn, rn to be generated using // the mov instruction. They must be generated using nop(int/NopMarkerTypes) // or MarkCode(int/NopMarkerTypes) pseudo instructions. 
- DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); - addrmod1(cond | MOV | s, r0, dst, src); + DCHECK(!(src.IsRegister() && src.rm().is(dst) && s == LeaveCC && cond == al)); + AddrMode1(cond | MOV | s, dst, no_reg, src); } void Assembler::mov(Register dst, Register src, SBit s, Condition cond) { @@ -1581,17 +1659,17 @@ void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { void Assembler::bic(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - addrmod1(cond | BIC | s, src1, dst, src2); + AddrMode1(cond | BIC | s, dst, src1, src2); } void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { - addrmod1(cond | MVN | s, r0, dst, src); + AddrMode1(cond | MVN | s, dst, no_reg, src); } void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - if (src2.is_reg()) { + if (src2.IsRegister()) { mov(dst, Operand(src1, ASR, src2.rm()), s, cond); } else { mov(dst, Operand(src1, ASR, src2.immediate()), s, cond); @@ -1600,7 +1678,7 @@ void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s, void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - if (src2.is_reg()) { + if (src2.IsRegister()) { mov(dst, Operand(src1, LSL, src2.rm()), s, cond); } else { mov(dst, Operand(src1, LSL, src2.immediate()), s, cond); @@ -1609,7 +1687,7 @@ void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s, void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s, Condition cond) { - if (src2.is_reg()) { + if (src2.IsRegister()) { mov(dst, Operand(src1, LSR, src2.rm()), s, cond); } else { mov(dst, Operand(src1, LSR, src2.immediate()), s, cond); @@ -1745,8 +1823,8 @@ void Assembler::usat(Register dst, Condition cond) { DCHECK(!dst.is(pc) && !src.rm_.is(pc)); DCHECK((satpos >= 0) && (satpos <= 31)); + DCHECK(src.IsImmediateShiftedRegister()); DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL)); - DCHECK(src.rs_.is(no_reg)); int sh = 0; if (src.shift_op_ == ASR) { @@ -1839,9 +1917,8 @@ void Assembler::pkhbt(Register dst, // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0) DCHECK(!dst.is(pc)); DCHECK(!src1.is(pc)); + DCHECK(src2.IsImmediateShiftedRegister()); DCHECK(!src2.rm().is(pc)); - DCHECK(!src2.rm().is(no_reg)); - DCHECK(src2.rs().is(no_reg)); DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31)); DCHECK(src2.shift_op() == LSL); emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 | @@ -1858,9 +1935,8 @@ void Assembler::pkhtb(Register dst, // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0) DCHECK(!dst.is(pc)); DCHECK(!src1.is(pc)); + DCHECK(src2.IsImmediateShiftedRegister()); DCHECK(!src2.rm().is(pc)); - DCHECK(!src2.rm().is(no_reg)); - DCHECK(src2.rs().is(no_reg)); DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32)); DCHECK(src2.shift_op() == ASR); int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_; @@ -2007,20 +2083,23 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, DCHECK((fields & 0x000f0000) != 0); // At least one field must be set. DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR)); Instr instr; - if (!src.rm_.is_valid()) { + if (src.IsImmediate()) { // Immediate. uint32_t rotate_imm; uint32_t immed_8; - if (src.must_output_reloc_info(this) || - !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { - // Immediate operand cannot be encoded, load it first to register ip. 
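The asr/lsl/lsr emitters above remain thin wrappers that issue a MOV with a shifted operand; only the predicate name changed to IsRegister(). The sketch below only illustrates the three shift semantics such an operand encodes (two's-complement behaviour assumed); it emits no instructions.

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t bits = 0xF0000004u;
      uint32_t lsl = bits << 2;                      // logical shift left
      uint32_t lsr = bits >> 2;                      // logical right: zero-fill
      uint32_t asr = static_cast<uint32_t>(
          static_cast<int32_t>(bits) >> 2);          // arithmetic right: sign-fill
      std::printf("lsl=0x%08X lsr=0x%08X asr=0x%08X\n", lsl, lsr, asr);
    }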
- move_32_bit_immediate(ip, src); - msr(fields, Operand(ip), cond); + if (src.MustOutputRelocInfo(this) || + !FitsShifter(src.immediate(), &rotate_imm, &immed_8, NULL)) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // Immediate operand cannot be encoded, load it first to a scratch + // register. + Move32BitImmediate(scratch, src); + msr(fields, Operand(scratch), cond); return; } instr = I | rotate_imm*B8 | immed_8; } else { - DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed + DCHECK(src.IsRegister()); // Only rm is allowed. instr = src.rm_.code(); } emit(cond | instr | B24 | B21 | fields | 15*B12); @@ -2029,42 +2108,42 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, // Load/Store instructions. void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { - addrmod2(cond | B26 | L, dst, src); + AddrMode2(cond | B26 | L, dst, src); } void Assembler::str(Register src, const MemOperand& dst, Condition cond) { - addrmod2(cond | B26, src, dst); + AddrMode2(cond | B26, src, dst); } void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) { - addrmod2(cond | B26 | B | L, dst, src); + AddrMode2(cond | B26 | B | L, dst, src); } void Assembler::strb(Register src, const MemOperand& dst, Condition cond) { - addrmod2(cond | B26 | B, src, dst); + AddrMode2(cond | B26 | B, src, dst); } void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) { - addrmod3(cond | L | B7 | H | B4, dst, src); + AddrMode3(cond | L | B7 | H | B4, dst, src); } void Assembler::strh(Register src, const MemOperand& dst, Condition cond) { - addrmod3(cond | B7 | H | B4, src, dst); + AddrMode3(cond | B7 | H | B4, src, dst); } void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) { - addrmod3(cond | L | B7 | S6 | B4, dst, src); + AddrMode3(cond | L | B7 | S6 | B4, dst, src); } void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { - addrmod3(cond | L | B7 | S6 | H | B4, dst, src); + AddrMode3(cond | L | B7 | S6 | H | B4, dst, src); } @@ -2074,7 +2153,7 @@ void Assembler::ldrd(Register dst1, Register dst2, DCHECK(!dst1.is(lr)); // r14. DCHECK_EQ(0, dst1.code() % 2); DCHECK_EQ(dst1.code() + 1, dst2.code()); - addrmod3(cond | B7 | B6 | B4, dst1, src); + AddrMode3(cond | B7 | B6 | B4, dst1, src); } @@ -2084,7 +2163,7 @@ void Assembler::strd(Register src1, Register src2, DCHECK(!src1.is(lr)); // r14. DCHECK_EQ(0, src1.code() % 2); DCHECK_EQ(src1.code() + 1, src2.code()); - addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); + AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst); } // Load/Store exclusive instructions. @@ -2162,7 +2241,7 @@ void Assembler::ldm(BlockAddrMode am, // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. DCHECK(base.is(sp) || (dst & sp.bit()) == 0); - addrmod4(cond | B27 | am | L, base, dst); + AddrMode4(cond | B27 | am | L, base, dst); // Emit the constant pool after a function return implemented by ldm ..{..pc}. 
if (cond == al && (dst & pc.bit()) != 0) { @@ -2180,7 +2259,7 @@ void Assembler::stm(BlockAddrMode am, Register base, RegList src, Condition cond) { - addrmod4(cond | B27 | am, base, src); + AddrMode4(cond | B27 | am, base, src); } @@ -2318,7 +2397,7 @@ void Assembler::ldc(Coprocessor coproc, const MemOperand& src, LFlag l, Condition cond) { - addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src); + AddrMode5(cond | B27 | B26 | l | L | coproc * B8, crd, src); } @@ -2370,15 +2449,18 @@ void Assembler::vldr(const DwVfpRegister dst, emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 | 0xB*B8 | ((offset / 4) & 255)); } else { - // Larger offsets must be handled by computing the correct address - // in the ip register. - DCHECK(!base.is(ip)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // Larger offsets must be handled by computing the correct address in a + // scratch register. + DCHECK(!base.is(scratch)); if (u == 1) { - add(ip, base, Operand(offset)); + add(scratch, base, Operand(offset)); } else { - sub(ip, base, Operand(offset)); + sub(scratch, base, Operand(offset)); } - emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8); + emit(cond | 0xD * B24 | d * B22 | B20 | scratch.code() * B16 | vd * B12 | + 0xB * B8); } } @@ -2389,9 +2471,11 @@ void Assembler::vldr(const DwVfpRegister dst, DCHECK(VfpRegisterIsAvailable(dst)); DCHECK(operand.am_ == Offset); if (operand.rm().is_valid()) { - add(ip, operand.rn(), + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + add(scratch, operand.rn(), Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); - vldr(dst, ip, 0, cond); + vldr(dst, scratch, 0, cond); } else { vldr(dst, operand.rn(), operand.offset(), cond); } @@ -2419,15 +2503,18 @@ void Assembler::vldr(const SwVfpRegister dst, emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); } else { - // Larger offsets must be handled by computing the correct address - // in the ip register. - DCHECK(!base.is(ip)); + // Larger offsets must be handled by computing the correct address in a + // scratch register. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(!base.is(scratch)); if (u == 1) { - add(ip, base, Operand(offset)); + add(scratch, base, Operand(offset)); } else { - sub(ip, base, Operand(offset)); + sub(scratch, base, Operand(offset)); } - emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + emit(cond | d * B22 | 0xD1 * B20 | scratch.code() * B16 | sd * B12 | + 0xA * B8); } } @@ -2437,9 +2524,11 @@ void Assembler::vldr(const SwVfpRegister dst, const Condition cond) { DCHECK(operand.am_ == Offset); if (operand.rm().is_valid()) { - add(ip, operand.rn(), + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + add(scratch, operand.rn(), Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); - vldr(dst, ip, 0, cond); + vldr(dst, scratch, 0, cond); } else { vldr(dst, operand.rn(), operand.offset(), cond); } @@ -2469,15 +2558,18 @@ void Assembler::vstr(const DwVfpRegister src, emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 | ((offset / 4) & 255)); } else { - // Larger offsets must be handled by computing the correct address - // in the ip register. - DCHECK(!base.is(ip)); + // Larger offsets must be handled by computing the correct address in the a + // scratch register. 
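The vldr/vstr hunks above encode the immediate as an unsigned 8-bit count of words plus a U bit, so only multiples of 4 up to 1020 bytes are reachable directly; larger offsets are first added to or subtracted from the base in a scratch register. A hedged mirror of that reachability check; FitsVfpOffset is an illustrative name, not a V8 helper.

    #include <cstdint>
    #include <cstdio>

    // Directly reachable vldr/vstr offsets are multiples of 4 in [-1020, 1020].
    static bool FitsVfpOffset(int32_t offset, uint32_t* u_bit, uint32_t* imm8) {
      *u_bit = offset >= 0 ? 1 : 0;
      uint32_t magnitude = offset >= 0 ? static_cast<uint32_t>(offset)
                                       : static_cast<uint32_t>(-int64_t(offset));
      if (magnitude % 4 != 0 || magnitude / 4 > 255) return false;  // use a scratch reg
      *imm8 = magnitude / 4;
      return true;
    }

    int main() {
      uint32_t u, imm8;
      std::printf("1020: %d\n", FitsVfpOffset(1020, &u, &imm8));  // 1: imm8 == 255
      std::printf("1024: %d\n", FitsVfpOffset(1024, &u, &imm8));  // 0: too far
    }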
+ UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(!base.is(scratch)); if (u == 1) { - add(ip, base, Operand(offset)); + add(scratch, base, Operand(offset)); } else { - sub(ip, base, Operand(offset)); + sub(scratch, base, Operand(offset)); } - emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8); + emit(cond | 0xD * B24 | d * B22 | scratch.code() * B16 | vd * B12 | + 0xB * B8); } } @@ -2488,9 +2580,11 @@ void Assembler::vstr(const DwVfpRegister src, DCHECK(VfpRegisterIsAvailable(src)); DCHECK(operand.am_ == Offset); if (operand.rm().is_valid()) { - add(ip, operand.rn(), + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + add(scratch, operand.rn(), Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); - vstr(src, ip, 0, cond); + vstr(src, scratch, 0, cond); } else { vstr(src, operand.rn(), operand.offset(), cond); } @@ -2518,15 +2612,18 @@ void Assembler::vstr(const SwVfpRegister src, emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); } else { - // Larger offsets must be handled by computing the correct address - // in the ip register. - DCHECK(!base.is(ip)); + // Larger offsets must be handled by computing the correct address in a + // scratch register. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(!base.is(scratch)); if (u == 1) { - add(ip, base, Operand(offset)); + add(scratch, base, Operand(offset)); } else { - sub(ip, base, Operand(offset)); + sub(scratch, base, Operand(offset)); } - emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + emit(cond | d * B22 | 0xD0 * B20 | scratch.code() * B16 | sd * B12 | + 0xA * B8); } } @@ -2536,9 +2633,11 @@ void Assembler::vstr(const SwVfpRegister src, const Condition cond) { DCHECK(operand.am_ == Offset); if (operand.rm().is_valid()) { - add(ip, operand.rn(), + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + add(scratch, operand.rn(), Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); - vstr(src, ip, 0, cond); + vstr(src, scratch, 0, cond); } else { vstr(src, operand.rn(), operand.offset(), cond); } @@ -2612,19 +2711,16 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first, 0xA*B8 | count); } - -static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { - uint64_t i; - memcpy(&i, &d, 8); +static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) { + uint64_t i = d.AsUint64(); *lo = i & 0xffffffff; *hi = i >> 32; } - // Only works for little endian floating point formats. // We don't support VFP on the mixed endian floating point platform. -static bool FitsVmovFPImmediate(double d, uint32_t* encoding) { +static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) { // VMOV can accept an immediate of the form: // // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7 @@ -2670,10 +2766,10 @@ static bool FitsVmovFPImmediate(double d, uint32_t* encoding) { return true; } - -void Assembler::vmov(const SwVfpRegister dst, float imm) { +void Assembler::vmov(const SwVfpRegister dst, Float32 imm) { uint32_t enc; - if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) { + if (CpuFeatures::IsSupported(VFPv3) && + FitsVmovFPImmediate(Double(imm.get_scalar()), &enc)) { CpuFeatureScope scope(this, VFPv3); // The float can be encoded in the instruction. 
// @@ -2685,17 +2781,16 @@ void Assembler::vmov(const SwVfpRegister dst, float imm) { dst.split_code(&vd, &d); emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc); } else { - mov(ip, Operand(bit_cast(imm))); - vmov(dst, ip); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + mov(scratch, Operand(imm.get_bits())); + vmov(dst, scratch); } } - -void Assembler::vmov(const DwVfpRegister dst, - double imm, - const Register scratch) { +void Assembler::vmov(const DwVfpRegister dst, Double imm, + const Register extra_scratch) { DCHECK(VfpRegisterIsAvailable(dst)); - DCHECK(!scratch.is(ip)); uint32_t enc; if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) { CpuFeatureScope scope(this, VFPv3); @@ -2725,42 +2820,42 @@ void Assembler::vmov(const DwVfpRegister dst, // The code could also randomize the order of values, though // that's tricky because vldr has a limited reach. Furthermore // it breaks load locality. - ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm); - DCHECK(access == ConstantPoolEntry::REGULAR); - USE(access); + ConstantPoolAddEntry(pc_offset(), imm); vldr(dst, MemOperand(pc, 0)); } else { // Synthesise the double from ARM immediates. uint32_t lo, hi; DoubleAsTwoUInt32(imm, &lo, &hi); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); if (lo == hi) { // Move the low and high parts of the double to a D register in one // instruction. - mov(ip, Operand(lo)); - vmov(dst, ip, ip); - } else if (scratch.is(no_reg)) { - mov(ip, Operand(lo)); - vmov(dst, VmovIndexLo, ip); + mov(scratch, Operand(lo)); + vmov(dst, scratch, scratch); + } else if (extra_scratch.is(no_reg)) { + // We only have one spare scratch register. + mov(scratch, Operand(lo)); + vmov(dst, VmovIndexLo, scratch); if (((lo & 0xffff) == (hi & 0xffff)) && CpuFeatures::IsSupported(ARMv7)) { CpuFeatureScope scope(this, ARMv7); - movt(ip, hi >> 16); + movt(scratch, hi >> 16); } else { - mov(ip, Operand(hi)); + mov(scratch, Operand(hi)); } - vmov(dst, VmovIndexHi, ip); + vmov(dst, VmovIndexHi, scratch); } else { // Move the low and high parts of the double to a D register in one // instruction. 
- mov(ip, Operand(lo)); - mov(scratch, Operand(hi)); - vmov(dst, ip, scratch); + mov(scratch, Operand(lo)); + mov(extra_scratch, Operand(hi)); + vmov(dst, scratch, extra_scratch); } } } - void Assembler::vmov(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond) { @@ -2898,7 +2993,6 @@ static bool IsSignedVFPType(VFPType type) { return false; default: UNREACHABLE(); - return false; } } @@ -2913,7 +3007,6 @@ static bool IsIntegerVFPType(VFPType type) { return false; default: UNREACHABLE(); - return false; } } @@ -2926,7 +3019,6 @@ static bool IsDoubleVFPType(VFPType type) { return true; default: UNREACHABLE(); - return false; } } @@ -4887,7 +4979,7 @@ int Assembler::DecodeShiftImm(Instr instr) { Instr Assembler::PatchShiftImm(Instr instr, int immed) { uint32_t rotate_imm = 0; uint32_t immed_8 = 0; - bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL); + bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, NULL); DCHECK(immed_fits); USE(immed_fits); return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8; @@ -4915,7 +5007,7 @@ bool Assembler::IsOrrImmed(Instr instr) { bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { uint32_t dummy1; uint32_t dummy2; - return fits_shifter(imm32, &dummy1, &dummy2, NULL); + return FitsShifter(imm32, &dummy1, &dummy2, NULL); } @@ -4945,9 +5037,7 @@ void Assembler::GrowBuffer() { // Some internal data structures overflow for very large buffers, // they must ensure that kMaximalBufferSize is not too large. - if (desc.buffer_size > kMaximalBufferSize || - static_cast(desc.buffer_size) > - isolate_data().max_old_generation_size_) { + if (desc.buffer_size > kMaximalBufferSize) { V8::FatalProcessOutOfMemory("Assembler::GrowBuffer"); } @@ -5019,7 +5109,6 @@ void Assembler::emit_code_stub_address(Code* stub) { pc_ += sizeof(uint32_t); } - void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { if (RelocInfo::IsNone(rmode) || // Don't record external references unless the heap will be serialized. @@ -5028,49 +5117,90 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { return; } DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here - if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - data = RecordedAstId().ToInt(); - ClearRecordedAstId(); - } RelocInfo rinfo(pc_, rmode, data, NULL); reloc_info_writer.Write(&rinfo); } - -ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position, - RelocInfo::Mode rmode, - intptr_t value) { +void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, + intptr_t value) { DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64); bool sharing_ok = RelocInfo::IsNone(rmode) || - !(serializer_enabled() || rmode < RelocInfo::CELL); + (rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE); DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants); if (pending_32_bit_constants_.empty()) { first_const_pool_32_use_ = position; } - ConstantPoolEntry entry(position, value, sharing_ok); + ConstantPoolEntry entry(position, value, + sharing_ok || (rmode == RelocInfo::CODE_TARGET && + IsCodeTargetSharingAllowed())); + + bool shared = false; + if (sharing_ok) { + // Merge the constant, if possible. 
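In the fallback path above, a double that fits neither the VFPv3 immediate form nor the constant-pool path is synthesized from its two 32-bit halves moved into the D register (a single mov suffices when the halves are equal). A standalone sketch of the DoubleAsTwoUInt32 split:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 1.5;
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));       // reinterpret, like AsUint64()
      uint32_t lo = static_cast<uint32_t>(bits);  // goes into the low word
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // goes into the high word
      // 1.5 == 0x3FF8000000000000, so only the high word is non-zero here.
      std::printf("lo=0x%08X hi=0x%08X\n", lo, hi);
    }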
+ for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) { + ConstantPoolEntry& current_entry = pending_32_bit_constants_[i]; + if (!current_entry.sharing_ok()) continue; + if (entry.value() == current_entry.value()) { + entry.set_merged_index(i); + shared = true; + break; + } + } + } + + // Share entries if allowed and possible. + // Null-values are placeholders and must be ignored. + if (rmode == RelocInfo::CODE_TARGET && IsCodeTargetSharingAllowed() && + value != 0) { + // Sharing entries here relies on canonicalized handles - without them, we + // will miss the optimisation opportunity. + Address handle_address = reinterpret_cast
(value); + auto existing = handle_to_index_map_.find(handle_address); + if (existing != handle_to_index_map_.end()) { + int index = existing->second; + entry.set_merged_index(index); + shared = true; + } else { + // Keep track of this code handle. + handle_to_index_map_[handle_address] = + static_cast(pending_32_bit_constants_.size()); + } + } + pending_32_bit_constants_.push_back(entry); // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. BlockConstPoolFor(1); - return ConstantPoolEntry::REGULAR; -} + // Emit relocation info. + if (MustOutputRelocInfo(rmode, this) && !shared) { + RecordRelocInfo(rmode); + } +} -ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position, - double value) { +void Assembler::ConstantPoolAddEntry(int position, Double value) { DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants); if (pending_64_bit_constants_.empty()) { first_const_pool_64_use_ = position; } ConstantPoolEntry entry(position, value); + + // Merge the constant, if possible. + for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) { + ConstantPoolEntry& current_entry = pending_64_bit_constants_[i]; + DCHECK(current_entry.sharing_ok()); + if (entry.value() == current_entry.value()) { + entry.set_merged_index(i); + break; + } + } pending_64_bit_constants_.push_back(entry); // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. BlockConstPoolFor(1); - return ConstantPoolEntry::REGULAR; } @@ -5171,29 +5301,12 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { int size_after_marker = estimated_size_after_marker; for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) { ConstantPoolEntry& entry = pending_64_bit_constants_[i]; - DCHECK(!entry.is_merged()); - for (size_t j = 0; j < i; j++) { - if (entry.value64() == pending_64_bit_constants_[j].value64()) { - DCHECK(!pending_64_bit_constants_[j].is_merged()); - entry.set_merged_index(j); - size_after_marker -= kDoubleSize; - break; - } - } + if (entry.is_merged()) size_after_marker -= kDoubleSize; } for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) { ConstantPoolEntry& entry = pending_32_bit_constants_[i]; - DCHECK(!entry.is_merged()); - if (!entry.sharing_ok()) continue; - for (size_t j = 0; j < i; j++) { - if (entry.value() == pending_32_bit_constants_[j].value()) { - DCHECK(!pending_32_bit_constants_[j].is_merged()); - entry.set_merged_index(j); - size_after_marker -= kPointerSize; - break; - } - } + if (entry.is_merged()) size_after_marker -= kPointerSize; } int size = size_up_to_marker + size_after_marker; @@ -5292,6 +5405,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { pending_32_bit_constants_.clear(); pending_64_bit_constants_.clear(); + handle_to_index_map_.clear(); + first_const_pool_32_use_ = -1; first_const_pool_64_use_ = -1; @@ -5333,6 +5448,22 @@ void PatchingAssembler::FlushICache(Isolate* isolate) { Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap); } +UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler) + : available_(assembler->GetScratchRegisterList()), + old_available_(*available_) {} + +UseScratchRegisterScope::~UseScratchRegisterScope() { + *available_ = old_available_; +} + +Register UseScratchRegisterScope::Acquire() { + DCHECK(available_ != nullptr); + DCHECK(*available_ != 0); + int index = static_cast(base::bits::CountTrailingZeros32(*available_)); + 
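ConstantPoolAddEntry now merges duplicates up front: a later shareable entry with the same value records the index of the first occurrence via set_merged_index(), and CheckConstPool simply subtracts merged entries from the pool size. A minimal sketch of that value-based dedup; Entry and AddEntry are illustrative and only loosely mirror ConstantPoolEntry.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Identical shareable values are not emitted twice; later entries just
    // remember the index of the first occurrence.
    struct Entry {
      int64_t value;
      int merged_index = -1;  // >= 0 if this entry reuses an earlier slot
      bool is_merged() const { return merged_index >= 0; }
    };

    static void AddEntry(std::vector<Entry>& pool, int64_t value) {
      Entry entry{value};
      for (size_t i = 0; i < pool.size(); i++) {
        if (!pool[i].is_merged() && pool[i].value == value) {
          entry.merged_index = static_cast<int>(i);  // share the earlier slot
          break;
        }
      }
      pool.push_back(entry);
    }

    int main() {
      std::vector<Entry> pool;
      AddEntry(pool, 42);
      AddEntry(pool, 7);
      AddEntry(pool, 42);  // merged with entry 0
      int emitted = 0;
      for (const Entry& e : pool) emitted += e.is_merged() ? 0 : 1;
      std::printf("entries=%zu, slots emitted=%d\n", pool.size(), emitted);  // 3, 2
    }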
*available_ &= ~(1UL << index); + return Register::from_code(index); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index a6284937235dcf..dd61bf2abbf832 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -45,6 +45,8 @@ #include "src/arm/constants-arm.h" #include "src/assembler.h" +#include "src/double.h" +#include "src/float.h" namespace v8 { namespace internal { @@ -501,7 +503,7 @@ class Operand BASE_EMBEDDED { RelocInfo::Mode rmode = RelocInfo::NONE32)); INLINE(static Operand Zero()); INLINE(explicit Operand(const ExternalReference& f)); - explicit Operand(Handle handle); + explicit Operand(Handle handle); INLINE(explicit Operand(Smi* value)); // rm @@ -524,18 +526,29 @@ class Operand BASE_EMBEDDED { // rm rs explicit Operand(Register rm, ShiftOp shift_op, Register rs); + static Operand EmbeddedNumber(double number); // Smi or HeapNumber. + static Operand EmbeddedCode(CodeStub* stub); + // Return true if this is a register operand. - INLINE(bool is_reg() const) { + bool IsRegister() const { return rm_.is_valid() && rs_.is(no_reg) && shift_op_ == LSL && shift_imm_ == 0; } + // Return true if this is a register operand shifted with an immediate. + bool IsImmediateShiftedRegister() const { + return rm_.is_valid() && !rs_.is_valid(); + } + // Return true if this is a register operand shifted with a register. + bool IsRegisterShiftedRegister() const { + return rm_.is_valid() && rs_.is_valid(); + } // Return the number of actual instructions required to implement the given // instruction for this particular operand. This can be a single instruction, - // if no load into the ip register is necessary, or anything between 2 and 4 - // instructions when we need to load from the constant pool (depending upon + // if no load into a scratch register is necessary, or anything between 2 and + // 4 instructions when we need to load from the constant pool (depending upon // whether the constant pool entry is in the small or extended section). If // the instruction this operand is used for is a MOV or MVN instruction the // actual instruction to use is required for this calculation. For other @@ -543,24 +556,46 @@ class Operand BASE_EMBEDDED { // // The value returned is only valid as long as no entries are added to the // constant pool between this call and the actual instruction being emitted. 
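UseScratchRegisterScope treats the assembler's scratch_register_list_ as a RegList bitmask: Acquire() removes the lowest-numbered available register and the destructor restores the previous mask, so nested scopes compose. A freestanding sketch of the same idea; register numbers and the pool contents here are made up for illustration.

    #include <cstdint>
    #include <cstdio>

    using RegList = uint32_t;

    struct ScratchScope {
      explicit ScratchScope(RegList* available)
          : available_(available), old_available_(*available) {}
      ~ScratchScope() { *available_ = old_available_; }  // restore on scope exit

      int Acquire() {
        // Take the lowest-numbered register still in the pool.
        for (int index = 0; index < 32; index++) {
          RegList bit = RegList{1} << index;
          if (*available_ & bit) {
            *available_ &= ~bit;  // remove it from the pool
            return index;
          }
        }
        return -1;  // pool exhausted (the real code DCHECKs instead)
      }

      RegList* available_;
      RegList old_available_;
    };

    int main() {
      RegList pool = 1u << 9;  // pretend r9 is the only scratch register
      {
        ScratchScope scope(&pool);
        std::printf("acquired r%d\n", scope.Acquire());  // r9
      }
      std::printf("pool restored: %d\n", pool != 0);      // 1: r9 is back
    }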
- int instructions_required(const Assembler* assembler, Instr instr = 0) const; - bool must_output_reloc_info(const Assembler* assembler) const; + int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const; + bool MustOutputRelocInfo(const Assembler* assembler) const; inline int32_t immediate() const { - DCHECK(!rm_.is_valid()); - return imm32_; + DCHECK(IsImmediate()); + DCHECK(!IsHeapObjectRequest()); + return value_.immediate; + } + bool IsImmediate() const { + return !rm_.is_valid(); + } + + HeapObjectRequest heap_object_request() const { + DCHECK(IsHeapObjectRequest()); + return value_.heap_object_request; + } + bool IsHeapObjectRequest() const { + DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); + DCHECK_IMPLIES(is_heap_object_request_, + rmode_ == RelocInfo::EMBEDDED_OBJECT || + rmode_ == RelocInfo::CODE_TARGET); + return is_heap_object_request_; } Register rm() const { return rm_; } Register rs() const { return rs_; } ShiftOp shift_op() const { return shift_op_; } + private: Register rm_; Register rs_; ShiftOp shift_op_; - int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg - int32_t imm32_; // valid if rm_ == no_reg + int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg + union Value { + Value() {} + HeapObjectRequest heap_object_request; // if is_heap_object_request_ + int32_t immediate; // otherwise + } value_; // valid if rm_ == no_reg + bool is_heap_object_request_ = false; RelocInfo::Mode rmode_; friend class Assembler; @@ -573,8 +608,9 @@ class MemOperand BASE_EMBEDDED { // [rn +/- offset] Offset/NegOffset // [rn +/- offset]! PreIndex/NegPreIndex // [rn], +/- offset PostIndex/NegPostIndex - // offset is any signed 32-bit value; offset is first loaded to register ip if - // it does not fit the addressing mode (12-bit unsigned and sign bit) + // offset is any signed 32-bit value; offset is first loaded to a scratch + // register if it does not fit the addressing mode (12-bit unsigned and sign + // bit) explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset); // [rn +/- rm] Offset/NegOffset @@ -703,7 +739,7 @@ class Assembler : public AssemblerBase { // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. GetCode() is idempotent; it returns the same result if no other // Assembler functions are invoked in between GetCode() calls. - void GetCode(CodeDesc* desc); + void GetCode(Isolate* isolate, CodeDesc* desc); // Label operations & relative jumps (PPUM Appendix D) // @@ -789,6 +825,8 @@ class Assembler : public AssemblerBase { static constexpr int kDebugBreakSlotLength = kDebugBreakSlotInstructions * kInstrSize; + RegList* GetScratchRegisterList() { return &scratch_register_list_; } + // --------------------------------------------------------------------------- // Code generation @@ -1131,10 +1169,10 @@ class Assembler : public AssemblerBase { SwVfpRegister last, Condition cond = al); - void vmov(const SwVfpRegister dst, float imm); + void vmov(const SwVfpRegister dst, Float32 imm); void vmov(const DwVfpRegister dst, - double imm, - const Register scratch = no_reg); + Double imm, + const Register extra_scratch = no_reg); void vmov(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond = al); @@ -1491,24 +1529,40 @@ class Assembler : public AssemblerBase { DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope); }; - // Debugging + // Class for blocking sharing of code targets in constant pool. 
+ class BlockCodeTargetSharingScope { + public: + explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) { + Open(assem); + } + // This constructor does not initialize the scope. The user needs to + // explicitly call Open() before using it. + BlockCodeTargetSharingScope() : assem_(nullptr) {} + ~BlockCodeTargetSharingScope() { + Close(); + } + void Open(Assembler* assem) { + DCHECK_NULL(assem_); + DCHECK_NOT_NULL(assem); + assem_ = assem; + assem_->StartBlockCodeTargetSharing(); + } - // Mark address of a debug break slot. - void RecordDebugBreakSlot(RelocInfo::Mode mode); + private: + void Close() { + if (assem_ != nullptr) { + assem_->EndBlockCodeTargetSharing(); + } + } + Assembler* assem_; - // Record the AST id of the CallIC being compiled, so that it can be placed - // in the relocation information. - void SetRecordedAstId(TypeFeedbackId ast_id) { - DCHECK(recorded_ast_id_.IsNone()); - recorded_ast_id_ = ast_id; - } + DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope); + }; - TypeFeedbackId RecordedAstId() { - DCHECK(!recorded_ast_id_.IsNone()); - return recorded_ast_id_; - } + // Debugging - void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } + // Mark address of a debug break slot. + void RecordDebugBreakSlot(RelocInfo::Mode mode); // Record a comment relocation entry that can be used by a disassembler. // Use --code-comments to enable. @@ -1636,11 +1690,6 @@ class Assembler : public AssemblerBase { } protected: - // Relocation for a type-recording IC has the AST id added to it. This - // member variable is a way to pass the information from the call site to - // the relocation info. - TypeFeedbackId recorded_ast_id_; - int buffer_space() const { return reloc_info_writer.pos() - pc_; } // Decode branch instruction at pos and return branch target pos @@ -1649,8 +1698,22 @@ class Assembler : public AssemblerBase { // Patch branch instruction at pos to branch to given branch target pos void target_at_put(int pos, int target_pos); + // Prevent sharing of code target constant pool entries until + // EndBlockCodeTargetSharing is called. Calls to this function can be nested + // but must be followed by an equal number of call to + // EndBlockCodeTargetSharing. + void StartBlockCodeTargetSharing() { + ++code_target_sharing_blocked_nesting_; + } + + // Resume sharing of constant pool code target entries. Needs to be called + // as many times as StartBlockCodeTargetSharing to have an effect. + void EndBlockCodeTargetSharing() { + --code_target_sharing_blocked_nesting_; + } + // Prevent contant pool emission until EndBlockConstPool is called. - // Call to this function can be nested but must be followed by an equal + // Calls to this function can be nested but must be followed by an equal // number of call to EndBlockConstpool. void StartBlockConstPool() { if (const_pool_blocked_nesting_++ == 0) { @@ -1660,7 +1723,7 @@ class Assembler : public AssemblerBase { } } - // Resume constant pool emission. Need to be called as many time as + // Resume constant pool emission. Needs to be called as many times as // StartBlockConstPool to have an effect. void EndBlockConstPool() { if (--const_pool_blocked_nesting_ == 0) { @@ -1726,6 +1789,12 @@ class Assembler : public AssemblerBase { std::vector pending_32_bit_constants_; std::vector pending_64_bit_constants_; + // Map of address of handle to index in pending_32_bit_constants_. + std::map handle_to_index_map_; + + // Scratch registers available for use by the Assembler. 
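BlockCodeTargetSharingScope is an RAII wrapper around a nesting counter: sharing of code-target constant-pool entries is allowed only while the counter is zero, and scopes may be nested as long as Start/End calls stay balanced. A minimal sketch of that counter-plus-scope pattern; SharingBlocker and sharing_blocked_nesting are illustrative names.

    #include <cstdio>

    struct SketchAssembler {
      int sharing_blocked_nesting = 0;
      bool IsSharingAllowed() const { return sharing_blocked_nesting == 0; }
    };

    class SharingBlocker {
     public:
      explicit SharingBlocker(SketchAssembler* assem) : assem_(assem) {
        ++assem_->sharing_blocked_nesting;  // block on entry
      }
      ~SharingBlocker() {
        --assem_->sharing_blocked_nesting;  // unblock when the scope closes
      }

     private:
      SketchAssembler* assem_;
    };

    int main() {
      SketchAssembler masm;
      {
        SharingBlocker outer(&masm);
        {
          SharingBlocker inner(&masm);  // nesting is fine; the counter reaches 2
          std::printf("allowed inside: %d\n", masm.IsSharingAllowed());  // 0
        }
      }
      std::printf("allowed outside: %d\n", masm.IsSharingAllowed());     // 1
    }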
+ RegList scratch_register_list_; + private: // Avoid overflows for displacements etc. static const int kMaximalBufferSize = 512 * MB; @@ -1749,6 +1818,11 @@ class Assembler : public AssemblerBase { static constexpr int kCheckPoolIntervalInst = 32; static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize; + // Sharing of code target entries may be blocked in some code sequences. + int code_target_sharing_blocked_nesting_; + bool IsCodeTargetSharingAllowed() const { + return code_target_sharing_blocked_nesting_ == 0; + } // Emission of the constant pool may be blocked in some code sequences. int const_pool_blocked_nesting_; // Block emission if this is not zero. @@ -1766,16 +1840,21 @@ class Assembler : public AssemblerBase { void GrowBuffer(); // 32-bit immediate values - void move_32_bit_immediate(Register rd, - const Operand& x, - Condition cond = al); + void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al); // Instruction generation - void addrmod1(Instr instr, Register rn, Register rd, const Operand& x); - void addrmod2(Instr instr, Register rd, const MemOperand& x); - void addrmod3(Instr instr, Register rd, const MemOperand& x); - void addrmod4(Instr instr, Register rn, RegList rl); - void addrmod5(Instr instr, CRegister crd, const MemOperand& x); + void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x); + // Attempt to encode operand |x| for instruction |instr| and return true on + // success. The result will be encoded in |instr| directly. This method may + // change the opcode if deemed beneficial, for instance, MOV may be turned + // into MVN, ADD into SUB, AND into BIC, ...etc. The only reason this method + // may fail is that the operand is an immediate that cannot be encoded. + bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x); + + void AddrMode2(Instr instr, Register rd, const MemOperand& x); + void AddrMode3(Instr instr, Register rd, const MemOperand& x); + void AddrMode4(Instr instr, Register rn, RegList rl); + void AddrMode5(Instr instr, CRegister crd, const MemOperand& x); // Labels void print(Label* L); @@ -1784,15 +1863,28 @@ class Assembler : public AssemblerBase { // Record reloc info for current pc_ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); - ConstantPoolEntry::Access ConstantPoolAddEntry(int position, - RelocInfo::Mode rmode, - intptr_t value); - ConstantPoolEntry::Access ConstantPoolAddEntry(int position, double value); + void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, + intptr_t value); + void ConstantPoolAddEntry(int position, Double value); friend class RelocInfo; friend class CodePatcher; friend class BlockConstPoolScope; + friend class BlockCodeTargetSharingScope; friend class EnsureSpace; + + // The following functions help with avoiding allocations of embedded heap + // objects during the code assembly phase. {RequestHeapObject} records the + // need for a future heap number allocation or code stub generation. After + // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these + // objects and place them where they are expected (determined by the pc offset + // associated with each request). That is, for each request, it will patch the + // dummy heap object handle that we emitted during code assembly with the + // actual heap object handle. 
+ void RequestHeapObject(HeapObjectRequest request); + void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); + + std::forward_list heap_object_requests_; }; constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize; @@ -1811,6 +1903,29 @@ class PatchingAssembler : public Assembler { void FlushICache(Isolate* isolate); }; +// This scope utility allows scratch registers to be managed safely. The +// Assembler's GetScratchRegisterList() is used as a pool of scratch +// registers. These registers can be allocated on demand, and will be returned +// at the end of the scope. +// +// When the scope ends, the Assembler's list will be restored to its original +// state, even if the list is modified by some other means. Note that this scope +// can be nested but the destructors need to run in the opposite order as the +// constructors. We do not have assertions for this. +class UseScratchRegisterScope { + public: + explicit UseScratchRegisterScope(Assembler* assembler); + ~UseScratchRegisterScope(); + + // Take a register from the list and return it. + Register Acquire(); + + private: + // Currently available scratch registers. + RegList* available_; + // Available scratch registers at the start of this scope. + RegList old_available_; +}; } // namespace internal } // namespace v8 diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index fc59f4007e1589..61d52f58f42c45 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -12,6 +12,7 @@ #include "src/bootstrapper.h" #include "src/codegen.h" #include "src/counters.h" +#include "src/double.h" #include "src/heap/heap-inl.h" #include "src/ic/handler-compiler.h" #include "src/ic/ic.h" @@ -51,29 +52,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register rhs); -void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm, - ExternalReference miss) { - // Update the static counter each time a new code stub is generated. - isolate()->counters()->code_stubs()->Increment(); - - CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor(); - int param_count = descriptor.GetRegisterParameterCount(); - { - // Call the runtime system in a fresh internal frame. 
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); - DCHECK(param_count == 0 || - r0.is(descriptor.GetRegisterParameter(param_count - 1))); - // Push arguments - for (int i = 0; i < param_count; ++i) { - __ push(descriptor.GetRegisterParameter(i)); - } - __ CallExternalReference(miss, param_count); - } - - __ Ret(); -} - - void DoubleToIStub::Generate(MacroAssembler* masm) { Label out_of_range, only_low, negate, done; Register input_reg = source(); @@ -671,7 +649,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { const int fp_argument_count = 0; AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); + __ PrepareCallCFunction(argument_count, fp_argument_count); __ mov(r0, Operand(ExternalReference::isolate_address(isolate()))); __ CallCFunction( ExternalReference::store_buffer_overflow_function(isolate()), @@ -710,7 +688,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); { AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(0, 2, scratch); + __ PrepareCallCFunction(0, 2); __ MovToFloatParameters(double_base, double_exponent); __ CallCFunction( ExternalReference::power_double_double_function(isolate()), 0, 2); @@ -731,7 +709,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ mov(exponent, scratch); } __ vmov(double_scratch, double_base); // Back up base. - __ vmov(double_result, 1.0, scratch2); + __ vmov(double_result, Double(1.0), scratch2); // Get absolute value of exponent. __ cmp(scratch, Operand::Zero()); @@ -746,7 +724,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ cmp(exponent, Operand::Zero()); __ b(ge, &done); - __ vmov(double_scratch, 1.0, scratch); + __ vmov(double_scratch, Double(1.0), scratch); __ vdiv(double_result, double_scratch, double_result); // Test whether result is zero. Bail out to check for subnormal result. // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. @@ -761,7 +739,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); { AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(0, 2, scratch); + __ PrepareCallCFunction(0, 2); __ MovToFloatParameters(double_base, double_exponent); __ CallCFunction(ExternalReference::power_double_double_function(isolate()), 0, 2); @@ -781,12 +759,9 @@ bool CEntryStub::NeedsImmovableCode() { void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { CEntryStub::GenerateAheadOfTime(isolate); StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); - StubFailureTrampolineStub::GenerateAheadOfTime(isolate); CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate); CreateAllocationSiteStub::GenerateAheadOfTime(isolate); CreateWeakCellStub::GenerateAheadOfTime(isolate); - BinaryOpICStub::GenerateAheadOfTime(isolate); - BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); StoreFastElementStub::GenerateAheadOfTime(isolate); } @@ -847,7 +822,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { if (FLAG_debug_code) { if (frame_alignment > kPointerSize) { Label alignment_as_expected; - DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); __ tst(sp, Operand(frame_alignment_mask)); __ b(eq, &alignment_as_expected); // Don't use Check here, as it will call Runtime_Abort re-entering here. 
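The DCHECK above now uses base::bits::IsPowerOfTwo, which is what makes the tst(sp, frame_alignment_mask) trick valid: masking with alignment - 1 only tests alignment when the alignment is a power of two. A tiny standalone illustration of that equivalence (the sample values are made up):

    #include <cstdint>
    #include <cstdio>

    // (value & (alignment - 1)) == 0 is equivalent to value % alignment == 0
    // only when alignment is a power of two.
    static bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

    int main() {
      uint32_t alignment = 8;             // e.g. an 8-byte frame alignment
      uintptr_t sp = 0x7ffffff0;          // pretend stack pointer
      bool aligned = IsPowerOfTwo(alignment) && (sp & (alignment - 1)) == 0;
      std::printf("aligned: %d\n", aligned);  // 1
    }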
@@ -911,7 +886,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { if (FLAG_debug_code) { Label okay; ExternalReference pending_exception_address( - Isolate::kPendingExceptionAddress, isolate()); + IsolateAddressId::kPendingExceptionAddress, isolate()); __ mov(r3, Operand(pending_exception_address)); __ ldr(r3, MemOperand(r3)); __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); @@ -940,15 +915,15 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ bind(&exception_returned); ExternalReference pending_handler_context_address( - Isolate::kPendingHandlerContextAddress, isolate()); + IsolateAddressId::kPendingHandlerContextAddress, isolate()); ExternalReference pending_handler_code_address( - Isolate::kPendingHandlerCodeAddress, isolate()); + IsolateAddressId::kPendingHandlerCodeAddress, isolate()); ExternalReference pending_handler_offset_address( - Isolate::kPendingHandlerOffsetAddress, isolate()); + IsolateAddressId::kPendingHandlerOffsetAddress, isolate()); ExternalReference pending_handler_fp_address( - Isolate::kPendingHandlerFPAddress, isolate()); + IsolateAddressId::kPendingHandlerFPAddress, isolate()); ExternalReference pending_handler_sp_address( - Isolate::kPendingHandlerSPAddress, isolate()); + IsolateAddressId::kPendingHandlerSPAddress, isolate()); // Ask the runtime for help to determine the handler. This will set r0 to // contain the current pending exception, don't clobber it. @@ -956,7 +931,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { isolate()); { FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(3, 0, r0); + __ PrepareCallCFunction(3, 0); __ mov(r0, Operand(0)); __ mov(r1, Operand(0)); __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); @@ -1006,7 +981,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // Save callee-saved vfp registers. __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); // Set up the reserved register for 0.0. - __ vmov(kDoubleRegZero, 0.0); + __ vmov(kDoubleRegZero, Double(0.0)); // Get address of argv, see stm above. // r0: code entry @@ -1028,31 +1003,38 @@ void JSEntryStub::Generate(MacroAssembler* masm) { StackFrame::Type marker = type(); __ mov(r7, Operand(StackFrame::TypeToMarker(marker))); __ mov(r6, Operand(StackFrame::TypeToMarker(marker))); - __ mov(r5, - Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); + __ mov(r5, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, + isolate()))); __ ldr(r5, MemOperand(r5)); - __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used. - __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | - ip.bit()); + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + + // Push a bad frame pointer to fail if it is used. + __ mov(scratch, Operand(-1)); + __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit()); + } + + Register scratch = r6; // Set up frame pointer for the frame to be pushed. __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); // If this is the outermost JS call, set js_entry_sp value. 
Label non_outermost_js; - ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); + ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate()); __ mov(r5, Operand(ExternalReference(js_entry_sp))); - __ ldr(r6, MemOperand(r5)); - __ cmp(r6, Operand::Zero()); + __ ldr(scratch, MemOperand(r5)); + __ cmp(scratch, Operand::Zero()); __ b(ne, &non_outermost_js); __ str(fp, MemOperand(r5)); - __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); + __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); Label cont; __ b(&cont); __ bind(&non_outermost_js); - __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME)); + __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME)); __ bind(&cont); - __ push(ip); + __ push(scratch); // Jump to a faked try block that does the invoke, with a faked catch // block that sets the pending exception. @@ -1069,10 +1051,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // field in the JSEnv and return a failure sentinel. Coming in here the // fp will be invalid because the PushStackHandler below sets it to 0 to // signal the existence of the JSEntry frame. - __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate()))); + __ mov(scratch, + Operand(ExternalReference(IsolateAddressId::kPendingExceptionAddress, + isolate()))); } - __ str(r0, MemOperand(ip)); + __ str(r0, MemOperand(scratch)); __ LoadRoot(r0, Heap::kExceptionRootIndex); __ b(&exit); @@ -1098,16 +1081,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) { if (type() == StackFrame::ENTRY_CONSTRUCT) { ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, isolate()); - __ mov(ip, Operand(construct_entry)); + __ mov(scratch, Operand(construct_entry)); } else { ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); - __ mov(ip, Operand(entry)); + __ mov(scratch, Operand(entry)); } - __ ldr(ip, MemOperand(ip)); // deref address - __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ ldr(scratch, MemOperand(scratch)); // deref address + __ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag)); // Branch and link to JSEntryTrampoline. - __ Call(ip); + __ Call(scratch); // Unlink this frame from the handler chain. __ PopStackHandler(); @@ -1125,9 +1108,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // Restore the top frame descriptors from the stack. __ pop(r3); - __ mov(ip, - Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); - __ str(r3, MemOperand(ip)); + __ mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, + isolate()))); + __ str(r3, MemOperand(scratch)); // Reset the stack to the callee saved registers. __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); @@ -1228,8 +1211,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // write-barrier is needed. __ bind(&megamorphic); __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3)); - __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex); - __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize)); + __ LoadRoot(r4, Heap::kmegamorphic_symbolRootIndex); + __ str(r4, FieldMemOperand(r5, FixedArray::kHeaderSize)); __ jmp(&done); // An uninitialized cache is patched with the function @@ -1321,8 +1304,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { __ bind(&got_smi_index_); // Check for index out of range. 
- __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); - __ cmp(ip, Operand(index_)); + __ ldr(result_, FieldMemOperand(object_, String::kLengthOffset)); + __ cmp(result_, Operand(index_)); __ b(ls, index_out_of_range_); __ SmiUntag(index_); @@ -1487,37 +1470,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop( } -void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r1 : left - // -- r0 : right - // -- lr : return address - // ----------------------------------- - - // Load r2 with the allocation site. We stick an undefined dummy value here - // and replace it with the real allocation site later when we instantiate this - // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). - __ Move(r2, isolate()->factory()->undefined_value()); - - // Make sure that we actually patched the allocation site. - if (FLAG_debug_code) { - __ tst(r2, Operand(kSmiTagMask)); - __ Assert(ne, kExpectedAllocationSite); - __ push(r2); - __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex); - __ cmp(r2, ip); - __ pop(r2); - __ Assert(eq, kExpectedAllocationSite); - } - - // Tail call into the stub that handles binary operations with allocation - // sites. - BinaryOpWithAllocationSiteStub stub(isolate(), state()); - __ TailCallStub(&stub); -} - - void CompareICStub::GenerateBooleans(MacroAssembler* masm) { DCHECK_EQ(CompareICState::BOOLEAN, state()); Label miss; @@ -1852,22 +1804,22 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) { void CompareICStub::GenerateMiss(MacroAssembler* masm) { + Register scratch = r2; { // Call the runtime system in a fresh internal frame. FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); __ Push(r1, r0); __ Push(lr, r1, r0); - __ mov(ip, Operand(Smi::FromInt(op()))); - __ push(ip); + __ mov(scratch, Operand(Smi::FromInt(op()))); + __ push(scratch); __ CallRuntime(Runtime::kCompareIC_Miss); // Compute the entry point of the rewritten stub. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ add(scratch, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Restore registers. __ pop(lr); __ Pop(r1, r0); } - - __ Jump(r2); + __ Jump(scratch); } @@ -1949,7 +1901,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, // Restore the properties. __ ldr(properties, - FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset)); } const int spill_mask = @@ -1957,7 +1909,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, r2.bit() | r1.bit() | r0.bit()); __ stm(db_w, sp, spill_mask); - __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset)); __ mov(r1, Operand(Handle(name))); NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); __ CallStub(&stub); @@ -2148,7 +2100,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode()); int argument_count = 3; - __ PrepareCallCFunction(argument_count, regs_.scratch0()); + __ PrepareCallCFunction(argument_count); Register address = r0.is(regs_.address()) ? 
regs_.scratch0() : regs_.address(); DCHECK(!address.is(regs_.object())); @@ -2173,10 +2125,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need, Mode mode) { - Label on_black; Label need_incremental; Label need_incremental_pop_scratch; +#ifndef V8_CONCURRENT_MARKING + Label on_black; // Let's look at the color of the object: If it is not black we don't have // to inform the incremental marker. __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); @@ -2190,6 +2143,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( } __ bind(&on_black); +#endif // Get the value from the slot. __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); @@ -2238,20 +2192,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( // Fall through when we need to inform the incremental marker. } - -void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(isolate(), 1, kSaveFPRegs); - __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); - int parameter_count_offset = - StubFailureTrampolineFrameConstants::kArgumentsLengthOffset; - __ ldr(r1, MemOperand(fp, parameter_count_offset)); - if (function_mode() == JS_FUNCTION_STUB_MODE) { - __ add(r1, r1, Operand(1)); +void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm, + Zone* zone) { + if (tasm->isolate()->function_entry_hook() != NULL) { + tasm->MaybeCheckConstPool(); + PredictableCodeSizeScope predictable(tasm); + predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize); + tasm->push(lr); + tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr)); + tasm->pop(lr); } - masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); - __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); - __ add(sp, sp, r1); - __ Ret(); } void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { @@ -2259,8 +2209,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { ProfileEntryHookStub stub(masm->isolate()); masm->MaybeCheckConstPool(); PredictableCodeSizeScope predictable(masm); - predictable.ExpectSize(masm->CallStubSize(&stub) + - 2 * Assembler::kInstrSize); + predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize); __ push(lr); __ CallStub(&stub); __ pop(lr); @@ -2300,26 +2249,31 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { int frame_alignment = masm->ActivationFrameAlignment(); if (frame_alignment > kPointerSize) { __ mov(r5, sp); - DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); __ and_(sp, sp, Operand(-frame_alignment)); } + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + #if V8_HOST_ARCH_ARM - int32_t entry_hook = - reinterpret_cast(isolate()->function_entry_hook()); - __ mov(ip, Operand(entry_hook)); + int32_t entry_hook = + reinterpret_cast(isolate()->function_entry_hook()); + __ mov(scratch, Operand(entry_hook)); #else - // Under the simulator we need to indirect the entry hook through a - // trampoline function at a known address. - // It additionally takes an isolate as a third parameter - __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); + // Under the simulator we need to indirect the entry hook through a + // trampoline function at a known address. 
+ // It additionally takes an isolate as a third parameter + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); - ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); - __ mov(ip, Operand(ExternalReference(&dispatcher, - ExternalReference::BUILTIN_CALL, - isolate()))); + ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); + __ mov(scratch, + Operand(ExternalReference( + &dispatcher, ExternalReference::BUILTIN_CALL, isolate()))); #endif - __ Call(ip); + __ Call(scratch); + } // Restore the stack pointer if needed. if (frame_alignment > kPointerSize) { @@ -2338,8 +2292,8 @@ static void CreateArrayDispatch(MacroAssembler* masm, T stub(masm->isolate(), GetInitialFastElementsKind(), mode); __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { - int last_index = GetSequenceIndexFromFastElementsKind( - TERMINAL_FAST_ELEMENTS_KIND); + int last_index = + GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(r3, Operand(kind)); @@ -2362,24 +2316,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, // r0 - number of arguments // r1 - constructor? // sp[0] - last argument - Label normal_sequence; - if (mode == DONT_OVERRIDE) { - STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); - STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - STATIC_ASSERT(FAST_ELEMENTS == 2); - STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); - STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); - STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); - - // is the low bit set? If so, we are holey and that is good. - __ tst(r3, Operand(1)); - __ b(ne, &normal_sequence); - } - - // look at the first argument - __ ldr(r5, MemOperand(sp, 0)); - __ cmp(r5, Operand::Zero()); - __ b(eq, &normal_sequence); + STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0); + STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(PACKED_ELEMENTS == 2); + STATIC_ASSERT(HOLEY_ELEMENTS == 3); + STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4); + STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5); if (mode == DISABLE_ALLOCATION_SITES) { ElementsKind initial = GetInitialFastElementsKind(); @@ -2389,13 +2331,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); - - __ bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(masm->isolate(), - initial, - DISABLE_ALLOCATION_SITES); - __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { + // is the low bit set? If so, we are holey and that is good. + Label normal_sequence; + __ tst(r3, Operand(1)); + __ b(ne, &normal_sequence); + // We are going to create a holey array, but our kind is non-holey. // Fix kind and retry (only if we have an allocation site in the slot). __ add(r3, r3, Operand(1)); @@ -2410,13 +2351,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, // in the AllocationSite::transition_info field because elements kind is // restricted to a portion of the field...upper bits need to be left alone. 
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); - __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset)); + __ ldr(r4, FieldMemOperand( + r2, AllocationSite::kTransitionInfoOrBoilerplateOffset)); __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); - __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset)); + __ str(r4, FieldMemOperand( + r2, AllocationSite::kTransitionInfoOrBoilerplateOffset)); __ bind(&normal_sequence); - int last_index = GetSequenceIndexFromFastElementsKind( - TERMINAL_FAST_ELEMENTS_KIND); + int last_index = + GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= last_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); __ cmp(r3, Operand(kind)); @@ -2434,13 +2377,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, template static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { - int to_index = GetSequenceIndexFromFastElementsKind( - TERMINAL_FAST_ELEMENTS_KIND); + int to_index = + GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); for (int i = 0; i <= to_index; ++i) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); T stub(isolate, kind); stub.GetCode(); - if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { + if (AllocationSite::ShouldTrack(kind)) { T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); stub1.GetCode(); } @@ -2454,7 +2397,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) { isolate); ArrayNArgumentsConstructorStub stub(isolate); stub.GetCode(); - ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; + ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS}; for (int i = 0; i < 2; i++) { // For internal arrays we only need a few things InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); @@ -2522,7 +2465,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); __ b(eq, &no_info); - __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset)); + __ ldr(r3, FieldMemOperand( + r2, AllocationSite::kTransitionInfoOrBoilerplateOffset)); __ SmiUntag(r3); STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask)); @@ -2596,21 +2540,21 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { if (FLAG_debug_code) { Label done; - __ cmp(r3, Operand(FAST_ELEMENTS)); + __ cmp(r3, Operand(PACKED_ELEMENTS)); __ b(eq, &done); - __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS)); + __ cmp(r3, Operand(HOLEY_ELEMENTS)); __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray); __ bind(&done); } Label fast_elements_case; - __ cmp(r3, Operand(FAST_ELEMENTS)); + __ cmp(r3, Operand(PACKED_ELEMENTS)); __ b(eq, &fast_elements_case); - GenerateCase(masm, FAST_HOLEY_ELEMENTS); + GenerateCase(masm, HOLEY_ELEMENTS); __ bind(&fast_elements_case); - GenerateCase(masm, FAST_ELEMENTS); + GenerateCase(masm, PACKED_ELEMENTS); } static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { @@ -2666,7 +2610,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, if (FLAG_log_timer_events) { FrameScope frame(masm, StackFrame::MANUAL); __ PushSafepointRegisters(); - __ PrepareCallCFunction(1, r0); + __ PrepareCallCFunction(1); __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); __ CallCFunction(ExternalReference::log_enter_external_function(isolate), 1); @@ 
-2682,7 +2626,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, if (FLAG_log_timer_events) { FrameScope frame(masm, StackFrame::MANUAL); __ PushSafepointRegisters(); - __ PrepareCallCFunction(1, r0); + __ PrepareCallCFunction(1); __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); __ CallCFunction(ExternalReference::log_leave_external_function(isolate), 1); @@ -2707,8 +2651,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, } __ sub(r6, r6, Operand(1)); __ str(r6, MemOperand(r9, kLevelOffset)); - __ ldr(ip, MemOperand(r9, kLimitOffset)); - __ cmp(r5, ip); + __ ldr(r6, MemOperand(r9, kLimitOffset)); + __ cmp(r5, r6); __ b(ne, &delete_allocated_handles); // Leave the API exit frame. @@ -2727,8 +2671,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, // Check if the function scheduled an exception. __ LoadRoot(r4, Heap::kTheHoleValueRootIndex); - __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate))); - __ ldr(r5, MemOperand(ip)); + __ mov(r6, Operand(ExternalReference::scheduled_exception_address(isolate))); + __ ldr(r5, MemOperand(r6)); __ cmp(r4, r5); __ b(ne, &promote_scheduled_exception); @@ -2742,7 +2686,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ bind(&delete_allocated_handles); __ str(r5, MemOperand(r9, kLimitOffset)); __ mov(r4, r0); - __ PrepareCallCFunction(1, r5); + __ PrepareCallCFunction(1); __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate), 1); @@ -2798,20 +2742,22 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { // call data __ push(call_data); - Register scratch = call_data; - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + Register scratch0 = call_data; + Register scratch1 = r5; + __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex); // return value - __ push(scratch); + __ push(scratch0); // return value default - __ push(scratch); + __ push(scratch0); // isolate - __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); - __ push(scratch); + __ mov(scratch1, + Operand(ExternalReference::isolate_address(masm->isolate()))); + __ push(scratch1); // holder __ push(holder); // Prepare arguments. - __ mov(scratch, sp); + __ mov(scratch0, sp); // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. @@ -2820,18 +2766,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - DCHECK(!api_function_address.is(r0) && !scratch.is(r0)); + DCHECK(!api_function_address.is(r0) && !scratch0.is(r0)); // r0 = FunctionCallbackInfo& // Arguments is after the return address. 
__ add(r0, sp, Operand(1 * kPointerSize)); // FunctionCallbackInfo::implicit_args_ - __ str(scratch, MemOperand(r0, 0 * kPointerSize)); + __ str(scratch0, MemOperand(r0, 0 * kPointerSize)); // FunctionCallbackInfo::values_ - __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize)); - __ str(ip, MemOperand(r0, 1 * kPointerSize)); + __ add(scratch1, scratch0, + Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize)); + __ str(scratch1, MemOperand(r0, 1 * kPointerSize)); // FunctionCallbackInfo::length_ = argc - __ mov(ip, Operand(argc())); - __ str(ip, MemOperand(r0, 2 * kPointerSize)); + __ mov(scratch0, Operand(argc())); + __ str(scratch0, MemOperand(r0, 2 * kPointerSize)); ExternalReference thunk_ref = ExternalReference::invoke_function_callback(masm->isolate()); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index db6068df9e23ff..1fc4dca381bbb6 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -143,7 +143,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate, __ ldr(temp1, MemOperand(src, 4, PostIndex)); __ str(temp1, MemOperand(dest, 4, PostIndex)); } else { - Register temp2 = ip; + UseScratchRegisterScope temps(&masm); + Register temp2 = temps.Acquire(); Label loop; __ bic(temp2, chars, Operand(0x3), SetCC); @@ -167,7 +168,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate, __ Ret(); CodeDesc desc; - masm.GetCode(&desc); + masm.GetCode(isolate, &desc); DCHECK(!RelocInfo::RequiresRelocation(isolate, desc)); Assembler::FlushICache(isolate, buffer, actual_size); @@ -219,8 +220,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest)); __ Ret(); } else { + UseScratchRegisterScope temps(&masm); + Register temp1 = r3; - Register temp2 = ip; + Register temp2 = temps.Acquire(); Register temp3 = lr; Register temp4 = r4; Label loop; @@ -256,7 +259,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( } CodeDesc desc; - masm.GetCode(&desc); + masm.GetCode(isolate, &desc); Assembler::FlushICache(isolate, buffer, actual_size); base::OS::ProtectCode(buffer, actual_size); @@ -284,7 +287,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) { __ Ret(); CodeDesc desc; - masm.GetCode(&desc); + masm.GetCode(isolate, &desc); DCHECK(!RelocInfo::RequiresRelocation(isolate, desc)); Assembler::FlushICache(isolate, buffer, actual_size); diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index b33b977879d728..8138f53c7eb3a2 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -87,24 +87,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { } -void Deoptimizer::SetPlatformCompiledStubRegisters( - FrameDescription* output_frame, CodeStubDescriptor* descriptor) { - ApiFunction function(descriptor->deoptimization_handler()); - ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); - intptr_t handler = reinterpret_cast(xref.address()); - int params = descriptor->GetHandlerParameterCount(); - output_frame->SetRegister(r0.code(), params); - output_frame->SetRegister(r1.code(), handler); -} - - -void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { - for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { - Float64 double_value = input_->GetDoubleRegister(i); - output_frame->SetDoubleRegister(i, double_value); - } -} - #define __ masm()-> // This code tries to 
be close to ia32 code so that any changes can be @@ -129,9 +111,11 @@ void Deoptimizer::TableEntryGenerator::Generate() { // We use a run-time check for VFP32DREGS. CpuFeatureScope scope(masm(), VFP32DREGS, CpuFeatureScope::kDontCheckSupported); + UseScratchRegisterScope temps(masm()); + Register scratch = temps.Acquire(); // Check CPU flags for number of registers, setting the Z condition flag. - __ CheckFor32DRegs(ip); + __ CheckFor32DRegs(scratch); // Push registers d0-d15, and possibly d16-d31, on the stack. // If d16-d31 are not pushed, decrease the stack pointer instead. @@ -148,8 +132,13 @@ void Deoptimizer::TableEntryGenerator::Generate() { // handle this a bit differently. __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); - __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); - __ str(fp, MemOperand(ip)); + { + UseScratchRegisterScope temps(masm()); + Register scratch = temps.Acquire(); + __ mov(scratch, Operand(ExternalReference( + IsolateAddressId::kCEntryFPAddress, isolate()))); + __ str(fp, MemOperand(scratch)); + } const int kSavedRegistersAreaSize = (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize; @@ -167,7 +156,7 @@ void Deoptimizer::TableEntryGenerator::Generate() { // Allocate a new deoptimizer object. // Pass four arguments in r0 to r3 and fifth argument on stack. - __ PrepareCallCFunction(6, r5); + __ PrepareCallCFunction(6); __ mov(r0, Operand(0)); Label context_check; __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); @@ -248,7 +237,7 @@ void Deoptimizer::TableEntryGenerator::Generate() { // Compute the output frame in the deoptimizer. __ push(r0); // Preserve deoptimizer object across call. // r0: deoptimizer object; r1: scratch. - __ PrepareCallCFunction(1, r1); + __ PrepareCallCFunction(1); // Call Deoptimizer::ComputeOutputFrames(). { AllowExternalCallThatCantCauseGC scope(masm()); @@ -311,15 +300,18 @@ void Deoptimizer::TableEntryGenerator::Generate() { // Restore the registers from the stack. __ ldm(ia_w, sp, restored_regs); // all but pc registers. - __ pop(ip); // remove sp - __ pop(ip); // remove lr __ InitializeRootRegister(); - __ pop(ip); // remove pc - __ pop(ip); // get continuation, leave pc on stack - __ pop(lr); - __ Jump(ip); + // Remove sp, lr and pc. + __ Drop(3); + { + UseScratchRegisterScope temps(masm()); + Register scratch = temps.Acquire(); + __ pop(scratch); // get continuation, leave pc on stack + __ pop(lr); + __ Jump(scratch); + } __ stop("Unreachable."); } @@ -332,13 +324,15 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we // need two instructions. STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff); + UseScratchRegisterScope temps(masm()); + Register scratch = temps.Acquire(); if (CpuFeatures::IsSupported(ARMv7)) { CpuFeatureScope scope(masm(), ARMv7); Label done; for (int i = 0; i < count(); i++) { int start = masm()->pc_offset(); USE(start); - __ movw(ip, i); + __ movw(scratch, i); __ b(&done); DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start); } @@ -354,14 +348,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { for (int i = 0; i < count(); i++) { int start = masm()->pc_offset(); USE(start); - __ mov(ip, Operand(i & 0xff)); // Set the low byte. + __ mov(scratch, Operand(i & 0xff)); // Set the low byte. __ b(&high_fixes[i >> 8]); // Jump to the secondary table. 
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start); } // Generate the secondary table, to set the high byte. for (int high = 1; high <= high_fix_max; high++) { __ bind(&high_fixes[high]); - __ orr(ip, ip, Operand(high << 8)); + __ orr(scratch, scratch, Operand(high << 8)); // If this isn't the last entry, emit a branch to the end of the table. // The last entry can just fall through. if (high < high_fix_max) __ b(&high_fixes[0]); @@ -371,7 +365,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { // through with no additional branch. __ bind(&high_fixes[0]); } - __ push(ip); + __ push(scratch); } diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 0b8fee10f4d2af..7f63b193b08422 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -343,7 +343,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) { return 5; } UNREACHABLE(); - return -1; } @@ -416,8 +415,8 @@ void Decoder::FormatNeonList(int Vd, int type) { void Decoder::FormatNeonMemory(int Rn, int align, int Rm) { - out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "[r%d", Rn); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "[%s", + converter_.NameOfCPURegister(Rn)); if (align != 0) { out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ":%d", (1 << align) << 6); @@ -427,8 +426,8 @@ void Decoder::FormatNeonMemory(int Rn, int align, int Rm) { } else if (Rm == 13) { Print("]!"); } else { - out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "], r%d", Rm); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "], %s", + converter_.NameOfCPURegister(Rm)); } } @@ -686,7 +685,8 @@ int Decoder::FormatOption(Instruction* instr, const char* format) { return -1; } } - out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p", addr); + out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p", + static_cast(addr)); return 1; } case 'S': @@ -705,7 +705,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) { } } UNREACHABLE(); - return -1; } @@ -1559,6 +1558,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { (instr->VAValue() == 0x0)) { DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr); } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) { + const char* rt_name = converter_.NameOfCPURegister(instr->RtValue()); if (instr->Bit(23) == 0) { int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5); if ((opc1_opc2 & 0xb) == 0) { @@ -1570,31 +1570,30 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { } } else { int vd = instr->VFPNRegValue(kDoublePrecision); - int rt = instr->RtValue(); if ((opc1_opc2 & 0x8) != 0) { // NeonS8 / NeonU8 int i = opc1_opc2 & 0x7; out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "vmov.8 d%d[%d], r%d", vd, i, rt); + "vmov.8 d%d[%d], %s", vd, i, rt_name); } else if ((opc1_opc2 & 0x1) != 0) { // NeonS16 / NeonU16 int i = (opc1_opc2 >> 1) & 0x3; out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "vmov.16 d%d[%d], r%d", vd, i, rt); + "vmov.16 d%d[%d], %s", vd, i, rt_name); } else { Unknown(instr); } } } else { int size = 32; - if (instr->Bit(5) != 0) + if (instr->Bit(5) != 0) { size = 16; - else if (instr->Bit(22) != 0) + } else if (instr->Bit(22) != 0) { size = 8; + } int Vd = instr->VFPNRegValue(kSimd128Precision); - int Rt = instr->RtValue(); out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "vdup.%i q%d, r%d", size, Vd, Rt); + "vdup.%i q%d, %s", size, Vd, rt_name); } } else if ((instr->VLValue() == 
0x1) && (instr->VCValue() == 0x1)) { int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5); @@ -1607,19 +1606,20 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { } } else { char sign = instr->Bit(23) != 0 ? 'u' : 's'; - int rt = instr->RtValue(); + const char* rt_name = converter_.NameOfCPURegister(instr->RtValue()); int vn = instr->VFPNRegValue(kDoublePrecision); if ((opc1_opc2 & 0x8) != 0) { // NeonS8 / NeonU8 int i = opc1_opc2 & 0x7; - out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "vmov.%c8 r%d, d%d[%d]", sign, rt, vn, i); + out_buffer_pos_ += + SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c8 %s, d%d[%d]", + sign, rt_name, vn, i); } else if ((opc1_opc2 & 0x1) != 0) { // NeonS16 / NeonU16 int i = (opc1_opc2 >> 1) & 0x3; out_buffer_pos_ += - SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 r%d, d%d[%d]", - sign, rt, vn, i); + SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 %s, d%d[%d]", + sign, rt_name, vn, i); } else { Unknown(instr); } @@ -2424,17 +2424,17 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) { case 0xA: case 0xB: if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) { - int Rn = instr->Bits(19, 16); + const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16)); int offset = instr->Bits(11, 0); if (offset == 0) { out_buffer_pos_ += - SNPrintF(out_buffer_ + out_buffer_pos_, "pld [r%d]", Rn); + SNPrintF(out_buffer_ + out_buffer_pos_, "pld [%s]", rn_name); } else if (instr->Bit(23) == 0) { out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "pld [r%d, #-%d]", Rn, offset); + "pld [%s, #-%d]", rn_name, offset); } else { out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, - "pld [r%d, #+%d]", Rn, offset); + "pld [%s, #+%d]", rn_name, offset); } } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) { int option = instr->Bits(3, 0); diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc index 8529bb541c2640..b0e2c1454da88b 100644 --- a/deps/v8/src/arm/frames-arm.cc +++ b/deps/v8/src/arm/frames-arm.cc @@ -21,15 +21,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; } Register JavaScriptFrame::context_register() { return cp; } Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); - return no_reg; -} - - -Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } -Register StubFailureTrampolineFrame::context_register() { return cp; } -Register StubFailureTrampolineFrame::constant_pool_pointer_register() { - UNREACHABLE(); - return no_reg; } diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc index f2fb703b9fd454..c042ade156aa14 100644 --- a/deps/v8/src/arm/interface-descriptors-arm.cc +++ b/deps/v8/src/arm/interface-descriptors-arm.cc @@ -49,6 +49,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return r5; } const Register StringCompareDescriptor::LeftRegister() { return r1; } const Register StringCompareDescriptor::RightRegister() { return r0; } +const Register StringConcatDescriptor::ArgumentsCountRegister() { return r0; } + const Register ApiGetterDescriptor::HolderRegister() { return r0; } const Register ApiGetterDescriptor::CallbackRegister() { return r3; } @@ -155,6 +157,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void CallVarargsDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // r0 : number of arguments (on the 
stack, not including receiver) + // r1 : the target to call + // r2 : arguments list (FixedArray) + // r4 : arguments list length (untagged) + Register registers[] = {r1, r0, r2, r4}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + void CallForwardVarargsDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // r0 : number of arguments @@ -164,6 +176,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void CallWithSpreadDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // r0 : number of arguments (on the stack, not including receiver) + // r1 : the target to call + // r2 : the object to spread + Register registers[] = {r1, r0, r2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + +void CallWithArrayLikeDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // r1 : the target to call + // r2 : the arguments list + Register registers[] = {r1, r2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + +void ConstructVarargsDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // r0 : number of arguments (on the stack, not including receiver) + // r1 : the target to call + // r3 : the new target + // r2 : arguments list (FixedArray) + // r4 : arguments list length (untagged) + Register registers[] = {r1, r3, r0, r2, r4}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // r0 : number of arguments @@ -174,6 +214,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void ConstructWithSpreadDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // r0 : number of arguments (on the stack, not including receiver) + // r1 : the target to call + // r3 : the new target + // r2 : the object to spread + Register registers[] = {r1, r3, r0, r2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + +void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // r1 : the target to call + // r3 : the new target + // r2 : the arguments list + Register registers[] = {r1, r3, r2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + void ConstructStubDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // r0 : number of arguments @@ -378,8 +437,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific( Register registers[] = { r0, // the value to pass to the generator r1, // the JSGeneratorObject to resume - r2, // the resume mode (tagged) - r3, // SuspendFlags (tagged) + r2 // the resume mode (tagged) }; data->InitializePlatformSpecific(arraysize(registers), registers); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 7256086b1d50e5..4fda72574abdea 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -14,6 +14,7 @@ #include "src/codegen.h" #include "src/counters.h" #include "src/debug/debug.h" +#include "src/double.h" #include "src/objects-inl.h" #include "src/register-configuration.h" #include "src/runtime/runtime.h" @@ -25,55 +26,39 @@ namespace internal { 
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size, CodeObjectRequired create_code_object) - : Assembler(isolate, buffer, size), - generating_stub_(false), - has_frame_(false), - isolate_(isolate), + : TurboAssembler(isolate, buffer, size, create_code_object), jit_cookie_(0) { if (FLAG_mask_constants_with_cookie) { jit_cookie_ = isolate->random_number_generator()->NextInt(); } - if (create_code_object == CodeObjectRequired::kYes) { - code_object_ = - Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_); - } } +void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); } -void MacroAssembler::Jump(Register target, Condition cond) { - bx(target, cond); -} - - -void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); mov(pc, Operand(target, rmode), LeaveCC, cond); } - -void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, +void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(reinterpret_cast<intptr_t>(target), rmode, cond); } - -void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, +void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); // 'code' is always generated ARM code, never THUMB code - AllowDeferredHandleDereference embedding_raw_address; - Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); + Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond); } - -int MacroAssembler::CallSize(Register target, Condition cond) { +int TurboAssembler::CallSize(Register target, Condition cond) { return kInstrSize; } - -void MacroAssembler::Call(Register target, Condition cond) { +void TurboAssembler::Call(Register target, Condition cond) { // Block constant pool for the call instruction sequence. BlockConstPoolScope block_const_pool(this); Label start; @@ -82,22 +67,19 @@ void MacroAssembler::Call(Register target, Condition cond) { DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); } - -int MacroAssembler::CallSize( - Address target, RelocInfo::Mode rmode, Condition cond) { +int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode, + Condition cond) { Instr mov_instr = cond | MOV | LeaveCC; Operand mov_operand = Operand(reinterpret_cast<int32_t>(target), rmode); return kInstrSize + - mov_operand.instructions_required(this, mov_instr) * kInstrSize; + mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize; } - -int MacroAssembler::CallStubSize( - CodeStub* stub, TypeFeedbackId ast_id, Condition cond) { - return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); +int TurboAssembler::CallStubSize() { + return CallSize(Handle<Code>(), RelocInfo::CODE_TARGET, al); } -void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, TargetAddressStorageMode mode, bool check_constant_pool) { // Check if we have to emit the constant pool before we block it. @@ -118,6 +100,9 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, int expected_size = CallSize(target, rmode, cond); #endif + // Use ip directly instead of using UseScratchRegisterScope, as we do not + // preserve scratch registers across calls. + // Call sequence on V7 or later may be : // movw ip, #... @ call address low 16 // movt ip, #...
@ call address high 16 @@ -138,29 +123,17 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, } } - -int MacroAssembler::CallSize(Handle<Code> code, - RelocInfo::Mode rmode, - TypeFeedbackId ast_id, +int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond) { - AllowDeferredHandleDereference using_raw_address; - return CallSize(reinterpret_cast<Address>
(code.location()), rmode, cond); + return CallSize(code.address(), rmode, cond); } -void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, - TypeFeedbackId ast_id, Condition cond, - TargetAddressStorageMode mode, +void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, + Condition cond, TargetAddressStorageMode mode, bool check_constant_pool) { - Label start; - bind(&start); DCHECK(RelocInfo::IsCodeTarget(rmode)); - if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { - SetRecordedAstId(ast_id); - rmode = RelocInfo::CODE_TARGET_WITH_ID; - } // 'code' is always generated ARM code, never THUMB code - AllowDeferredHandleDereference embedding_raw_address; - Call(reinterpret_cast<Address>
(code.location()), rmode, cond, mode); + Call(code.address(), rmode, cond, mode); } void MacroAssembler::CallDeoptimizer(Address target) { @@ -168,6 +141,9 @@ void MacroAssembler::CallDeoptimizer(Address target) { uintptr_t target_raw = reinterpret_cast<uintptr_t>(target); + // Use ip directly instead of using UseScratchRegisterScope, as we do not + // preserve scratch registers across calls. + // We use blx, like a call, but it does not return here. The link register is // used by the deoptimizer to work out what called it. if (CpuFeatures::IsSupported(ARMv7)) { @@ -198,22 +174,19 @@ int MacroAssembler::CallDeoptimizerSize() { return 3 * kInstrSize; } -void MacroAssembler::Ret(Condition cond) { - bx(lr, cond); -} - +void TurboAssembler::Ret(Condition cond) { bx(lr, cond); } -void MacroAssembler::Drop(int count, Condition cond) { +void TurboAssembler::Drop(int count, Condition cond) { if (count > 0) { add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond); } } -void MacroAssembler::Drop(Register count, Condition cond) { +void TurboAssembler::Drop(Register count, Condition cond) { add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond); } -void MacroAssembler::Ret(int drop, Condition cond) { +void TurboAssembler::Ret(int drop, Condition cond) { Drop(drop, cond); Ret(cond); } @@ -234,53 +207,63 @@ void MacroAssembler::Swap(Register reg1, } } +void TurboAssembler::Call(Label* target) { bl(target); } -void MacroAssembler::Call(Label* target) { - bl(target); +void TurboAssembler::Push(Handle<HeapObject> handle) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + mov(scratch, Operand(handle)); + push(scratch); } - -void MacroAssembler::Push(Handle<Object> handle) { - mov(ip, Operand(handle)); - push(ip); +void TurboAssembler::Push(Smi* smi) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + mov(scratch, Operand(smi)); + push(scratch); } -void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } +void MacroAssembler::PushObject(Handle<Object> handle) { + if (handle->IsHeapObject()) { + Push(Handle<HeapObject>::cast(handle)); + } else { + Push(Smi::cast(*handle)); + } +} -void MacroAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); } +void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); } -void MacroAssembler::Move(Register dst, Handle<Object> value) { +void TurboAssembler::Move(Register dst, Handle<HeapObject> value) { mov(dst, Operand(value)); } - -void MacroAssembler::Move(Register dst, Register src, Condition cond) { +void TurboAssembler::Move(Register dst, Register src, Condition cond) { if (!dst.is(src)) { mov(dst, src, LeaveCC, cond); } } -void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src, +void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src, Condition cond) { if (!dst.is(src)) { vmov(dst, src, cond); } } -void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src, +void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src, Condition cond) { if (!dst.is(src)) { vmov(dst, src, cond); } } -void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) { +void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) { if (!dst.is(src)) { vmov(dst, src); } } -void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { +void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
DCHECK(VfpRegisterIsAvailable(srcdst0)); @@ -297,7 +280,7 @@ void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { } } -void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) { +void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) { if (!srcdst0.is(srcdst1)) { vswp(srcdst0, srcdst1); } @@ -309,23 +292,24 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2, CpuFeatureScope scope(this, ARMv7); mls(dst, src1, src2, srcA, cond); } else { - DCHECK(!srcA.is(ip)); - mul(ip, src1, src2, LeaveCC, cond); - sub(dst, srcA, ip, LeaveCC, cond); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(!srcA.is(scratch)); + mul(scratch, src1, src2, LeaveCC, cond); + sub(dst, srcA, scratch, LeaveCC, cond); } } void MacroAssembler::And(Register dst, Register src1, const Operand& src2, Condition cond) { - if (!src2.is_reg() && - !src2.must_output_reloc_info(this) && + if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) && src2.immediate() == 0) { mov(dst, Operand::Zero(), LeaveCC, cond); - } else if (!(src2.instructions_required(this) == 1) && - !src2.must_output_reloc_info(this) && + } else if (!(src2.InstructionsRequired(this) == 1) && + !src2.MustOutputRelocInfo(this) && CpuFeatures::IsSupported(ARMv7) && - base::bits::IsPowerOfTwo32(src2.immediate() + 1)) { + base::bits::IsPowerOfTwo(src2.immediate() + 1)) { CpuFeatureScope scope(this, ARMv7); ubfx(dst, src1, 0, WhichPowerOf2(static_cast(src2.immediate()) + 1), cond); @@ -395,8 +379,7 @@ void MacroAssembler::Bfi(Register dst, } } - -void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, +void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width, Condition cond) { DCHECK(lsb < 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { @@ -446,9 +429,7 @@ void MacroAssembler::Store(Register src, } } - -void MacroAssembler::LoadRoot(Register destination, - Heap::RootListIndex index, +void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index, Condition cond) { ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); } @@ -522,9 +503,8 @@ void MacroAssembler::RecordWriteField( } } - -// Will clobber 4 registers: object, map, dst, ip. The -// register 'object' contains a heap object pointer. +// Will clobber 3 registers: object, map and dst. The register 'object' contains +// a heap object pointer. A scratch register also needs to be available. void MacroAssembler::RecordWriteForMap(Register object, Register map, Register dst, @@ -541,8 +521,10 @@ void MacroAssembler::RecordWriteForMap(Register object, } if (emit_debug_code()) { - ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset)); - cmp(ip, map); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); + cmp(scratch, map); Check(eq, kWrongAddressOrValuePassedToRecordWrite); } @@ -582,7 +564,11 @@ void MacroAssembler::RecordWriteForMap(Register object, // Count number of write barriers in generated code. isolate()->counters()->write_barriers_static()->Increment(); - IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst); + { + UseScratchRegisterScope temps(this); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, + temps.Acquire(), dst); + } // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. 
@@ -592,10 +578,9 @@ void MacroAssembler::RecordWriteForMap(Register object, } } - -// Will clobber 4 registers: object, address, scratch, ip. The -// register 'object' contains a heap object pointer. The heap object -// tag is shifted away. +// Will clobber 3 registers: object, address, and value. The register 'object' +// contains a heap object pointer. The heap object tag is shifted away. +// A scratch register also needs to be available. void MacroAssembler::RecordWrite( Register object, Register address, @@ -607,8 +592,10 @@ void MacroAssembler::RecordWrite( PointersToHereCheck pointers_to_here_check_for_value) { DCHECK(!object.is(value)); if (emit_debug_code()) { - ldr(ip, MemOperand(address)); - cmp(ip, value); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + ldr(scratch, MemOperand(address)); + cmp(scratch, value); Check(eq, kWrongAddressOrValuePassedToRecordWrite); } @@ -653,8 +640,11 @@ void MacroAssembler::RecordWrite( // Count number of write barriers in generated code. isolate()->counters()->write_barriers_static()->Increment(); - IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, - value); + { + UseScratchRegisterScope temps(this); + IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, + temps.Acquire(), value); + } // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. @@ -681,8 +671,8 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function, if (emit_debug_code()) { add(scratch, js_function, Operand(offset - kHeapObjectTag)); - ldr(ip, MemOperand(scratch)); - cmp(ip, code_entry); + ldr(scratch, MemOperand(scratch)); + cmp(scratch, code_entry); Check(eq, kWrongAddressOrValuePassedToRecordWrite); } @@ -706,7 +696,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function, stm(db_w, sp, (kCallerSaved | lr.bit())); int argument_count = 3; - PrepareCallCFunction(argument_count, code_entry); + PrepareCallCFunction(argument_count); mov(r0, js_function); mov(r1, dst); @@ -741,14 +731,16 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. bind(&ok); } // Load store buffer top. - ExternalReference store_buffer = - ExternalReference::store_buffer_top(isolate()); - mov(ip, Operand(store_buffer)); - ldr(scratch, MemOperand(ip)); - // Store pointer to buffer and increment buffer top. - str(address, MemOperand(scratch, kPointerSize, PostIndex)); - // Write back new top of buffer. - str(scratch, MemOperand(ip)); + { + UseScratchRegisterScope temps(this); + Register store_buffer = temps.Acquire(); + mov(store_buffer, Operand(ExternalReference::store_buffer_top(isolate()))); + ldr(scratch, MemOperand(store_buffer)); + // Store pointer to buffer and increment buffer top. + str(address, MemOperand(scratch, kPointerSize, PostIndex)); + // Write back new top of buffer. + str(scratch, MemOperand(store_buffer)); + } // Call stub on end of buffer. // Check for end of buffer. tst(scratch, Operand(StoreBuffer::kStoreBufferMask)); @@ -768,7 +760,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. 
} } -void MacroAssembler::PushCommonFrame(Register marker_reg) { +void TurboAssembler::PushCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { if (marker_reg.code() > fp.code()) { stm(db_w, sp, fp.bit() | lr.bit()); @@ -797,7 +789,7 @@ void MacroAssembler::PopCommonFrame(Register marker_reg) { } } -void MacroAssembler::PushStandardFrame(Register function_reg) { +void TurboAssembler::PushStandardFrame(Register function_reg) { DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code()); stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() | fp.bit() | lr.bit()); @@ -927,7 +919,7 @@ void MacroAssembler::Strd(Register src1, Register src2, } } -void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, +void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { // Subtracting 0.0 preserves all inputs except for signalling NaNs, which @@ -936,38 +928,35 @@ void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, vsub(dst, src, kDoubleRegZero, cond); } - -void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, +void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, +void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } - -void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, +void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, +void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. 
VFPCompareAndLoadFlags(src1, src2, pc, cond); } - -void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, +void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Register fpscr_flags, const Condition cond) { @@ -976,7 +965,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, vmrs(fpscr_flags, cond); } -void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, +void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, const Register fpscr_flags, const Condition cond) { @@ -985,8 +974,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, vmrs(fpscr_flags, cond); } - -void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, +void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Register fpscr_flags, const Condition cond) { @@ -995,7 +983,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, vmrs(fpscr_flags, cond); } -void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, +void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, const Register fpscr_flags, const Condition cond) { @@ -1004,23 +992,20 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, vmrs(fpscr_flags, cond); } - -void MacroAssembler::Vmov(const DwVfpRegister dst, - const double imm, +void MacroAssembler::Vmov(const DwVfpRegister dst, Double imm, const Register scratch) { - int64_t imm_bits = bit_cast(imm); + uint64_t imm_bits = imm.AsUint64(); // Handle special values first. - if (imm_bits == bit_cast(0.0)) { + if (imm_bits == Double(0.0).AsUint64()) { vmov(dst, kDoubleRegZero); - } else if (imm_bits == bit_cast(-0.0)) { + } else if (imm_bits == Double(-0.0).AsUint64()) { vneg(dst, kDoubleRegZero); } else { vmov(dst, imm, scratch); } } - -void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) { +void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) { if (src.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); vmov(dst, loc.high()); @@ -1029,8 +1014,7 @@ void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) { } } - -void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) { +void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) { if (dst.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); vmov(loc.high(), src); @@ -1039,8 +1023,7 @@ void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) { } } - -void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) { +void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) { if (src.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); vmov(dst, loc.low()); @@ -1049,8 +1032,7 @@ void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) { } } - -void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { +void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) { if (dst.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); vmov(loc.low(), src); @@ -1059,7 +1041,7 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { } } -void MacroAssembler::VmovExtended(Register dst, int src_code) { +void TurboAssembler::VmovExtended(Register dst, int src_code) { DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code); DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code); if (src_code & 0x1) { @@ -1069,7 
+1051,7 @@ void MacroAssembler::VmovExtended(Register dst, int src_code) { } } -void MacroAssembler::VmovExtended(int dst_code, Register src) { +void TurboAssembler::VmovExtended(int dst_code, Register src) { DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code); DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code); if (dst_code & 0x1) { @@ -1079,7 +1061,7 @@ void MacroAssembler::VmovExtended(int dst_code, Register src) { } } -void MacroAssembler::VmovExtended(int dst_code, int src_code) { +void TurboAssembler::VmovExtended(int dst_code, int src_code) { if (src_code == dst_code) return; if (src_code < SwVfpRegister::kMaxNumRegisters && @@ -1143,7 +1125,7 @@ void MacroAssembler::VmovExtended(int dst_code, int src_code) { } } -void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) { +void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) { if (dst_code < SwVfpRegister::kMaxNumRegisters) { vldr(SwVfpRegister::from_code(dst_code), src); } else { @@ -1155,7 +1137,7 @@ void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) { } } -void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) { +void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) { if (src_code < SwVfpRegister::kMaxNumRegisters) { vstr(SwVfpRegister::from_code(src_code), dst); } else { @@ -1166,7 +1148,7 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) { } } -void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src, +void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane) { int size = NeonSz(dt); // 0, 1, 2 int byte = lane << size; @@ -1178,7 +1160,7 @@ void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src, vmov(dt, dst, double_source, double_lane); } -void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src, +void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane) { int size = NeonSz(dt); // 0, 1, 2 int byte = lane << size; @@ -1187,13 +1169,13 @@ void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src, vmov(dt, dst, src, double_lane); } -void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src, +void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane) { int s_code = src.code() * 4 + lane; VmovExtended(dst.code(), s_code); } -void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane, NeonDataType dt, int lane) { Move(dst, src); int size = NeonSz(dt); // 0, 1, 2 @@ -1206,14 +1188,14 @@ void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, vmov(dt, double_dst, double_lane, src_lane); } -void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, SwVfpRegister src_lane, int lane) { Move(dst, src); int s_code = dst.code() * 4 + lane; VmovExtended(s_code, src_lane.code()); } -void MacroAssembler::LslPair(Register dst_low, Register dst_high, +void TurboAssembler::LslPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_high, src_low)); @@ -1236,7 +1218,7 @@ void MacroAssembler::LslPair(Register dst_low, Register dst_high, bind(&done); } -void MacroAssembler::LslPair(Register dst_low, Register dst_high, +void TurboAssembler::LslPair(Register dst_low, Register dst_high, Register src_low, 
Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_high, src_low)); @@ -1259,7 +1241,7 @@ void MacroAssembler::LslPair(Register dst_low, Register dst_high, } } -void MacroAssembler::LsrPair(Register dst_low, Register dst_high, +void TurboAssembler::LsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1283,7 +1265,7 @@ void MacroAssembler::LsrPair(Register dst_low, Register dst_high, bind(&done); } -void MacroAssembler::LsrPair(Register dst_low, Register dst_high, +void TurboAssembler::LsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1306,7 +1288,7 @@ void MacroAssembler::LsrPair(Register dst_low, Register dst_high, } } -void MacroAssembler::AsrPair(Register dst_low, Register dst_high, +void TurboAssembler::AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1329,7 +1311,7 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high, bind(&done); } -void MacroAssembler::AsrPair(Register dst_low, Register dst_high, +void TurboAssembler::AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1352,12 +1334,14 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high, } } -void MacroAssembler::StubPrologue(StackFrame::Type type) { - mov(ip, Operand(StackFrame::TypeToMarker(type))); - PushCommonFrame(ip); +void TurboAssembler::StubPrologue(StackFrame::Type type) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + mov(scratch, Operand(StackFrame::TypeToMarker(type))); + PushCommonFrame(scratch); } -void MacroAssembler::Prologue(bool code_pre_aging) { +void TurboAssembler::Prologue(bool code_pre_aging) { { PredictableCodeSizeScope predictible_code_size_scope( this, kNoCodeAgeSequenceLength); // The following three instructions must remain together and unmodified @@ -1381,20 +1365,20 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) { ldr(vector, FieldMemOperand(vector, Cell::kValueOffset)); } - -void MacroAssembler::EnterFrame(StackFrame::Type type, +void TurboAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { // r0-r3: preserved - mov(ip, Operand(StackFrame::TypeToMarker(type))); - PushCommonFrame(ip); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + mov(scratch, Operand(StackFrame::TypeToMarker(type))); + PushCommonFrame(scratch); if (type == StackFrame::INTERNAL) { - mov(ip, Operand(CodeObject())); - push(ip); + mov(scratch, Operand(CodeObject())); + push(scratch); } } - -int MacroAssembler::LeaveFrame(StackFrame::Type type) { +int TurboAssembler::LeaveFrame(StackFrame::Type type) { // r0: preserved // r1: preserved // r2: preserved @@ -1424,31 +1408,35 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, StackFrame::Type frame_type) { DCHECK(frame_type == StackFrame::EXIT || frame_type == StackFrame::BUILTIN_EXIT); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); // Set up the frame structure on the stack. 
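The LslPair/LsrPair/AsrPair rewrites above implement 64-bit shifts on a value held as two 32-bit registers. A minimal C++ sketch of the constant-shift LslPair case, mirroring the shift == 0, shift < 32 and shift >= 32 branches the assembler helper emits (the register-shift variants add the same logic behind a runtime test); `shift` is assumed to be below 64:

```cpp
#include <cstdint>

// 64-bit left shift on a value split across two 32-bit halves.
void LslPair(uint32_t* dst_low, uint32_t* dst_high, uint32_t src_low,
             uint32_t src_high, uint32_t shift) {
  if (shift == 0) {
    *dst_high = src_high;
    *dst_low = src_low;
  } else if (shift < 32) {
    // The high word keeps its own shifted bits plus the bits that spill
    // over from the low word.
    *dst_high = (src_high << shift) | (src_low >> (32 - shift));
    *dst_low = src_low << shift;
  } else {
    // Everything lands in the high word; the low word becomes zero.
    *dst_high = src_low << (shift - 32);
    *dst_low = 0;
  }
}
```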
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); - mov(ip, Operand(StackFrame::TypeToMarker(frame_type))); - PushCommonFrame(ip); + mov(scratch, Operand(StackFrame::TypeToMarker(frame_type))); + PushCommonFrame(scratch); // Reserve room for saved entry sp and code object. sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp)); if (emit_debug_code()) { - mov(ip, Operand::Zero()); - str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); + mov(scratch, Operand::Zero()); + str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } - mov(ip, Operand(CodeObject())); - str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset)); + mov(scratch, Operand(CodeObject())); + str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset)); // Save the frame pointer and the context in top. - mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); - str(fp, MemOperand(ip)); - mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); - str(cp, MemOperand(ip)); + mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, + isolate()))); + str(fp, MemOperand(scratch)); + mov(scratch, + Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); + str(cp, MemOperand(scratch)); // Optionally save all double registers. if (save_doubles) { - SaveFPRegs(sp, ip); + SaveFPRegs(sp, scratch); // Note that d0 will be accessible at // fp - ExitFrameConstants::kFrameSize - // DwVfpRegister::kMaxNumRegisters * kDoubleSize, @@ -1460,17 +1448,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); if (frame_alignment > 0) { - DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); and_(sp, sp, Operand(-frame_alignment)); } // Set the exit frame sp value to point just before the return address // location. - add(ip, sp, Operand(kPointerSize)); - str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); + add(scratch, sp, Operand(kPointerSize)); + str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -int MacroAssembler::ActivationFrameAlignment() { +int TurboAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_ARM // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -1491,6 +1479,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context, bool argument_count_is_length) { ConstantPoolUnavailableScope constant_pool_unavailable(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); // Optionally restore all double registers. if (save_doubles) { @@ -1498,22 +1488,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, const int offset = ExitFrameConstants::kFixedFrameSizeFromFp; sub(r3, fp, Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); - RestoreFPRegs(r3, ip); + RestoreFPRegs(r3, scratch); } // Clear top frame. mov(r3, Operand::Zero()); - mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); - str(r3, MemOperand(ip)); + mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, + isolate()))); + str(r3, MemOperand(scratch)); // Restore current context from top and clear it in debug mode. 
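EnterExitFrame above aligns the stack with and_(sp, sp, Operand(-frame_alignment)) after asserting the alignment is a power of two; PrepareCallCFunction later in the file does the same. A small sketch of that arithmetic, assuming nothing beyond what the diff shows:

```cpp
#include <cstdint>

// For a power-of-two alignment, AND-ing with the negated alignment rounds
// an address down to the nearest aligned boundary.
constexpr uint32_t AlignDown(uint32_t addr, uint32_t alignment) {
  return addr & ~(alignment - 1);  // same bit pattern as addr & -alignment
}

static_assert(AlignDown(0x1003D, 8) == 0x10038, "rounds down to 8 bytes");
static_assert(AlignDown(0x10040, 16) == 0x10040, "aligned input is unchanged");
```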
if (restore_context) { - mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); - ldr(cp, MemOperand(ip)); + mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress, + isolate()))); + ldr(cp, MemOperand(scratch)); } #ifdef DEBUG - mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); - str(r3, MemOperand(ip)); + mov(scratch, + Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); + str(r3, MemOperand(scratch)); #endif // Tear down the exit frame, pop the arguments, and return. @@ -1528,8 +1521,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, } } - -void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) { +void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) { if (use_eabi_hardfloat()) { Move(dst, d0); } else { @@ -1539,11 +1531,11 @@ void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) { // On ARM this is just a synonym to make the purpose clear. -void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) { +void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) { MovFromFloatResult(dst); } -void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, +void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, Register caller_args_count_reg, Register scratch0, Register scratch1) { #if DEBUG @@ -1778,7 +1770,6 @@ void MacroAssembler::InvokeFunction(Register fun, ldr(expected_reg, FieldMemOperand(temp_reg, SharedFunctionInfo::kFormalParameterCountOffset)); - SmiUntag(expected_reg); ParameterCount expected(expected_reg); InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper); @@ -1850,7 +1841,8 @@ void MacroAssembler::PushStackHandler() { STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); // Link the current handler as the next handler. - mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); + mov(r6, + Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate()))); ldr(r5, MemOperand(r6)); push(r5); @@ -1860,11 +1852,14 @@ void MacroAssembler::PushStackHandler() { void MacroAssembler::PopStackHandler() { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); pop(r1); - mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); + mov(scratch, + Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate()))); add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); - str(r1, MemOperand(ip)); + str(r1, MemOperand(scratch)); } @@ -1907,7 +1902,6 @@ void MacroAssembler::Allocate(int object_size, Label* gc_required, AllocationFlags flags) { DCHECK(object_size <= kMaxRegularHeapObjectSize); - DCHECK((flags & ALLOCATION_FOLDED) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1919,7 +1913,7 @@ void MacroAssembler::Allocate(int object_size, return; } - DCHECK(!AreAliased(result, scratch1, scratch2, ip)); + DCHECK(!AreAliased(result, scratch1, scratch2)); // Make object size into bytes. 
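InvokeFunction above no longer untags the formal parameter count (the field is presumably stored untagged now), while JumpIfSmi, SmiTst and SmiToDouble in the surrounding hunks still operate on tagged values. As a reminder of the 32-bit tagging scheme these helpers assume, a hedged sketch; the constants are the ones commonly defined for 32-bit V8, not taken from this diff:

```cpp
#include <cstdint>

// A Smi is a small integer shifted left by one with a zero tag bit, so
// untagging is an arithmetic shift right and "is this a Smi?" is a test of
// bit 0, i.e. the tst(value, Operand(kSmiTagMask)) sequences above.
constexpr int kSmiTagSize = 1;
constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

constexpr intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(value) * (1 << kSmiTagSize);
}
constexpr int32_t SmiUntag(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> kSmiTagSize);  // assumes arithmetic shift
}
constexpr bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

static_assert(SmiUntag(SmiTag(42)) == 42, "round trip");
static_assert(IsSmi(SmiTag(7)) && !IsSmi(SmiTag(7) + 1), "tag bit is bit 0");
```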
if ((flags & SIZE_IN_WORDS) != 0) { @@ -1939,13 +1933,12 @@ void MacroAssembler::Allocate(int object_size, intptr_t top = reinterpret_cast(allocation_top.address()); intptr_t limit = reinterpret_cast(allocation_limit.address()); DCHECK((limit - top) == kPointerSize); - DCHECK(result.code() < ip.code()); + + UseScratchRegisterScope temps(this); // Set up allocation top address register. Register top_address = scratch1; - // This code stores a temporary value in ip. This is OK, as the code below - // does not need ip for implicit literal generation. - Register alloc_limit = ip; + Register alloc_limit = temps.Acquire(); Register result_end = scratch2; mov(top_address, Operand(allocation_top)); @@ -1980,8 +1973,8 @@ void MacroAssembler::Allocate(int object_size, } // Calculate new top and bail out if new space is exhausted. Use result - // to calculate the new top. We must preserve the ip register at this - // point, so we cannot just use add(). + // to calculate the new top. We have already acquired the scratch register at + // this point, so we cannot just use add(). DCHECK(object_size > 0); Register source = result; int shift = 0; @@ -1993,7 +1986,7 @@ void MacroAssembler::Allocate(int object_size, object_size -= bits; shift += 8; Operand bits_operand(bits); - DCHECK(bits_operand.instructions_required(this) == 1); + DCHECK(bits_operand.InstructionsRequired(this) == 1); add(result_end, source, bits_operand); source = result_end; } @@ -2002,10 +1995,7 @@ void MacroAssembler::Allocate(int object_size, cmp(result_end, Operand(alloc_limit)); b(hi, gc_required); - if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { - // The top pointer is not updated for allocation folding dominators. - str(result_end, MemOperand(top_address)); - } + str(result_end, MemOperand(top_address)); // Tag object. add(result, result, Operand(kHeapObjectTag)); @@ -2015,7 +2005,6 @@ void MacroAssembler::Allocate(int object_size, void MacroAssembler::Allocate(Register object_size, Register result, Register result_end, Register scratch, Label* gc_required, AllocationFlags flags) { - DCHECK((flags & ALLOCATION_FOLDED) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -2029,8 +2018,7 @@ void MacroAssembler::Allocate(Register object_size, Register result, // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag // is not specified. Other registers must not overlap. - DCHECK(!AreAliased(object_size, result, scratch, ip)); - DCHECK(!AreAliased(result_end, result, scratch, ip)); + DCHECK(!AreAliased(object_size, result, scratch, result_end)); DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end)); // Check relative positions of allocation top and limit addresses. @@ -2044,13 +2032,12 @@ void MacroAssembler::Allocate(Register object_size, Register result, intptr_t top = reinterpret_cast(allocation_top.address()); intptr_t limit = reinterpret_cast(allocation_limit.address()); DCHECK((limit - top) == kPointerSize); - DCHECK(result.code() < ip.code()); + + UseScratchRegisterScope temps(this); // Set up allocation top address and allocation limit registers. Register top_address = scratch; - // This code stores a temporary value in ip. This is OK, as the code below - // does not need ip for implicit literal generation. 
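The size computation in Allocate() above cannot simply add an arbitrary 32-bit object_size in one instruction: an ARM data-processing immediate is an 8-bit value at an even rotation, so the loop peels the constant into encodable chunks, each added with a single add (the DCHECK on InstructionsRequired asserts exactly that). A plain C++ rendering of that loop:

```cpp
#include <cstdint>
#include <vector>

// Splits object_size into chunks that each fit one ARM add immediate.
std::vector<uint32_t> SplitIntoArmImmediates(uint32_t object_size) {
  std::vector<uint32_t> chunks;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;  // skip zero bit pairs; rotations come in multiples of two
    } else {
      uint32_t bits = object_size & (0xffu << shift);
      object_size -= bits;
      shift += 8;
      chunks.push_back(bits);  // one encodable immediate per emitted add
    }
  }
  return chunks;
}
// SplitIntoArmImmediates(0x123) yields {0x23, 0x100}; each element encodes
// directly, so computing the new allocation top costs two adds.
```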
- Register alloc_limit = ip; + Register alloc_limit = temps.Acquire(); mov(top_address, Operand(allocation_top)); if ((flags & RESULT_CONTAINS_TOP) == 0) { @@ -2100,118 +2087,9 @@ void MacroAssembler::Allocate(Register object_size, Register result, tst(result_end, Operand(kObjectAlignmentMask)); Check(eq, kUnalignedAllocationInNewSpace); } - if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { - // The top pointer is not updated for allocation folding dominators. - str(result_end, MemOperand(top_address)); - } - - // Tag object. - add(result, result, Operand(kHeapObjectTag)); -} - -void MacroAssembler::FastAllocate(Register object_size, Register result, - Register result_end, Register scratch, - AllocationFlags flags) { - // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag - // is not specified. Other registers must not overlap. - DCHECK(!AreAliased(object_size, result, scratch, ip)); - DCHECK(!AreAliased(result_end, result, scratch, ip)); - DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end)); - - ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), flags); - - Register top_address = scratch; - mov(top_address, Operand(allocation_top)); - ldr(result, MemOperand(top_address)); - - if ((flags & DOUBLE_ALIGNMENT) != 0) { - // Align the next allocation. Storing the filler map without checking top is - // safe in new-space because the limit of the heap is aligned there. - DCHECK(kPointerAlignment * 2 == kDoubleAlignment); - and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC); - Label aligned; - b(eq, &aligned); - mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); - str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex)); - bind(&aligned); - } - - // Calculate new top using result. Object size may be in words so a shift is - // required to get the number of bytes. - if ((flags & SIZE_IN_WORDS) != 0) { - add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC); - } else { - add(result_end, result, Operand(object_size), SetCC); - } - - // Update allocation top. result temporarily holds the new top. - if (emit_debug_code()) { - tst(result_end, Operand(kObjectAlignmentMask)); - Check(eq, kUnalignedAllocationInNewSpace); - } - // The top pointer is not updated for allocation folding dominators. - str(result_end, MemOperand(top_address)); - - add(result, result, Operand(kHeapObjectTag)); -} - -void MacroAssembler::FastAllocate(int object_size, Register result, - Register scratch1, Register scratch2, - AllocationFlags flags) { - DCHECK(object_size <= kMaxRegularHeapObjectSize); - DCHECK(!AreAliased(result, scratch1, scratch2, ip)); - - // Make object size into bytes. - if ((flags & SIZE_IN_WORDS) != 0) { - object_size *= kPointerSize; - } - DCHECK_EQ(0, object_size & kObjectAlignmentMask); - - ExternalReference allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), flags); - - // Set up allocation top address register. - Register top_address = scratch1; - Register result_end = scratch2; - mov(top_address, Operand(allocation_top)); - ldr(result, MemOperand(top_address)); - - if ((flags & DOUBLE_ALIGNMENT) != 0) { - // Align the next allocation. Storing the filler map without checking top is - // safe in new-space because the limit of the heap is aligned there. 
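The DOUBLE_ALIGNMENT blocks in Allocate and the removed FastAllocate bump the allocation pointer past a one-word filler when it is only word aligned, so the object itself starts on an 8-byte boundary. A hedged sketch of that step; the filler marker below is a placeholder, the real code stores the one_pointer_filler_map, and top is assumed to be at least 4-byte aligned as heap tops always are:

```cpp
#include <cstdint>
#include <cstring>

constexpr uintptr_t kDoubleSize = 8;
constexpr uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;
constexpr uint32_t kOnePointerFillerMarker = 0xF1F1F1F1u;  // placeholder

// Returns the address at which the new object may start, writing a
// one-word filler first when the bump pointer was only 4-byte aligned,
// mirroring the and_/str ... PostIndex sequence above.
char* AlignNewSpaceTop(char* top) {
  if (reinterpret_cast<uintptr_t>(top) & kDoubleAlignmentMask) {
    std::memcpy(top, &kOnePointerFillerMarker, sizeof(kOnePointerFillerMarker));
    top += kDoubleSize / 2;  // skip one 4-byte word
  }
  return top;
}
```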
- STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); - and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC); - Label aligned; - b(eq, &aligned); - mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); - str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex)); - bind(&aligned); - } - - // Calculate new top using result. Object size may be in words so a shift is - // required to get the number of bytes. We must preserve the ip register at - // this point, so we cannot just use add(). - DCHECK(object_size > 0); - Register source = result; - int shift = 0; - while (object_size != 0) { - if (((object_size >> shift) & 0x03) == 0) { - shift += 2; - } else { - int bits = object_size & (0xff << shift); - object_size -= bits; - shift += 8; - Operand bits_operand(bits); - DCHECK(bits_operand.instructions_required(this) == 1); - add(result_end, source, bits_operand); - source = result_end; - } - } - - // The top pointer is not updated for allocation folding dominators. str(result_end, MemOperand(top_address)); + // Tag object. add(result, result, Operand(kHeapObjectTag)); } @@ -2219,7 +2097,8 @@ void MacroAssembler::CompareObjectType(Register object, Register map, Register type_reg, InstanceType type) { - const Register temp = type_reg.is(no_reg) ? ip : type_reg; + UseScratchRegisterScope temps(this); + const Register temp = type_reg.is(no_reg) ? temps.Acquire() : type_reg; ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); CompareInstanceType(map, temp, type); @@ -2229,11 +2108,6 @@ void MacroAssembler::CompareObjectType(Register object, void MacroAssembler::CompareInstanceType(Register map, Register type_reg, InstanceType type) { - // Registers map and type_reg can be ip. These two lines assert - // that ip can be used with the two instructions (the constants - // will never need ip). - STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); - STATIC_ASSERT(LAST_TYPE < 256); ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); cmp(type_reg, Operand(type)); } @@ -2241,9 +2115,11 @@ void MacroAssembler::CompareInstanceType(Register map, void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) { - DCHECK(!obj.is(ip)); - LoadRoot(ip, index); - cmp(obj, ip); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + DCHECK(!obj.is(scratch)); + LoadRoot(scratch, index); + cmp(obj, scratch); } void MacroAssembler::CompareMap(Register obj, @@ -2277,18 +2153,17 @@ void MacroAssembler::CheckMap(Register obj, bind(&success); } - -void MacroAssembler::CheckMap(Register obj, - Register scratch, - Heap::RootListIndex index, - Label* fail, +void MacroAssembler::CheckMap(Register obj, Register scratch, + Heap::RootListIndex index, Label* fail, SmiCheckType smi_check_type) { + UseScratchRegisterScope temps(this); + Register root_register = temps.Acquire(); if (smi_check_type == DO_SMI_CHECK) { JumpIfSmi(obj, fail); } ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); - LoadRoot(ip, index); - cmp(scratch, ip); + LoadRoot(root_register, index); + cmp(scratch, root_register); b(ne, fail); } @@ -2319,21 +2194,49 @@ void MacroAssembler::GetMapConstructor(Register result, Register map, } void MacroAssembler::CallStub(CodeStub* stub, - TypeFeedbackId ast_id, Condition cond) { DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 
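The recurring change in this file shows up again just above in CompareObjectType, CompareRoot and CheckMap: instead of clobbering the fixed ip register, helpers now acquire a temporary from the assembler and release it automatically when the scope ends. A hedged sketch of that RAII pattern; this is the shape of the mechanism, not the real V8 UseScratchRegisterScope API:

```cpp
#include <cassert>
#include <cstdint>

struct AssemblerState {
  uint32_t free_scratch_mask;  // bit i set => register i is available
};

class ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(AssemblerState* assm)
      : assm_(assm), saved_mask_(assm->free_scratch_mask) {}
  ~ScratchRegisterScope() { assm_->free_scratch_mask = saved_mask_; }  // release

  int Acquire() {
    assert(assm_->free_scratch_mask != 0 && "no scratch register left");
    int code = 0;
    while ((assm_->free_scratch_mask & (1u << code)) == 0) ++code;
    assm_->free_scratch_mask &= ~(1u << code);  // mark as in use
    return code;
  }

 private:
  AssemblerState* assm_;
  uint32_t saved_mask_;
};
```

Nesting scopes composes naturally: an inner scope can only hand out registers its enclosing scope has not already acquired, which is what makes dropping the ip-specific DCHECKs above safe.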
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond, - CAN_INLINE_TARGET_ADDRESS, false); + Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, CAN_INLINE_TARGET_ADDRESS, + false); } +void TurboAssembler::CallStubDelayed(CodeStub* stub) { + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. + + // Block constant pool for the call instruction sequence. + BlockConstPoolScope block_const_pool(this); + Label start; + bind(&start); + +#ifdef DEBUG + // Check the expected size before generating code to ensure we assume the same + // constant pool availability (e.g., whether constant pool is full or not). + int expected_size = CallStubSize(); +#endif + + // Call sequence on V7 or later may be : + // movw ip, #... @ call address low 16 + // movt ip, #... @ call address high 16 + // blx ip + // @ return address + // Or for pre-V7 or values that may be back-patched + // to avoid ICache flushes: + // ldr ip, [pc, #...] @ call address + // blx ip + // @ return address + + mov(ip, Operand::EmbeddedCode(stub)); + blx(ip, al); + + DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); +} void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } - -bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { - return has_frame_ || !stub->SometimesSetsUpAFrame(); +bool TurboAssembler::AllowThisStubCall(CodeStub* stub) { + return has_frame() || !stub->SometimesSetsUpAFrame(); } void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { @@ -2342,8 +2245,10 @@ void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { vmov(value.low(), smi); vcvt_f64_s32(value, 1); } else { - SmiUntag(ip, smi); - vmov(value.low(), ip); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + SmiUntag(scratch, smi); + vmov(value.low(), scratch); vcvt_f64_s32(value, value.low()); } } @@ -2415,22 +2320,24 @@ void MacroAssembler::TryInt32Floor(Register result, bind(&exception); } -void MacroAssembler::TryInlineTruncateDoubleToI(Register result, +void TurboAssembler::TryInlineTruncateDoubleToI(Register result, DwVfpRegister double_input, Label* done) { LowDwVfpRegister double_scratch = kScratchDoubleReg; vcvt_s32_f64(double_scratch.low(), double_input); vmov(result, double_scratch.low()); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // If result is not saturated (0x7fffffff or 0x80000000), we are done. - sub(ip, result, Operand(1)); - cmp(ip, Operand(0x7ffffffe)); + sub(scratch, result, Operand(1)); + cmp(scratch, Operand(0x7ffffffe)); b(lt, done); } - -void MacroAssembler::TruncateDoubleToI(Register result, - DwVfpRegister double_input) { +void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result, + DwVfpRegister double_input) { Label done; TryInlineTruncateDoubleToI(result, double_input, &done); @@ -2440,8 +2347,7 @@ void MacroAssembler::TruncateDoubleToI(Register result, sub(sp, sp, Operand(kDoubleSize)); // Put input on stack. 
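TryInlineTruncateDoubleToI above trusts the inline vcvt_s32_f64 result only when it is not one of the two saturation values; subtracting one first folds the checks for 0x7fffffff and 0x80000000 into a single signed compare. The same predicate in C++:

```cpp
#include <cstdint>

// vcvt_s32_f64 saturates out-of-range doubles to INT32_MIN or INT32_MAX, so
// the inline result is only trusted when it is neither of those values.
bool InlineTruncationSucceeded(int32_t result) {
  uint32_t probe = static_cast<uint32_t>(result) - 1u;  // wraps like the ARM sub
  return static_cast<int32_t>(probe) < 0x7ffffffe;      // signed, matching blt
}
// Only result == INT32_MAX or result == INT32_MIN make this return false;
// every other value falls through to the fast path.
```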
vstr(double_input, MemOperand(sp, 0)); - DoubleToIStub stub(isolate(), sp, result, 0, true, true); - CallStub(&stub); + CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true)); add(sp, sp, Operand(kDoubleSize)); pop(lr); @@ -2449,48 +2355,6 @@ void MacroAssembler::TruncateDoubleToI(Register result, bind(&done); } - -void MacroAssembler::TruncateHeapNumberToI(Register result, - Register object) { - Label done; - LowDwVfpRegister double_scratch = kScratchDoubleReg; - DCHECK(!result.is(object)); - - vldr(double_scratch, - MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); - TryInlineTruncateDoubleToI(result, double_scratch, &done); - - // If we fell through then inline version didn't succeed - call stub instead. - push(lr); - DoubleToIStub stub(isolate(), - object, - result, - HeapNumber::kValueOffset - kHeapObjectTag, - true, - true); - CallStub(&stub); - pop(lr); - - bind(&done); -} - - -void MacroAssembler::TruncateNumberToI(Register object, - Register result, - Register heap_number_map, - Register scratch1, - Label* not_number) { - Label done; - DCHECK(!result.is(object)); - - UntagAndJumpIfSmi(result, object, &done); - JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); - TruncateHeapNumberToI(result, object); - - bind(&done); -} - - void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits) { @@ -2510,6 +2374,17 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, and_(dst, src, Operand((1 << num_least_bits) - 1)); } +void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, + SaveFPRegsMode save_doubles) { + const Runtime::Function* f = Runtime::FunctionForId(fid); + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. 
+ mov(r0, Operand(f->nargs)); + mov(r1, Operand(ExternalReference(f, isolate()))); + CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles)); +} void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, @@ -2567,16 +2442,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, Jump(stub.GetCode(), RelocInfo::CODE_TARGET); } -void MacroAssembler::SetCounter(StatsCounter* counter, int value, - Register scratch1, Register scratch2) { - if (FLAG_native_code_counters && counter->Enabled()) { - mov(scratch1, Operand(value)); - mov(scratch2, Operand(ExternalReference(counter))); - str(scratch1, MemOperand(scratch2)); - } -} - - void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK(value > 0); @@ -2600,15 +2465,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, } } - -void MacroAssembler::Assert(Condition cond, BailoutReason reason) { +void TurboAssembler::Assert(Condition cond, BailoutReason reason) { if (emit_debug_code()) Check(cond, reason); } - - -void MacroAssembler::Check(Condition cond, BailoutReason reason) { +void TurboAssembler::Check(Condition cond, BailoutReason reason) { Label L; b(cond, &L); Abort(reason); @@ -2616,8 +2478,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) { bind(&L); } - -void MacroAssembler::Abort(BailoutReason reason) { +void TurboAssembler::Abort(BailoutReason reason) { Label abort_start; bind(&abort_start); #ifdef DEBUG @@ -2633,13 +2494,10 @@ void MacroAssembler::Abort(BailoutReason reason) { } #endif - // Check if Abort() has already been initialized. - DCHECK(isolate()->builtins()->Abort()->IsHeapObject()); - Move(r1, Smi::FromInt(static_cast(reason))); // Disable stub call restrictions to always allow calls to abort. - if (!has_frame_) { + if (!has_frame()) { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. FrameScope scope(this, StackFrame::NONE); @@ -2698,7 +2556,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, } } -void MacroAssembler::InitializeRootRegister() { +void TurboAssembler::InitializeRootRegister() { ExternalReference roots_array_start = ExternalReference::roots_array_start(isolate()); mov(kRootRegister, Operand(roots_array_start)); @@ -2759,7 +2617,7 @@ void MacroAssembler::NonNegativeSmiTst(Register value) { tst(value, Operand(kSmiTagMask | kSmiSignMask)); } -void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) { +void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); } @@ -2795,6 +2653,17 @@ void MacroAssembler::AssertSmi(Register object) { } } +void MacroAssembler::AssertFixedArray(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Check(ne, kOperandIsASmiAndNotAFixedArray); + push(object); + CompareObjectType(object, object, object, FIXED_ARRAY_TYPE); + pop(object); + Check(eq, kOperandIsNotAFixedArray); + } +} void MacroAssembler::AssertFunction(Register object) { if (emit_debug_code()) { @@ -2821,8 +2690,7 @@ void MacroAssembler::AssertBoundFunction(Register object) { } } -void MacroAssembler::AssertGeneratorObject(Register object, Register flags) { - // `flags` should be an untagged integer. 
See `SuspendFlags` in src/globals.h +void MacroAssembler::AssertGeneratorObject(Register object) { if (!emit_debug_code()) return; tst(object, Operand(kSmiTagMask)); Check(ne, kOperandIsASmiAndNotAGeneratorObject); @@ -2832,17 +2700,14 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) { push(object); ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); - Label async, do_check; - tst(flags, Operand(static_cast(SuspendFlags::kGeneratorTypeMask))); - b(ne, &async); - // Check if JSGeneratorObject - CompareInstanceType(map, object, JS_GENERATOR_OBJECT_TYPE); - jmp(&do_check); + Label do_check; + Register instance_type = object; + CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE); + b(eq, &do_check); - bind(&async); - // Check if JSAsyncGeneratorObject - CompareInstanceType(map, object, JS_ASYNC_GENERATOR_OBJECT_TYPE); + // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType) + cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE)); bind(&do_check); // Restore generator object to register and perform assertion @@ -2975,25 +2840,12 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor, LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); - str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); + str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset)); str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); str(value, FieldMemOperand(result, JSValue::kValueOffset)); STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); } -void MacroAssembler::InitializeFieldsWithFiller(Register current_address, - Register end_address, - Register filler) { - Label loop, entry; - b(&entry); - bind(&loop); - str(filler, MemOperand(current_address, kPointerSize, PostIndex)); - bind(&entry); - cmp(current_address, end_address); - b(lo, &loop); -} - - void MacroAssembler::CheckFor32DRegs(Register scratch) { mov(scratch, Operand(ExternalReference::cpu_features())); ldr(scratch, MemOperand(scratch)); @@ -3019,7 +2871,7 @@ void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { } template -void MacroAssembler::FloatMaxHelper(T result, T left, T right, +void TurboAssembler::FloatMaxHelper(T result, T left, T right, Label* out_of_line) { // This trivial case is caught sooner, so that the out-of-line code can be // completely avoided. @@ -3050,7 +2902,7 @@ void MacroAssembler::FloatMaxHelper(T result, T left, T right, } template -void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { +void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { DCHECK(!left.is(right)); // ARMv8: At least one of left and right is a NaN. @@ -3063,7 +2915,7 @@ void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { } template -void MacroAssembler::FloatMinHelper(T result, T left, T right, +void TurboAssembler::FloatMinHelper(T result, T left, T right, Label* out_of_line) { // This trivial case is caught sooner, so that the out-of-line code can be // completely avoided. @@ -3109,7 +2961,7 @@ void MacroAssembler::FloatMinHelper(T result, T left, T right, } template -void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { +void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { DCHECK(!left.is(right)); // At least one of left and right is a NaN. 
Use vadd to propagate the NaN @@ -3117,42 +2969,42 @@ void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { vadd(result, left, right); } -void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left, +void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label* out_of_line) { FloatMaxHelper(result, left, right, out_of_line); } -void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left, +void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label* out_of_line) { FloatMinHelper(result, left, right, out_of_line); } -void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left, +void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, Label* out_of_line) { FloatMaxHelper(result, left, right, out_of_line); } -void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left, +void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, Label* out_of_line) { FloatMinHelper(result, left, right, out_of_line); } -void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, +void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right) { FloatMaxOutOfLineHelper(result, left, right); } -void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, +void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right) { FloatMinOutOfLineHelper(result, left, right); } -void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, +void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right) { FloatMaxOutOfLineHelper(result, left, right); } -void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, +void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right) { FloatMinOutOfLineHelper(result, left, right); } @@ -3174,8 +3026,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte( static const int kRegisterPassedArguments = 4; - -int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, +int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; if (use_eabi_hardfloat()) { @@ -3197,55 +3048,19 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } - -void MacroAssembler::EmitSeqStringSetCharCheck(Register string, - Register index, - Register value, - uint32_t encoding_mask) { - Label is_object; - SmiTst(string); - Check(ne, kNonObject); - - ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); - ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); - - and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); - cmp(ip, Operand(encoding_mask)); - Check(eq, kUnexpectedStringType); - - // The index is assumed to be untagged coming in, tag it to compare with the - // string length without using a temp register, it is restored at the end of - // this function. 
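The out-of-line FloatMin/FloatMax paths above handle the branch where, per the comment, at least one operand is a NaN by emitting a single vadd: IEEE-754 addition propagates (and quiets) a NaN operand, which is exactly the result Math.min/Math.max must produce. A tiny sketch of that property:

```cpp
#include <cassert>
#include <cmath>

// Reached only when at least one operand is a NaN; the sum is then the
// desired (quiet) NaN result, matching the emitted vadd(result, left, right).
float MinMaxNaNFallback(float left, float right) {
  return left + right;
}

int main() {
  assert(std::isnan(MinMaxNaNFallback(1.0f, std::nanf(""))));
  assert(std::isnan(MinMaxNaNFallback(std::nanf(""), -2.5f)));
  return 0;
}
```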
- Label index_tag_ok, index_tag_bad; - TrySmiTag(index, index, &index_tag_bad); - b(&index_tag_ok); - bind(&index_tag_bad); - Abort(kIndexIsTooLarge); - bind(&index_tag_ok); - - ldr(ip, FieldMemOperand(string, String::kLengthOffset)); - cmp(index, ip); - Check(lt, kIndexIsTooLarge); - - cmp(index, Operand(Smi::kZero)); - Check(ge, kIndexIsNegative); - - SmiUntag(index, index); -} - - -void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, - int num_double_arguments, - Register scratch) { +void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, + int num_double_arguments) { int frame_alignment = ActivationFrameAlignment(); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); if (frame_alignment > kPointerSize) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); // Make stack end at alignment and make room for num_arguments - 4 words // and the original value of sp. mov(scratch, sp); sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); - DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); and_(sp, sp, Operand(-frame_alignment)); str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); } else { @@ -3253,14 +3068,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, } } - -void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, - Register scratch) { - PrepareCallCFunction(num_reg_arguments, 0, scratch); -} - - -void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { +void TurboAssembler::MovToFloatParameter(DwVfpRegister src) { DCHECK(src.is(d0)); if (!use_eabi_hardfloat()) { vmov(r0, r1, src); @@ -3269,12 +3077,11 @@ void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { // On ARM this is just a synonym to make the purpose clear. 
-void MacroAssembler::MovToFloatResult(DwVfpRegister src) { +void TurboAssembler::MovToFloatResult(DwVfpRegister src) { MovToFloatParameter(src); } - -void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, +void TurboAssembler::MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2) { DCHECK(src1.is(d0)); DCHECK(src2.is(d1)); @@ -3284,35 +3091,30 @@ void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, } } - -void MacroAssembler::CallCFunction(ExternalReference function, +void TurboAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { - mov(ip, Operand(function)); - CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + mov(scratch, Operand(function)); + CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments); } - -void MacroAssembler::CallCFunction(Register function, - int num_reg_arguments, +void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } - -void MacroAssembler::CallCFunction(ExternalReference function, +void TurboAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } - -void MacroAssembler::CallCFunction(Register function, - int num_arguments) { +void TurboAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } - -void MacroAssembler::CallCFunctionHelper(Register function, +void TurboAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -3325,7 +3127,7 @@ void MacroAssembler::CallCFunctionHelper(Register function, int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { - DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); + DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); Label alignment_as_expected; tst(sp, Operand(frame_alignment_mask)); b(eq, &alignment_as_expected); @@ -3350,13 +3152,8 @@ void MacroAssembler::CallCFunctionHelper(Register function, } } - -void MacroAssembler::CheckPageFlag( - Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met) { +void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, + Condition cc, Label* condition_met) { DCHECK(cc == eq || cc == ne); Bfc(scratch, object, 0, kPageSizeBits); ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); @@ -3385,19 +3182,22 @@ void MacroAssembler::HasColor(Register object, GetMarkBits(object, bitmap_scratch, mask_scratch); Label other_color, word_boundary; - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); - tst(ip, Operand(mask_scratch)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + ldr(scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + tst(scratch, Operand(mask_scratch)); b(first_bit == 1 ? eq : ne, &other_color); // Shift left 1 by adding. add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); b(eq, &word_boundary); - tst(ip, Operand(mask_scratch)); + tst(scratch, Operand(mask_scratch)); b(second_bit == 1 ? 
ne : eq, has_color); jmp(&other_color); bind(&word_boundary); - ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); - tst(ip, Operand(1)); + ldr(scratch, + MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); + tst(scratch, Operand(1)); b(second_bit == 1 ? ne : eq, has_color); bind(&other_color); } @@ -3410,17 +3210,19 @@ void MacroAssembler::GetMarkBits(Register addr_reg, and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; - Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); - add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); - mov(ip, Operand(1)); - mov(mask_reg, Operand(ip, LSL, mask_reg)); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + Ubfx(scratch, addr_reg, kLowBits, kPageSizeBits - kLowBits); + add(bitmap_reg, bitmap_reg, Operand(scratch, LSL, kPointerSizeLog2)); + mov(scratch, Operand(1)); + mov(mask_reg, Operand(scratch, LSL, mask_reg)); } void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch, Register mask_scratch, Register load_scratch, Label* value_is_white) { - DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch)); GetMarkBits(value, bitmap_scratch, mask_scratch); // If the value is black or grey we don't need to do anything. @@ -3442,26 +3244,6 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { } -void MacroAssembler::ClampDoubleToUint8(Register result_reg, - DwVfpRegister input_reg, - LowDwVfpRegister double_scratch) { - Label done; - - // Handle inputs >= 255 (including +infinity). - Vmov(double_scratch, 255.0, result_reg); - mov(result_reg, Operand(255)); - VFPCompareAndSetFlags(input_reg, double_scratch); - b(ge, &done); - - // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest - // rounding mode will provide the correct result. - vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding); - vmov(result_reg, double_scratch.low()); - - bind(&done); -} - - void MacroAssembler::LoadInstanceDescriptors(Register map, Register descriptors) { ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); @@ -3556,51 +3338,6 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) { b(ne, &next); } -void MacroAssembler::TestJSArrayForAllocationMemento( - Register receiver_reg, - Register scratch_reg, - Label* no_memento_found) { - Label map_check; - Label top_check; - ExternalReference new_space_allocation_top_adr = - ExternalReference::new_space_allocation_top_address(isolate()); - const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag; - const int kMementoLastWordOffset = - kMementoMapOffset + AllocationMemento::kSize - kPointerSize; - - // Bail out if the object is not in new space. - JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found); - // If the object is in new space, we need to check whether it is on the same - // page as the current top. - add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); - mov(ip, Operand(new_space_allocation_top_adr)); - ldr(ip, MemOperand(ip)); - eor(scratch_reg, scratch_reg, Operand(ip)); - tst(scratch_reg, Operand(~Page::kPageAlignmentMask)); - b(eq, &top_check); - // The object is on a different page than allocation top. 
Bail out if the - // object sits on the page boundary as no memento can follow and we cannot - // touch the memory following it. - add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); - eor(scratch_reg, scratch_reg, Operand(receiver_reg)); - tst(scratch_reg, Operand(~Page::kPageAlignmentMask)); - b(ne, no_memento_found); - // Continue with the actual map check. - jmp(&map_check); - // If top is on the same page as the current object, we need to check whether - // we are below top. - bind(&top_check); - add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); - mov(ip, Operand(new_space_allocation_top_adr)); - ldr(ip, MemOperand(ip)); - cmp(scratch_reg, ip); - b(ge, no_memento_found); - // Memento map check. - bind(&map_check); - ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset)); - cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map())); -} - Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, @@ -3623,7 +3360,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, return candidate; } UNREACHABLE(); - return no_reg; } #ifdef DEBUG @@ -3699,29 +3435,6 @@ void CodePatcher::EmitCondition(Condition cond) { masm_.emit(instr); } - -void MacroAssembler::TruncatingDiv(Register result, - Register dividend, - int32_t divisor) { - DCHECK(!dividend.is(result)); - DCHECK(!dividend.is(ip)); - DCHECK(!result.is(ip)); - base::MagicNumbersForDivision mag = - base::SignedDivisionByConstant(bit_cast(divisor)); - mov(ip, Operand(mag.multiplier)); - bool neg = (mag.multiplier & (1U << 31)) != 0; - if (divisor > 0 && neg) { - smmla(result, dividend, ip, dividend); - } else { - smmul(result, dividend, ip); - if (divisor < 0 && !neg && mag.multiplier > 0) { - sub(result, result, Operand(dividend)); - } - } - if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); - add(result, result, Operand(dividend, LSR, 31)); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 506364686f6671..7d4d7344a43239 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -86,255 +86,48 @@ enum TargetAddressStorageMode { NEVER_INLINE_TARGET_ADDRESS }; -// MacroAssembler implements a collection of frequently used macros. -class MacroAssembler: public Assembler { +class TurboAssembler : public Assembler { public: - MacroAssembler(Isolate* isolate, void* buffer, int size, - CodeObjectRequired create_code_object); - - int jit_cookie() const { return jit_cookie_; } - - Isolate* isolate() const { return isolate_; } - - // Returns the size of a call in instructions. Note, the value returned is - // only valid as long as no entries are added to the constant pool between - // checking the call size and emitting the actual call. - static int CallSize(Register target, Condition cond = al); - int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); - int CallStubSize(CodeStub* stub, - TypeFeedbackId ast_id = TypeFeedbackId::None(), - Condition cond = al); - - // Jump, Call, and Ret pseudo instructions implementing inter-working. 
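The TruncatingDiv helper removed above implemented signed division by a compile-time constant as a multiply by a precomputed magic number followed by shift and sign corrections, with smmul/smmla taking the high 32 bits of the product. A hedged plain-C++ rendering for one concrete divisor; the multiplier/shift pair for 3 is the standard published value and is an assumption of this sketch, not something taken from the diff:

```cpp
#include <cstdint>

// Division by 3 without a divide instruction: the positive-multiplier case
// of the removed TruncatingDiv (divisors whose magic multiplier has the top
// bit set additionally need the smmla/sub corrections shown in the diff).
int32_t TruncatingDivBy3(int32_t dividend) {
  const int32_t kMultiplier = 0x55555556;  // ceil(2^32 / 3)
  const int kShift = 0;
  // smmul: high 32 bits of the signed 64-bit product.
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * kMultiplier) >> 32);
  if (kShift > 0) result >>= kShift;               // mag.shift correction
  result += static_cast<uint32_t>(dividend) >> 31;  // round toward zero
  return result;
}
// TruncatingDivBy3(7) == 2 and TruncatingDivBy3(-7) == -2, matching C++'s
// truncating integer division.
```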
- void Jump(Register target, Condition cond = al); - void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); - void Jump(Handle code, RelocInfo::Mode rmode, Condition cond = al); - void Call(Register target, Condition cond = al); - void Call(Address target, RelocInfo::Mode rmode, Condition cond = al, - TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS, - bool check_constant_pool = true); - void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al, - TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS, - bool check_constant_pool = true); - int CallSize(Handle code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - TypeFeedbackId ast_id = TypeFeedbackId::None(), - Condition cond = al); - void Ret(Condition cond = al); - - // Used for patching in calls to the deoptimizer. - void CallDeoptimizer(Address target); - static int CallDeoptimizerSize(); - - // Emit code that loads |parameter_index|'th parameter from the stack to - // the register according to the CallInterfaceDescriptor definition. - // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed - // below the caller's sp. - template - void LoadParameterFromStack( - Register reg, typename Descriptor::ParameterIndices parameter_index, - int sp_to_ra_offset_in_words = 0) { - DCHECK(Descriptor::kPassLastArgsOnStack); - UNIMPLEMENTED(); - } - - // Emit code to discard a non-negative number of pointer-sized elements - // from the stack, clobbering only the sp register. - void Drop(int count, Condition cond = al); - void Drop(Register count, Condition cond = al); - - void Ret(int drop, Condition cond = al); - - // Swap two registers. If the scratch register is omitted then a slightly - // less efficient form using xor instead of mov is emitted. - void Swap(Register reg1, - Register reg2, - Register scratch = no_reg, - Condition cond = al); - - void Mls(Register dst, Register src1, Register src2, Register srcA, - Condition cond = al); - void And(Register dst, Register src1, const Operand& src2, - Condition cond = al); - void Ubfx(Register dst, Register src, int lsb, int width, - Condition cond = al); - void Sbfx(Register dst, Register src, int lsb, int width, - Condition cond = al); - // The scratch register is not used for ARMv7. - // scratch can be the same register as src (in which case it is trashed), but - // not the same as dst. - void Bfi(Register dst, - Register src, - Register scratch, - int lsb, - int width, - Condition cond = al); - void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al); - - void Call(Label* target); - void Push(Register src) { push(src); } - void Pop(Register dst) { pop(dst); } - - // Register move. May do nothing if the registers are identical. 
- void Move(Register dst, Smi* smi); - void Move(Register dst, Handle value); - void Move(Register dst, Register src, Condition cond = al); - void Move(Register dst, const Operand& src, SBit sbit = LeaveCC, - Condition cond = al) { - if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) { - mov(dst, src, sbit, cond); + TurboAssembler(Isolate* isolate, void* buffer, int buffer_size, + CodeObjectRequired create_code_object) + : Assembler(isolate, buffer, buffer_size), isolate_(isolate) { + if (create_code_object == CodeObjectRequired::kYes) { + code_object_ = + Handle::New(isolate->heap()->undefined_value(), isolate); } } - void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al); - void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al); - void Move(QwNeonRegister dst, QwNeonRegister src); - // Register swap. - void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1); - void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1); - - void Load(Register dst, const MemOperand& src, Representation r); - void Store(Register src, const MemOperand& dst, Representation r); - - // Load an object from the root table. - void LoadRoot(Register destination, - Heap::RootListIndex index, - Condition cond = al); - // Store an object to the root table. - void StoreRoot(Register source, - Heap::RootListIndex index, - Condition cond = al); - // --------------------------------------------------------------------------- - // GC Support - - void IncrementalMarkingRecordWriteHelper(Register object, - Register value, - Register address); - - enum RememberedSetFinalAction { - kReturnAtEnd, - kFallThroughAtEnd - }; - - // Record in the remembered set the fact that we have a pointer to new space - // at the address pointed to by the addr register. Only works if addr is not - // in new space. - void RememberedSetHelper(Register object, // Used for debug code. - Register addr, - Register scratch, - SaveFPRegsMode save_fp, - RememberedSetFinalAction and_then); - - void CheckPageFlag(Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met); + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() const { return has_frame_; } - // Check if object is in new space. Jumps if the object is not in new space. - // The register scratch can be object itself, but scratch will be clobbered. - void JumpIfNotInNewSpace(Register object, - Register scratch, - Label* branch) { - InNewSpace(object, scratch, eq, branch); - } + Isolate* isolate() const { return isolate_; } - // Check if object is in new space. Jumps if the object is in new space. - // The register scratch can be object itself, but it will be clobbered. - void JumpIfInNewSpace(Register object, - Register scratch, - Label* branch) { - InNewSpace(object, scratch, ne, branch); + Handle CodeObject() { + DCHECK(!code_object_.is_null()); + return code_object_; } - // Check if an object has a given incremental marking color. - void HasColor(Register object, - Register scratch0, - Register scratch1, - Label* has_color, - int first_bit, - int second_bit); - - void JumpIfBlack(Register object, - Register scratch0, - Register scratch1, - Label* on_black); - - // Checks the color of an object. If the object is white we jump to the - // incremental marker. - void JumpIfWhite(Register value, Register scratch1, Register scratch2, - Register scratch3, Label* value_is_white); + // Activation support. 
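Taken together, the header hunks split the old monolithic class: the new TurboAssembler base owns the isolate, the code object, the has_frame bit and the relocated low-level helpers, while the heavier macros stay behind. A schematic sketch of the resulting hierarchy; member lists are abbreviated, and the MacroAssembler side is inferred from how the rest of this header reads rather than quoted from it:

```cpp
// Schematic only; the real declarations live in macro-assembler-arm.h.
class Assembler {
  // Raw instruction emission: mov, ldr, str, vmov, ...
};

class TurboAssembler : public Assembler {
 public:
  bool has_frame() const { return has_frame_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  // Plus the relocated helpers: EnterFrame/LeaveFrame, StubPrologue,
  // Push/Pop, PrepareCallCFunction, CallCFunction, ...
 private:
  bool has_frame_ = false;
};

// Presumed shape: the heavier macros (Allocate, CallRuntime, RecordWrite*,
// ...) remain on a MacroAssembler built on top of the new base.
class MacroAssembler : public TurboAssembler {};
```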
+ void EnterFrame(StackFrame::Type type, + bool load_constant_pool_pointer_reg = false); + // Returns the pc offset at which the frame ends. + int LeaveFrame(StackFrame::Type type); - // Notify the garbage collector that we wrote a pointer into an object. - // |object| is the object being stored into, |value| is the object being - // stored. value and scratch registers are clobbered by the operation. - // The offset is the offset from the start of the object, not the offset from - // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off). - void RecordWriteField( - Register object, - int offset, - Register value, - Register scratch, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK, - PointersToHereCheck pointers_to_here_check_for_value = - kPointersToHereMaybeInteresting); + // Push a fixed frame, consisting of lr, fp + void PushCommonFrame(Register marker_reg = no_reg); - // As above, but the offset has the tag presubtracted. For use with - // MemOperand(reg, off). - inline void RecordWriteContextSlot( - Register context, - int offset, - Register value, - Register scratch, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK, - PointersToHereCheck pointers_to_here_check_for_value = - kPointersToHereMaybeInteresting) { - RecordWriteField(context, - offset + kHeapObjectTag, - value, - scratch, - lr_status, - save_fp, - remembered_set_action, - smi_check, - pointers_to_here_check_for_value); - } + // Generates function and stub prologue code. + void StubPrologue(StackFrame::Type type); + void Prologue(bool code_pre_aging); - // Notify the garbage collector that we wrote a code entry into a - // JSFunction. Only scratch is clobbered by the operation. - void RecordWriteCodeEntryField(Register js_function, Register code_entry, - Register scratch); + // Push a standard frame, consisting of lr, fp, context and JS function + void PushStandardFrame(Register function_reg); - void RecordWriteForMap( - Register object, - Register map, - Register dst, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp); + void InitializeRootRegister(); - // For a given |object| notify the garbage collector that the slot |address| - // has been written. |value| is the object being stored. The value and - // address registers are clobbered by the operation. - void RecordWrite( - Register object, - Register address, - Register value, - LinkRegisterStatus lr_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK, - PointersToHereCheck pointers_to_here_check_for_value = - kPointersToHereMaybeInteresting); + void Push(Register src) { push(src); } - // Push a handle. - void Push(Handle handle); + void Push(Handle handle); void Push(Smi* smi); // Push two registers. Pushes leftmost register first (to highest address). @@ -363,17 +156,12 @@ class MacroAssembler: public Assembler { } // Push four registers. Pushes leftmost register first (to highest address). 
- void Push(Register src1, - Register src2, - Register src3, - Register src4, + void Push(Register src1, Register src2, Register src3, Register src4, Condition cond = al) { if (src1.code() > src2.code()) { if (src2.code() > src3.code()) { if (src3.code() > src4.code()) { - stm(db_w, - sp, - src1.bit() | src2.bit() | src3.bit() | src4.bit(), + stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(), cond); } else { stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); @@ -418,6 +206,8 @@ class MacroAssembler: public Assembler { } } + void Pop(Register dst) { pop(dst); } + // Pop two registers. Pops rightmost register first (from lower address). void Pop(Register src1, Register src2, Condition cond = al) { DCHECK(!src1.is(src2)); @@ -446,18 +236,13 @@ class MacroAssembler: public Assembler { } // Pop four registers. Pops rightmost register first (from lower address). - void Pop(Register src1, - Register src2, - Register src3, - Register src4, + void Pop(Register src1, Register src2, Register src3, Register src4, Condition cond = al) { DCHECK(!AreAliased(src1, src2, src3, src4)); if (src1.code() > src2.code()) { if (src2.code() > src3.code()) { if (src3.code() > src4.code()) { - ldm(ia_w, - sp, - src1.bit() | src2.bit() | src3.bit() | src4.bit(), + ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(), cond); } else { ldr(src4, MemOperand(sp, 4, PostIndex), cond); @@ -473,11 +258,426 @@ class MacroAssembler: public Assembler { } } - // Push a fixed frame, consisting of lr, fp - void PushCommonFrame(Register marker_reg = no_reg); + // Before calling a C-function from generated code, align arguments on stack. + // After aligning the frame, non-register arguments must be stored in + // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments + // are word sized. If double arguments are used, this function assumes that + // all double arguments are stored before core registers; otherwise the + // correct alignment of the double values is not guaranteed. + // Some compilers/platforms require the stack to be aligned when calling + // C++ code. + // Needs a scratch register to do some arithmetic. This register will be + // trashed. + void PrepareCallCFunction(int num_reg_arguments, + int num_double_registers = 0); + + // Removes current frame and its arguments from the stack preserving + // the arguments and a return address pushed to the stack for the next call. + // Both |callee_args_count| and |caller_args_count_reg| do not include + // receiver. |callee_args_count| is not modified, |caller_args_count_reg| + // is trashed. + void PrepareForTailCall(const ParameterCount& callee_args_count, + Register caller_args_count_reg, Register scratch0, + Register scratch1); + + // There are two ways of passing double arguments on ARM, depending on + // whether soft or hard floating point ABI is used. These functions + // abstract parameter passing for the three different ways we call + // C functions from generated code. + void MovToFloatParameter(DwVfpRegister src); + void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2); + void MovToFloatResult(DwVfpRegister src); + + // Calls a C function and cleans up the space for arguments allocated + // by PrepareCallCFunction. The called function is not allowed to trigger a + // garbage collection, since that might move the code and invalidate the + // return address (unless this is somehow accounted for by the called + // function). 
+ void CallCFunction(ExternalReference function, int num_arguments); + void CallCFunction(Register function, int num_arguments); + void CallCFunction(ExternalReference function, int num_reg_arguments, + int num_double_arguments); + void CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments); + + void MovFromFloatParameter(DwVfpRegister dst); + void MovFromFloatResult(DwVfpRegister dst); + + // Calls Abort(msg) if the condition cond is not satisfied. + // Use --debug_code to enable. + void Assert(Condition cond, BailoutReason reason); + + // Like Assert(), but always enabled. + void Check(Condition cond, BailoutReason reason); + + // Print a message to stdout and abort execution. + void Abort(BailoutReason msg); + + inline bool AllowThisStubCall(CodeStub* stub); + + void LslPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, Register scratch, Register shift); + void LslPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, uint32_t shift); + void LsrPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, Register scratch, Register shift); + void LsrPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, uint32_t shift); + void AsrPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, Register scratch, Register shift); + void AsrPair(Register dst_low, Register dst_high, Register src_low, + Register src_high, uint32_t shift); + + // Returns the size of a call in instructions. Note, the value returned is + // only valid as long as no entries are added to the constant pool between + // checking the call size and emitting the actual call. + static int CallSize(Register target, Condition cond = al); + int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); + int CallSize(Handle code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + Condition cond = al); + int CallStubSize(); + + void CallStubDelayed(CodeStub* stub); + void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); + + // Jump, Call, and Ret pseudo instructions implementing inter-working. + void Call(Register target, Condition cond = al); + void Call(Address target, RelocInfo::Mode rmode, Condition cond = al, + TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS, + bool check_constant_pool = true); + void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + Condition cond = al, + TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS, + bool check_constant_pool = true); + void Call(Label* target); + + // Emit code to discard a non-negative number of pointer-sized elements + // from the stack, clobbering only the sp register. + void Drop(int count, Condition cond = al); + void Drop(Register count, Condition cond = al); + + void Ret(Condition cond = al); + void Ret(int drop, Condition cond = al); + + // Compare single values and move the result to the normal condition flags. + void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2, + const Condition cond = al); + void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2, + const Condition cond = al); + + // Compare double values and move the result to the normal condition flags. 
+ void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, + const Condition cond = al); + void VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2, + const Condition cond = al); + + // If the value is a NaN, canonicalize the value else, do nothing. + void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src, + const Condition cond = al); + void VFPCanonicalizeNaN(const DwVfpRegister value, + const Condition cond = al) { + VFPCanonicalizeNaN(value, value, cond); + } + + void VmovHigh(Register dst, DwVfpRegister src); + void VmovHigh(DwVfpRegister dst, Register src); + void VmovLow(Register dst, DwVfpRegister src); + void VmovLow(DwVfpRegister dst, Register src); + + void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, + Label* condition_met); + + void Jump(Register target, Condition cond = al); + void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); + void Jump(Handle code, RelocInfo::Mode rmode, Condition cond = al); + + // Perform a floating-point min or max operation with the + // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically + // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line + // code. The specific behaviour depends on supported instructions. + // + // These functions assume (and assert) that !left.is(right). It is permitted + // for the result to alias either input register. + void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, + Label* out_of_line); + void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, + Label* out_of_line); + void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, + Label* out_of_line); + void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, + Label* out_of_line); + + // Generate out-of-line cases for the macros above. + void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, + SwVfpRegister right); + void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, + SwVfpRegister right); + void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, + DwVfpRegister right); + void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, + DwVfpRegister right); + + void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane); + void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane); + void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane); + void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane, + NeonDataType dt, int lane); + void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, + SwVfpRegister src_lane, int lane); + + // Register move. May do nothing if the registers are identical. + void Move(Register dst, Smi* smi); + void Move(Register dst, Handle value); + void Move(Register dst, Register src, Condition cond = al); + void Move(Register dst, const Operand& src, SBit sbit = LeaveCC, + Condition cond = al) { + if (!src.IsRegister() || !src.rm().is(dst) || sbit != LeaveCC) { + mov(dst, src, sbit, cond); + } + } + void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al); + void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al); + void Move(QwNeonRegister dst, QwNeonRegister src); + + // Simulate s-register moves for imaginary s32 - s63 registers. + void VmovExtended(Register dst, int src_code); + void VmovExtended(int dst_code, Register src); + // Move between s-registers and imaginary s-registers. 
+ void VmovExtended(int dst_code, int src_code); + void VmovExtended(int dst_code, const MemOperand& src); + void VmovExtended(const MemOperand& dst, int src_code); + + // Register swap. + void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1); + void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1); + + // Get the actual activation frame alignment for target environment. + static int ActivationFrameAlignment(); + + void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al); + + void SmiUntag(Register reg, SBit s = LeaveCC) { + mov(reg, Operand::SmiUntag(reg), s); + } + void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { + mov(dst, Operand::SmiUntag(src), s); + } + + // Load an object from the root table. + void LoadRoot(Register destination, Heap::RootListIndex index, + Condition cond = al); + + // Jump if the register contains a smi. + void JumpIfSmi(Register value, Label* smi_label); + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + // + // Only public for the test code in test-code-stubs-arm.cc. + void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input, + Label* done); + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. + void TruncateDoubleToIDelayed(Zone* zone, Register result, + DwVfpRegister double_input); + + // EABI variant for double arguments in use. + bool use_eabi_hardfloat() { +#ifdef __arm__ + return base::OS::ArmUsingHardFloat(); +#elif USE_EABI_HARDFLOAT + return true; +#else + return false; +#endif + } + + private: + bool has_frame_ = false; + Isolate* const isolate_; + // This handle will be patched with the code object on installation. + Handle code_object_; + + // Compare single values and then load the fpscr flags to a register. + void VFPCompareAndLoadFlags(const SwVfpRegister src1, + const SwVfpRegister src2, + const Register fpscr_flags, + const Condition cond = al); + void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, + const Register fpscr_flags, + const Condition cond = al); + + // Compare double values and then load the fpscr flags to a register. + void VFPCompareAndLoadFlags(const DwVfpRegister src1, + const DwVfpRegister src2, + const Register fpscr_flags, + const Condition cond = al); + void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, + const Register fpscr_flags, + const Condition cond = al); + + void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); + + // Implementation helpers for FloatMin and FloatMax. + template + void FloatMaxHelper(T result, T left, T right, Label* out_of_line); + template + void FloatMinHelper(T result, T left, T right, Label* out_of_line); + template + void FloatMaxOutOfLineHelper(T result, T left, T right); + template + void FloatMinOutOfLineHelper(T result, T left, T right); + + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); +}; + +// MacroAssembler implements a collection of frequently used macros. 
+class MacroAssembler : public TurboAssembler { + public: + MacroAssembler(Isolate* isolate, void* buffer, int size, + CodeObjectRequired create_code_object); + + int jit_cookie() const { return jit_cookie_; } + + // Used for patching in calls to the deoptimizer. + void CallDeoptimizer(Address target); + static int CallDeoptimizerSize(); + + // Emit code that loads |parameter_index|'th parameter from the stack to + // the register according to the CallInterfaceDescriptor definition. + // |sp_to_caller_sp_offset_in_words| specifies the number of words pushed + // below the caller's sp. + template + void LoadParameterFromStack( + Register reg, typename Descriptor::ParameterIndices parameter_index, + int sp_to_ra_offset_in_words = 0) { + DCHECK(Descriptor::kPassLastArgsOnStack); + UNIMPLEMENTED(); + } + + // Swap two registers. If the scratch register is omitted then a slightly + // less efficient form using xor instead of mov is emitted. + void Swap(Register reg1, Register reg2, Register scratch = no_reg, + Condition cond = al); + + void Mls(Register dst, Register src1, Register src2, Register srcA, + Condition cond = al); + void And(Register dst, Register src1, const Operand& src2, + Condition cond = al); + void Ubfx(Register dst, Register src, int lsb, int width, + Condition cond = al); + void Sbfx(Register dst, Register src, int lsb, int width, + Condition cond = al); + // The scratch register is not used for ARMv7. + // scratch can be the same register as src (in which case it is trashed), but + // not the same as dst. + void Bfi(Register dst, Register src, Register scratch, int lsb, int width, + Condition cond = al); + + void PushObject(Handle object); + + void Load(Register dst, const MemOperand& src, Representation r); + void Store(Register src, const MemOperand& dst, Representation r); + + // Store an object to the root table. + void StoreRoot(Register source, Heap::RootListIndex index, + Condition cond = al); + + // --------------------------------------------------------------------------- + // GC Support + + void IncrementalMarkingRecordWriteHelper(Register object, Register value, + Register address); + + enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd }; + + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. + void RememberedSetHelper(Register object, // Used for debug code. + Register addr, Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but scratch will be clobbered. + void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) { + InNewSpace(object, scratch, eq, branch); + } + + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfInNewSpace(Register object, Register scratch, Label* branch) { + InNewSpace(object, scratch, ne, branch); + } + + // Check if an object has a given incremental marking color. + void HasColor(Register object, Register scratch0, Register scratch1, + Label* has_color, int first_bit, int second_bit); + + void JumpIfBlack(Register object, Register scratch0, Register scratch1, + Label* on_black); + + // Checks the color of an object. If the object is white we jump to the + // incremental marker. 
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2, + Register scratch3, Label* value_is_white); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off). + void RecordWriteField( + Register object, int offset, Register value, Register scratch, + LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); + + // As above, but the offset has the tag presubtracted. For use with + // MemOperand(reg, off). + inline void RecordWriteContextSlot( + Register context, int offset, Register value, Register scratch, + LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting) { + RecordWriteField(context, offset + kHeapObjectTag, value, scratch, + lr_status, save_fp, remembered_set_action, smi_check, + pointers_to_here_check_for_value); + } + + // Notify the garbage collector that we wrote a code entry into a + // JSFunction. Only scratch is clobbered by the operation. + void RecordWriteCodeEntryField(Register js_function, Register code_entry, + Register scratch); - // Push a standard frame, consisting of lr, fp, context and JS function - void PushStandardFrame(Register function_reg); + void RecordWriteForMap(Register object, Register map, Register dst, + LinkRegisterStatus lr_status, SaveFPRegsMode save_fp); + + // For a given |object| notify the garbage collector that the slot |address| + // has been written. |value| is the object being stored. The value and + // address registers are clobbered by the operation. + void RecordWrite( + Register object, Register address, Register value, + LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK, + PointersToHereCheck pointers_to_here_check_for_value = + kPointersToHereMaybeInteresting); void PopCommonFrame(Register marker_reg = no_reg); @@ -504,86 +704,9 @@ class MacroAssembler: public Assembler { const MemOperand& dst, Condition cond = al); - // If the value is a NaN, canonicalize the value else, do nothing. - void VFPCanonicalizeNaN(const DwVfpRegister dst, - const DwVfpRegister src, - const Condition cond = al); - void VFPCanonicalizeNaN(const DwVfpRegister value, - const Condition cond = al) { - VFPCanonicalizeNaN(value, value, cond); - } - - // Compare single values and move the result to the normal condition flags. - void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2, - const Condition cond = al); - void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2, - const Condition cond = al); - - // Compare double values and move the result to the normal condition flags. 
- void VFPCompareAndSetFlags(const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond = al); - void VFPCompareAndSetFlags(const DwVfpRegister src1, - const double src2, - const Condition cond = al); - - // Compare single values and then load the fpscr flags to a register. - void VFPCompareAndLoadFlags(const SwVfpRegister src1, - const SwVfpRegister src2, - const Register fpscr_flags, - const Condition cond = al); - void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, - const Register fpscr_flags, - const Condition cond = al); - - // Compare double values and then load the fpscr flags to a register. - void VFPCompareAndLoadFlags(const DwVfpRegister src1, - const DwVfpRegister src2, - const Register fpscr_flags, - const Condition cond = al); - void VFPCompareAndLoadFlags(const DwVfpRegister src1, - const double src2, - const Register fpscr_flags, - const Condition cond = al); - - void Vmov(const DwVfpRegister dst, - const double imm, + void Vmov(const DwVfpRegister dst, Double imm, const Register scratch = no_reg); - void VmovHigh(Register dst, DwVfpRegister src); - void VmovHigh(DwVfpRegister dst, Register src); - void VmovLow(Register dst, DwVfpRegister src); - void VmovLow(DwVfpRegister dst, Register src); - - // Simulate s-register moves for imaginary s32 - s63 registers. - void VmovExtended(Register dst, int src_code); - void VmovExtended(int dst_code, Register src); - // Move between s-registers and imaginary s-registers. - void VmovExtended(int dst_code, int src_code); - void VmovExtended(int dst_code, const MemOperand& src); - void VmovExtended(const MemOperand& dst, int src_code); - - void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane); - void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane); - void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane); - void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane, - NeonDataType dt, int lane); - void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, - SwVfpRegister src_lane, int lane); - - void LslPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, Register scratch, Register shift); - void LslPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, uint32_t shift); - void LsrPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, Register scratch, Register shift); - void LsrPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, uint32_t shift); - void AsrPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, Register scratch, Register shift); - void AsrPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, uint32_t shift); - // Loads the number from object into dst register. // If |object| is neither smi nor heap number, |not_number| is jumped to // with |object| still intact. @@ -618,10 +741,6 @@ class MacroAssembler: public Assembler { LowDwVfpRegister double_scratch1, Label* not_int32); - // Generates function and stub prologue code. - void StubPrologue(StackFrame::Type type); - void Prologue(bool code_pre_aging); - // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. void EnterExitFrame(bool save_doubles, int stack_space = 0, @@ -634,9 +753,6 @@ class MacroAssembler: public Assembler { bool restore_context, bool argument_count_is_length = false); - // Get the actual activation frame alignment for target environment. 
- static int ActivationFrameAlignment(); - void LoadContext(Register dst, int context_chain_length); // Load the global object from the current context. @@ -657,20 +773,9 @@ class MacroAssembler: public Assembler { Register map, Register scratch); - void InitializeRootRegister(); - // --------------------------------------------------------------------------- // JavaScript invokes - // Removes current frame and its arguments from the stack preserving - // the arguments and a return address pushed to the stack for the next call. - // Both |callee_args_count| and |caller_args_count_reg| do not include - // receiver. |callee_args_count| is not modified, |caller_args_count_reg| - // is trashed. - void PrepareForTailCall(const ParameterCount& callee_args_count, - Register caller_args_count_reg, Register scratch0, - Register scratch1); - // Invoke the JavaScript function code by either calling or jumping. void InvokeFunctionCode(Register function, Register new_target, const ParameterCount& expected, @@ -778,15 +883,6 @@ class MacroAssembler: public Assembler { void Allocate(Register object_size, Register result, Register result_end, Register scratch, Label* gc_required, AllocationFlags flags); - // FastAllocate is right now only used for folded allocations. It just - // increments the top pointer without checking against limit. This can only - // be done if it was proved earlier that the allocation will succeed. - void FastAllocate(int object_size, Register result, Register scratch1, - Register scratch2, AllocationFlags flags); - - void FastAllocate(Register object_size, Register result, Register result_end, - Register scratch, AllocationFlags flags); - // Allocates a heap number or jumps to the gc_required label if the young // space is full and a scavenge is needed. All registers are clobbered also // when control continues at the gc_required label. @@ -809,12 +905,6 @@ class MacroAssembler: public Assembler { Register scratch1, Register scratch2, Label* gc_required); - // Initialize fields with filler values. Fields starting at |current_address| - // not including |end_address| are overwritten with the value in |filler|. At - // the end the loop, |current_address| takes the value of |end_address|. - void InitializeFieldsWithFiller(Register current_address, - Register end_address, Register filler); - // --------------------------------------------------------------------------- // Support functions. @@ -830,7 +920,7 @@ class MacroAssembler: public Assembler { // are the same register). It leaves the heap object in the heap_object // register unless the heap_object register is the same register as one of the // other registers. - // Type_reg can be no_reg. In that case ip is used. + // Type_reg can be no_reg. In that case a scratch register is used. void CompareObjectType(Register heap_object, Register map, Register type_reg, @@ -882,11 +972,13 @@ class MacroAssembler: public Assembler { void LoadWeakValue(Register value, Handle cell, Label* miss); // Compare the object in a register to a value from the root list. - // Uses the ip register as scratch. + // Acquires a scratch register. void CompareRoot(Register obj, Heap::RootListIndex index); void PushRoot(Heap::RootListIndex index) { - LoadRoot(ip, index); - Push(ip); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + LoadRoot(scratch, index); + Push(scratch); } // Compare the object in a register to a value and jump if they are equal. 
@@ -940,36 +1032,6 @@ class MacroAssembler: public Assembler { Label* done, Label* exact); - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it - // succeeds, otherwise falls through if result is saturated. On return - // 'result' either holds answer, or is clobbered on fall through. - // - // Only public for the test code in test-code-stubs-arm.cc. - void TryInlineTruncateDoubleToI(Register result, - DwVfpRegister input, - Label* done); - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. - // Exits with 'result' holding the answer. - void TruncateDoubleToI(Register result, DwVfpRegister double_input); - - // Performs a truncating conversion of a heap number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' - // must be different registers. Exits with 'result' holding the answer. - void TruncateHeapNumberToI(Register result, Register object); - - // Converts the smi or heap number in object to an int32 using the rules - // for ToInt32 as described in ECMAScript 9.5.: the value is truncated - // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be - // different registers. - void TruncateNumberToI(Register object, - Register result, - Register heap_number_map, - Register scratch1, - Label* not_int32); - // Check whether d16-d31 are available on the CPU. The result is given by the // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. void CheckFor32DRegs(Register scratch); @@ -982,38 +1044,11 @@ class MacroAssembler: public Assembler { // values to location, restoring [d0..(d15|d31)]. void RestoreFPRegs(Register location, Register scratch); - // Perform a floating-point min or max operation with the - // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically - // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line - // code. The specific behaviour depends on supported instructions. - // - // These functions assume (and assert) that !left.is(right). It is permitted - // for the result to alias either input register. - void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, - Label* out_of_line); - void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, - Label* out_of_line); - void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, - Label* out_of_line); - void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, - Label* out_of_line); - - // Generate out-of-line cases for the macros above. - void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, - SwVfpRegister right); - void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, - SwVfpRegister right); - void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, - DwVfpRegister right); - void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, - DwVfpRegister right); - // --------------------------------------------------------------------------- // Runtime calls // Call a code stub. void CallStub(CodeStub* stub, - TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al); // Call a code stub. @@ -1048,106 +1083,18 @@ class MacroAssembler: public Assembler { // Convenience function: tail call a runtime routine (jump). 
void TailCallRuntime(Runtime::FunctionId fid); - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); - - // Before calling a C-function from generated code, align arguments on stack. - // After aligning the frame, non-register arguments must be stored in - // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments - // are word sized. If double arguments are used, this function assumes that - // all double arguments are stored before core registers; otherwise the - // correct alignment of the double values is not guaranteed. - // Some compilers/platforms require the stack to be aligned when calling - // C++ code. - // Needs a scratch register to do some arithmetic. This register will be - // trashed. - void PrepareCallCFunction(int num_reg_arguments, - int num_double_registers, - Register scratch); - void PrepareCallCFunction(int num_reg_arguments, - Register scratch); - - // There are two ways of passing double arguments on ARM, depending on - // whether soft or hard floating point ABI is used. These functions - // abstract parameter passing for the three different ways we call - // C functions from generated code. - void MovToFloatParameter(DwVfpRegister src); - void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2); - void MovToFloatResult(DwVfpRegister src); - - // Calls a C function and cleans up the space for arguments allocated - // by PrepareCallCFunction. The called function is not allowed to trigger a - // garbage collection, since that might move the code and invalidate the - // return address (unless this is somehow accounted for by the called - // function). - void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, int num_arguments); - void CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments); - void CallCFunction(Register function, - int num_reg_arguments, - int num_double_arguments); - - void MovFromFloatParameter(DwVfpRegister dst); - void MovFromFloatResult(DwVfpRegister dst); - // Jump to a runtime routine. void JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame = false); - Handle CodeObject() { - DCHECK(!code_object_.is_null()); - return code_object_; - } - - - // Emit code for a truncating division by a constant. The dividend register is - // unchanged and ip gets clobbered. Dividend and result must be different. - void TruncatingDiv(Register result, Register dividend, int32_t divisor); - // --------------------------------------------------------------------------- // StatsCounter support - void SetCounter(StatsCounter* counter, int value, - Register scratch1, Register scratch2); void IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2); void DecrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2); - - // --------------------------------------------------------------------------- - // Debugging - - // Calls Abort(msg) if the condition cond is not satisfied. - // Use --debug_code to enable. - void Assert(Condition cond, BailoutReason reason); - - // Like Assert(), but always enabled. - void Check(Condition cond, BailoutReason reason); - - // Print a message to stdout and abort execution. - void Abort(BailoutReason msg); - - // Verify restrictions about code generated in stubs. 
- void set_generating_stub(bool value) { generating_stub_ = value; } - bool generating_stub() { return generating_stub_; } - void set_has_frame(bool value) { has_frame_ = value; } - bool has_frame() { return has_frame_; } - inline bool AllowThisStubCall(CodeStub* stub); - - // EABI variant for double arguments in use. - bool use_eabi_hardfloat() { -#ifdef __arm__ - return base::OS::ArmUsingHardFloat(); -#elif USE_EABI_HARDFLOAT - return true; -#else - return false; -#endif - } - // --------------------------------------------------------------------------- // Number utilities @@ -1182,19 +1129,14 @@ class MacroAssembler: public Assembler { TrySmiTag(reg, reg, not_a_smi); } void TrySmiTag(Register reg, Register src, Label* not_a_smi) { - SmiTag(ip, src, SetCC); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + SmiTag(scratch, src, SetCC); b(vs, not_a_smi); - mov(reg, ip); + mov(reg, scratch); } - void SmiUntag(Register reg, SBit s = LeaveCC) { - mov(reg, Operand::SmiUntag(reg), s); - } - void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { - mov(dst, Operand::SmiUntag(src), s); - } - // Untag the source value into destination and jump if source is a smi. // Souce and destination can be the same register. void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); @@ -1202,8 +1144,6 @@ class MacroAssembler: public Assembler { // Test if the register contains a smi (Z == 0 (eq) if true). void SmiTst(Register value); void NonNegativeSmiTst(Register value); - // Jump if the register contains a smi. - void JumpIfSmi(Register value, Label* smi_label); // Jump if either of the registers contain a non-smi. void JumpIfNotSmi(Register value, Label* not_smi_label); // Jump if either of the registers contain a non-smi. @@ -1215,6 +1155,9 @@ class MacroAssembler: public Assembler { void AssertNotSmi(Register object); void AssertSmi(Register object); + // Abort execution if argument is not a FixedArray, enabled via --debug-code. + void AssertFixedArray(Register object); + // Abort execution if argument is not a JSFunction, enabled via --debug-code. void AssertFunction(Register object); @@ -1222,9 +1165,9 @@ class MacroAssembler: public Assembler { // enabled via --debug-code. void AssertBoundFunction(Register object); - // Abort execution if argument is not a JSGeneratorObject, + // Abort execution if argument is not a JSGeneratorObject (or subclass), // enabled via --debug-code. - void AssertGeneratorObject(Register object, Register suspend_flags); + void AssertGeneratorObject(Register object); // Abort execution if argument is not undefined or an AllocationSite, enabled // via --debug-code. @@ -1268,19 +1211,8 @@ class MacroAssembler: public Assembler { void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name); - void EmitSeqStringSetCharCheck(Register string, - Register index, - Register value, - uint32_t encoding_mask); - - void ClampUint8(Register output_reg, Register input_reg); - void ClampDoubleToUint8(Register result_reg, - DwVfpRegister input_reg, - LowDwVfpRegister double_scratch); - - void LoadInstanceDescriptors(Register map, Register descriptors); void EnumLength(Register dst, Register map); void NumberOfOwnDescriptors(Register dst, Register map); @@ -1308,12 +1240,6 @@ class MacroAssembler: public Assembler { // Load the type feedback vector from a JavaScript frame. void EmitLoadFeedbackVector(Register vector); - // Activation support. 
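A minimal sketch of the scratch-register pattern used in the PushRoot and TrySmiTag hunks above (the helper below is hypothetical and not part of this patch; on ARM the acquired register is typically ip, and it is returned to the pool when the scope is destroyed):

    void EmitAddOne(MacroAssembler* masm, Register dst) {
      UseScratchRegisterScope temps(masm);
      Register scratch = temps.Acquire();   // borrow a temp instead of hard-coding ip
      masm->mov(scratch, Operand(1));
      masm->add(dst, dst, Operand(scratch));
    }                                       // scratch released when temps goes out of scope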
- void EnterFrame(StackFrame::Type type, - bool load_constant_pool_pointer_reg = false); - // Returns the pc offset at which the frame ends. - int LeaveFrame(StackFrame::Type type); - void EnterBuiltinFrame(Register context, Register target, Register argc); void LeaveBuiltinFrame(Register context, Register target, Register argc); @@ -1321,23 +1247,7 @@ class MacroAssembler: public Assembler { // in r0. Assumes that any other register can be used as a scratch. void CheckEnumCache(Label* call_runtime); - // AllocationMemento support. Arrays may have an associated - // AllocationMemento object that can be checked for in order to pretransition - // to another type. - // On entry, receiver_reg should point to the array object. - // scratch_reg gets clobbered. - // If allocation info is present, condition flags are set to eq. - void TestJSArrayForAllocationMemento(Register receiver_reg, - Register scratch_reg, - Label* no_memento_found); - private: - void CallCFunctionHelper(Register function, - int num_reg_arguments, - int num_double_arguments); - - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); - // Helper functions for generating invokes. void InvokePrologue(const ParameterCount& expected, const ParameterCount& actual, @@ -1364,21 +1274,6 @@ class MacroAssembler: public Assembler { MemOperand SafepointRegisterSlot(Register reg); MemOperand SafepointRegistersAndDoublesSlot(Register reg); - // Implementation helpers for FloatMin and FloatMax. - template - void FloatMaxHelper(T result, T left, T right, Label* out_of_line); - template - void FloatMinHelper(T result, T left, T right, Label* out_of_line); - template - void FloatMaxOutOfLineHelper(T result, T left, T right); - template - void FloatMinOutOfLineHelper(T result, T left, T right); - - bool generating_stub_; - bool has_frame_; - Isolate* isolate_; - // This handle will be patched with the code object on installation. - Handle code_object_; int jit_cookie_; // Needs access to SafepointRegisterStackIndex for compiled frame diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 1f7e1466921d94..dc279ceb442f8c 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -3225,7 +3225,6 @@ void Simulator::DecodeType7(Instruction* instr) { void Simulator::DecodeTypeVFP(Instruction* instr) { DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) ); DCHECK(instr->Bits(11, 9) == 0x5); - // Obtain single precision register codes. int m = instr->VFPMRegValue(kSinglePrecision); int d = instr->VFPDRegValue(kSinglePrecision); @@ -3749,7 +3748,6 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode, (val <= (min_int - 1.0)); default: UNREACHABLE(); - return true; } } diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h index e865b634b500c2..fbc4ac41fb6c51 100644 --- a/deps/v8/src/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/arm64/assembler-arm64-inl.h @@ -16,7 +16,7 @@ namespace internal { bool CpuFeatures::SupportsCrankshaft() { return true; } -bool CpuFeatures::SupportsWasmSimd128() { return false; } +bool CpuFeatures::SupportsWasmSimd128() { return true; } void RelocInfo::apply(intptr_t delta) { // On arm64 only internal references need extra work. 
@@ -57,6 +57,15 @@ inline int CPURegister::SizeInBytes() const { return reg_size / 8; } +inline bool CPURegister::Is8Bits() const { + DCHECK(IsValid()); + return reg_size == 8; +} + +inline bool CPURegister::Is16Bits() const { + DCHECK(IsValid()); + return reg_size == 16; +} inline bool CPURegister::Is32Bits() const { DCHECK(IsValid()); @@ -69,9 +78,13 @@ inline bool CPURegister::Is64Bits() const { return reg_size == 64; } +inline bool CPURegister::Is128Bits() const { + DCHECK(IsValid()); + return reg_size == 128; +} inline bool CPURegister::IsValid() const { - if (IsValidRegister() || IsValidFPRegister()) { + if (IsValidRegister() || IsValidVRegister()) { DCHECK(!IsNone()); return true; } else { @@ -87,14 +100,14 @@ inline bool CPURegister::IsValidRegister() const { ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode)); } - -inline bool CPURegister::IsValidFPRegister() const { - return IsFPRegister() && - ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) && - (reg_code < kNumberOfFPRegisters); +inline bool CPURegister::IsValidVRegister() const { + return IsVRegister() && + ((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) || + (reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) || + (reg_size == kQRegSizeInBits)) && + (reg_code < kNumberOfVRegisters); } - inline bool CPURegister::IsNone() const { // kNoRegister types should always have size 0 and code 0. DCHECK((reg_type != kNoRegister) || (reg_code == 0)); @@ -120,11 +133,7 @@ inline bool CPURegister::IsRegister() const { return reg_type == kRegister; } - -inline bool CPURegister::IsFPRegister() const { - return reg_type == kFPRegister; -} - +inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; } inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const { return (reg_size == other.reg_size) && (reg_type == other.reg_type); @@ -200,7 +209,7 @@ inline Register Register::XRegFromCode(unsigned code) { if (code == kSPRegInternalCode) { return csp; } else { - DCHECK(code < kNumberOfRegisters); + DCHECK_LT(code, static_cast(kNumberOfRegisters)); return Register::Create(code, kXRegSizeInBits); } } @@ -210,23 +219,40 @@ inline Register Register::WRegFromCode(unsigned code) { if (code == kSPRegInternalCode) { return wcsp; } else { - DCHECK(code < kNumberOfRegisters); + DCHECK_LT(code, static_cast(kNumberOfRegisters)); return Register::Create(code, kWRegSizeInBits); } } +inline VRegister VRegister::BRegFromCode(unsigned code) { + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return VRegister::Create(code, kBRegSizeInBits); +} -inline FPRegister FPRegister::SRegFromCode(unsigned code) { - DCHECK(code < kNumberOfFPRegisters); - return FPRegister::Create(code, kSRegSizeInBits); +inline VRegister VRegister::HRegFromCode(unsigned code) { + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return VRegister::Create(code, kHRegSizeInBits); } +inline VRegister VRegister::SRegFromCode(unsigned code) { + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return VRegister::Create(code, kSRegSizeInBits); +} -inline FPRegister FPRegister::DRegFromCode(unsigned code) { - DCHECK(code < kNumberOfFPRegisters); - return FPRegister::Create(code, kDRegSizeInBits); +inline VRegister VRegister::DRegFromCode(unsigned code) { + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return VRegister::Create(code, kDRegSizeInBits); } +inline VRegister VRegister::QRegFromCode(unsigned code) { + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return 
VRegister::Create(code, kQRegSizeInBits); +} + +inline VRegister VRegister::VRegFromCode(unsigned code) { + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return VRegister::Create(code, kVRegSizeInBits); +} inline Register CPURegister::W() const { DCHECK(IsValidRegister()); @@ -239,16 +265,34 @@ inline Register CPURegister::X() const { return Register::XRegFromCode(reg_code); } +inline VRegister CPURegister::V() const { + DCHECK(IsValidVRegister()); + return VRegister::VRegFromCode(reg_code); +} + +inline VRegister CPURegister::B() const { + DCHECK(IsValidVRegister()); + return VRegister::BRegFromCode(reg_code); +} + +inline VRegister CPURegister::H() const { + DCHECK(IsValidVRegister()); + return VRegister::HRegFromCode(reg_code); +} -inline FPRegister CPURegister::S() const { - DCHECK(IsValidFPRegister()); - return FPRegister::SRegFromCode(reg_code); +inline VRegister CPURegister::S() const { + DCHECK(IsValidVRegister()); + return VRegister::SRegFromCode(reg_code); } +inline VRegister CPURegister::D() const { + DCHECK(IsValidVRegister()); + return VRegister::DRegFromCode(reg_code); +} -inline FPRegister CPURegister::D() const { - DCHECK(IsValidFPRegister()); - return FPRegister::DRegFromCode(reg_code); +inline VRegister CPURegister::Q() const { + DCHECK(IsValidVRegister()); + return VRegister::QRegFromCode(reg_code); } @@ -310,7 +354,6 @@ Immediate::Immediate(T t, RelocInfo::Mode rmode) STATIC_ASSERT(ImmediateInitializer::kIsIntType); } - // Operand. template Operand::Operand(Handle value) : immediate_(value), reg_(NoReg) {} @@ -325,7 +368,6 @@ Operand::Operand(T t, RelocInfo::Mode rmode) : immediate_(t, rmode), reg_(NoReg) {} - Operand::Operand(Register reg, Shift shift, unsigned shift_amount) : immediate_(0), reg_(reg), @@ -352,9 +394,21 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount) DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX))); } +bool Operand::IsHeapObjectRequest() const { + DCHECK_IMPLIES(heap_object_request_.has_value(), reg_.Is(NoReg)); + DCHECK_IMPLIES(heap_object_request_.has_value(), + immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT || + immediate_.rmode() == RelocInfo::CODE_TARGET); + return heap_object_request_.has_value(); +} + +HeapObjectRequest Operand::heap_object_request() const { + DCHECK(IsHeapObjectRequest()); + return *heap_object_request_; +} bool Operand::IsImmediate() const { - return reg_.Is(NoReg); + return reg_.Is(NoReg) && !IsHeapObjectRequest(); } @@ -383,6 +437,13 @@ Operand Operand::ToExtendedRegister() const { return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_); } +Immediate Operand::immediate_for_heap_object_request() const { + DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber && + immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) || + (heap_object_request().kind() == HeapObjectRequest::kCodeStub && + immediate_.rmode() == RelocInfo::CODE_TARGET)); + return immediate_; +} Immediate Operand::immediate() const { DCHECK(IsImmediate()); @@ -491,7 +552,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode) regoffset_ = NoReg; } else if (offset.IsShiftedRegister()) { - DCHECK(addrmode == Offset); + DCHECK((addrmode == Offset) || (addrmode == PostIndex)); regoffset_ = offset.reg(); shift_ = offset.shift(); @@ -877,21 +938,20 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) { if (rt.IsRegister()) { return rt.Is64Bits() ? LDR_x : LDR_w; } else { - DCHECK(rt.IsFPRegister()); - return rt.Is64Bits() ? 
LDR_d : LDR_s; - } -} - - -LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt, - const CPURegister& rt2) { - DCHECK(AreSameSizeAndType(rt, rt2)); - USE(rt2); - if (rt.IsRegister()) { - return rt.Is64Bits() ? LDP_x : LDP_w; - } else { - DCHECK(rt.IsFPRegister()); - return rt.Is64Bits() ? LDP_d : LDP_s; + DCHECK(rt.IsVRegister()); + switch (rt.SizeInBits()) { + case kBRegSizeInBits: + return LDR_b; + case kHRegSizeInBits: + return LDR_h; + case kSRegSizeInBits: + return LDR_s; + case kDRegSizeInBits: + return LDR_d; + default: + DCHECK(rt.IsQ()); + return LDR_q; + } } } @@ -901,11 +961,29 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) { if (rt.IsRegister()) { return rt.Is64Bits() ? STR_x : STR_w; } else { - DCHECK(rt.IsFPRegister()); - return rt.Is64Bits() ? STR_d : STR_s; + DCHECK(rt.IsVRegister()); + switch (rt.SizeInBits()) { + case kBRegSizeInBits: + return STR_b; + case kHRegSizeInBits: + return STR_h; + case kSRegSizeInBits: + return STR_s; + case kDRegSizeInBits: + return STR_d; + default: + DCHECK(rt.IsQ()); + return STR_q; + } } } +LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt, + const CPURegister& rt2) { + DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w); + return static_cast(StorePairOpFor(rt, rt2) | + LoadStorePairLBit); +} LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt, const CPURegister& rt2) { @@ -914,8 +992,16 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt, if (rt.IsRegister()) { return rt.Is64Bits() ? STP_x : STP_w; } else { - DCHECK(rt.IsFPRegister()); - return rt.Is64Bits() ? STP_d : STP_s; + DCHECK(rt.IsVRegister()); + switch (rt.SizeInBits()) { + case kSRegSizeInBits: + return STP_s; + case kDRegSizeInBits: + return STP_d; + default: + DCHECK(rt.IsQ()); + return STP_q; + } } } @@ -924,7 +1010,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) { if (rt.IsRegister()) { return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit; } else { - DCHECK(rt.IsFPRegister()); + DCHECK(rt.IsVRegister()); return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit; } } @@ -945,7 +1031,6 @@ Instr Assembler::Flags(FlagsUpdate S) { return 0 << FlagsUpdate_offset; } UNREACHABLE(); - return 0; } @@ -1108,9 +1193,8 @@ Instr Assembler::ImmLS(int imm9) { return truncate_to_int9(imm9) << ImmLS_offset; } - -Instr Assembler::ImmLSPair(int imm7, LSDataSize size) { - DCHECK(((imm7 >> size) << size) == imm7); +Instr Assembler::ImmLSPair(int imm7, unsigned size) { + DCHECK_EQ((imm7 >> size) << size, imm7); int scaled_imm7 = imm7 >> size; DCHECK(is_int7(scaled_imm7)); return truncate_to_int7(scaled_imm7) << ImmLSPair_offset; @@ -1152,10 +1236,17 @@ Instr Assembler::ImmBarrierType(int imm2) { return imm2 << ImmBarrierType_offset; } - -LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) { - DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8)); - return static_cast(op >> SizeLS_offset); +unsigned Assembler::CalcLSDataSize(LoadStoreOp op) { + DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8)); + unsigned size = static_cast(op >> LSSize_offset); + if ((op & LSVector_mask) != 0) { + // Vector register memory operations encode the access size in the "size" + // and "opc" fields. + if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) { + size = kQRegSizeLog2; + } + } + return size; } @@ -1170,11 +1261,7 @@ Instr Assembler::ShiftMoveWide(int shift) { return shift << ShiftMoveWide_offset; } - -Instr Assembler::FPType(FPRegister fd) { - return fd.Is64Bits() ? 
FP64 : FP32; -} - +Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; } Instr Assembler::FPScale(unsigned scale) { DCHECK(is_uint6(scale)); @@ -1205,18 +1292,6 @@ inline void Assembler::CheckBuffer() { } } - -TypeFeedbackId Assembler::RecordedAstId() { - DCHECK(!recorded_ast_id_.IsNone()); - return recorded_ast_id_; -} - - -void Assembler::ClearRecordedAstId() { - recorded_ast_id_ = TypeFeedbackId::None(); -} - - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc index ec12e77274a517..1cabc01cec7703 100644 --- a/deps/v8/src/arm64/assembler-arm64.cc +++ b/deps/v8/src/arm64/assembler-arm64.cc @@ -34,12 +34,12 @@ #include "src/arm64/frames-arm64.h" #include "src/base/bits.h" #include "src/base/cpu.h" +#include "src/code-stubs.h" #include "src/register-configuration.h" namespace v8 { namespace internal { - // ----------------------------------------------------------------------------- // CpuFeatures implementation. @@ -89,8 +89,8 @@ CPURegister CPURegList::PopHighestIndex() { void CPURegList::RemoveCalleeSaved() { if (type() == CPURegister::kRegister) { Remove(GetCalleeSaved(RegisterSizeInBits())); - } else if (type() == CPURegister::kFPRegister) { - Remove(GetCalleeSavedFP(RegisterSizeInBits())); + } else if (type() == CPURegister::kVRegister) { + Remove(GetCalleeSavedV(RegisterSizeInBits())); } else { DCHECK(type() == CPURegister::kNoRegister); DCHECK(IsEmpty()); @@ -103,9 +103,8 @@ CPURegList CPURegList::GetCalleeSaved(int size) { return CPURegList(CPURegister::kRegister, size, 19, 29); } - -CPURegList CPURegList::GetCalleeSavedFP(int size) { - return CPURegList(CPURegister::kFPRegister, size, 8, 15); +CPURegList CPURegList::GetCalleeSavedV(int size) { + return CPURegList(CPURegister::kVRegister, size, 8, 15); } @@ -116,11 +115,10 @@ CPURegList CPURegList::GetCallerSaved(int size) { return list; } - -CPURegList CPURegList::GetCallerSavedFP(int size) { +CPURegList CPURegList::GetCallerSavedV(int size) { // Registers d0-d7 and d16-d31 are caller-saved. 
- CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7); - list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31)); + CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7); + list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31)); return list; } @@ -220,7 +218,6 @@ Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2, return candidate; } UNREACHABLE(); - return NoReg; } @@ -240,7 +237,7 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2, if (regs[i].IsRegister()) { number_of_valid_regs++; unique_regs |= regs[i].Bit(); - } else if (regs[i].IsFPRegister()) { + } else if (regs[i].IsVRegister()) { number_of_valid_fpregs++; unique_fpregs |= regs[i].Bit(); } else { @@ -277,20 +274,43 @@ bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2, return match; } +bool AreSameFormat(const VRegister& reg1, const VRegister& reg2, + const VRegister& reg3, const VRegister& reg4) { + DCHECK(reg1.IsValid()); + return (!reg2.IsValid() || reg2.IsSameFormat(reg1)) && + (!reg3.IsValid() || reg3.IsSameFormat(reg1)) && + (!reg4.IsValid() || reg4.IsSameFormat(reg1)); +} + +bool AreConsecutive(const VRegister& reg1, const VRegister& reg2, + const VRegister& reg3, const VRegister& reg4) { + DCHECK(reg1.IsValid()); + if (!reg2.IsValid()) { + DCHECK(!reg3.IsValid() && !reg4.IsValid()); + return true; + } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfVRegisters)) { + return false; + } -void Immediate::InitializeHandle(Handle handle) { - AllowDeferredHandleDereference using_raw_address; + if (!reg3.IsValid()) { + DCHECK(!reg4.IsValid()); + return true; + } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfVRegisters)) { + return false; + } - // Verify all Objects referred by code are NOT in new space. - Object* obj = *handle; - if (obj->IsHeapObject()) { - value_ = reinterpret_cast(handle.location()); - rmode_ = RelocInfo::EMBEDDED_OBJECT; - } else { - STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t)); - value_ = reinterpret_cast(obj); - rmode_ = RelocInfo::NONE64; + if (!reg4.IsValid()) { + return true; + } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfVRegisters)) { + return false; } + + return true; +} + +void Immediate::InitializeHandle(Handle handle) { + value_ = reinterpret_cast(handle.address()); + rmode_ = RelocInfo::EMBEDDED_OBJECT; } @@ -304,36 +324,52 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const { return !RelocInfo::IsNone(rmode); } +bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, + int offset) { + auto existing = entry_map.find(data); + if (existing == entry_map.end()) { + entry_map[data] = static_cast(entries_.size()); + entries_.push_back(std::make_pair(data, std::vector(1, offset))); + return true; + } + int index = existing->second; + entries_[index].second.push_back(offset); + return false; +} // Constant Pool. 
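// Illustrative sketch (not part of this patch): the shared-entry bookkeeping
// added above boils down to a value -> pool-slot map plus, per slot, the list
// of pc offsets that must later be patched to load that slot. A standalone
// model of the same idea, with plain STL types standing in for V8's aliases:
//
//   #include <cstdint>
//   #include <map>
//   #include <utility>
//   #include <vector>
//
//   std::map<uint64_t, int> shared_entries;                     // SharedEntryMap
//   std::vector<std::pair<uint64_t, std::vector<int>>> entries; // value + uses
//
//   bool AddSharedEntry(uint64_t data, int pc_offset) {
//     auto it = shared_entries.find(data);
//     if (it == shared_entries.end()) {
//       shared_entries[data] = static_cast<int>(entries.size());
//       entries.emplace_back(data, std::vector<int>(1, pc_offset));
//       return true;   // First use of this constant: caller emits reloc info.
//     }
//     entries[it->second].second.push_back(pc_offset);
//     return false;    // Duplicate: reuse the slot, no new reloc info needed.
//   }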
-void ConstPool::RecordEntry(intptr_t data, - RelocInfo::Mode mode) { +bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) { DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL && mode != RelocInfo::CODE_AGE_SEQUENCE && mode != RelocInfo::DEOPT_SCRIPT_OFFSET && mode != RelocInfo::DEOPT_INLINING_ID && mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID); + + bool write_reloc_info = true; + uint64_t raw_data = static_cast(data); int offset = assm_->pc_offset(); if (IsEmpty()) { first_use_ = offset; } - std::pair entry = std::make_pair(raw_data, offset); if (CanBeShared(mode)) { - shared_entries_.insert(entry); - if (shared_entries_.count(entry.first) == 1) { - shared_entries_count++; - } + write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset); + } else if (mode == RelocInfo::CODE_TARGET && + assm_->IsCodeTargetSharingAllowed() && raw_data != 0) { + // A zero data value is a placeholder and must not be shared. + write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset); } else { - unique_entries_.push_back(entry); + entries_.push_back(std::make_pair(raw_data, std::vector(1, offset))); } if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) { // Request constant pool emission after the next instruction. assm_->SetNextConstPoolCheckIn(1); } + + return write_reloc_info; } @@ -442,8 +478,8 @@ void ConstPool::Emit(bool require_jump) { void ConstPool::Clear() { shared_entries_.clear(); - shared_entries_count = 0; - unique_entries_.clear(); + handle_to_index_map_.clear(); + entries_.clear(); first_use_ = -1; } @@ -453,8 +489,7 @@ bool ConstPool::CanBeShared(RelocInfo::Mode mode) { DCHECK(mode != RelocInfo::NONE32); return RelocInfo::IsNone(mode) || - (!assm_->serializer_enabled() && - (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE)); + (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE); } @@ -512,43 +547,19 @@ void ConstPool::EmitGuard() { void ConstPool::EmitEntries() { DCHECK(IsAligned(assm_->pc_offset(), 8)); - typedef std::multimap::const_iterator SharedEntriesIterator; - SharedEntriesIterator value_it; - // Iterate through the keys (constant pool values). - for (value_it = shared_entries_.begin(); - value_it != shared_entries_.end(); - value_it = shared_entries_.upper_bound(value_it->first)) { - std::pair range; - uint64_t data = value_it->first; - range = shared_entries_.equal_range(data); - SharedEntriesIterator offset_it; - // Iterate through the offsets of a given key. - for (offset_it = range.first; offset_it != range.second; offset_it++) { - Instruction* instr = assm_->InstructionAt(offset_it->second); + // Emit entries. + for (const auto& entry : entries_) { + for (const auto& pc : entry.second) { + Instruction* instr = assm_->InstructionAt(pc); // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc()); } - assm_->dc64(data); - } - shared_entries_.clear(); - shared_entries_count = 0; - // Emit unique entries. - std::vector >::const_iterator unique_it; - for (unique_it = unique_entries_.begin(); - unique_it != unique_entries_.end(); - unique_it++) { - Instruction* instr = assm_->InstructionAt(unique_it->second); - - // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. 
- DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); - instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc()); - assm_->dc64(unique_it->first); + assm_->dc64(entry.first); } - unique_entries_.clear(); - first_use_ = -1; + Clear(); } @@ -556,26 +567,28 @@ void ConstPool::EmitEntries() { Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size) : AssemblerBase(isolate_data, buffer, buffer_size), constpool_(this), - recorded_ast_id_(TypeFeedbackId::None()), unresolved_branches_() { const_pool_blocked_nesting_ = 0; veneer_pool_blocked_nesting_ = 0; + code_target_sharing_blocked_nesting_ = 0; Reset(); } Assembler::~Assembler() { DCHECK(constpool_.IsEmpty()); - DCHECK(const_pool_blocked_nesting_ == 0); - DCHECK(veneer_pool_blocked_nesting_ == 0); + DCHECK_EQ(const_pool_blocked_nesting_, 0); + DCHECK_EQ(veneer_pool_blocked_nesting_, 0); + DCHECK_EQ(code_target_sharing_blocked_nesting_, 0); } void Assembler::Reset() { #ifdef DEBUG DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); - DCHECK(const_pool_blocked_nesting_ == 0); - DCHECK(veneer_pool_blocked_nesting_ == 0); + DCHECK_EQ(const_pool_blocked_nesting_, 0); + DCHECK_EQ(veneer_pool_blocked_nesting_, 0); + DCHECK_EQ(code_target_sharing_blocked_nesting_, 0); DCHECK(unresolved_branches_.empty()); memset(buffer_, 0, pc_ - buffer_); #endif @@ -586,15 +599,33 @@ void Assembler::Reset() { next_constant_pool_check_ = 0; next_veneer_pool_check_ = kMaxInt; no_const_pool_before_ = 0; - ClearRecordedAstId(); } +void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { + for (auto& request : heap_object_requests_) { + Handle object; + switch (request.kind()) { + case HeapObjectRequest::kHeapNumber: + object = isolate->factory()->NewHeapNumber(request.heap_number(), + IMMUTABLE, TENURED); + break; + case HeapObjectRequest::kCodeStub: + request.code_stub()->set_isolate(isolate); + object = request.code_stub()->GetCode(); + break; + } + Address pc = buffer_ + request.offset(); + Memory::Address_at(target_pointer_address_at(pc)) = object.address(); + } +} -void Assembler::GetCode(CodeDesc* desc) { +void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) { // Emit constant pool if necessary. CheckConstPool(true, false); DCHECK(constpool_.IsEmpty()); + AllocateAndInstallRequestedHeapObjects(isolate); + // Set up code descriptor. 
if (desc) { desc->buffer = reinterpret_cast(buffer_); @@ -612,7 +643,7 @@ void Assembler::GetCode(CodeDesc* desc) { void Assembler::Align(int m) { - DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m)); + DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m)); while ((pc_offset() & (m - 1)) != 0) { nop(); } @@ -1683,6 +1714,32 @@ void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) { Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt)); } +Operand Operand::EmbeddedNumber(double number) { + int32_t smi; + if (DoubleToSmiInteger(number, &smi)) { + return Operand(Immediate(Smi::FromInt(smi))); + } + Operand result(0, RelocInfo::EMBEDDED_OBJECT); + result.heap_object_request_.emplace(number); + DCHECK(result.IsHeapObjectRequest()); + return result; +} + +Operand Operand::EmbeddedCode(CodeStub* stub) { + Operand result(0, RelocInfo::CODE_TARGET); + result.heap_object_request_.emplace(stub); + DCHECK(result.IsHeapObjectRequest()); + return result; +} + +void Assembler::ldr(const CPURegister& rt, const Operand& operand) { + if (operand.IsHeapObjectRequest()) { + RequestHeapObject(operand.heap_object_request()); + ldr(rt, operand.immediate_for_heap_object_request()); + } else { + ldr(rt, operand.immediate()); + } +} void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { // Currently we only support 64-bit literals. @@ -1773,422 +1830,2137 @@ void Assembler::stlxrh(const Register& rs, const Register& rt, Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt)); } -void Assembler::mov(const Register& rd, const Register& rm) { - // Moves involving the stack pointer are encoded as add immediate with - // second operand of zero. Otherwise, orr with first operand zr is - // used. - if (rd.IsSP() || rm.IsSP()) { - add(rd, rm, 0); +void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEON3DifferentOp vop) { + DCHECK(AreSameFormat(vn, vm)); + DCHECK((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) || + (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vn); } else { - orr(rd, AppropriateZeroRegFor(rd), rm); + format = VFormat(vn); + } + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + +void Assembler::NEON3DifferentW(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEON3DifferentOp vop) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) || + (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) || + (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D())); + Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + +void Assembler::NEON3DifferentHN(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEON3DifferentOp vop) { + DCHECK(AreSameFormat(vm, vn)); + DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + +#define NEON_3DIFF_LONG_LIST(V) \ + V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \ + V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \ + V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \ + V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \ + V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \ + V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \ + V(uabal, NEON_UABAL, 
vn.IsVector() && vn.IsD()) \ + V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \ + V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \ + V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \ + V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \ + V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \ + V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \ + V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \ + V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \ + V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \ + V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \ + V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \ + V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \ + V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn, \ + const VRegister& vm) { \ + DCHECK(AS); \ + NEON3DifferentL(vd, vn, vm, OP); \ } +NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +#define NEON_3DIFF_HN_LIST(V) \ + V(addhn, NEON_ADDHN, vd.IsD()) \ + V(addhn2, NEON_ADDHN2, vd.IsQ()) \ + V(raddhn, NEON_RADDHN, vd.IsD()) \ + V(raddhn2, NEON_RADDHN2, vd.IsQ()) \ + V(subhn, NEON_SUBHN, vd.IsD()) \ + V(subhn2, NEON_SUBHN2, vd.IsQ()) \ + V(rsubhn, NEON_RSUBHN, vd.IsD()) \ + V(rsubhn2, NEON_RSUBHN2, vd.IsQ()) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn, \ + const VRegister& vm) { \ + DCHECK(AS); \ + NEON3DifferentHN(vd, vn, vm, OP); \ + } +NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::NEONPerm(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEONPermOp op) { + DCHECK(AreSameFormat(vd, vn, vm)); + DCHECK(!vd.Is1D()); + Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); } +void Assembler::trn1(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_TRN1); +} -void Assembler::mvn(const Register& rd, const Operand& operand) { - orn(rd, AppropriateZeroRegFor(rd), operand); +void Assembler::trn2(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_TRN2); } +void Assembler::uzp1(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_UZP1); +} -void Assembler::mrs(const Register& rt, SystemRegister sysreg) { - DCHECK(rt.Is64Bits()); - Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); +void Assembler::uzp2(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_UZP2); } +void 
Assembler::zip1(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_ZIP1); +} -void Assembler::msr(SystemRegister sysreg, const Register& rt) { - DCHECK(rt.Is64Bits()); - Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); +void Assembler::zip2(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONPerm(vd, vn, vm, NEON_ZIP2); } +void Assembler::NEONShiftImmediate(const VRegister& vd, const VRegister& vn, + NEONShiftImmediateOp op, int immh_immb) { + DCHECK(AreSameFormat(vd, vn)); + Instr q, scalar; + if (vn.IsScalar()) { + q = NEON_Q; + scalar = NEONScalar; + } else { + q = vd.IsD() ? 0 : NEON_Q; + scalar = 0; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} + +void Assembler::NEONShiftLeftImmediate(const VRegister& vd, const VRegister& vn, + int shift, NEONShiftImmediateOp op) { + int laneSizeInBits = vn.LaneSizeInBits(); + DCHECK((shift >= 0) && (shift < laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16); +} + +void Assembler::NEONShiftRightImmediate(const VRegister& vd, + const VRegister& vn, int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.LaneSizeInBits(); + DCHECK((shift >= 1) && (shift <= laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16); +} + +void Assembler::NEONShiftImmediateL(const VRegister& vd, const VRegister& vn, + int shift, NEONShiftImmediateOp op) { + int laneSizeInBits = vn.LaneSizeInBits(); + DCHECK((shift >= 0) && (shift < laneSizeInBits)); + int immh_immb = (laneSizeInBits + shift) << 16; + + DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Instr q; + q = vn.IsD() ? 0 : NEON_Q; + Emit(q | op | immh_immb | Rn(vn) | Rd(vd)); +} + +void Assembler::NEONShiftImmediateN(const VRegister& vd, const VRegister& vn, + int shift, NEONShiftImmediateOp op) { + Instr q, scalar; + int laneSizeInBits = vd.LaneSizeInBits(); + DCHECK((shift >= 1) && (shift <= laneSizeInBits)); + int immh_immb = (2 * laneSizeInBits - shift) << 16; + + if (vn.IsScalar()) { + DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) || + (vd.Is1S() && vn.Is1D())); + q = NEON_Q; + scalar = NEONScalar; + } else { + DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + scalar = 0; + q = vd.IsD() ? 
0 : NEON_Q; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} -void Assembler::hint(SystemHint code) { - Emit(HINT | ImmHint(code) | Rt(xzr)); +void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL); } +void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI); +} -void Assembler::dmb(BarrierDomain domain, BarrierType type) { - Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) { + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm); } +void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) { + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU); +} -void Assembler::dsb(BarrierDomain domain, BarrierType type) { - Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) { + NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm); } +void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); +} -void Assembler::isb() { - Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); +void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); } +void Assembler::sxtl(const VRegister& vd, const VRegister& vn) { + sshll(vd, vn, 0); +} -void Assembler::fmov(FPRegister fd, double imm) { - DCHECK(fd.Is64Bits()); - DCHECK(IsImmFP64(imm)); - Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm)); +void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) { + sshll2(vd, vn, 0); } +void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); +} -void Assembler::fmov(FPRegister fd, float imm) { - DCHECK(fd.Is32Bits()); - DCHECK(IsImmFP32(imm)); - Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm)); +void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); } +void Assembler::uxtl(const VRegister& vd, const VRegister& vn) { + ushll(vd, vn, 0); +} -void Assembler::fmov(Register rd, FPRegister fn) { - DCHECK(rd.SizeInBits() == fn.SizeInBits()); - FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; - Emit(op | Rd(rd) | Rn(fn)); +void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) { + ushll2(vd, vn, 0); } +void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRI); +} -void Assembler::fmov(FPRegister fd, Register rn) { - DCHECK(fd.SizeInBits() == rn.SizeInBits()); - FPIntegerConvertOp op = fd.Is32Bits() ? 
FMOV_sw : FMOV_dx; - Emit(op | Rd(fd) | Rn(rn)); +void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR); } +void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USHR); +} -void Assembler::fmov(FPRegister fd, FPRegister fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn)); +void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR); } +void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR); +} -void Assembler::fadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FADD); +void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA); } +void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USRA); +} -void Assembler::fsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FSUB); +void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA); } +void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA); +} -void Assembler::fmul(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FMUL); +void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); } +void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); +} -void Assembler::fmadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d); +void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); } +void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); +} -void Assembler::fmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? 
FMSUB_s : FMSUB_d); +void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); } +void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); +} -void Assembler::fnmadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d); +void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); } +void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); +} -void Assembler::fnmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d); +void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); } +void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); +} -void Assembler::fdiv(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FDIV); +void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); } +void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); +} -void Assembler::fmax(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FMAX); +void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); } +void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); +} -void Assembler::fmaxnm(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FMAXNM); +void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); } +void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); +} -void Assembler::fmin(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FMIN); +void Assembler::uaddw(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW); } +void Assembler::uaddw2(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW2); +} -void Assembler::fminnm(const FPRegister& fd, - const FPRegister& fn, - const 
FPRegister& fm) { - FPDataProcessing2Source(fd, fn, fm, FMINNM); +void Assembler::saddw(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW); } +void Assembler::saddw2(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW2); +} -void Assembler::fabs(const FPRegister& fd, - const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FABS); +void Assembler::usubw(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW); } +void Assembler::usubw2(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW2); +} -void Assembler::fneg(const FPRegister& fd, - const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FNEG); +void Assembler::ssubw(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW); } +void Assembler::ssubw2(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW2); +} -void Assembler::fsqrt(const FPRegister& fd, - const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FSQRT); +void Assembler::mov(const Register& rd, const Register& rm) { + // Moves involving the stack pointer are encoded as add immediate with + // second operand of zero. Otherwise, orr with first operand zr is + // used. + if (rd.IsSP() || rm.IsSP()) { + add(rd, rm, 0); + } else { + orr(rd, AppropriateZeroRegFor(rd), rm); + } } +void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) { + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vd.LaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: + format = NEON_16B; + DCHECK(rn.IsW()); + break; + case 2: + format = NEON_8H; + DCHECK(rn.IsW()); + break; + case 4: + format = NEON_4S; + DCHECK(rn.IsW()); + break; + default: + DCHECK_EQ(lane_size, 8); + DCHECK(rn.IsX()); + format = NEON_2D; + break; + } -void Assembler::frinta(const FPRegister& fd, - const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FRINTA); + DCHECK((0 <= vd_index) && + (vd_index < LaneCountFromFormat(static_cast(format)))); + Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd)); } +void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) { + DCHECK_GE(vn.SizeInBytes(), 4); + umov(rd, vn, vn_index); +} -void Assembler::frintm(const FPRegister& fd, - const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FRINTM); +void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) { + // We support vn arguments of the form vn.VxT() or vn.T(), where x is the + // number of lanes, and T is b, h, s. + int lane_size = vn.LaneSizeInBytes(); + NEONFormatField format; + Instr q = 0; + switch (lane_size) { + case 1: + format = NEON_16B; + break; + case 2: + format = NEON_8H; + break; + default: + DCHECK_EQ(lane_size, 4); + DCHECK(rd.IsX()); + format = NEON_4S; + break; + } + q = rd.IsW() ? 
0 : NEON_Q; + DCHECK((0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast(format)))); + Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); } +void Assembler::cls(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd)); +} -void Assembler::frintn(const FPRegister& fd, - const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FRINTN); +void Assembler::clz(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd)); } +void Assembler::cnt(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd)); +} -void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FRINTP); +void Assembler::rev16(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd)); } +void Assembler::rev32(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H()); + Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd)); +} -void Assembler::frintz(const FPRegister& fd, - const FPRegister& fn) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - FPDataProcessing1Source(fd, fn, FRINTZ); +void Assembler::rev64(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd)); } +void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is2S() || vd.Is4S()); + Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd)); +} -void Assembler::fcmp(const FPRegister& fn, - const FPRegister& fm) { - DCHECK(fn.SizeInBits() == fm.SizeInBits()); - Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn)); +void Assembler::urecpe(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is2S() || vd.Is4S()); + Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd)); } +void Assembler::NEONAddlp(const VRegister& vd, const VRegister& vn, + NEON2RegMiscOp op) { + DCHECK((op == NEON_SADDLP) || (op == NEON_UADDLP) || (op == NEON_SADALP) || + (op == NEON_UADALP)); -void Assembler::fcmp(const FPRegister& fn, - double value) { - USE(value); - // Although the fcmp instruction can strictly only take an immediate value of - // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't - // affect the result of the comparison. 
- DCHECK(value == 0.0); - Emit(FPType(fn) | FCMP_zero | Rn(fn)); + DCHECK((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) || + (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); } +void Assembler::saddlp(const VRegister& vd, const VRegister& vn) { + NEONAddlp(vd, vn, NEON_SADDLP); +} -void Assembler::fccmp(const FPRegister& fn, - const FPRegister& fm, - StatusFlags nzcv, - Condition cond) { - DCHECK(fn.SizeInBits() == fm.SizeInBits()); - Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv)); +void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) { + NEONAddlp(vd, vn, NEON_UADDLP); } +void Assembler::sadalp(const VRegister& vd, const VRegister& vn) { + NEONAddlp(vd, vn, NEON_SADALP); +} -void Assembler::fcsel(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - Condition cond) { - DCHECK(fd.SizeInBits() == fn.SizeInBits()); - DCHECK(fd.SizeInBits() == fm.SizeInBits()); - Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd)); +void Assembler::uadalp(const VRegister& vd, const VRegister& vn) { + NEONAddlp(vd, vn, NEON_UADALP); } +void Assembler::NEONAcrossLanesL(const VRegister& vd, const VRegister& vn, + NEONAcrossLanesOp op) { + DCHECK((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) || + (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) || + (vn.Is4S() && vd.Is1D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); +} -void Assembler::FPConvertToInt(const Register& rd, - const FPRegister& fn, - FPIntegerConvertOp op) { - Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd)); +void Assembler::saddlv(const VRegister& vd, const VRegister& vn) { + NEONAcrossLanesL(vd, vn, NEON_SADDLV); } +void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) { + NEONAcrossLanesL(vd, vn, NEON_UADDLV); +} -void Assembler::fcvt(const FPRegister& fd, - const FPRegister& fn) { - if (fd.Is64Bits()) { - // Convert float to double. - DCHECK(fn.Is32Bits()); - FPDataProcessing1Source(fd, fn, FCVT_ds); +void Assembler::NEONAcrossLanes(const VRegister& vd, const VRegister& vn, + NEONAcrossLanesOp op) { + DCHECK((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) || + (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) || + (vn.Is4S() && vd.Is1S())); + if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); } else { - // Convert double to float. - DCHECK(fn.Is64Bits()); - FPDataProcessing1Source(fd, fn, FCVT_sd); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); } } +#define NEON_ACROSSLANES_LIST(V) \ + V(fmaxv, NEON_FMAXV, vd.Is1S()) \ + V(fminv, NEON_FMINV, vd.Is1S()) \ + V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \ + V(fminnmv, NEON_FMINNMV, vd.Is1S()) \ + V(addv, NEON_ADDV, true) \ + V(smaxv, NEON_SMAXV, true) \ + V(sminv, NEON_SMINV, true) \ + V(umaxv, NEON_UMAXV, true) \ + V(uminv, NEON_UMINV, true) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + DCHECK(AS); \ + NEONAcrossLanes(vd, vn, OP); \ + } +NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) { + ins(vd, vd_index, rn); +} + +void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) { + // We support vn arguments of the form vn.VxT() or vn.T(), where x is the + // number of lanes, and T is b, h, s or d. 
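  // Illustrative uses (not part of this patch; assumes the VRegister format
  // helpers such as V4S() and V2D() declared in assembler-arm64.h):
  //   umov(w0, v5.V4S(), 1);  // 32-bit lane -> W register.
  //   umov(x0, v5.V2D(), 0);  // 64-bit lane -> X register (sets NEON_Q below).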
+ int lane_size = vn.LaneSizeInBytes(); + NEONFormatField format; + Instr q = 0; + switch (lane_size) { + case 1: + format = NEON_16B; + DCHECK(rd.IsW()); + break; + case 2: + format = NEON_8H; + DCHECK(rd.IsW()); + break; + case 4: + format = NEON_4S; + DCHECK(rd.IsW()); + break; + default: + DCHECK_EQ(lane_size, 8); + DCHECK(rd.IsX()); + format = NEON_2D; + q = NEON_Q; + break; + } -void Assembler::fcvtau(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTAU); + DCHECK((0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast(format)))); + Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); } +void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) { + DCHECK(vd.IsScalar()); + dup(vd, vn, vn_index); +} -void Assembler::fcvtas(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTAS); +void Assembler::dup(const VRegister& vd, const Register& rn) { + DCHECK(!vd.Is1D()); + DCHECK_EQ(vd.Is2D(), rn.IsX()); + Instr q = vd.IsD() ? 0 : NEON_Q; + Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd)); } +void Assembler::ins(const VRegister& vd, int vd_index, const VRegister& vn, + int vn_index) { + DCHECK(AreSameFormat(vd, vn)); + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vd.LaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: + format = NEON_16B; + break; + case 2: + format = NEON_8H; + break; + case 4: + format = NEON_4S; + break; + default: + DCHECK_EQ(lane_size, 8); + format = NEON_2D; + break; + } -void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTMU); + DCHECK((0 <= vd_index) && + (vd_index < LaneCountFromFormat(static_cast(format)))); + DCHECK((0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast(format)))); + Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) | + ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd)); } +void Assembler::NEONTable(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEONTableOp op) { + DCHECK(vd.Is16B() || vd.Is8B()); + DCHECK(vn.Is16B()); + DCHECK(AreSameFormat(vd, vm)); + Emit(op | (vd.IsQ() ? 
NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd)); +} -void Assembler::fcvtms(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTMS); +void Assembler::tbl(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONTable(vd, vn, vm, NEON_TBL_1v); } +void Assembler::tbl(const VRegister& vd, const VRegister& vn, + const VRegister& vn2, const VRegister& vm) { + USE(vn2); + DCHECK(AreSameFormat(vn, vn2)); + DCHECK(AreConsecutive(vn, vn2)); + NEONTable(vd, vn, vm, NEON_TBL_2v); +} -void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTNU); +void Assembler::tbl(const VRegister& vd, const VRegister& vn, + const VRegister& vn2, const VRegister& vn3, + const VRegister& vm) { + USE(vn2); + USE(vn3); + DCHECK(AreSameFormat(vn, vn2, vn3)); + DCHECK(AreConsecutive(vn, vn2, vn3)); + NEONTable(vd, vn, vm, NEON_TBL_3v); } +void Assembler::tbl(const VRegister& vd, const VRegister& vn, + const VRegister& vn2, const VRegister& vn3, + const VRegister& vn4, const VRegister& vm) { + USE(vn2); + USE(vn3); + USE(vn4); + DCHECK(AreSameFormat(vn, vn2, vn3, vn4)); + DCHECK(AreConsecutive(vn, vn2, vn3, vn4)); + NEONTable(vd, vn, vm, NEON_TBL_4v); +} -void Assembler::fcvtns(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTNS); +void Assembler::tbx(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + NEONTable(vd, vn, vm, NEON_TBX_1v); } +void Assembler::tbx(const VRegister& vd, const VRegister& vn, + const VRegister& vn2, const VRegister& vm) { + USE(vn2); + DCHECK(AreSameFormat(vn, vn2)); + DCHECK(AreConsecutive(vn, vn2)); + NEONTable(vd, vn, vm, NEON_TBX_2v); +} -void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTZU); +void Assembler::tbx(const VRegister& vd, const VRegister& vn, + const VRegister& vn2, const VRegister& vn3, + const VRegister& vm) { + USE(vn2); + USE(vn3); + DCHECK(AreSameFormat(vn, vn2, vn3)); + DCHECK(AreConsecutive(vn, vn2, vn3)); + NEONTable(vd, vn, vm, NEON_TBX_3v); } +void Assembler::tbx(const VRegister& vd, const VRegister& vn, + const VRegister& vn2, const VRegister& vn3, + const VRegister& vn4, const VRegister& vm) { + USE(vn2); + USE(vn3); + USE(vn4); + DCHECK(AreSameFormat(vn, vn2, vn3, vn4)); + DCHECK(AreConsecutive(vn, vn2, vn3, vn4)); + NEONTable(vd, vn, vm, NEON_TBX_4v); +} -void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) { - FPConvertToInt(rd, fn, FCVTZS); +void Assembler::mov(const VRegister& vd, int vd_index, const VRegister& vn, + int vn_index) { + ins(vd, vd_index, vn, vn_index); } +void Assembler::mvn(const Register& rd, const Operand& operand) { + orn(rd, AppropriateZeroRegFor(rd), operand); +} -void Assembler::scvtf(const FPRegister& fd, - const Register& rn, - unsigned fbits) { - if (fbits == 0) { - Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd)); - } else { - Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | - Rd(fd)); - } +void Assembler::mrs(const Register& rt, SystemRegister sysreg) { + DCHECK(rt.Is64Bits()); + Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); } +void Assembler::msr(SystemRegister sysreg, const Register& rt) { + DCHECK(rt.Is64Bits()); + Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); +} -void Assembler::ucvtf(const FPRegister& fd, - const Register& rn, - unsigned fbits) { - if (fbits == 0) { - Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd)); +void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); } + +// NEON structure loads and stores. 
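// Illustrative usage (not part of this patch; assumes the VRegister format
// helpers such as V4S() and the post-index MemOperand constructor used by the
// rest of this port):
//
//   ld1(v0.V4S(), v1.V4S(), MemOperand(x0, 32, PostIndex));
//
// loads two consecutive Q registers (2 x 16 bytes) and post-increments x0 by
// 32, the total size of the register list. LoadStoreStructVerify below checks
// exactly that relationship for the immediate post-index form; with a
// register post-index the step is taken from the offset register at run time
// and the offset check is skipped.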
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) { + Instr addr_field = RnSP(addr.base()); + + if (addr.IsPostIndex()) { + static_assert(NEONLoadStoreMultiStructPostIndex == + static_cast( + NEONLoadStoreSingleStructPostIndex), + "Opcodes must match for NEON post index memop."); + + addr_field |= NEONLoadStoreMultiStructPostIndex; + if (addr.offset() == 0) { + addr_field |= RmNot31(addr.regoffset()); + } else { + // The immediate post index addressing mode is indicated by rm = 31. + // The immediate is implied by the number of vector registers used. + addr_field |= (0x1f << Rm_offset); + } } else { - Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | - Rd(fd)); + DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0)); } + return addr_field; } - -void Assembler::dcptr(Label* label) { - RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); - if (label->is_bound()) { - // The label is bound, so it does not need to be updated and the internal - // reference should be emitted. - // - // In this case, label->pos() returns the offset of the label from the - // start of the buffer. - internal_reference_positions_.push_back(pc_offset()); - dc64(reinterpret_cast(buffer_ + label->pos())); +void Assembler::LoadStoreStructVerify(const VRegister& vt, + const MemOperand& addr, Instr op) { +#ifdef DEBUG + // Assert that addressing mode is either offset (with immediate 0), post + // index by immediate of the size of the register list, or post index by a + // value in a core register. + if (addr.IsImmediateOffset()) { + DCHECK_EQ(addr.offset(), 0); } else { - int32_t offset; - if (label->is_linked()) { - // The label is linked, so the internal reference should be added - // onto the end of the label's link chain. - // - // In this case, label->pos() returns the offset of the last linked - // instruction from the start of the buffer. - offset = label->pos() - pc_offset(); - DCHECK(offset != kStartOfLabelLinkChain); - } else { - // The label is unused, so it now becomes linked and the internal - // reference is at the start of the new link chain. - offset = kStartOfLabelLinkChain; - } - // The instruction at pc is now the last link in the label's chain. - label->link_to(pc_offset()); + int offset = vt.SizeInBytes(); + switch (op) { + case NEON_LD1_1v: + case NEON_ST1_1v: + offset *= 1; + break; + case NEONLoadStoreSingleStructLoad1: + case NEONLoadStoreSingleStructStore1: + case NEON_LD1R: + offset = (offset / vt.LaneCount()) * 1; + break; - // Traditionally the offset to the previous instruction in the chain is - // encoded in the instruction payload (e.g. branch range) but internal - // references are not instructions so while unbound they are encoded as - // two consecutive brk instructions. The two 16-bit immediates are used - // to encode the offset. 
- offset >>= kInstructionSizeLog2; - DCHECK(is_int32(offset)); - uint32_t high16 = unsigned_bitextract_32(31, 16, offset); - uint32_t low16 = unsigned_bitextract_32(15, 0, offset); + case NEON_LD1_2v: + case NEON_ST1_2v: + case NEON_LD2: + case NEON_ST2: + offset *= 2; + break; + case NEONLoadStoreSingleStructLoad2: + case NEONLoadStoreSingleStructStore2: + case NEON_LD2R: + offset = (offset / vt.LaneCount()) * 2; + break; - brk(high16); - brk(low16); + case NEON_LD1_3v: + case NEON_ST1_3v: + case NEON_LD3: + case NEON_ST3: + offset *= 3; + break; + case NEONLoadStoreSingleStructLoad3: + case NEONLoadStoreSingleStructStore3: + case NEON_LD3R: + offset = (offset / vt.LaneCount()) * 3; + break; + + case NEON_LD1_4v: + case NEON_ST1_4v: + case NEON_LD4: + case NEON_ST4: + offset *= 4; + break; + case NEONLoadStoreSingleStructLoad4: + case NEONLoadStoreSingleStructStore4: + case NEON_LD4R: + offset = (offset / vt.LaneCount()) * 4; + break; + default: + UNREACHABLE(); + } + DCHECK(!addr.regoffset().Is(NoReg) || addr.offset() == offset); } +#else + USE(vt); + USE(addr); + USE(op); +#endif } +void Assembler::LoadStoreStruct(const VRegister& vt, const MemOperand& addr, + NEONLoadStoreMultiStructOp op) { + LoadStoreStructVerify(vt, addr, op); + DCHECK(vt.IsVector() || vt.Is1D()); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} -// Note: -// Below, a difference in case for the same letter indicates a -// negated bit. -// If b is 1, then B is 0. -Instr Assembler::ImmFP32(float imm) { - DCHECK(IsImmFP32(imm)); - // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 - uint32_t bits = float_to_rawbits(imm); - // bit7: a000.0000 - uint32_t bit7 = ((bits >> 31) & 0x1) << 7; - // bit6: 0b00.0000 - uint32_t bit6 = ((bits >> 29) & 0x1) << 6; - // bit5_to_0: 00cd.efgh - uint32_t bit5_to_0 = (bits >> 19) & 0x3f; +void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} + +void Assembler::ld1(const VRegister& vt, const MemOperand& src) { + LoadStoreStruct(vt, src, NEON_LD1_1v); +} + +void Assembler::ld1(const VRegister& vt, const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + DCHECK(AreSameFormat(vt, vt2)); + DCHECK(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD1_2v); +} + +void Assembler::ld1(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const MemOperand& src) { + USE(vt2); + USE(vt3); + DCHECK(AreSameFormat(vt, vt2, vt3)); + DCHECK(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD1_3v); +} + +void Assembler::ld1(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const VRegister& vt4, + const MemOperand& src) { + USE(vt2); + USE(vt3); + USE(vt4); + DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); + DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD1_4v); +} + +void Assembler::ld2(const VRegister& vt, const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + DCHECK(AreSameFormat(vt, vt2)); + DCHECK(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD2); +} + +void Assembler::ld2(const VRegister& vt, const VRegister& vt2, int lane, + const MemOperand& src) { + USE(vt2); + DCHECK(AreSameFormat(vt, vt2)); + DCHECK(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2); +} + +void Assembler::ld2r(const VRegister& vt, const VRegister& vt2, + const MemOperand& 
src) { + USE(vt2); + DCHECK(AreSameFormat(vt, vt2)); + DCHECK(AreConsecutive(vt, vt2)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R); +} + +void Assembler::ld3(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const MemOperand& src) { + USE(vt2); + USE(vt3); + DCHECK(AreSameFormat(vt, vt2, vt3)); + DCHECK(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD3); +} + +void Assembler::ld3(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, int lane, const MemOperand& src) { + USE(vt2); + USE(vt3); + DCHECK(AreSameFormat(vt, vt2, vt3)); + DCHECK(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3); +} + +void Assembler::ld3r(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const MemOperand& src) { + USE(vt2); + USE(vt3); + DCHECK(AreSameFormat(vt, vt2, vt3)); + DCHECK(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R); +} + +void Assembler::ld4(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const VRegister& vt4, + const MemOperand& src) { + USE(vt2); + USE(vt3); + USE(vt4); + DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); + DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD4); +} + +void Assembler::ld4(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const VRegister& vt4, int lane, + const MemOperand& src) { + USE(vt2); + USE(vt3); + USE(vt4); + DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); + DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4); +} + +void Assembler::ld4r(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const VRegister& vt4, + const MemOperand& src) { + USE(vt2); + USE(vt3); + USE(vt4); + DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); + DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R); +} + +void Assembler::st1(const VRegister& vt, const MemOperand& src) { + LoadStoreStruct(vt, src, NEON_ST1_1v); +} + +void Assembler::st1(const VRegister& vt, const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + DCHECK(AreSameFormat(vt, vt2)); + DCHECK(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_ST1_2v); +} + +void Assembler::st1(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const MemOperand& src) { + USE(vt2); + USE(vt3); + DCHECK(AreSameFormat(vt, vt2, vt3)); + DCHECK(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_ST1_3v); +} + +void Assembler::st1(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const VRegister& vt4, + const MemOperand& src) { + USE(vt2); + USE(vt3); + USE(vt4); + DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); + DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_ST1_4v); +} + +void Assembler::st2(const VRegister& vt, const VRegister& vt2, + const MemOperand& dst) { + USE(vt2); + DCHECK(AreSameFormat(vt, vt2)); + DCHECK(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, dst, NEON_ST2); +} + +void Assembler::st2(const VRegister& vt, const VRegister& vt2, int lane, + const MemOperand& dst) { + USE(vt2); + DCHECK(AreSameFormat(vt, vt2)); + DCHECK(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2); +} + +void Assembler::st3(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const MemOperand& dst) { + USE(vt2); + USE(vt3); + DCHECK(AreSameFormat(vt, vt2, vt3)); + DCHECK(AreConsecutive(vt, vt2, vt3)); + 
LoadStoreStruct(vt, dst, NEON_ST3); +} - return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; +void Assembler::st3(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, int lane, const MemOperand& dst) { + USE(vt2); + USE(vt3); + DCHECK(AreSameFormat(vt, vt2, vt3)); + DCHECK(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3); } +void Assembler::st4(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const VRegister& vt4, + const MemOperand& dst) { + USE(vt2); + USE(vt3); + USE(vt4); + DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); + DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, dst, NEON_ST4); +} + +void Assembler::st4(const VRegister& vt, const VRegister& vt2, + const VRegister& vt3, const VRegister& vt4, int lane, + const MemOperand& dst) { + USE(vt2); + USE(vt3); + USE(vt4); + DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); + DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4); +} + +void Assembler::LoadStoreStructSingle(const VRegister& vt, uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + + // We support vt arguments of the form vt.VxT() or vt.T(), where x is the + // number of lanes, and T is b, h, s or d. + unsigned lane_size = vt.LaneSizeInBytes(); + DCHECK_LT(lane, kQRegSize / lane_size); + + // Lane size is encoded in the opcode field. Lane index is encoded in the Q, + // S and size fields. + lane *= lane_size; + + // Encodings for S[0]/D[0] and S[2]/D[1] are distinguished using the least- + // significant bit of the size field, so we increment lane here to account for + // that. + if (lane_size == 8) lane++; + + Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask; + Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask; + Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask; + + Instr instr = op; + switch (lane_size) { + case 1: + instr |= NEONLoadStoreSingle_b; + break; + case 2: + instr |= NEONLoadStoreSingle_h; + break; + case 4: + instr |= NEONLoadStoreSingle_s; + break; + default: + DCHECK_EQ(lane_size, 8U); + instr |= NEONLoadStoreSingle_d; + } + + Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt)); +} + +void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) { + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1); +} + +void Assembler::ld1r(const VRegister& vt, const MemOperand& src) { + LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R); +} + +void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) { + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1); +} + +void Assembler::dmb(BarrierDomain domain, BarrierType type) { + Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + +void Assembler::dsb(BarrierDomain domain, BarrierType type) { + Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + +void Assembler::isb() { + Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); +} + +void Assembler::fmov(const VRegister& vd, double imm) { + if (vd.IsScalar()) { + DCHECK(vd.Is1D()); + Emit(FMOV_d_imm | Rd(vd) | ImmFP(imm)); + } else { + DCHECK(vd.Is2D()); + Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit; + Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd)); + } +} + +void Assembler::fmov(const VRegister& vd, float imm) { + if (vd.IsScalar()) { + DCHECK(vd.Is1S()); + Emit(FMOV_s_imm | Rd(vd) | 
ImmFP(imm)); + } else { + DCHECK(vd.Is2S() | vd.Is4S()); + Instr op = NEONModifiedImmediate_MOVI; + Instr q = vd.Is4S() ? NEON_Q : 0; + Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd)); + } +} + +void Assembler::fmov(const Register& rd, const VRegister& fn) { + DCHECK_EQ(rd.SizeInBits(), fn.SizeInBits()); + FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; + Emit(op | Rd(rd) | Rn(fn)); +} + +void Assembler::fmov(const VRegister& vd, const Register& rn) { + DCHECK_EQ(vd.SizeInBits(), rn.SizeInBits()); + FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx; + Emit(op | Rd(vd) | Rn(rn)); +} + +void Assembler::fmov(const VRegister& vd, const VRegister& vn) { + DCHECK_EQ(vd.SizeInBits(), vn.SizeInBits()); + Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn)); +} + +void Assembler::fmov(const VRegister& vd, int index, const Register& rn) { + DCHECK((index == 1) && vd.Is1D() && rn.IsX()); + USE(index); + Emit(FMOV_d1_x | Rd(vd) | Rn(rn)); +} + +void Assembler::fmov(const Register& rd, const VRegister& vn, int index) { + DCHECK((index == 1) && vn.Is1D() && rd.IsX()); + USE(index); + Emit(FMOV_x_d1 | Rd(rd) | Rn(vn)); +} + +void Assembler::fmadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d); +} + +void Assembler::fmsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d); +} + +void Assembler::fnmadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d); +} + +void Assembler::fnmsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d); +} + +void Assembler::fnmul(const VRegister& vd, const VRegister& vn, + const VRegister& vm) { + DCHECK(AreSameSizeAndType(vd, vn, vm)); + Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d; + Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + +void Assembler::fcmp(const VRegister& fn, const VRegister& fm) { + DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits()); + Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn)); +} + +void Assembler::fcmp(const VRegister& fn, double value) { + USE(value); + // Although the fcmp instruction can strictly only take an immediate value of + // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't + // affect the result of the comparison. 
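An illustrative aside, not part of the patch: because IEEE 754 defines -0.0 == 0.0, the DCHECK below accepts either spelling and the emitted compare-with-zero sets the same NZCV flags in both cases. Register choices here are arbitrary examples.
  fcmp(d0, d1);   // register-register compare
  fcmp(d0, 0.0);  // compare against literal zero; passing -0.0 would encode identically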
+ DCHECK_EQ(value, 0.0); + Emit(FPType(fn) | FCMP_zero | Rn(fn)); +} + +void Assembler::fccmp(const VRegister& fn, const VRegister& fm, + StatusFlags nzcv, Condition cond) { + DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits()); + Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv)); +} + +void Assembler::fcsel(const VRegister& fd, const VRegister& fn, + const VRegister& fm, Condition cond) { + DCHECK_EQ(fd.SizeInBits(), fn.SizeInBits()); + DCHECK_EQ(fd.SizeInBits(), fm.SizeInBits()); + Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd)); +} + +void Assembler::NEONFPConvertToInt(const Register& rd, const VRegister& vn, + Instr op) { + Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd)); +} + +void Assembler::NEONFPConvertToInt(const VRegister& vd, const VRegister& vn, + Instr op) { + if (vn.IsScalar()) { + DCHECK((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D())); + op |= NEON_Q | NEONScalar; + } + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); +} + +void Assembler::fcvt(const VRegister& vd, const VRegister& vn) { + FPDataProcessing1SourceOp op; + if (vd.Is1D()) { + DCHECK(vn.Is1S() || vn.Is1H()); + op = vn.Is1S() ? FCVT_ds : FCVT_dh; + } else if (vd.Is1S()) { + DCHECK(vn.Is1D() || vn.Is1H()); + op = vn.Is1D() ? FCVT_sd : FCVT_sh; + } else { + DCHECK(vd.Is1H()); + DCHECK(vn.Is1D() || vn.Is1S()); + op = vn.Is1D() ? FCVT_hd : FCVT_hs; + } + FPDataProcessing1Source(vd, vn, op); +} + +void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S())); + Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + +void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S())); + Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + +void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) { + DCHECK((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S())); + Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + +void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) { + DCHECK((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S())); + Instr format = vn.Is2D() ? 
(1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + +void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) { + Instr format = 1 << NEONSize_offset; + if (vd.IsScalar()) { + DCHECK(vd.Is1S() && vn.Is1D()); + Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd)); + } else { + DCHECK(vd.Is2S() && vn.Is2D()); + Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd)); + } +} + +void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.Is4S() && vn.Is2D()); + Instr format = 1 << NEONSize_offset; + Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd)); +} + +#define NEON_FP2REGMISC_FCVT_LIST(V) \ + V(fcvtnu, NEON_FCVTNU, FCVTNU) \ + V(fcvtns, NEON_FCVTNS, FCVTNS) \ + V(fcvtpu, NEON_FCVTPU, FCVTPU) \ + V(fcvtps, NEON_FCVTPS, FCVTPS) \ + V(fcvtmu, NEON_FCVTMU, FCVTMU) \ + V(fcvtms, NEON_FCVTMS, FCVTMS) \ + V(fcvtau, NEON_FCVTAU, FCVTAU) \ + V(fcvtas, NEON_FCVTAS, FCVTAS) + +#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \ + void Assembler::FN(const Register& rd, const VRegister& vn) { \ + NEONFPConvertToInt(rd, vn, SCA_OP); \ + } \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + NEONFPConvertToInt(vd, vn, VEC_OP); \ + } +NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS) +#undef DEFINE_ASM_FUNCS + +void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) { + DCHECK_GE(fbits, 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_SCVTF); + } else { + DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm); + } +} + +void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) { + DCHECK_GE(fbits, 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_UCVTF); + } else { + DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm); + } +} + +void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) { + DCHECK_GE(fbits, 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd)); + } else { + Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(vd)); + } +} + +void Assembler::ucvtf(const VRegister& fd, const Register& rn, int fbits) { + DCHECK_GE(fbits, 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd)); + } else { + Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(fd)); + } +} + +void Assembler::NEON3Same(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEON3SameOp vop) { + DCHECK(AreSameFormat(vd, vn, vm)); + DCHECK(vd.IsVector() || !vd.IsQ()); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + +void Assembler::NEONFP3Same(const VRegister& vd, const VRegister& vn, + const VRegister& vm, Instr op) { + DCHECK(AreSameFormat(vd, vn, vm)); + Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + +#define NEON_FP2REGMISC_LIST(V) \ + V(fabs, NEON_FABS, FABS) \ + V(fneg, NEON_FNEG, FNEG) \ + V(fsqrt, NEON_FSQRT, FSQRT) \ + V(frintn, NEON_FRINTN, FRINTN) \ + V(frinta, NEON_FRINTA, FRINTA) \ + V(frintp, NEON_FRINTP, FRINTP) \ + V(frintm, NEON_FRINTM, FRINTM) \ + V(frintx, NEON_FRINTX, FRINTX) \ + V(frintz, NEON_FRINTZ, FRINTZ) \ + V(frinti, NEON_FRINTI, FRINTI) \ + V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \ + V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar) + +#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) 
\ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + Instr op; \ + if (vd.IsScalar()) { \ + DCHECK(vd.Is1S() || vd.Is1D()); \ + op = SCA_OP; \ + } else { \ + DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); \ + op = VEC_OP; \ + } \ + NEONFP2RegMisc(vd, vn, op); \ + } +NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) { + DCHECK((vd.Is8H() && vn.Is8B() && shift == 8) || + (vd.Is4S() && vn.Is4H() && shift == 16) || + (vd.Is2D() && vn.Is2S() && shift == 32)); + USE(shift); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + +void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) { + USE(shift); + DCHECK((vd.Is8H() && vn.Is16B() && shift == 8) || + (vd.Is4S() && vn.Is8H() && shift == 16) || + (vd.Is2D() && vn.Is4S() && shift == 32)); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + +void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, + NEON2RegMiscOp vop, double value) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK_EQ(value, 0.0); + USE(value); + + Instr op = vop; + if (vd.IsScalar()) { + DCHECK(vd.Is1S() || vd.Is1D()); + op |= NEON_Q | NEONScalar; + } else { + DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); + } + + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + +void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value); +} + +void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value); +} + +void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value); +} + +void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value); +} + +void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) { + NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value); +} -Instr Assembler::ImmFP64(double imm) { +void Assembler::frecpx(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsScalar()); + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is1S() || vd.Is1D()); + Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd)); +} + +void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) { + DCHECK(vn.Is1S() || vn.Is1D()); + DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + +void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) { + DCHECK_GE(fbits, 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_FCVTZS); + } else { + DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm); + } +} + +void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) { + DCHECK(vn.Is1S() || vn.Is1D()); + DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + +void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) { + DCHECK_GE(fbits, 0); + if (fbits == 0) { + NEONFP2RegMisc(vd, vn, NEON_FCVTZU); + } else { + DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S()); + NEONShiftRightImmediate(vd, 
vn, fbits, NEON_FCVTZU_imm); + } +} + +void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, + Instr op) { + DCHECK(AreSameFormat(vd, vn)); + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + +void Assembler::NEON2RegMisc(const VRegister& vd, const VRegister& vn, + NEON2RegMiscOp vop, int value) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK_EQ(value, 0); + USE(value); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rn(vn) | Rd(vd)); +} + +void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value); +} + +void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGE_zero, value); +} + +void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGT_zero, value); +} + +void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMLE_zero, value); +} + +void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMLT_zero, value); +} + +#define NEON_3SAME_LIST(V) \ + V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \ + V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \ + V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \ + V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \ + V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \ + V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \ + V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \ + V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \ + V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \ + V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \ + V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \ + V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \ + V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \ + V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \ + V(orr, 
NEON_ORR, vd.Is8B() || vd.Is16B()) \ + V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \ + V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \ + V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \ + V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \ + V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \ + V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \ + V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \ + V(uqadd, NEON_UQADD, true) \ + V(sqadd, NEON_SQADD, true) \ + V(uqsub, NEON_UQSUB, true) \ + V(sqsub, NEON_SQSUB, true) \ + V(sqshl, NEON_SQSHL, true) \ + V(uqshl, NEON_UQSHL, true) \ + V(sqrshl, NEON_SQRSHL, true) \ + V(uqrshl, NEON_UQRSHL, true) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn, \ + const VRegister& vm) { \ + DCHECK(AS); \ + NEON3Same(vd, vn, vm, OP); \ + } +NEON_3SAME_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +#define NEON_FP3SAME_LIST(V) \ + V(fadd, NEON_FADD, FADD) \ + V(fsub, NEON_FSUB, FSUB) \ + V(fmul, NEON_FMUL, FMUL) \ + V(fdiv, NEON_FDIV, FDIV) \ + V(fmax, NEON_FMAX, FMAX) \ + V(fmaxnm, NEON_FMAXNM, FMAXNM) \ + V(fmin, NEON_FMIN, FMIN) \ + V(fminnm, NEON_FMINNM, FMINNM) \ + V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \ + V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \ + V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \ + V(fabd, NEON_FABD, NEON_FABD_scalar) \ + V(fmla, NEON_FMLA, 0) \ + V(fmls, NEON_FMLS, 0) \ + V(facge, NEON_FACGE, NEON_FACGE_scalar) \ + V(facgt, NEON_FACGT, NEON_FACGT_scalar) \ + V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \ + V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \ + V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \ + V(faddp, NEON_FADDP, 0) \ + V(fmaxp, NEON_FMAXP, 0) \ + V(fminp, NEON_FMINP, 0) \ + V(fmaxnmp, NEON_FMAXNMP, 0) \ + V(fminnmp, NEON_FMINNMP, 0) + +#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn, \ + const VRegister& vm) { \ + Instr op; \ + if ((SCA_OP != 0) && vd.IsScalar()) { \ + DCHECK(vd.Is1S() || vd.Is1D()); \ + op = SCA_OP; \ + } else { \ + DCHECK(vd.IsVector()); \ + DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); \ + op = VEC_OP; \ + } \ + NEONFP3Same(vd, vn, vm, op); \ + } +NEON_FP3SAME_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::addp(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is1D() && vn.Is2D())); + Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd)); +} + +void Assembler::faddp(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd)); +} + +void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd)); +} + +void Assembler::fminp(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd)); +} + +void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd)); +} + +void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) { + DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D())); + Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd)); +} + +void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) { + NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR); +} + +void Assembler::mov(const VRegister& vd, 
const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + if (vd.IsD()) { + orr(vd.V8B(), vn.V8B(), vn.V8B()); + } else { + DCHECK(vd.IsQ()); + orr(vd.V16B(), vn.V16B(), vn.V16B()); + } +} + +void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) { + NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC); +} + +void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift, + const int shift_amount) { + DCHECK((shift == LSL) || (shift == MSL)); + if (vd.Is2D() || vd.Is1D()) { + DCHECK_EQ(shift_amount, 0); + int imm8 = 0; + for (int i = 0; i < 8; ++i) { + int byte = (imm >> (i * 8)) & 0xff; + DCHECK((byte == 0) || (byte == 0xff)); + if (byte == 0xff) { + imm8 |= (1 << i); + } + } + Instr q = vd.Is2D() ? NEON_Q : 0; + Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI | + ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd)); + } else if (shift == LSL) { + NEONModifiedImmShiftLsl(vd, static_cast(imm), shift_amount, + NEONModifiedImmediate_MOVI); + } else { + NEONModifiedImmShiftMsl(vd, static_cast(imm), shift_amount, + NEONModifiedImmediate_MOVI); + } +} + +void Assembler::mvn(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + if (vd.IsD()) { + not_(vd.V8B(), vn.V8B()); + } else { + DCHECK(vd.IsQ()); + not_(vd.V16B(), vn.V16B()); + } +} + +void Assembler::mvni(const VRegister& vd, const int imm8, Shift shift, + const int shift_amount) { + DCHECK((shift == LSL) || (shift == MSL)); + if (shift == LSL) { + NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI); + } else { + NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI); + } +} + +void Assembler::NEONFPByElement(const VRegister& vd, const VRegister& vn, + const VRegister& vm, int vm_index, + NEONByIndexedElementOp vop) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) || + (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) || + (vd.Is1D() && vm.Is1D())); + DCHECK((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2))); + + Instr op = vop; + int index_num_bits = vm.Is1S() ? 2 : 1; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + } + + Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | + Rn(vn) | Rd(vd)); +} + +void Assembler::NEONByElement(const VRegister& vd, const VRegister& vn, + const VRegister& vm, int vm_index, + NEONByIndexedElementOp vop) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) || + (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) || + (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S())); + DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 
3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | + Rd(vd)); +} + +void Assembler::NEONByElementL(const VRegister& vd, const VRegister& vn, + const VRegister& vm, int vm_index, + NEONByIndexedElementOp vop) { + DCHECK((vd.Is4S() && vn.Is4H() && vm.Is1H()) || + (vd.Is4S() && vn.Is8H() && vm.Is1H()) || + (vd.Is1S() && vn.Is1H() && vm.Is1H()) || + (vd.Is2D() && vn.Is2S() && vm.Is1S()) || + (vd.Is2D() && vn.Is4S() && vm.Is1S()) || + (vd.Is1D() && vn.Is1S() && vm.Is1S())); + + DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | + Rd(vd)); +} + +#define NEON_BYELEMENT_LIST(V) \ + V(mul, NEON_MUL_byelement, vn.IsVector()) \ + V(mla, NEON_MLA_byelement, vn.IsVector()) \ + V(mls, NEON_MLS_byelement, vn.IsVector()) \ + V(sqdmulh, NEON_SQDMULH_byelement, true) \ + V(sqrdmulh, NEON_SQRDMULH_byelement, true) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn, \ + const VRegister& vm, int vm_index) { \ + DCHECK(AS); \ + NEONByElement(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +#define NEON_FPBYELEMENT_LIST(V) \ + V(fmul, NEON_FMUL_byelement) \ + V(fmla, NEON_FMLA_byelement) \ + V(fmls, NEON_FMLS_byelement) \ + V(fmulx, NEON_FMULX_byelement) + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn, \ + const VRegister& vm, int vm_index) { \ + NEONFPByElement(vd, vn, vm, vm_index, OP); \ + } +NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +#define NEON_BYELEMENT_LONG_LIST(V) \ + V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ()) + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn, \ + const VRegister& vm, int vm_index) { \ + DCHECK(AS); \ + NEONByElementL(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::suqadd(const VRegister& vd, const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_SUQADD); +} + +void 
Assembler::usqadd(const VRegister& vd, const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_USQADD); +} + +void Assembler::abs(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_ABS); +} + +void Assembler::sqabs(const VRegister& vd, const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_SQABS); +} + +void Assembler::neg(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_NEG); +} + +void Assembler::sqneg(const VRegister& vd, const VRegister& vn) { + NEON2RegMisc(vd, vn, NEON_SQNEG); +} + +void Assembler::NEONXtn(const VRegister& vd, const VRegister& vn, + NEON2RegMiscOp vop) { + Instr format, op = vop; + if (vd.IsScalar()) { + DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) || + (vd.Is1S() && vn.Is1D())); + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + format = VFormat(vd); + } + Emit(format | op | Rn(vn) | Rd(vd)); +} + +void Assembler::xtn(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsVector() && vd.IsD()); + NEONXtn(vd, vn, NEON_XTN); +} + +void Assembler::xtn2(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_XTN); +} + +void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsScalar() || vd.IsD()); + NEONXtn(vd, vn, NEON_SQXTN); +} + +void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_SQXTN); +} + +void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsScalar() || vd.IsD()); + NEONXtn(vd, vn, NEON_SQXTUN); +} + +void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_SQXTUN); +} + +void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsScalar() || vd.IsD()); + NEONXtn(vd, vn, NEON_UQXTN); +} + +void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) { + DCHECK(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_UQXTN); +} + +// NEON NOT and RBIT are distinguised by bit 22, the bottom bit of "size". +void Assembler::not_(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd)); +} + +void Assembler::rbit(const VRegister& vd, const VRegister& vn) { + DCHECK(AreSameFormat(vd, vn)); + DCHECK(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd)); +} + +void Assembler::ext(const VRegister& vd, const VRegister& vn, + const VRegister& vm, int index) { + DCHECK(AreSameFormat(vd, vn, vm)); + DCHECK(vd.Is8B() || vd.Is16B()); + DCHECK((0 <= index) && (index < vd.LaneCount())); + Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd)); +} + +void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) { + Instr q, scalar; + + // We support vn arguments of the form vn.VxT() or vn.T(), where x is the + // number of lanes, and T is b, h, s or d. 
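An illustrative aside, not part of the patch, sketching the two argument forms the comment above describes (register and lane choices are arbitrary):
  dup(v0.V4S(), v1.V4S(), 2);  // vector form: broadcast lane 2 of v1 across all four S lanes of v0
  dup(s0, v1.V4S(), 3);        // scalar form: copy lane 3 of v1 into s0 (takes the NEONScalar encoding below)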
+ int lane_size = vn.LaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: + format = NEON_16B; + break; + case 2: + format = NEON_8H; + break; + case 4: + format = NEON_4S; + break; + default: + DCHECK_EQ(lane_size, 8); + format = NEON_2D; + break; + } + + if (vd.IsScalar()) { + q = NEON_Q; + scalar = NEONScalar; + } else { + DCHECK(!vd.Is1D()); + q = vd.IsD() ? 0 : NEON_Q; + scalar = 0; + } + Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) | + Rd(vd)); +} + +void Assembler::dcptr(Label* label) { + RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); + if (label->is_bound()) { + // The label is bound, so it does not need to be updated and the internal + // reference should be emitted. + // + // In this case, label->pos() returns the offset of the label from the + // start of the buffer. + internal_reference_positions_.push_back(pc_offset()); + dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos())); + } else { + int32_t offset; + if (label->is_linked()) { + // The label is linked, so the internal reference should be added + // onto the end of the label's link chain. + // + // In this case, label->pos() returns the offset of the last linked + // instruction from the start of the buffer. + offset = label->pos() - pc_offset(); + DCHECK(offset != kStartOfLabelLinkChain); + } else { + // The label is unused, so it now becomes linked and the internal + // reference is at the start of the new link chain. + offset = kStartOfLabelLinkChain; + } + // The instruction at pc is now the last link in the label's chain. + label->link_to(pc_offset()); + + // Traditionally the offset to the previous instruction in the chain is + // encoded in the instruction payload (e.g. branch range) but internal + // references are not instructions so while unbound they are encoded as + // two consecutive brk instructions. The two 16-bit immediates are used + // to encode the offset. + offset >>= kInstructionSizeLog2; + DCHECK(is_int32(offset)); + uint32_t high16 = unsigned_bitextract_32(31, 16, offset); + uint32_t low16 = unsigned_bitextract_32(15, 0, offset); + + brk(high16); + brk(low16); + } +} + +// Below, a difference in case for the same letter indicates a +// negated bit. If b is 1, then B is 0. +uint32_t Assembler::FPToImm8(double imm) { DCHECK(IsImmFP64(imm)); // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 // 0000.0000.0000.0000.0000.0000.0000.0000 - uint64_t bits = double_to_rawbits(imm); + uint64_t bits = bit_cast<uint64_t>(imm); // bit7: a000.0000 uint64_t bit7 = ((bits >> 63) & 0x1) << 7; // bit6: 0b00.0000 @@ -2196,14 +3968,16 @@ Instr Assembler::ImmFP64(double imm) { // bit5_to_0: 00cd.efgh uint64_t bit5_to_0 = (bits >> 48) & 0x3f; - return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset); + return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0); } +Instr Assembler::ImmFP(double imm) { return FPToImm8(imm) << ImmFP_offset; } +Instr Assembler::ImmNEONFP(double imm) { + return ImmNEONabcdefgh(FPToImm8(imm)); +} // Code generation helpers. -void Assembler::MoveWide(const Register& rd, - uint64_t imm, - int shift, +void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift, MoveWideImmediateOp mov_op) { // Ignore the top 32 bits of an immediate if we're moving to a W register.
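A worked example of the FPToImm8()/ImmFP() encoding defined above (illustrative only, not part of the patch); the bit6 extraction itself falls in the elided context above and takes the repeated exponent bit b:
  // imm = 1.0, raw bits 0x3FF0000000000000:
  //   bit7 (sign a)         = 0
  //   bit6 (repeated bit b) = 1         -> 0x40
  //   bit5_to_0 (00cd.efgh) = 0b110000  -> 0x30
  //   so FPToImm8(1.0) == 0x70, and ImmFP(1.0) shifts it into the ImmFP field.
  // imm = 2.0, raw bits 0x4000000000000000: all three terms are zero, so imm8 == 0x00.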
if (rd.Is32Bits()) { @@ -2245,13 +4019,9 @@ void Assembler::MoveWide(const Register& rd, ImmMoveWide(static_cast(imm)) | ShiftMoveWide(shift)); } - -void Assembler::AddSub(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, - AddSubOp op) { - DCHECK(rd.SizeInBits() == rn.SizeInBits()); +void Assembler::AddSub(const Register& rd, const Register& rn, + const Operand& operand, FlagsUpdate S, AddSubOp op) { + DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits()); DCHECK(!operand.NeedsRelocation(this)); if (operand.IsImmediate()) { int64_t immediate = operand.ImmediateValue(); @@ -2260,8 +4030,8 @@ void Assembler::AddSub(const Register& rd, Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | ImmAddSub(static_cast(immediate)) | dest_reg | RnSP(rn)); } else if (operand.IsShiftedRegister()) { - DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); - DCHECK(operand.shift() != ROR); + DCHECK_EQ(operand.reg().SizeInBits(), rd.SizeInBits()); + DCHECK_NE(operand.shift(), ROR); // For instructions of the form: // add/sub wsp, , [, LSL #0-3 ] @@ -2283,39 +4053,34 @@ void Assembler::AddSub(const Register& rd, } } - -void Assembler::AddSubWithCarry(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, +void Assembler::AddSubWithCarry(const Register& rd, const Register& rn, + const Operand& operand, FlagsUpdate S, AddSubWithCarryOp op) { - DCHECK(rd.SizeInBits() == rn.SizeInBits()); - DCHECK(rd.SizeInBits() == operand.reg().SizeInBits()); + DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits()); + DCHECK_EQ(rd.SizeInBits(), operand.reg().SizeInBits()); DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); DCHECK(!operand.NeedsRelocation(this)); Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); } - void Assembler::hlt(int code) { DCHECK(is_uint16(code)); Emit(HLT | ImmException(code)); } - void Assembler::brk(int code) { DCHECK(is_uint16(code)); Emit(BRK | ImmException(code)); } - void Assembler::EmitStringData(const char* string) { size_t len = strlen(string) + 1; - DCHECK(RoundUp(len, kInstructionSize) <= static_cast(kGap)); + DCHECK_LE(RoundUp(len, kInstructionSize), static_cast(kGap)); EmitData(string, static_cast(len)); // Pad with NULL characters until pc_ is aligned. 
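A small worked example of the alignment step below (illustrative, not part of the patch), with kInstructionSize == 4:
  EmitStringData("abc");   // 4 bytes including the NUL: pc_ stays aligned, no padding emitted
  EmitStringData("abcd");  // 5 bytes including the NUL: 3 padding NUL bytes follow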
const char pad[] = {'\0', '\0', '\0', '\0'}; - STATIC_ASSERT(sizeof(pad) == kInstructionSize); + static_assert(sizeof(pad) == kInstructionSize, + "Size of padding must match instruction size."); EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset()); } @@ -2349,7 +4114,7 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) { #endif if (params & BREAK) { - hlt(kImmExceptionIsDebug); + brk(0); } } @@ -2432,33 +4197,75 @@ void Assembler::DataProcessing1Source(const Register& rd, Emit(SF(rn) | op | Rn(rn) | Rd(rd)); } - -void Assembler::FPDataProcessing1Source(const FPRegister& fd, - const FPRegister& fn, +void Assembler::FPDataProcessing1Source(const VRegister& vd, + const VRegister& vn, FPDataProcessing1SourceOp op) { - Emit(FPType(fn) | op | Rn(fn) | Rd(fd)); + Emit(FPType(vn) | op | Rn(vn) | Rd(vd)); } - -void Assembler::FPDataProcessing2Source(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, +void Assembler::FPDataProcessing2Source(const VRegister& fd, + const VRegister& fn, + const VRegister& fm, FPDataProcessing2SourceOp op) { DCHECK(fd.SizeInBits() == fn.SizeInBits()); DCHECK(fd.SizeInBits() == fm.SizeInBits()); Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd)); } - -void Assembler::FPDataProcessing3Source(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa, +void Assembler::FPDataProcessing3Source(const VRegister& fd, + const VRegister& fn, + const VRegister& fm, + const VRegister& fa, FPDataProcessing3SourceOp op) { DCHECK(AreSameSizeAndType(fd, fn, fm, fa)); Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa)); } +void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, const int imm8, + const int left_shift, + NEONModifiedImmediateOp op) { + DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() || + vd.Is4S()); + DCHECK((left_shift == 0) || (left_shift == 8) || (left_shift == 16) || + (left_shift == 24)); + DCHECK(is_uint8(imm8)); + + int cmode_1, cmode_2, cmode_3; + if (vd.Is8B() || vd.Is16B()) { + DCHECK_EQ(op, NEONModifiedImmediate_MOVI); + cmode_1 = 1; + cmode_2 = 1; + cmode_3 = 1; + } else { + cmode_1 = (left_shift >> 3) & 1; + cmode_2 = left_shift >> 4; + cmode_3 = 0; + if (vd.Is4H() || vd.Is8H()) { + DCHECK((left_shift == 0) || (left_shift == 8)); + cmode_3 = 1; + } + } + int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1); + + Instr q = vd.IsQ() ? NEON_Q : 0; + + Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); +} + +void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8, + const int shift_amount, + NEONModifiedImmediateOp op) { + DCHECK(vd.Is2S() || vd.Is4S()); + DCHECK((shift_amount == 8) || (shift_amount == 16)); + DCHECK(is_uint8(imm8)); + + int cmode_0 = (shift_amount >> 4) & 1; + int cmode = 0xc | cmode_0; + + Instr q = vd.IsQ() ? NEON_Q : 0; + + Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); +} void Assembler::EmitShift(const Register& rd, const Register& rn, @@ -2558,7 +4365,7 @@ void Assembler::LoadStore(const CPURegister& rt, Instr memop = op | Rt(rt) | RnSP(addr.base()); if (addr.IsImmediateOffset()) { - LSDataSize size = CalcLSDataSize(op); + unsigned size = CalcLSDataSize(op); if (IsImmLSScaled(addr.offset(), size)) { int offset = static_cast(addr.offset()); // Use the scaled addressing mode. 
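An illustrative aside, not part of the patch: for a 64-bit LDR/STR the data size is 8 bytes (size == 3 above), so the scaled form accepts offsets that are multiples of 8 whose scaled value fits an unsigned 12-bit field, i.e. 0 to 32760; other small offsets fall back to the unscaled form checked by IsImmLSUnscaled() below. Example offsets for an X-register load (base register arbitrary):
  // [x1, #8]     -> scaled   (8 >> 3 == 1)
  // [x1, #32760] -> scaled   (32760 >> 3 == 4095, the largest encodable value)
  // [x1, #-8]    -> unscaled (negative, but is_int9(-8) holds)
  // [x1, #4]     -> unscaled (not a multiple of the 8-byte access size)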
@@ -2611,14 +4418,12 @@ bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); } - -bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) { +bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) { bool offset_is_size_multiple = (((offset >> size) << size) == offset); return offset_is_size_multiple && is_uint12(offset >> size); } - -bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) { +bool Assembler::IsImmLSPair(int64_t offset, unsigned size) { bool offset_is_size_multiple = (((offset >> size) << size) == offset); return offset_is_size_multiple && is_int7(offset >> size); } @@ -2628,6 +4433,8 @@ bool Assembler::IsImmLLiteral(int64_t offset) { int inst_size = static_cast(kInstructionSizeLog2); bool offset_is_inst_multiple = (((offset >> inst_size) << inst_size) == offset); + DCHECK_GT(offset, 0); + offset >>= kLoadLiteralScaleLog2; return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width); } @@ -2759,7 +4566,7 @@ bool Assembler::IsImmLogical(uint64_t value, } // If the repeat period d is not a power of two, it can't be encoded. - if (!IS_POWER_OF_TWO(d)) { + if (!base::bits::IsPowerOfTwo(d)) { return false; } @@ -2849,7 +4656,7 @@ bool Assembler::IsImmConditionalCompare(int64_t immediate) { bool Assembler::IsImmFP32(float imm) { // Valid values will have the form: // aBbb.bbbc.defg.h000.0000.0000.0000.0000 - uint32_t bits = float_to_rawbits(imm); + uint32_t bits = bit_cast(imm); // bits[19..0] are cleared. if ((bits & 0x7ffff) != 0) { return false; @@ -2874,7 +4681,7 @@ bool Assembler::IsImmFP64(double imm) { // Valid values will have the form: // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 // 0000.0000.0000.0000.0000.0000.0000.0000 - uint64_t bits = double_to_rawbits(imm); + uint64_t bits = bit_cast(imm); // bits[47..0] are cleared. if ((bits & 0xffffffffffffL) != 0) { return false; @@ -2908,9 +4715,7 @@ void Assembler::GrowBuffer() { // Some internal data structures overflow for very large buffers, // they must ensure that kMaximalBufferSize is not too large. - if (desc.buffer_size > kMaximalBufferSize || - static_cast(desc.buffer_size) > - isolate_data().max_old_generation_size_) { + if (desc.buffer_size > kMaximalBufferSize) { V8::FatalProcessOutOfMemory("Assembler::GrowBuffer"); } @@ -2957,6 +4762,8 @@ void Assembler::GrowBuffer() { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { // We do not try to reuse pool constants. RelocInfo rinfo(reinterpret_cast(pc_), rmode, data, NULL); + bool write_reloc_info = true; + if (((rmode >= RelocInfo::COMMENT) && (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) || (rmode == RelocInfo::INTERNAL_REFERENCE) || @@ -2972,27 +4779,20 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)); // These modes do not need an entry in the constant pool. } else { - constpool_.RecordEntry(data, rmode); + write_reloc_info = constpool_.RecordEntry(data, rmode); // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. BlockConstPoolFor(1); } - if (!RelocInfo::IsNone(rmode)) { + if (!RelocInfo::IsNone(rmode) && write_reloc_info) { // Don't record external references unless the heap will be serialized. 
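An illustrative aside, not part of the patch: with the shared-entry tracking this patch adds to ConstPool (see RecordEntry() in the header changes further down), recording the same shareable constant twice should write RelocInfo only for the first use. Hypothetical sketch, constant value arbitrary:
  // write_reloc_info = constpool_.RecordEntry(0x1234567890, RelocInfo::NONE64);  // first use: returns true
  // write_reloc_info = constpool_.RecordEntry(0x1234567890, RelocInfo::NONE64);  // repeat use: returns false, entry is shared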
if (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() && !emit_debug_code()) { return; } DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here - if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - RelocInfo reloc_info_with_ast_id(reinterpret_cast(pc_), rmode, - RecordedAstId().ToInt(), NULL); - ClearRecordedAstId(); - reloc_info_writer.Write(&reloc_info_with_ast_id); - } else { - reloc_info_writer.Write(&rinfo); - } + reloc_info_writer.Write(&rinfo); } } diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h index e4ca410abd3eab..cc9315458dfc24 100644 --- a/deps/v8/src/arm64/assembler-arm64.h +++ b/deps/v8/src/arm64/assembler-arm64.h @@ -13,6 +13,7 @@ #include "src/arm64/constants-arm64.h" #include "src/arm64/instructions-arm64.h" #include "src/assembler.h" +#include "src/base/optional.h" #include "src/globals.h" #include "src/utils.h" @@ -55,7 +56,9 @@ namespace internal { #define SIMD128_REGISTERS(V) \ V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \ - V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15) + V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15) \ + V(q16) V(q17) V(q18) V(q19) V(q20) V(q21) V(q22) V(q23) \ + V(q24) V(q25) V(q26) V(q27) V(q28) V(q29) V(q30) V(q31) #define ALLOCATABLE_DOUBLE_REGISTERS(R) \ R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \ @@ -67,11 +70,10 @@ namespace internal { constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte; static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize; -// Some CPURegister methods can return Register and FPRegister types, so we +// Some CPURegister methods can return Register and VRegister types, so we // need to declare them in advance. struct Register; -struct FPRegister; - +struct VRegister; struct CPURegister { enum Code { @@ -87,17 +89,22 @@ struct CPURegister { // which are always zero-initialized before any constructors are called. 
kInvalid = 0, kRegister, - kFPRegister, + kVRegister, kNoRegister }; constexpr CPURegister() : CPURegister(0, 0, CPURegister::kNoRegister) {} - constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type) - : reg_code(reg_code), reg_size(reg_size), reg_type(reg_type) {} + constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type, + int lane_count = 1) + : reg_code(reg_code), + reg_size(reg_size), + reg_type(reg_type), + lane_count(lane_count) {} - static CPURegister Create(int code, int size, RegisterType type) { - CPURegister r = {code, size, type}; + static CPURegister Create(int reg_code, int reg_size, RegisterType reg_type, + int lane_count = 1) { + CPURegister r = {reg_code, reg_size, reg_type, lane_count}; return r; } @@ -106,12 +113,15 @@ struct CPURegister { RegList Bit() const; int SizeInBits() const; int SizeInBytes() const; + bool Is8Bits() const; + bool Is16Bits() const; bool Is32Bits() const; bool Is64Bits() const; + bool Is128Bits() const; bool IsValid() const; bool IsValidOrNone() const; bool IsValidRegister() const; - bool IsValidFPRegister() const; + bool IsValidVRegister() const; bool IsNone() const; bool Is(const CPURegister& other) const; bool Aliases(const CPURegister& other) const; @@ -120,12 +130,34 @@ struct CPURegister { bool IsSP() const; bool IsRegister() const; - bool IsFPRegister() const; + bool IsVRegister() const; + + bool IsFPRegister() const { return IsS() || IsD(); } + + bool IsW() const { return IsValidRegister() && Is32Bits(); } + bool IsX() const { return IsValidRegister() && Is64Bits(); } + + // These assertions ensure that the size and type of the register are as + // described. They do not consider the number of lanes that make up a vector. + // So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD() + // does not imply Is1D() or Is8B(). + // Check the number of lanes, ie. the format of the vector, using methods such + // as Is8B(), Is1D(), etc. in the VRegister class. 
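A concrete illustration of the distinction drawn above (not part of the patch); register choices are arbitrary:
  d0.IsD();         // true: a 64-bit V register
  d0.Is1D();        // true: scalar, a single D lane
  v0.V8B().IsD();   // true: still 64 bits wide...
  v0.V8B().Is1D();  // false: ...but formatted as eight B lanes
  v0.V8B().Is8B();  // true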
+ bool IsV() const { return IsVRegister(); } + bool IsB() const { return IsV() && Is8Bits(); } + bool IsH() const { return IsV() && Is16Bits(); } + bool IsS() const { return IsV() && Is32Bits(); } + bool IsD() const { return IsV() && Is64Bits(); } + bool IsQ() const { return IsV() && Is128Bits(); } Register X() const; Register W() const; - FPRegister D() const; - FPRegister S() const; + VRegister V() const; + VRegister B() const; + VRegister H() const; + VRegister D() const; + VRegister S() const; + VRegister Q() const; bool IsSameSizeAndType(const CPURegister& other) const; @@ -136,6 +168,7 @@ struct CPURegister { int reg_code; int reg_size; RegisterType reg_type; + int lane_count; }; @@ -190,7 +223,7 @@ struct Register : public CPURegister { constexpr bool kSimpleFPAliasing = true; constexpr bool kSimdMaskRegisters = false; -struct FPRegister : public CPURegister { +struct VRegister : public CPURegister { enum Code { #define REGISTER_CODE(R) kCode_##R, DOUBLE_REGISTERS(REGISTER_CODE) @@ -199,41 +232,123 @@ struct FPRegister : public CPURegister { kCode_no_reg = -1 }; - static FPRegister Create(int code, int size) { - return FPRegister( - CPURegister::Create(code, size, CPURegister::kFPRegister)); + static VRegister Create(int reg_code, int reg_size, int lane_count = 1) { + DCHECK(base::bits::IsPowerOfTwo(lane_count) && (lane_count <= 16)); + VRegister v(CPURegister::Create(reg_code, reg_size, CPURegister::kVRegister, + lane_count)); + DCHECK(v.IsValidVRegister()); + return v; + } + + static VRegister Create(int reg_code, VectorFormat format) { + int reg_size = RegisterSizeInBitsFromFormat(format); + int reg_count = IsVectorFormat(format) ? LaneCountFromFormat(format) : 1; + return VRegister::Create(reg_code, reg_size, reg_count); } - constexpr FPRegister() : CPURegister() {} + constexpr VRegister() : CPURegister() {} - constexpr explicit FPRegister(const CPURegister& r) : CPURegister(r) {} + constexpr explicit VRegister(const CPURegister& r) : CPURegister(r) {} bool IsValid() const { - DCHECK(IsFPRegister() || IsNone()); - return IsValidFPRegister(); + DCHECK(IsVRegister() || IsNone()); + return IsValidVRegister(); + } + + static VRegister BRegFromCode(unsigned code); + static VRegister HRegFromCode(unsigned code); + static VRegister SRegFromCode(unsigned code); + static VRegister DRegFromCode(unsigned code); + static VRegister QRegFromCode(unsigned code); + static VRegister VRegFromCode(unsigned code); + + VRegister V8B() const { + return VRegister::Create(code(), kDRegSizeInBits, 8); + } + VRegister V16B() const { + return VRegister::Create(code(), kQRegSizeInBits, 16); + } + VRegister V4H() const { + return VRegister::Create(code(), kDRegSizeInBits, 4); + } + VRegister V8H() const { + return VRegister::Create(code(), kQRegSizeInBits, 8); + } + VRegister V2S() const { + return VRegister::Create(code(), kDRegSizeInBits, 2); + } + VRegister V4S() const { + return VRegister::Create(code(), kQRegSizeInBits, 4); + } + VRegister V2D() const { + return VRegister::Create(code(), kQRegSizeInBits, 2); + } + VRegister V1D() const { + return VRegister::Create(code(), kDRegSizeInBits, 1); + } + + bool Is8B() const { return (Is64Bits() && (lane_count == 8)); } + bool Is16B() const { return (Is128Bits() && (lane_count == 16)); } + bool Is4H() const { return (Is64Bits() && (lane_count == 4)); } + bool Is8H() const { return (Is128Bits() && (lane_count == 8)); } + bool Is2S() const { return (Is64Bits() && (lane_count == 2)); } + bool Is4S() const { return (Is128Bits() && (lane_count == 4)); } + bool 
Is1D() const { return (Is64Bits() && (lane_count == 1)); } + bool Is2D() const { return (Is128Bits() && (lane_count == 2)); } + + // For consistency, we assert the number of lanes of these scalar registers, + // even though there are no vectors of equivalent total size with which they + // could alias. + bool Is1B() const { + DCHECK(!(Is8Bits() && IsVector())); + return Is8Bits(); + } + bool Is1H() const { + DCHECK(!(Is16Bits() && IsVector())); + return Is16Bits(); + } + bool Is1S() const { + DCHECK(!(Is32Bits() && IsVector())); + return Is32Bits(); + } + + bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSizeInBits; } + bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSizeInBits; } + bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSizeInBits; } + bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSizeInBits; } + + bool IsScalar() const { return lane_count == 1; } + bool IsVector() const { return lane_count > 1; } + + bool IsSameFormat(const VRegister& other) const { + return (reg_size == other.reg_size) && (lane_count == other.lane_count); } - static FPRegister SRegFromCode(unsigned code); - static FPRegister DRegFromCode(unsigned code); + int LaneCount() const { return lane_count; } + + unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count; } + + unsigned LaneSizeInBits() const { return LaneSizeInBytes() * 8; } // Start of V8 compatibility section --------------------- - static constexpr int kMaxNumRegisters = kNumberOfFPRegisters; + static constexpr int kMaxNumRegisters = kNumberOfVRegisters; STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast); - // Crankshaft can use all the FP registers except: + // Crankshaft can use all the V registers except: // - d15 which is used to keep the 0 double value // - d30 which is used in crankshaft as a double scratch register // - d31 which is used in the MacroAssembler as a double scratch register - static FPRegister from_code(int code) { + static VRegister from_code(int code) { // Always return a D register. - return FPRegister::Create(code, kDRegSizeInBits); + return VRegister::Create(code, kDRegSizeInBits); } // End of V8 compatibility section ----------------------- }; - -STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register)); -STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister)); +static_assert(sizeof(CPURegister) == sizeof(Register), + "CPURegister must be same size as Register"); +static_assert(sizeof(CPURegister) == sizeof(VRegister), + "CPURegister must be same size as VRegister"); #define DEFINE_REGISTER(register_class, name, code, size, type) \ constexpr register_class name { CPURegister(code, size, type) } @@ -241,10 +356,10 @@ STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister)); constexpr register_class alias = name // No*Reg is used to indicate an unused argument, or an error case. Note that -// these all compare equal (using the Is() method). The Register and FPRegister +// these all compare equal (using the Is() method). The Register and VRegister // variants are provided for convenience. DEFINE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister); -DEFINE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister); +DEFINE_REGISTER(VRegister, NoVReg, 0, 0, CPURegister::kNoRegister); DEFINE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister); // v8 compatibility. 
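A short usage sketch of the format accessors introduced above (illustrative only, not part of the patch):
  v0.V4S().LaneCount();             // 4
  v0.V4S().SizeInBits();            // 128
  v0.V4S().LaneSizeInBits();        // 32
  v0.V2S().IsSameFormat(v1.V2S());  // true: same register size and lane count
  v0.V2S().IsSameFormat(v1.V4S());  // false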
@@ -261,17 +376,25 @@ DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits, DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits, CPURegister::kRegister); -#define DEFINE_FPREGISTERS(N) \ - DEFINE_REGISTER(FPRegister, s##N, N, kSRegSizeInBits, \ - CPURegister::kFPRegister); \ - DEFINE_REGISTER(FPRegister, d##N, N, kDRegSizeInBits, \ - CPURegister::kFPRegister); -GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS) -#undef DEFINE_FPREGISTERS +#define DEFINE_VREGISTERS(N) \ + DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits, \ + CPURegister::kVRegister); \ + DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits, \ + CPURegister::kVRegister); \ + DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits, \ + CPURegister::kVRegister); \ + DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits, \ + CPURegister::kVRegister); \ + DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits, \ + CPURegister::kVRegister); \ + DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits, CPURegister::kVRegister); +GENERAL_REGISTER_CODE_LIST(DEFINE_VREGISTERS) +#undef DEFINE_VREGISTERS #undef DEFINE_REGISTER // Registers aliases. +ALIAS_REGISTER(VRegister, v8_, v8); // Avoid conflicts with namespace v8. ALIAS_REGISTER(Register, ip0, x16); ALIAS_REGISTER(Register, ip1, x17); ALIAS_REGISTER(Register, wip0, w16); @@ -294,13 +417,17 @@ ALIAS_REGISTER(Register, xzr, x31); ALIAS_REGISTER(Register, wzr, w31); // Keeps the 0 double value. -ALIAS_REGISTER(FPRegister, fp_zero, d15); +ALIAS_REGISTER(VRegister, fp_zero, d15); +// MacroAssembler fixed V Registers. +ALIAS_REGISTER(VRegister, fp_fixed1, d27); +ALIAS_REGISTER(VRegister, fp_fixed2, d28); +ALIAS_REGISTER(VRegister, fp_fixed3, d29); // same as Crankshaft scratch. // Crankshaft double scratch register. -ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29); -// MacroAssembler double scratch registers. -ALIAS_REGISTER(FPRegister, fp_scratch, d30); -ALIAS_REGISTER(FPRegister, fp_scratch1, d30); -ALIAS_REGISTER(FPRegister, fp_scratch2, d31); +ALIAS_REGISTER(VRegister, crankshaft_fp_scratch, d29); +// MacroAssembler scratch V registers. +ALIAS_REGISTER(VRegister, fp_scratch, d30); +ALIAS_REGISTER(VRegister, fp_scratch1, d30); +ALIAS_REGISTER(VRegister, fp_scratch2, d31); #undef ALIAS_REGISTER @@ -335,11 +462,24 @@ bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg7 = NoCPUReg, const CPURegister& reg8 = NoCPUReg); -typedef FPRegister FloatRegister; -typedef FPRegister DoubleRegister; - -// TODO(arm64) Define SIMD registers. -typedef FPRegister Simd128Register; +// AreSameFormat returns true if all of the specified VRegisters have the same +// vector format. Arguments set to NoVReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoVReg). +bool AreSameFormat(const VRegister& reg1, const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + +// AreConsecutive returns true if all of the specified VRegisters are +// consecutive in the register file. Arguments may be set to NoVReg, and if so, +// subsequent arguments must also be NoVReg. At least one argument (reg1) must +// be valid (not NoVReg). +bool AreConsecutive(const VRegister& reg1, const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + +typedef VRegister FloatRegister; +typedef VRegister DoubleRegister; +typedef VRegister Simd128Register; // ----------------------------------------------------------------------------- // Lists of registers. 
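Before the register-list machinery below, a brief illustration of the AreSameFormat()/AreConsecutive() helpers declared above, which back the DCHECKs in the ld2/ld3/ld4 and st2/st3/st4 emitters earlier in this patch (illustrative only; register choices arbitrary):
  AreSameFormat(v0.V4S(), v1.V4S(), v2.V4S());     // true: identical vector format
  AreSameFormat(v0.V4S(), v1.V2D());               // false: lane size and count differ
  AreConsecutive(v4.V16B(), v5.V16B(), v6.V16B()); // true: register codes 4, 5, 6
  AreConsecutive(v4.V16B(), v6.V16B());            // false: codes are not adjacent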
@@ -363,10 +503,10 @@ class CPURegList { CPURegList(CPURegister::RegisterType type, int size, int first_reg, int last_reg) : size_(size), type_(type) { - DCHECK(((type == CPURegister::kRegister) && - (last_reg < kNumberOfRegisters)) || - ((type == CPURegister::kFPRegister) && - (last_reg < kNumberOfFPRegisters))); + DCHECK( + ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) || + ((type == CPURegister::kVRegister) && + (last_reg < kNumberOfVRegisters))); DCHECK(last_reg >= first_reg); list_ = (1UL << (last_reg + 1)) - 1; list_ &= ~((1UL << first_reg) - 1); @@ -419,11 +559,13 @@ class CPURegList { // AAPCS64 callee-saved registers. static CPURegList GetCalleeSaved(int size = kXRegSizeInBits); - static CPURegList GetCalleeSavedFP(int size = kDRegSizeInBits); + static CPURegList GetCalleeSavedV(int size = kDRegSizeInBits); // AAPCS64 caller-saved registers. Note that this includes lr. + // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top + // 64-bits being caller-saved. static CPURegList GetCallerSaved(int size = kXRegSizeInBits); - static CPURegList GetCallerSavedFP(int size = kDRegSizeInBits); + static CPURegList GetCallerSavedV(int size = kDRegSizeInBits); // Registers saved as safepoints. static CPURegList GetSafepointSavedRegisters(); @@ -474,17 +616,16 @@ class CPURegList { bool IsValid() const { const RegList kValidRegisters = 0x8000000ffffffff; - const RegList kValidFPRegisters = 0x0000000ffffffff; + const RegList kValidVRegisters = 0x0000000ffffffff; switch (type_) { case CPURegister::kRegister: return (list_ & kValidRegisters) == list_; - case CPURegister::kFPRegister: - return (list_ & kValidFPRegisters) == list_; + case CPURegister::kVRegister: + return (list_ & kValidVRegisters) == list_; case CPURegister::kNoRegister: return list_ == 0; default: UNREACHABLE(); - return false; } } }; @@ -492,12 +633,11 @@ class CPURegList { // AAPCS64 callee-saved registers. #define kCalleeSaved CPURegList::GetCalleeSaved() -#define kCalleeSavedFP CPURegList::GetCalleeSavedFP() - +#define kCalleeSavedV CPURegList::GetCalleeSavedV() // AAPCS64 caller-saved registers. Note that this includes lr. #define kCallerSaved CPURegList::GetCallerSaved() -#define kCallerSavedFP CPURegList::GetCallerSavedFP() +#define kCallerSavedV CPURegList::GetCallerSavedV() // ----------------------------------------------------------------------------- // Immediates. @@ -518,7 +658,7 @@ class Immediate { RelocInfo::Mode rmode() const { return rmode_; } private: - void InitializeHandle(Handle value); + void InitializeHandle(Handle value); int64_t value_; RelocInfo::Mode rmode_; @@ -551,6 +691,13 @@ class Operand { Extend extend, unsigned shift_amount = 0); + static Operand EmbeddedNumber(double number); // Smi or HeapNumber. 
+ static Operand EmbeddedCode(CodeStub* stub); + + inline bool IsHeapObjectRequest() const; + inline HeapObjectRequest heap_object_request() const; + inline Immediate immediate_for_heap_object_request() const; + template inline explicit Operand(Handle handle); @@ -586,6 +733,7 @@ class Operand { inline static Operand UntagSmiAndScale(Register smi, int scale); private: + base::Optional heap_object_request_; Immediate immediate_; Register reg_; Shift shift_; @@ -652,17 +800,11 @@ class MemOperand { class ConstPool { public: - explicit ConstPool(Assembler* assm) - : assm_(assm), - first_use_(-1), - shared_entries_count(0) {} - void RecordEntry(intptr_t data, RelocInfo::Mode mode); - int EntryCount() const { - return shared_entries_count + static_cast(unique_entries_.size()); - } - bool IsEmpty() const { - return shared_entries_.empty() && unique_entries_.empty(); - } + explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {} + // Returns true when we need to write RelocInfo and false when we do not. + bool RecordEntry(intptr_t data, RelocInfo::Mode mode); + int EntryCount() const { return static_cast(entries_.size()); } + bool IsEmpty() const { return entries_.empty(); } // Distance in bytes between the current pc and the first instruction // using the pool. If there are no pending entries return kMaxInt. int DistanceToFirstUse(); @@ -686,16 +828,29 @@ class ConstPool { void EmitGuard(); void EmitEntries(); + typedef std::map SharedEntryMap; + // Adds a shared entry to entries_, using 'entry_map' to determine whether we + // already track this entry. Returns true if this is the first time we add + // this entry, false otherwise. + bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset); + Assembler* assm_; // Keep track of the first instruction requiring a constant pool entry // since the previous constant pool was emitted. int first_use_; - // values, pc offset(s) of entries which can be shared. - std::multimap shared_entries_; - // Number of distinct literal in shared entries. - int shared_entries_count; - // values, pc offset of entries which cannot be shared. - std::vector > unique_entries_; + + // Map of data to index in entries_ for shared entries. + SharedEntryMap shared_entries_; + + // Map of address of handle to index in entries_. We need to keep track of + // code targets separately from other shared entries, as they can be + // relocated. + SharedEntryMap handle_to_index_map_; + + // Values, pc offset(s) of entries. Use a vector to preserve the order of + // insertion, as the serializer expects code target RelocInfo to point to + // constant pool addresses in an ascending order. + std::vector > > entries_; }; @@ -741,7 +896,7 @@ class Assembler : public AssemblerBase { // // The descriptor (desc) can be NULL. In that case, the code is finalized as // usual, but the descriptor is not populated. - void GetCode(CodeDesc* desc); + void GetCode(Isolate* isolate, CodeDesc* desc); // Insert the smallest number of nop instructions // possible to align the pc offset to a multiple @@ -857,7 +1012,7 @@ class Assembler : public AssemblerBase { // Prevent contant pool emission until EndBlockConstPool is called. // Call to this function can be nested but must be followed by an equal - // number of call to EndBlockConstpool. + // number of calls to EndBlockConstpool. void StartBlockConstPool(); // Resume constant pool emission. 
Need to be called as many time as @@ -872,7 +1027,7 @@ class Assembler : public AssemblerBase { // Prevent veneer pool emission until EndBlockVeneerPool is called. // Call to this function can be nested but must be followed by an equal - // number of call to EndBlockConstpool. + // number of calls to EndBlockConstpool. void StartBlockVeneerPool(); // Resume constant pool emission. Need to be called as many time as @@ -925,7 +1080,6 @@ class Assembler : public AssemblerBase { // the marker and branch over the data. void RecordConstPool(int size); - // Instruction set functions ------------------------------------------------ // Branch / Jump instructions. @@ -1064,9 +1218,101 @@ class Assembler : public AssemblerBase { const Register& rn, const Operand& operand); + // Bitwise and. + void and_(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bit clear immediate. + void bic(const VRegister& vd, const int imm8, const int left_shift = 0); + + // Bit clear. + void bic(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise insert if false. + void bif(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise insert if true. + void bit(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise select. + void bsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply. + void pmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Vector move immediate. + void movi(const VRegister& vd, const uint64_t imm, Shift shift = LSL, + const int shift_amount = 0); + + // Bitwise not. + void mvn(const VRegister& vd, const VRegister& vn); + + // Vector move inverted immediate. + void mvni(const VRegister& vd, const int imm8, Shift shift = LSL, + const int shift_amount = 0); + + // Signed saturating accumulate of unsigned value. + void suqadd(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating accumulate of signed value. + void usqadd(const VRegister& vd, const VRegister& vn); + + // Absolute value. + void abs(const VRegister& vd, const VRegister& vn); + + // Signed saturating absolute value. + void sqabs(const VRegister& vd, const VRegister& vn); + + // Negate. + void neg(const VRegister& vd, const VRegister& vn); + + // Signed saturating negate. + void sqneg(const VRegister& vd, const VRegister& vn); + + // Bitwise not. + void not_(const VRegister& vd, const VRegister& vn); + + // Extract narrow. + void xtn(const VRegister& vd, const VRegister& vn); + + // Extract narrow (second part). + void xtn2(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract narrow. + void sqxtn(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract narrow (second part). + void sqxtn2(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating extract narrow. + void uqxtn(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating extract narrow (second part). + void uqxtn2(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract unsigned narrow. + void sqxtun(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract unsigned narrow (second part). + void sqxtun2(const VRegister& vd, const VRegister& vn); + + // Move register to register. + void mov(const VRegister& vd, const VRegister& vn); + + // Bitwise not or. + void orn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise exclusive or. 
+ void eor(const VRegister& vd, const VRegister& vn, const VRegister& vm); + // Bitwise or (A | B). void orr(const Register& rd, const Register& rn, const Operand& operand); + // Bitwise or. + void orr(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise or immediate. + void orr(const VRegister& vd, const int imm8, const int left_shift = 0); + // Bitwise nor (A | ~B). void orn(const Register& rd, const Register& rn, const Operand& operand); @@ -1361,6 +1607,7 @@ class Assembler : public AssemblerBase { // Load literal to register. void ldr(const CPURegister& rt, const Immediate& imm); + void ldr(const CPURegister& rt, const Operand& operand); // Load-acquire word. void ldar(const Register& rt, const Register& rn); @@ -1473,147 +1720,1080 @@ class Assembler : public AssemblerBase { mov(Register::XRegFromCode(n), Register::XRegFromCode(n)); } + // Add. + void add(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned halving add. + void uhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Subtract. + void sub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed halving add. + void shadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply by scalar element. + void mul(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Multiply-add by scalar element. + void mla(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Multiply-subtract by scalar element. + void mls(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element. + void smlal(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element (second part). + void smlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Unsigned long multiply-add by scalar element. + void umlal(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Unsigned long multiply-add by scalar element (second part). + void umlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed long multiply-sub by scalar element. + void smlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed long multiply-sub by scalar element (second part). + void smlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Unsigned long multiply-sub by scalar element. + void umlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Unsigned long multiply-sub by scalar element (second part). + void umlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed long multiply by scalar element. + void smull(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed long multiply by scalar element (second part). + void smull2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Unsigned long multiply by scalar element. + void umull(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Unsigned long multiply by scalar element (second part). + void umull2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Add narrow returning high half. 
+ void addhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add narrow returning high half (second part). + void addhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating double long multiply by element. + void sqdmull(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed saturating double long multiply by element (second part). + void sqdmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-add by element. + void sqdmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-add by element (second part). + void sqdmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-sub by element. + void sqdmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-sub by element (second part). + void sqdmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Compare bitwise to zero. + void cmeq(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed greater than or equal to zero. + void cmge(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed greater than zero. + void cmgt(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed less than or equal to zero. + void cmle(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed less than zero. + void cmlt(const VRegister& vd, const VRegister& vn, int value); + + // Unsigned rounding halving add. + void urhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare equal. + void cmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare signed greater than or equal. + void cmge(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare signed greater than. + void cmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare unsigned higher. + void cmhi(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare unsigned higher or same. + void cmhs(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare bitwise test bits nonzero. + void cmtst(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed shift left by register. + void sshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned shift left by register. + void ushl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-subtract. + void sqdmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-subtract (second part). + void sqdmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply. + void sqdmull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply (second part). + void sqdmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling multiply returning high half. + void sqdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding doubling multiply returning high half. 
+ void sqrdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling multiply element returning high half. + void sqdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Signed saturating rounding doubling multiply element returning high half. + void sqrdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // Unsigned long multiply long. + void umull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply (second part). + void umull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding add narrow returning high half. + void raddhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Subtract narrow returning high half. + void subhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Subtract narrow returning high half (second part). + void subhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding add narrow returning high half (second part). + void raddhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding subtract narrow returning high half. + void rsubhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding subtract narrow returning high half (second part). + void rsubhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating shift left by register. + void sqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating shift left by register. + void uqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed rounding shift left by register. + void srshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned rounding shift left by register. + void urshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding shift left by register. + void sqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating rounding shift left by register. + void uqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference. + void sabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate. + void uaba(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Shift left by immediate and insert. + void sli(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right by immediate and insert. + void sri(const VRegister& vd, const VRegister& vn, int shift); + + // Signed maximum. + void smax(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed pairwise maximum. + void smaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add across vector. + void addv(const VRegister& vd, const VRegister& vn); + + // Signed add long across vector. + void saddlv(const VRegister& vd, const VRegister& vn); + + // Unsigned add long across vector. + void uaddlv(const VRegister& vd, const VRegister& vn); + + // FP maximum number across vector. + void fmaxnmv(const VRegister& vd, const VRegister& vn); + + // FP maximum across vector. + void fmaxv(const VRegister& vd, const VRegister& vn); + + // FP minimum number across vector. + void fminnmv(const VRegister& vd, const VRegister& vn); + + // FP minimum across vector. 
+ void fminv(const VRegister& vd, const VRegister& vn); + + // Signed maximum across vector. + void smaxv(const VRegister& vd, const VRegister& vn); + + // Signed minimum. + void smin(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed minimum pairwise. + void sminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed minimum across vector. + void sminv(const VRegister& vd, const VRegister& vn); + + // One-element structure store from one register. + void st1(const VRegister& vt, const MemOperand& src); + + // One-element structure store from two registers. + void st1(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // One-element structure store from three registers. + void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src); + + // One-element structure store from four registers. + void st1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src); + + // One-element single structure store from one lane. + void st1(const VRegister& vt, int lane, const MemOperand& src); + + // Two-element structure store from two registers. + void st2(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Two-element single structure store from two lanes. + void st2(const VRegister& vt, const VRegister& vt2, int lane, + const MemOperand& src); + + // Three-element structure store from three registers. + void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure store from three lanes. + void st3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + int lane, const MemOperand& src); + + // Four-element structure store from four registers. + void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src); + + // Four-element single structure store from four lanes. + void st4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, int lane, const MemOperand& src); + + // Unsigned add long. + void uaddl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add long (second part). + void uaddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add wide. + void uaddw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add wide (second part). + void uaddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add long. + void saddl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add long (second part). + void saddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add wide. + void saddw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add wide (second part). + void saddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract long. + void usubl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract long (second part). + void usubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract wide. + void usubw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed subtract long. + void ssubl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed subtract long (second part). 
+ void ssubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed integer subtract wide. + void ssubw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed integer subtract wide (second part). + void ssubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract wide (second part). + void usubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned maximum. + void umax(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned pairwise maximum. + void umaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned maximum across vector. + void umaxv(const VRegister& vd, const VRegister& vn); + + // Unsigned minimum. + void umin(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned pairwise minimum. + void uminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned minimum across vector. + void uminv(const VRegister& vd, const VRegister& vn); + + // Transpose vectors (primary). + void trn1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Transpose vectors (secondary). + void trn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unzip vectors (primary). + void uzp1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unzip vectors (secondary). + void uzp2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Zip vectors (primary). + void zip1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Zip vectors (secondary). + void zip2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed shift right by immediate. + void sshr(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift right by immediate. + void ushr(const VRegister& vd, const VRegister& vn, int shift); + + // Signed rounding shift right by immediate. + void srshr(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned rounding shift right by immediate. + void urshr(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift right by immediate and accumulate. + void ssra(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift right by immediate and accumulate. + void usra(const VRegister& vd, const VRegister& vn, int shift); + + // Signed rounding shift right by immediate and accumulate. + void srsra(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned rounding shift right by immediate and accumulate. + void ursra(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right narrow by immediate. + void shrn(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right narrow by immediate (second part). + void shrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Rounding shift right narrow by immediate. + void rshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Rounding shift right narrow by immediate (second part). + void rshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift right narrow by immediate. + void uqshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift right narrow by immediate (second part). + void uqshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating rounding shift right narrow by immediate. 
+ void uqrshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating rounding shift right narrow by immediate (second part). + void uqrshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right narrow by immediate. + void sqshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right narrow by immediate (second part). + void sqshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating rounded shift right narrow by immediate. + void sqrshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating rounded shift right narrow by immediate (second part). + void sqrshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right unsigned narrow by immediate. + void sqshrun(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right unsigned narrow by immediate (second part). + void sqshrun2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed sat rounded shift right unsigned narrow by immediate. + void sqrshrun(const VRegister& vd, const VRegister& vn, int shift); + + // Signed sat rounded shift right unsigned narrow by immediate (second part). + void sqrshrun2(const VRegister& vd, const VRegister& vn, int shift); + + // FP reciprocal step. + void frecps(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP reciprocal estimate. + void frecpe(const VRegister& vd, const VRegister& vn); + + // FP reciprocal square root estimate. + void frsqrte(const VRegister& vd, const VRegister& vn); + + // FP reciprocal square root step. + void frsqrts(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate long. + void sabal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate long (second part). + void sabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate long. + void uabal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate long (second part). + void uabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference long. + void sabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference long (second part). + void sabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference long. + void uabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference long (second part). + void uabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply long. + void pmull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply long (second part). + void pmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-add. + void smlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-add (second part). + void smlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-add. + void umlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-add (second part). 
+ void umlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-sub. + void smlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-sub (second part). + void smlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-sub. + void umlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-sub (second part). + void umlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply. + void smull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply (second part). + void smull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-add. + void sqdmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-add (second part). + void sqdmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference. + void uabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate. + void saba(const VRegister& vd, const VRegister& vn, const VRegister& vm); + // FP instructions. // Move immediate to FP register. - void fmov(FPRegister fd, double imm); - void fmov(FPRegister fd, float imm); + void fmov(const VRegister& fd, double imm); + void fmov(const VRegister& fd, float imm); // Move FP register to register. - void fmov(Register rd, FPRegister fn); + void fmov(const Register& rd, const VRegister& fn); // Move register to FP register. - void fmov(FPRegister fd, Register rn); + void fmov(const VRegister& fd, const Register& rn); // Move FP register to FP register. - void fmov(FPRegister fd, FPRegister fn); + void fmov(const VRegister& fd, const VRegister& fn); + + // Move 64-bit register to top half of 128-bit FP register. + void fmov(const VRegister& vd, int index, const Register& rn); + + // Move top half of 128-bit FP register to 64-bit register. + void fmov(const Register& rd, const VRegister& vn, int index); // FP add. - void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); // FP subtract. - void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); // FP multiply. - void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); - - // FP fused multiply and add. - void fmadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); - - // FP fused multiply and subtract. - void fmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); - - // FP fused multiply, add and negate. - void fnmadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); - - // FP fused multiply, subtract and negate. - void fnmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); + void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP compare equal to zero. + void fcmeq(const VRegister& vd, const VRegister& vn, double imm); + + // FP greater than zero. + void fcmgt(const VRegister& vd, const VRegister& vn, double imm); + + // FP greater than or equal to zero. 
+ void fcmge(const VRegister& vd, const VRegister& vn, double imm); + + // FP less than or equal to zero. + void fcmle(const VRegister& vd, const VRegister& vn, double imm); + + // FP less than to zero. + void fcmlt(const VRegister& vd, const VRegister& vn, double imm); + + // FP absolute difference. + void fabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise add vector. + void faddp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise add scalar. + void faddp(const VRegister& vd, const VRegister& vn); + + // FP pairwise maximum scalar. + void fmaxp(const VRegister& vd, const VRegister& vn); + + // FP pairwise maximum number scalar. + void fmaxnmp(const VRegister& vd, const VRegister& vn); + + // FP pairwise minimum number scalar. + void fminnmp(const VRegister& vd, const VRegister& vn); + + // FP vector multiply accumulate. + void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP vector multiply subtract. + void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP vector multiply extended. + void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP absolute greater than or equal. + void facge(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP absolute greater than. + void facgt(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP multiply by element. + void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // FP fused multiply-add to accumulator by element. + void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // FP fused multiply-sub from accumulator by element. + void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // FP multiply extended by element. + void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int vm_index); + + // FP compare equal. + void fcmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP greater than. + void fcmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP greater than or equal. + void fcmge(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise maximum vector. + void fmaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise minimum vector. + void fminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise minimum scalar. + void fminp(const VRegister& vd, const VRegister& vn); + + // FP pairwise maximum number vector. + void fmaxnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise minimum number vector. + void fminnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add. + void fmadd(const VRegister& vd, const VRegister& vn, const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract. + void fmsub(const VRegister& vd, const VRegister& vn, const VRegister& vm, + const VRegister& va); + + // FP fused multiply-add and negate. + void fnmadd(const VRegister& vd, const VRegister& vn, const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract and negate. + void fnmsub(const VRegister& vd, const VRegister& vn, const VRegister& vm, + const VRegister& va); + + // FP multiply-negate scalar. 
+ void fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP reciprocal exponent scalar. + void frecpx(const VRegister& vd, const VRegister& vn); // FP divide. - void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + void fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm); // FP maximum. - void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + void fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm); // FP minimum. - void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + void fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm); // FP maximum. - void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + void fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm); // FP minimum. - void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm); + void fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm); // FP absolute. - void fabs(const FPRegister& fd, const FPRegister& fn); + void fabs(const VRegister& vd, const VRegister& vn); // FP negate. - void fneg(const FPRegister& fd, const FPRegister& fn); + void fneg(const VRegister& vd, const VRegister& vn); // FP square root. - void fsqrt(const FPRegister& fd, const FPRegister& fn); + void fsqrt(const VRegister& vd, const VRegister& vn); - // FP round to integer (nearest with ties to away). - void frinta(const FPRegister& fd, const FPRegister& fn); + // FP round to integer nearest with ties to away. + void frinta(const VRegister& vd, const VRegister& vn); - // FP round to integer (toward minus infinity). - void frintm(const FPRegister& fd, const FPRegister& fn); + // FP round to integer, implicit rounding. + void frinti(const VRegister& vd, const VRegister& vn); - // FP round to integer (nearest with ties to even). - void frintn(const FPRegister& fd, const FPRegister& fn); + // FP round to integer toward minus infinity. + void frintm(const VRegister& vd, const VRegister& vn); - // FP round to integer (towards plus infinity). - void frintp(const FPRegister& fd, const FPRegister& fn); + // FP round to integer nearest with ties to even. + void frintn(const VRegister& vd, const VRegister& vn); - // FP round to integer (towards zero.) - void frintz(const FPRegister& fd, const FPRegister& fn); + // FP round to integer towards plus infinity. + void frintp(const VRegister& vd, const VRegister& vn); + + // FP round to integer, exact, implicit rounding. + void frintx(const VRegister& vd, const VRegister& vn); + + // FP round to integer towards zero. + void frintz(const VRegister& vd, const VRegister& vn); // FP compare registers. - void fcmp(const FPRegister& fn, const FPRegister& fm); + void fcmp(const VRegister& vn, const VRegister& vm); // FP compare immediate. - void fcmp(const FPRegister& fn, double value); + void fcmp(const VRegister& vn, double value); // FP conditional compare. - void fccmp(const FPRegister& fn, - const FPRegister& fm, - StatusFlags nzcv, + void fccmp(const VRegister& vn, const VRegister& vm, StatusFlags nzcv, Condition cond); // FP conditional select. - void fcsel(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, + void fcsel(const VRegister& vd, const VRegister& vn, const VRegister& vm, Condition cond); - // Common FP Convert function - void FPConvertToInt(const Register& rd, - const FPRegister& fn, - FPIntegerConvertOp op); + // Common FP Convert functions. 
+ void NEONFPConvertToInt(const Register& rd, const VRegister& vn, Instr op); + void NEONFPConvertToInt(const VRegister& vd, const VRegister& vn, Instr op); + + // FP convert between precisions. + void fcvt(const VRegister& vd, const VRegister& vn); + + // FP convert to higher precision. + void fcvtl(const VRegister& vd, const VRegister& vn); + + // FP convert to higher precision (second part). + void fcvtl2(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision. + void fcvtn(const VRegister& vd, const VRegister& vn); + + // FP convert to lower prevision (second part). + void fcvtn2(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision, rounding to odd. + void fcvtxn(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision, rounding to odd (second part). + void fcvtxn2(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to away. + void fcvtas(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to away. + void fcvtau(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to away. + void fcvtas(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to away. + void fcvtau(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, round towards -infinity. + void fcvtms(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, round towards -infinity. + void fcvtmu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, round towards -infinity. + void fcvtms(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, round towards -infinity. + void fcvtmu(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to even. + void fcvtns(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to even. + void fcvtnu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to even. + void fcvtns(const VRegister& rd, const VRegister& vn); - // FP convert between single and double precision. - void fcvt(const FPRegister& fd, const FPRegister& fn); + // FP convert to unsigned integer, nearest with ties to even. + void fcvtnu(const VRegister& rd, const VRegister& vn); - // Convert FP to unsigned integer (nearest with ties to away). - void fcvtau(const Register& rd, const FPRegister& fn); + // FP convert to signed integer or fixed-point, round towards zero. + void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0); - // Convert FP to signed integer (nearest with ties to away). - void fcvtas(const Register& rd, const FPRegister& fn); + // FP convert to unsigned integer or fixed-point, round towards zero. + void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0); - // Convert FP to unsigned integer (round towards -infinity). - void fcvtmu(const Register& rd, const FPRegister& fn); + // FP convert to signed integer or fixed-point, round towards zero. + void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0); - // Convert FP to signed integer (round towards -infinity). - void fcvtms(const Register& rd, const FPRegister& fn); + // FP convert to unsigned integer or fixed-point, round towards zero. 
+ void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0); - // Convert FP to unsigned integer (nearest with ties to even). - void fcvtnu(const Register& rd, const FPRegister& fn); + // FP convert to signed integer, round towards +infinity. + void fcvtps(const Register& rd, const VRegister& vn); - // Convert FP to signed integer (nearest with ties to even). - void fcvtns(const Register& rd, const FPRegister& fn); + // FP convert to unsigned integer, round towards +infinity. + void fcvtpu(const Register& rd, const VRegister& vn); - // Convert FP to unsigned integer (round towards zero). - void fcvtzu(const Register& rd, const FPRegister& fn); + // FP convert to signed integer, round towards +infinity. + void fcvtps(const VRegister& vd, const VRegister& vn); - // Convert FP to signed integer (rounf towards zero). - void fcvtzs(const Register& rd, const FPRegister& fn); + // FP convert to unsigned integer, round towards +infinity. + void fcvtpu(const VRegister& vd, const VRegister& vn); // Convert signed integer or fixed point to FP. - void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0); + void scvtf(const VRegister& fd, const Register& rn, int fbits = 0); // Convert unsigned integer or fixed point to FP. - void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0); + void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0); + + // Convert signed integer or fixed-point to FP. + void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Convert unsigned integer or fixed-point to FP. + void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Extract vector from pair of vectors. + void ext(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int index); + + // Duplicate vector element to vector or scalar. + void dup(const VRegister& vd, const VRegister& vn, int vn_index); + + // Duplicate general-purpose register to vector. + void dup(const VRegister& vd, const Register& rn); + + // Insert vector element from general-purpose register. + void ins(const VRegister& vd, int vd_index, const Register& rn); + + // Move general-purpose register to a vector element. + void mov(const VRegister& vd, int vd_index, const Register& rn); + + // Unsigned move vector element to general-purpose register. + void umov(const Register& rd, const VRegister& vn, int vn_index); + + // Move vector element to general-purpose register. + void mov(const Register& rd, const VRegister& vn, int vn_index); + + // Move vector element to scalar. + void mov(const VRegister& vd, const VRegister& vn, int vn_index); + + // Insert vector element from another vector element. + void ins(const VRegister& vd, int vd_index, const VRegister& vn, + int vn_index); + + // Move vector element to another vector element. + void mov(const VRegister& vd, int vd_index, const VRegister& vn, + int vn_index); + + // Signed move vector element to general-purpose register. + void smov(const Register& rd, const VRegister& vn, int vn_index); + + // One-element structure load to one register. + void ld1(const VRegister& vt, const MemOperand& src); + + // One-element structure load to two registers. + void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // One-element structure load to three registers. + void ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src); + + // One-element structure load to four registers. 
+ void ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src); + + // One-element single structure load to one lane. + void ld1(const VRegister& vt, int lane, const MemOperand& src); + + // One-element single structure load to all lanes. + void ld1r(const VRegister& vt, const MemOperand& src); + + // Two-element structure load. + void ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Two-element single structure load to one lane. + void ld2(const VRegister& vt, const VRegister& vt2, int lane, + const MemOperand& src); + + // Two-element single structure load to all lanes. + void ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Three-element structure load. + void ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure load to one lane. + void ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + int lane, const MemOperand& src); + + // Three-element single structure load to all lanes. + void ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src); + + // Four-element structure load. + void ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src); + + // Four-element single structure load to one lane. + void ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, int lane, const MemOperand& src); + + // Four-element single structure load to all lanes. + void ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src); + + // Count leading sign bits. + void cls(const VRegister& vd, const VRegister& vn); + + // Count leading zero bits (vector). + void clz(const VRegister& vd, const VRegister& vn); + + // Population count per byte. + void cnt(const VRegister& vd, const VRegister& vn); + + // Reverse bit order. + void rbit(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 16-bit halfwords. + void rev16(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 32-bit words. + void rev32(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 64-bit doublewords. + void rev64(const VRegister& vd, const VRegister& vn); + + // Unsigned reciprocal square root estimate. + void ursqrte(const VRegister& vd, const VRegister& vn); + + // Unsigned reciprocal estimate. + void urecpe(const VRegister& vd, const VRegister& vn); + + // Signed pairwise long add and accumulate. + void sadalp(const VRegister& vd, const VRegister& vn); + + // Signed pairwise long add. + void saddlp(const VRegister& vd, const VRegister& vn); + + // Unsigned pairwise long add. + void uaddlp(const VRegister& vd, const VRegister& vn); + + // Unsigned pairwise long add and accumulate. + void uadalp(const VRegister& vd, const VRegister& vn); + + // Shift left by immediate. + void shl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift left by immediate. + void sqshl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift left unsigned by immediate. + void sqshlu(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift left by immediate. + void uqshl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift left long by immediate. 
+ void sshll(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift left long by immediate (second part). + void sshll2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed extend long. + void sxtl(const VRegister& vd, const VRegister& vn); + + // Signed extend long (second part). + void sxtl2(const VRegister& vd, const VRegister& vn); + + // Unsigned shift left long by immediate. + void ushll(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift left long by immediate (second part). + void ushll2(const VRegister& vd, const VRegister& vn, int shift); + + // Shift left long by element size. + void shll(const VRegister& vd, const VRegister& vn, int shift); + + // Shift left long by element size (second part). + void shll2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned extend long. + void uxtl(const VRegister& vd, const VRegister& vn); + + // Unsigned extend long (second part). + void uxtl2(const VRegister& vd, const VRegister& vn); + + // Signed rounding halving add. + void srhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned halving sub. + void uhsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed halving sub. + void shsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating add. + void uqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating add. + void sqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating subtract. + void uqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating subtract. + void sqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add pairwise. + void addp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add pair of elements scalar. + void addp(const VRegister& vd, const VRegister& vn); + + // Multiply-add to accumulator. + void mla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply-subtract to accumulator. + void mls(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply. + void mul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Table lookup from one register. + void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Table lookup from two registers. + void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vm); + + // Table lookup from three registers. + void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vn3, const VRegister& vm); + + // Table lookup from four registers. + void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vn3, const VRegister& vn4, const VRegister& vm); + + // Table lookup extension from one register. + void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Table lookup extension from two registers. + void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vm); + + // Table lookup extension from three registers. + void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vn3, const VRegister& vm); + + // Table lookup extension from four registers. 
+ void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vn3, const VRegister& vn4, const VRegister& vm); // Instruction functions used only for test, debug, and patching. // Emit raw instructions in the instruction stream. @@ -1663,37 +2843,43 @@ class Assembler : public AssemblerBase { // Register encoding. static Instr Rd(CPURegister rd) { - DCHECK(rd.code() != kSPRegInternalCode); + DCHECK_NE(rd.code(), kSPRegInternalCode); return rd.code() << Rd_offset; } static Instr Rn(CPURegister rn) { - DCHECK(rn.code() != kSPRegInternalCode); + DCHECK_NE(rn.code(), kSPRegInternalCode); return rn.code() << Rn_offset; } static Instr Rm(CPURegister rm) { - DCHECK(rm.code() != kSPRegInternalCode); + DCHECK_NE(rm.code(), kSPRegInternalCode); return rm.code() << Rm_offset; } + static Instr RmNot31(CPURegister rm) { + DCHECK_NE(rm.code(), kSPRegInternalCode); + DCHECK(!rm.IsZero()); + return Rm(rm); + } + static Instr Ra(CPURegister ra) { - DCHECK(ra.code() != kSPRegInternalCode); + DCHECK_NE(ra.code(), kSPRegInternalCode); return ra.code() << Ra_offset; } static Instr Rt(CPURegister rt) { - DCHECK(rt.code() != kSPRegInternalCode); + DCHECK_NE(rt.code(), kSPRegInternalCode); return rt.code() << Rt_offset; } static Instr Rt2(CPURegister rt2) { - DCHECK(rt2.code() != kSPRegInternalCode); + DCHECK_NE(rt2.code(), kSPRegInternalCode); return rt2.code() << Rt2_offset; } static Instr Rs(CPURegister rs) { - DCHECK(rs.code() != kSPRegInternalCode); + DCHECK_NE(rs.code(), kSPRegInternalCode); return rs.code() << Rs_offset; } @@ -1749,17 +2935,174 @@ class Assembler : public AssemblerBase { // MemOperand offset encoding. inline static Instr ImmLSUnsigned(int imm12); inline static Instr ImmLS(int imm9); - inline static Instr ImmLSPair(int imm7, LSDataSize size); + inline static Instr ImmLSPair(int imm7, unsigned size); inline static Instr ImmShiftLS(unsigned shift_amount); inline static Instr ImmException(int imm16); inline static Instr ImmSystemRegister(int imm15); inline static Instr ImmHint(int imm7); inline static Instr ImmBarrierDomain(int imm2); inline static Instr ImmBarrierType(int imm2); - inline static LSDataSize CalcLSDataSize(LoadStoreOp op); + inline static unsigned CalcLSDataSize(LoadStoreOp op); + + // Instruction bits for vector format in data processing operations. + static Instr VFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.LaneCount()) { + case 2: + return NEON_2S; + case 4: + return NEON_4H; + case 8: + return NEON_8B; + default: + UNREACHABLE(); + } + } else { + DCHECK(vd.Is128Bits()); + switch (vd.LaneCount()) { + case 2: + return NEON_2D; + case 4: + return NEON_4S; + case 8: + return NEON_8H; + case 16: + return NEON_16B; + default: + UNREACHABLE(); + } + } + } + + // Instruction bits for vector format in floating point data processing + // operations. + static Instr FPFormat(VRegister vd) { + if (vd.LaneCount() == 1) { + // Floating point scalar formats. + DCHECK(vd.Is32Bits() || vd.Is64Bits()); + return vd.Is64Bits() ? FP64 : FP32; + } + + // Two lane floating point vector formats. + if (vd.LaneCount() == 2) { + DCHECK(vd.Is64Bits() || vd.Is128Bits()); + return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S; + } + + // Four lane floating point vector format. + DCHECK((vd.LaneCount() == 4) && vd.Is128Bits()); + return NEON_FP_4S; + } + + // Instruction bits for vector format in load and store operations. 
+ static Instr LSVFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.LaneCount()) { + case 1: + return LS_NEON_1D; + case 2: + return LS_NEON_2S; + case 4: + return LS_NEON_4H; + case 8: + return LS_NEON_8B; + default: + UNREACHABLE(); + } + } else { + DCHECK(vd.Is128Bits()); + switch (vd.LaneCount()) { + case 2: + return LS_NEON_2D; + case 4: + return LS_NEON_4S; + case 8: + return LS_NEON_8H; + case 16: + return LS_NEON_16B; + default: + UNREACHABLE(); + } + } + } + + // Instruction bits for scalar format in data processing operations. + static Instr SFormat(VRegister vd) { + DCHECK(vd.IsScalar()); + switch (vd.SizeInBytes()) { + case 1: + return NEON_B; + case 2: + return NEON_H; + case 4: + return NEON_S; + case 8: + return NEON_D; + default: + UNREACHABLE(); + } + } + + static Instr ImmNEONHLM(int index, int num_bits) { + int h, l, m; + if (num_bits == 3) { + DCHECK(is_uint3(index)); + h = (index >> 2) & 1; + l = (index >> 1) & 1; + m = (index >> 0) & 1; + } else if (num_bits == 2) { + DCHECK(is_uint2(index)); + h = (index >> 1) & 1; + l = (index >> 0) & 1; + m = 0; + } else { + DCHECK(is_uint1(index) && (num_bits == 1)); + h = (index >> 0) & 1; + l = 0; + m = 0; + } + return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset); + } + + static Instr ImmNEONExt(int imm4) { + DCHECK(is_uint4(imm4)); + return imm4 << ImmNEONExt_offset; + } + + static Instr ImmNEON5(Instr format, int index) { + DCHECK(is_uint4(index)); + int s = LaneSizeInBytesLog2FromFormat(static_cast(format)); + int imm5 = (index << (s + 1)) | (1 << s); + return imm5 << ImmNEON5_offset; + } + + static Instr ImmNEON4(Instr format, int index) { + DCHECK(is_uint4(index)); + int s = LaneSizeInBytesLog2FromFormat(static_cast(format)); + int imm4 = index << s; + return imm4 << ImmNEON4_offset; + } + + static Instr ImmNEONabcdefgh(int imm8) { + DCHECK(is_uint8(imm8)); + Instr instr; + instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset; + instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset; + return instr; + } + + static Instr NEONCmode(int cmode) { + DCHECK(is_uint4(cmode)); + return cmode << NEONCmode_offset; + } + + static Instr NEONModImmOp(int op) { + DCHECK(is_uint1(op)); + return op << NEONModImmOp_offset; + } static bool IsImmLSUnscaled(int64_t offset); - static bool IsImmLSScaled(int64_t offset, LSDataSize size); + static bool IsImmLSScaled(int64_t offset, unsigned size); static bool IsImmLLiteral(int64_t offset); // Move immediates encoding. @@ -1767,12 +3110,12 @@ class Assembler : public AssemblerBase { inline static Instr ShiftMoveWide(int shift); // FP Immediates. - static Instr ImmFP32(float imm); - static Instr ImmFP64(double imm); + static Instr ImmFP(double imm); + static Instr ImmNEONFP(double imm); inline static Instr FPScale(unsigned scale); // FP register type. - inline static Instr FPType(FPRegister fd); + inline static Instr FPType(VRegister fd); // Class for scoping postponing the constant pool generation. class BlockConstPoolScope { @@ -1840,16 +3183,56 @@ class Assembler : public AssemblerBase { DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope); }; + // Class for blocking sharing of code targets in constant pool. + class BlockCodeTargetSharingScope { + public: + explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) { + Open(assem); + } + // This constructor does not initialize the scope. The user needs to + // explicitly call Open() before using it. 
+ BlockCodeTargetSharingScope() : assem_(nullptr) {} + ~BlockCodeTargetSharingScope() { Close(); } + void Open(Assembler* assem) { + DCHECK_NULL(assem_); + DCHECK_NOT_NULL(assem); + assem_ = assem; + assem_->StartBlockCodeTargetSharing(); + } + + private: + void Close() { + if (assem_ != nullptr) { + assem_->EndBlockCodeTargetSharing(); + } + } + Assembler* assem_; + + DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope); + }; + protected: inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const; void LoadStore(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op); - void LoadStorePair(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairOp op); - static bool IsImmLSPair(int64_t offset, LSDataSize size); + void LoadStoreStruct(const VRegister& vt, const MemOperand& addr, + NEONLoadStoreMultiStructOp op); + void LoadStoreStruct1(const VRegister& vt, int reg_count, + const MemOperand& addr); + void LoadStoreStructSingle(const VRegister& vt, uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructVerify(const VRegister& vt, const MemOperand& addr, + Instr op); + + static bool IsImmLSPair(int64_t offset, unsigned size); void Logical(const Register& rd, const Register& rn, @@ -1913,7 +3296,19 @@ class Assembler : public AssemblerBase { Label* label, Instruction* label_veneer = NULL); + // Prevent sharing of code target constant pool entries until + // EndBlockCodeTargetSharing is called. Calls to this function can be nested + // but must be followed by an equal number of call to + // EndBlockCodeTargetSharing. + void StartBlockCodeTargetSharing() { ++code_target_sharing_blocked_nesting_; } + + // Resume sharing of constant pool code target entries. Needs to be called + // as many times as StartBlockCodeTargetSharing to have an effect. + void EndBlockCodeTargetSharing() { --code_target_sharing_blocked_nesting_; } + private: + static uint32_t FPToImm8(double imm); + // Instruction helpers. 
void MoveWide(const Register& rd, uint64_t imm, @@ -1942,18 +3337,66 @@ class Assembler : public AssemblerBase { const Register& rm, const Register& ra, DataProcessing3SourceOp op); - void FPDataProcessing1Source(const FPRegister& fd, - const FPRegister& fn, + void FPDataProcessing1Source(const VRegister& fd, const VRegister& fn, FPDataProcessing1SourceOp op); - void FPDataProcessing2Source(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, + void FPDataProcessing2Source(const VRegister& fd, const VRegister& fn, + const VRegister& fm, FPDataProcessing2SourceOp op); - void FPDataProcessing3Source(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa, + void FPDataProcessing3Source(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa, FPDataProcessing3SourceOp op); + void NEONAcrossLanesL(const VRegister& vd, const VRegister& vn, + NEONAcrossLanesOp op); + void NEONAcrossLanes(const VRegister& vd, const VRegister& vn, + NEONAcrossLanesOp op); + void NEONModifiedImmShiftLsl(const VRegister& vd, const int imm8, + const int left_shift, + NEONModifiedImmediateOp op); + void NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8, + const int shift_amount, + NEONModifiedImmediateOp op); + void NEON3Same(const VRegister& vd, const VRegister& vn, const VRegister& vm, + NEON3SameOp vop); + void NEONFP3Same(const VRegister& vd, const VRegister& vn, + const VRegister& vm, Instr op); + void NEON3DifferentL(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEON3DifferentOp vop); + void NEON3DifferentW(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEON3DifferentOp vop); + void NEON3DifferentHN(const VRegister& vd, const VRegister& vn, + const VRegister& vm, NEON3DifferentOp vop); + void NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, + NEON2RegMiscOp vop, double value = 0.0); + void NEON2RegMisc(const VRegister& vd, const VRegister& vn, + NEON2RegMiscOp vop, int value = 0); + void NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, Instr op); + void NEONAddlp(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp op); + void NEONPerm(const VRegister& vd, const VRegister& vn, const VRegister& vm, + NEONPermOp op); + void NEONFPByElement(const VRegister& vd, const VRegister& vn, + const VRegister& vm, int vm_index, + NEONByIndexedElementOp op); + void NEONByElement(const VRegister& vd, const VRegister& vn, + const VRegister& vm, int vm_index, + NEONByIndexedElementOp op); + void NEONByElementL(const VRegister& vd, const VRegister& vn, + const VRegister& vm, int vm_index, + NEONByIndexedElementOp op); + void NEONShiftImmediate(const VRegister& vd, const VRegister& vn, + NEONShiftImmediateOp op, int immh_immb); + void NEONShiftLeftImmediate(const VRegister& vd, const VRegister& vn, + int shift, NEONShiftImmediateOp op); + void NEONShiftRightImmediate(const VRegister& vd, const VRegister& vn, + int shift, NEONShiftImmediateOp op); + void NEONShiftImmediateL(const VRegister& vd, const VRegister& vn, int shift, + NEONShiftImmediateOp op); + void NEONShiftImmediateN(const VRegister& vd, const VRegister& vn, int shift, + NEONShiftImmediateOp op); + void NEONXtn(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp vop); + void NEONTable(const VRegister& vd, const VRegister& vn, const VRegister& vm, + NEONTableOp op); + + Instr LoadStoreStructAddrModeField(const MemOperand& addr); // Label helpers. 
@@ -2044,6 +3487,12 @@ class Assembler : public AssemblerBase {
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
+ // Sharing of code target entries may be blocked in some code sequences.
+ int code_target_sharing_blocked_nesting_;
+ bool IsCodeTargetSharingAllowed() const {
+ return code_target_sharing_blocked_nesting_ == 0;
+ }
+
// Relocation info generation
// Each relocation is encoded as a variable size value
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -2064,22 +3513,7 @@ class Assembler : public AssemblerBase {
// The pending constant pool.
ConstPool constpool_;
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
- inline TypeFeedbackId RecordedAstId();
- inline void ClearRecordedAstId();
-
protected:
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- DCHECK(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
@@ -2089,6 +3523,22 @@ class Assembler : public AssemblerBase {
static constexpr int kGap = 128;
public:
+#ifdef DEBUG
+ // Functions used for testing.
+ int GetConstantPoolEntriesSizeForTesting() const {
+ // Do not include branch over the pool.
+ return constpool_.EntryCount() * kPointerSize;
+ }
+
+ static constexpr int GetCheckConstPoolIntervalForTesting() {
+ return kCheckConstPoolInterval;
+ }
+
+ static constexpr int GetApproxMaxDistToConstPoolForTesting() {
+ return kApproxMaxDistToConstPool;
+ }
+#endif
+
class FarBranchInfo {
public:
FarBranchInfo(int offset, Label* label)
@@ -2148,6 +3598,19 @@ class Assembler : public AssemblerBase {
// the length of the label chain.
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
+ // The following functions help with avoiding allocations of embedded heap
+ // objects during the code assembly phase. {RequestHeapObject} records the
+ // need for a future heap number allocation or code stub generation. After
+ // code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
+ // objects and place them where they are expected (determined by the pc offset
+ // associated with each request). That is, for each request, it will patch the
+ // dummy heap object handle that we emitted during code assembly with the
+ // actual heap object handle.
+ void RequestHeapObject(HeapObjectRequest request);
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ std::forward_list<HeapObjectRequest> heap_object_requests_;
+
private:
friend class EnsureSpace;
friend class ConstPool;
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index c3c3367b10937d..0628a2c923f5db 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -38,32 +38,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
- ExternalReference miss) {
- // Update the static counter each time a new code stub is generated.
- isolate()->counters()->code_stubs()->Increment(); - - CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor(); - int param_count = descriptor.GetRegisterParameterCount(); - { - // Call the runtime system in a fresh internal frame. - FrameScope scope(masm, StackFrame::INTERNAL); - DCHECK((param_count == 0) || - x0.Is(descriptor.GetRegisterParameter(param_count - 1))); - - // Push arguments - MacroAssembler::PushPopQueue queue(masm); - for (int i = 0; i < param_count; ++i) { - queue.Queue(descriptor.GetRegisterParameter(i)); - } - queue.PushQueued(); - - __ CallExternalReference(miss, param_count); - } - - __ Ret(); -} - void DoubleToIStub::Generate(MacroAssembler* masm) { Label done; @@ -147,8 +121,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { // See call site for description. static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left, Register right, Register scratch, - FPRegister double_scratch, - Label* slow, Condition cond) { + VRegister double_scratch, Label* slow, + Condition cond) { DCHECK(!AreAliased(left, right, scratch)); Label not_identical, return_equal, heap_number; Register result = x0; @@ -292,12 +266,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // See call site for description. -static void EmitSmiNonsmiComparison(MacroAssembler* masm, - Register left, - Register right, - FPRegister left_d, - FPRegister right_d, - Label* slow, +static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left, + Register right, VRegister left_d, + VRegister right_d, Label* slow, bool strict) { DCHECK(!AreAliased(left_d, right_d)); DCHECK((left.is(x0) && right.is(x1)) || @@ -476,8 +447,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { // In case 3, we have found out that we were dealing with a number-number // comparison. The double values of the numbers have been loaded, right into // rhs_d, left into lhs_d. - FPRegister rhs_d = d0; - FPRegister lhs_d = d1; + VRegister rhs_d = d0; + VRegister lhs_d = d1; EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict()); __ Bind(&both_loaded_as_doubles); @@ -613,7 +584,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { CPURegList saved_regs = kCallerSaved; - CPURegList saved_fp_regs = kCallerSavedFP; + CPURegList saved_fp_regs = kCallerSavedV; // We don't allow a GC during a store buffer overflow so there is no need to // store the registers in any particular way, but we do have to store and @@ -686,12 +657,12 @@ void MathPowStub::Generate(MacroAssembler* masm) { Register exponent_integer = MathPowIntegerDescriptor::exponent(); DCHECK(exponent_integer.is(x12)); Register saved_lr = x19; - FPRegister result_double = d0; - FPRegister base_double = d0; - FPRegister exponent_double = d1; - FPRegister base_double_copy = d2; - FPRegister scratch1_double = d6; - FPRegister scratch0_double = d7; + VRegister result_double = d0; + VRegister base_double = d0; + VRegister exponent_double = d1; + VRegister base_double_copy = d2; + VRegister scratch1_double = d6; + VRegister scratch0_double = d7; // A fast-path for integer exponents. Label exponent_is_smi, exponent_is_integer; @@ -803,14 +774,11 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { // CEntryStub. 
CEntryStub::GenerateAheadOfTime(isolate); StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); - StubFailureTrampolineStub::GenerateAheadOfTime(isolate); CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate); CreateAllocationSiteStub::GenerateAheadOfTime(isolate); CreateWeakCellStub::GenerateAheadOfTime(isolate); - BinaryOpICStub::GenerateAheadOfTime(isolate); StoreRegistersStateStub::GenerateAheadOfTime(isolate); RestoreRegistersStateStub::GenerateAheadOfTime(isolate); - BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); StoreFastElementStub::GenerateAheadOfTime(isolate); } @@ -1046,15 +1014,15 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ Bind(&exception_returned); ExternalReference pending_handler_context_address( - Isolate::kPendingHandlerContextAddress, isolate()); + IsolateAddressId::kPendingHandlerContextAddress, isolate()); ExternalReference pending_handler_code_address( - Isolate::kPendingHandlerCodeAddress, isolate()); + IsolateAddressId::kPendingHandlerCodeAddress, isolate()); ExternalReference pending_handler_offset_address( - Isolate::kPendingHandlerOffsetAddress, isolate()); + IsolateAddressId::kPendingHandlerOffsetAddress, isolate()); ExternalReference pending_handler_fp_address( - Isolate::kPendingHandlerFPAddress, isolate()); + IsolateAddressId::kPendingHandlerFPAddress, isolate()); ExternalReference pending_handler_sp_address( - Isolate::kPendingHandlerSPAddress, isolate()); + IsolateAddressId::kPendingHandlerSPAddress, isolate()); // Ask the runtime for help to determine the handler. This will set x0 to // contain the current pending exception, don't clobber it. @@ -1142,7 +1110,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used. __ Mov(x13, bad_frame_pointer); __ Mov(x12, StackFrame::TypeToMarker(marker)); - __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate())); + __ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())); __ Ldr(x10, MemOperand(x11)); __ Push(x13, x12, xzr, x10); @@ -1152,7 +1120,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // Push the JS entry frame marker. Also set js_entry_sp if this is the // outermost JS call. Label non_outermost_js, done; - ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); + ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate()); __ Mov(x10, ExternalReference(js_entry_sp)); __ Ldr(x11, MemOperand(x10)); __ Cbnz(x11, &non_outermost_js); @@ -1191,8 +1159,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // field in the JSEnv and return a failure sentinel. Coming in here the // fp will be invalid because the PushTryHandler below sets it to 0 to // signal the existence of the JSEntry frame. - __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate()))); + __ Mov(x10, Operand(ExternalReference( + IsolateAddressId::kPendingExceptionAddress, isolate()))); } __ Str(code_entry, MemOperand(x10)); __ LoadRoot(x0, Heap::kExceptionRootIndex); @@ -1252,7 +1220,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) { // Restore the top frame descriptors from the stack. __ Pop(x10); - __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate())); + __ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())); __ Str(x10, MemOperand(x11)); // Reset the stack to the callee saved registers. 
@@ -1582,8 +1550,8 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) { Register result = x0; Register rhs = x0; Register lhs = x1; - FPRegister rhs_d = d0; - FPRegister lhs_d = d1; + VRegister rhs_d = d0; + VRegister lhs_d = d1; if (left() == CompareICState::SMI) { __ JumpIfNotSmi(lhs, &miss); @@ -2009,32 +1977,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop( } -void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- x1 : left - // -- x0 : right - // -- lr : return address - // ----------------------------------- - - // Load x2 with the allocation site. We stick an undefined dummy value here - // and replace it with the real allocation site later when we instantiate this - // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). - __ LoadObject(x2, handle(isolate()->heap()->undefined_value())); - - // Make sure that we actually patched the allocation site. - if (FLAG_debug_code) { - __ AssertNotSmi(x2, kExpectedAllocationSite); - __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset)); - __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex, - kExpectedAllocationSite); - } - - // Tail call into the stub that handles binary operations with allocation - // sites. - BinaryOpWithAllocationSiteStub stub(isolate(), state()); - __ TailCallStub(&stub); -} - RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object, Register address, Register scratch) @@ -2042,7 +1984,7 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object, address_(address), scratch0_(scratch), saved_regs_(kCallerSaved), - saved_fp_regs_(kCallerSavedFP) { + saved_fp_regs_(kCallerSavedV) { DCHECK(!AreAliased(scratch, object, address)); // The SaveCallerSaveRegisters method needs to save caller-saved @@ -2131,10 +2073,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need, Mode mode) { - Label on_black; Label need_incremental; Label need_incremental_pop_scratch; +#ifndef V8_CONCURRENT_MARKING + Label on_black; // If the object is not black we don't have to inform the incremental marker. __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); @@ -2148,6 +2091,8 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( } __ Bind(&on_black); +#endif + // Get the value from the slot. Register val = regs_.scratch0(); __ Ldr(val, MemOperand(regs_.address())); @@ -2225,26 +2170,25 @@ void RecordWriteStub::Generate(MacroAssembler* masm) { } -void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { - CEntryStub ces(isolate(), 1, kSaveFPRegs); - __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); - int parameter_count_offset = - StubFailureTrampolineFrameConstants::kArgumentsLengthOffset; - __ Ldr(x1, MemOperand(fp, parameter_count_offset)); - if (function_mode() == JS_FUNCTION_STUB_MODE) { - __ Add(x1, x1, 1); - } - masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); - __ Drop(x1); - // Return to IC Miss stub, continuation still on stack. - __ Ret(); -} - // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by // a "Push lr" instruction, followed by a call. 
static const unsigned int kProfileEntryHookCallSize = Assembler::kCallSizeWithRelocation + (2 * kInstructionSize); +void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm, + Zone* zone) { + if (tasm->isolate()->function_entry_hook() != NULL) { + Assembler::BlockConstPoolScope no_const_pools(tasm); + DontEmitDebugCodeScope no_debug_code(tasm); + Label entry_hook_call_start; + tasm->Bind(&entry_hook_call_start); + tasm->Push(lr); + tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr)); + DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) == + kProfileEntryHookCallSize); + tasm->Pop(lr); + } +} void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (masm->isolate()->function_entry_hook() != NULL) { @@ -2257,7 +2201,6 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { __ CallStub(&stub); DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) == kProfileEntryHookCallSize); - __ Pop(lr); } } @@ -2397,7 +2340,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ PushCPURegList(spill_list); - __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset)); __ Mov(x1, Operand(name)); NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); __ CallStub(&stub); @@ -2543,23 +2486,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, Register allocation_site = x2; Register kind = x3; - Label normal_sequence; - if (mode == DONT_OVERRIDE) { - STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); - STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); - STATIC_ASSERT(FAST_ELEMENTS == 2); - STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); - STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); - STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); - - // Is the low bit set? If so, the array is holey. - __ Tbnz(kind, 0, &normal_sequence); - } - - // Look at the last argument. - // TODO(jbramley): What does a 0 argument represent? - __ Peek(x10, 0); - __ Cbz(x10, &normal_sequence); + STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0); + STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(PACKED_ELEMENTS == 2); + STATIC_ASSERT(HOLEY_ELEMENTS == 3); + STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4); + STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5); if (mode == DISABLE_ALLOCATION_SITES) { ElementsKind initial = GetInitialFastElementsKind(); @@ -2569,13 +2501,11 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, holey_initial, DISABLE_ALLOCATION_SITES); __ TailCallStub(&stub_holey); - - __ Bind(&normal_sequence); - ArraySingleArgumentConstructorStub stub(masm->isolate(), - initial, - DISABLE_ALLOCATION_SITES); - __ TailCallStub(&stub); } else if (mode == DONT_OVERRIDE) { + // Is the low bit set? If so, the array is holey. + Label normal_sequence; + __ Tbnz(kind, 0, &normal_sequence); + // We are going to create a holey array, but our kind is non-holey. // Fix kind and retry (only if we have an allocation site in the slot). __ Orr(kind, kind, 1); @@ -2591,11 +2521,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, // in the AllocationSite::transition_info field because elements kind is // restricted to a portion of the field; upper bits need to be left alone. 
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ Ldr(x11, FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
+ __ Ldr(x11,
+ FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
- __ Str(x11, FieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
+ __ Str(x11,
+ FieldMemOperand(allocation_site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Bind(&normal_sequence);
int last_index =
@@ -2619,13 +2551,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template <class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
- if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@@ -2639,7 +2571,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -2718,9 +2650,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
- __ Ldrsw(kind,
- UntagSmiFieldMemOperand(allocation_site,
- AllocationSite::kTransitionInfoOffset));
+ __ Ldrsw(kind, UntagSmiFieldMemOperand(
+ allocation_site,
+ AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
@@ -2809,17 +2741,17 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
- __ Cmp(x3, FAST_ELEMENTS);
- __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
+ __ Cmp(x3, PACKED_ELEMENTS);
+ __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
__ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
}
Label fast_elements_case;
- __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
- GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+ __ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
+ GenerateCase(masm, HOLEY_ELEMENTS);
__ Bind(&fast_elements_case);
- GenerateCase(masm, FAST_ELEMENTS);
+ GenerateCase(masm, PACKED_ELEMENTS);
}
// The number of register that CallApiFunctionAndReturn will need to save on
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index ddaa30e98453fe..dc2e55cf825e35 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -33,13 +33,13 @@ const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
-const int kNumberOfFPRegisters = 32;
+const int kNumberOfVRegisters = 32;
// Callee saved registers are x19-x30(lr).
const int kNumberOfCalleeSavedRegisters = 11; const int kFirstCalleeSavedRegisterIndex = 19; // Callee saved FP registers are d8-d15. -const int kNumberOfCalleeSavedFPRegisters = 8; -const int kFirstCalleeSavedFPRegisterIndex = 8; +const int kNumberOfCalleeSavedVRegisters = 8; +const int kFirstCalleeSavedVRegisterIndex = 8; // Callee saved registers with no specific purpose in JS are x19-x25. const unsigned kJSCalleeSavedRegList = 0x03f80000; const int kWRegSizeInBits = 32; @@ -58,6 +58,17 @@ const int kDRegSizeInBits = 64; const int kDRegSizeInBitsLog2 = 6; const int kDRegSize = kDRegSizeInBits >> 3; const int kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3; +const int kDRegSizeInBytesLog2 = kDRegSizeInBitsLog2 - 3; +const int kBRegSizeInBits = 8; +const int kBRegSize = kBRegSizeInBits >> 3; +const int kHRegSizeInBits = 16; +const int kHRegSize = kHRegSizeInBits >> 3; +const int kQRegSizeInBits = 128; +const int kQRegSizeInBitsLog2 = 7; +const int kQRegSize = kQRegSizeInBits >> 3; +const int kQRegSizeLog2 = kQRegSizeInBitsLog2 - 3; +const int kVRegSizeInBits = kQRegSizeInBits; +const int kVRegSize = kVRegSizeInBits >> 3; const int64_t kWRegMask = 0x00000000ffffffffL; const int64_t kXRegMask = 0xffffffffffffffffL; const int64_t kSRegMask = 0x00000000ffffffffL; @@ -110,12 +121,27 @@ const unsigned kDoubleWordSize = 64; const unsigned kDoubleWordSizeInBytes = kDoubleWordSize >> 3; const unsigned kQuadWordSize = 128; const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3; +const int kMaxLanesPerVector = 16; + +const unsigned kAddressTagOffset = 56; +const unsigned kAddressTagWidth = 8; +const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1) + << kAddressTagOffset; +static_assert(kAddressTagMask == UINT64_C(0xff00000000000000), + "AddressTagMask must represent most-significant eight bits."); + // AArch64 floating-point specifics. These match IEEE-754. const unsigned kDoubleMantissaBits = 52; const unsigned kDoubleExponentBits = 11; const unsigned kDoubleExponentBias = 1023; const unsigned kFloatMantissaBits = 23; const unsigned kFloatExponentBits = 8; +const unsigned kFloatExponentBias = 127; +const unsigned kFloat16MantissaBits = 10; +const unsigned kFloat16ExponentBits = 5; +const unsigned kFloat16ExponentBias = 15; + +typedef uint16_t float16; #define INSTRUCTION_FIELDS_LIST(V_) \ /* Register fields */ \ @@ -126,7 +152,7 @@ const unsigned kFloatExponentBits = 8; V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \ V_(Rt2, 14, 10, Bits) /* Load second dest / */ \ /* store second source. 
*/ \ - V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \ + V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \ V_(PrefetchMode, 4, 0, Bits) \ \ /* Common bits */ \ @@ -181,8 +207,22 @@ const unsigned kFloatExponentBits = 8; V_(ImmLS, 20, 12, SignedBits) \ V_(ImmLSUnsigned, 21, 10, Bits) \ V_(ImmLSPair, 21, 15, SignedBits) \ - V_(SizeLS, 31, 30, Bits) \ V_(ImmShiftLS, 12, 12, Bits) \ + V_(LSOpc, 23, 22, Bits) \ + V_(LSVector, 26, 26, Bits) \ + V_(LSSize, 31, 30, Bits) \ + \ + /* NEON generic fields */ \ + V_(NEONQ, 30, 30, Bits) \ + V_(NEONSize, 23, 22, Bits) \ + V_(NEONLSSize, 11, 10, Bits) \ + V_(NEONS, 12, 12, Bits) \ + V_(NEONL, 21, 21, Bits) \ + V_(NEONM, 20, 20, Bits) \ + V_(NEONH, 11, 11, Bits) \ + V_(ImmNEONExt, 14, 11, Bits) \ + V_(ImmNEON5, 20, 16, Bits) \ + V_(ImmNEON4, 14, 11, Bits) \ \ /* Other immediates */ \ V_(ImmUncondBranch, 25, 0, SignedBits) \ @@ -206,7 +246,21 @@ const unsigned kFloatExponentBits = 8; V_(LoadStoreXNotExclusive, 23, 23, Bits) \ V_(LoadStoreXAcquireRelease, 15, 15, Bits) \ V_(LoadStoreXSizeLog2, 31, 30, Bits) \ - V_(LoadStoreXPair, 21, 21, Bits) + V_(LoadStoreXPair, 21, 21, Bits) \ + \ + /* NEON load/store */ \ + V_(NEONLoad, 22, 22, Bits) \ + \ + /* NEON Modified Immediate fields */ \ + V_(ImmNEONabc, 18, 16, Bits) \ + V_(ImmNEONdefgh, 9, 5, Bits) \ + V_(NEONModImmOp, 29, 29, Bits) \ + V_(NEONCmode, 15, 12, Bits) \ + \ + /* NEON Shift Immediate fields */ \ + V_(ImmNEONImmhImmb, 22, 16, Bits) \ + V_(ImmNEONImmh, 22, 19, Bits) \ + V_(ImmNEONImmb, 18, 16, Bits) #define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ /* NZCV */ \ @@ -297,7 +351,6 @@ inline Condition CommuteCondition(Condition cond) { // invalid as it doesn't necessary make sense to reverse it (consider // 'mi' for instance). UNREACHABLE(); - return nv; } } @@ -338,7 +391,8 @@ enum Shift { LSL = 0x0, LSR = 0x1, ASR = 0x2, - ROR = 0x3 + ROR = 0x3, + MSL = 0x4 }; enum Extend { @@ -411,6 +465,10 @@ enum SystemRegister { // default: printf("Unknown instruction\n"); // } +// Used to corrupt encodings by setting all bits when orred. Although currently +// unallocated in AArch64, this encoding is not guaranteed to be undefined +// indefinitely. +const uint32_t kUnallocatedInstruction = 0xffffffff; // Generic fields. enum GenericInstrField { @@ -420,6 +478,47 @@ enum GenericInstrField { FP64 = 0x00400000 }; +enum NEONFormatField { + NEONFormatFieldMask = 0x40C00000, + NEON_Q = 0x40000000, + NEON_8B = 0x00000000, + NEON_16B = NEON_8B | NEON_Q, + NEON_4H = 0x00400000, + NEON_8H = NEON_4H | NEON_Q, + NEON_2S = 0x00800000, + NEON_4S = NEON_2S | NEON_Q, + NEON_1D = 0x00C00000, + NEON_2D = 0x00C00000 | NEON_Q +}; + +enum NEONFPFormatField { + NEONFPFormatFieldMask = 0x40400000, + NEON_FP_2S = FP32, + NEON_FP_4S = FP32 | NEON_Q, + NEON_FP_2D = FP64 | NEON_Q +}; + +enum NEONLSFormatField { + NEONLSFormatFieldMask = 0x40000C00, + LS_NEON_8B = 0x00000000, + LS_NEON_16B = LS_NEON_8B | NEON_Q, + LS_NEON_4H = 0x00000400, + LS_NEON_8H = LS_NEON_4H | NEON_Q, + LS_NEON_2S = 0x00000800, + LS_NEON_4S = LS_NEON_2S | NEON_Q, + LS_NEON_1D = 0x00000C00, + LS_NEON_2D = LS_NEON_1D | NEON_Q +}; + +enum NEONScalarFormatField { + NEONScalarFormatFieldMask = 0x00C00000, + NEONScalar = 0x10000000, + NEON_B = 0x00000000, + NEON_H = 0x00400000, + NEON_S = 0x00800000, + NEON_D = 0x00C00000 +}; + // PC relative addressing. 
enum PCRelAddressingOp { PCRelAddressingFixed = 0x10000000, @@ -713,16 +812,12 @@ enum LoadStorePairAnyOp { LoadStorePairAnyFixed = 0x28000000 }; -#define LOAD_STORE_PAIR_OP_LIST(V) \ - V(STP, w, 0x00000000), \ - V(LDP, w, 0x00400000), \ - V(LDPSW, x, 0x40400000), \ - V(STP, x, 0x80000000), \ - V(LDP, x, 0x80400000), \ - V(STP, s, 0x04000000), \ - V(LDP, s, 0x04400000), \ - V(STP, d, 0x44000000), \ - V(LDP, d, 0x44400000) +#define LOAD_STORE_PAIR_OP_LIST(V) \ + V(STP, w, 0x00000000) \ + , V(LDP, w, 0x00400000), V(LDPSW, x, 0x40400000), V(STP, x, 0x80000000), \ + V(LDP, x, 0x80400000), V(STP, s, 0x04000000), V(LDP, s, 0x04400000), \ + V(STP, d, 0x44000000), V(LDP, d, 0x44400000), V(STP, q, 0x84000000), \ + V(LDP, q, 0x84400000) // Load/store pair (post, pre and offset.) enum LoadStorePairOp { @@ -777,25 +872,34 @@ enum LoadLiteralOp { LDR_d_lit = LoadLiteralFixed | 0x44000000 }; -#define LOAD_STORE_OP_LIST(V) \ - V(ST, RB, w, 0x00000000), \ - V(ST, RH, w, 0x40000000), \ - V(ST, R, w, 0x80000000), \ - V(ST, R, x, 0xC0000000), \ - V(LD, RB, w, 0x00400000), \ - V(LD, RH, w, 0x40400000), \ - V(LD, R, w, 0x80400000), \ - V(LD, R, x, 0xC0400000), \ - V(LD, RSB, x, 0x00800000), \ - V(LD, RSH, x, 0x40800000), \ - V(LD, RSW, x, 0x80800000), \ - V(LD, RSB, w, 0x00C00000), \ - V(LD, RSH, w, 0x40C00000), \ - V(ST, R, s, 0x84000000), \ - V(ST, R, d, 0xC4000000), \ - V(LD, R, s, 0x84400000), \ - V(LD, R, d, 0xC4400000) - +// clang-format off + +#define LOAD_STORE_OP_LIST(V) \ + V(ST, RB, w, 0x00000000), \ + V(ST, RH, w, 0x40000000), \ + V(ST, R, w, 0x80000000), \ + V(ST, R, x, 0xC0000000), \ + V(LD, RB, w, 0x00400000), \ + V(LD, RH, w, 0x40400000), \ + V(LD, R, w, 0x80400000), \ + V(LD, R, x, 0xC0400000), \ + V(LD, RSB, x, 0x00800000), \ + V(LD, RSH, x, 0x40800000), \ + V(LD, RSW, x, 0x80800000), \ + V(LD, RSB, w, 0x00C00000), \ + V(LD, RSH, w, 0x40C00000), \ + V(ST, R, b, 0x04000000), \ + V(ST, R, h, 0x44000000), \ + V(ST, R, s, 0x84000000), \ + V(ST, R, d, 0xC4000000), \ + V(ST, R, q, 0x04800000), \ + V(LD, R, b, 0x04400000), \ + V(LD, R, h, 0x44400000), \ + V(LD, R, s, 0x84400000), \ + V(LD, R, d, 0xC4400000), \ + V(LD, R, q, 0x04C00000) + +// clang-format on // Load/store unscaled offset. enum LoadStoreUnscaledOffsetOp { @@ -810,11 +914,10 @@ enum LoadStoreUnscaledOffsetOp { // Load/store (post, pre, offset and unsigned.) 
enum LoadStoreOp { - LoadStoreOpMask = 0xC4C00000, - #define LOAD_STORE(A, B, C, D) \ - A##B##_##C = D + LoadStoreMask = 0xC4C00000, +#define LOAD_STORE(A, B, C, D) A##B##_##C = D LOAD_STORE_OP_LIST(LOAD_STORE), - #undef LOAD_STORE +#undef LOAD_STORE PRFM = 0xC0800000 }; @@ -1063,42 +1166,46 @@ enum FPImmediateOp { enum FPDataProcessing1SourceOp { FPDataProcessing1SourceFixed = 0x1E204000, FPDataProcessing1SourceFMask = 0x5F207C00, - FPDataProcessing1SourceMask = 0xFFFFFC00, - FMOV_s = FPDataProcessing1SourceFixed | 0x00000000, - FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000, - FMOV = FMOV_s, - FABS_s = FPDataProcessing1SourceFixed | 0x00008000, - FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000, - FABS = FABS_s, - FNEG_s = FPDataProcessing1SourceFixed | 0x00010000, - FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000, - FNEG = FNEG_s, - FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000, - FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000, - FSQRT = FSQRT_s, - FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, - FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, + FPDataProcessing1SourceMask = 0xFFFFFC00, + FMOV_s = FPDataProcessing1SourceFixed | 0x00000000, + FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000, + FMOV = FMOV_s, + FABS_s = FPDataProcessing1SourceFixed | 0x00008000, + FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000, + FABS = FABS_s, + FNEG_s = FPDataProcessing1SourceFixed | 0x00010000, + FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000, + FNEG = FNEG_s, + FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000, + FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000, + FSQRT = FSQRT_s, + FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, + FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, + FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000, + FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000, + FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000, + FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000, FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000, FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000, - FRINTN = FRINTN_s, + FRINTN = FRINTN_s, FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000, FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000, - FRINTP = FRINTP_s, + FRINTP = FRINTP_s, FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000, FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000, - FRINTM = FRINTM_s, + FRINTM = FRINTM_s, FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000, FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000, - FRINTZ = FRINTZ_s, + FRINTZ = FRINTZ_s, FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000, FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000, - FRINTA = FRINTA_s, + FRINTA = FRINTA_s, FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000, FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000, - FRINTX = FRINTX_s, + FRINTX = FRINTX_s, FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000, FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000, - FRINTI = FRINTI_s + FRINTI = FRINTI_s }; // Floating point data processing 2 source. 
@@ -1154,71 +1261,73 @@ enum FPDataProcessing3SourceOp { enum FPIntegerConvertOp { FPIntegerConvertFixed = 0x1E200000, FPIntegerConvertFMask = 0x5F20FC00, - FPIntegerConvertMask = 0xFFFFFC00, - FCVTNS = FPIntegerConvertFixed | 0x00000000, + FPIntegerConvertMask = 0xFFFFFC00, + FCVTNS = FPIntegerConvertFixed | 0x00000000, FCVTNS_ws = FCVTNS, FCVTNS_xs = FCVTNS | SixtyFourBits, FCVTNS_wd = FCVTNS | FP64, FCVTNS_xd = FCVTNS | SixtyFourBits | FP64, - FCVTNU = FPIntegerConvertFixed | 0x00010000, + FCVTNU = FPIntegerConvertFixed | 0x00010000, FCVTNU_ws = FCVTNU, FCVTNU_xs = FCVTNU | SixtyFourBits, FCVTNU_wd = FCVTNU | FP64, FCVTNU_xd = FCVTNU | SixtyFourBits | FP64, - FCVTPS = FPIntegerConvertFixed | 0x00080000, + FCVTPS = FPIntegerConvertFixed | 0x00080000, FCVTPS_ws = FCVTPS, FCVTPS_xs = FCVTPS | SixtyFourBits, FCVTPS_wd = FCVTPS | FP64, FCVTPS_xd = FCVTPS | SixtyFourBits | FP64, - FCVTPU = FPIntegerConvertFixed | 0x00090000, + FCVTPU = FPIntegerConvertFixed | 0x00090000, FCVTPU_ws = FCVTPU, FCVTPU_xs = FCVTPU | SixtyFourBits, FCVTPU_wd = FCVTPU | FP64, FCVTPU_xd = FCVTPU | SixtyFourBits | FP64, - FCVTMS = FPIntegerConvertFixed | 0x00100000, + FCVTMS = FPIntegerConvertFixed | 0x00100000, FCVTMS_ws = FCVTMS, FCVTMS_xs = FCVTMS | SixtyFourBits, FCVTMS_wd = FCVTMS | FP64, FCVTMS_xd = FCVTMS | SixtyFourBits | FP64, - FCVTMU = FPIntegerConvertFixed | 0x00110000, + FCVTMU = FPIntegerConvertFixed | 0x00110000, FCVTMU_ws = FCVTMU, FCVTMU_xs = FCVTMU | SixtyFourBits, FCVTMU_wd = FCVTMU | FP64, FCVTMU_xd = FCVTMU | SixtyFourBits | FP64, - FCVTZS = FPIntegerConvertFixed | 0x00180000, + FCVTZS = FPIntegerConvertFixed | 0x00180000, FCVTZS_ws = FCVTZS, FCVTZS_xs = FCVTZS | SixtyFourBits, FCVTZS_wd = FCVTZS | FP64, FCVTZS_xd = FCVTZS | SixtyFourBits | FP64, - FCVTZU = FPIntegerConvertFixed | 0x00190000, + FCVTZU = FPIntegerConvertFixed | 0x00190000, FCVTZU_ws = FCVTZU, FCVTZU_xs = FCVTZU | SixtyFourBits, FCVTZU_wd = FCVTZU | FP64, FCVTZU_xd = FCVTZU | SixtyFourBits | FP64, - SCVTF = FPIntegerConvertFixed | 0x00020000, - SCVTF_sw = SCVTF, - SCVTF_sx = SCVTF | SixtyFourBits, - SCVTF_dw = SCVTF | FP64, - SCVTF_dx = SCVTF | SixtyFourBits | FP64, - UCVTF = FPIntegerConvertFixed | 0x00030000, - UCVTF_sw = UCVTF, - UCVTF_sx = UCVTF | SixtyFourBits, - UCVTF_dw = UCVTF | FP64, - UCVTF_dx = UCVTF | SixtyFourBits | FP64, - FCVTAS = FPIntegerConvertFixed | 0x00040000, + SCVTF = FPIntegerConvertFixed | 0x00020000, + SCVTF_sw = SCVTF, + SCVTF_sx = SCVTF | SixtyFourBits, + SCVTF_dw = SCVTF | FP64, + SCVTF_dx = SCVTF | SixtyFourBits | FP64, + UCVTF = FPIntegerConvertFixed | 0x00030000, + UCVTF_sw = UCVTF, + UCVTF_sx = UCVTF | SixtyFourBits, + UCVTF_dw = UCVTF | FP64, + UCVTF_dx = UCVTF | SixtyFourBits | FP64, + FCVTAS = FPIntegerConvertFixed | 0x00040000, FCVTAS_ws = FCVTAS, FCVTAS_xs = FCVTAS | SixtyFourBits, FCVTAS_wd = FCVTAS | FP64, FCVTAS_xd = FCVTAS | SixtyFourBits | FP64, - FCVTAU = FPIntegerConvertFixed | 0x00050000, + FCVTAU = FPIntegerConvertFixed | 0x00050000, FCVTAU_ws = FCVTAU, FCVTAU_xs = FCVTAU | SixtyFourBits, FCVTAU_wd = FCVTAU | FP64, FCVTAU_xd = FCVTAU | SixtyFourBits | FP64, - FMOV_ws = FPIntegerConvertFixed | 0x00060000, - FMOV_sw = FPIntegerConvertFixed | 0x00070000, - FMOV_xd = FMOV_ws | SixtyFourBits | FP64, - FMOV_dx = FMOV_sw | SixtyFourBits | FP64 + FMOV_ws = FPIntegerConvertFixed | 0x00060000, + FMOV_sw = FPIntegerConvertFixed | 0x00070000, + FMOV_xd = FMOV_ws | SixtyFourBits | FP64, + FMOV_dx = FMOV_sw | SixtyFourBits | FP64, + FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000, + 
FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000 }; // Conversion between fixed point and floating point. @@ -1248,6 +1357,757 @@ enum FPFixedPointConvertOp { UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64 }; +// NEON instructions with two register operands. +enum NEON2RegMiscOp { + NEON2RegMiscFixed = 0x0E200800, + NEON2RegMiscFMask = 0x9F3E0C00, + NEON2RegMiscMask = 0xBF3FFC00, + NEON2RegMiscUBit = 0x20000000, + NEON_REV64 = NEON2RegMiscFixed | 0x00000000, + NEON_REV32 = NEON2RegMiscFixed | 0x20000000, + NEON_REV16 = NEON2RegMiscFixed | 0x00001000, + NEON_SADDLP = NEON2RegMiscFixed | 0x00002000, + NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit, + NEON_SUQADD = NEON2RegMiscFixed | 0x00003000, + NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit, + NEON_CLS = NEON2RegMiscFixed | 0x00004000, + NEON_CLZ = NEON2RegMiscFixed | 0x20004000, + NEON_CNT = NEON2RegMiscFixed | 0x00005000, + NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000, + NEON_SADALP = NEON2RegMiscFixed | 0x00006000, + NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit, + NEON_SQABS = NEON2RegMiscFixed | 0x00007000, + NEON_SQNEG = NEON2RegMiscFixed | 0x20007000, + NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000, + NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000, + NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000, + NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000, + NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000, + NEON_ABS = NEON2RegMiscFixed | 0x0000B000, + NEON_NEG = NEON2RegMiscFixed | 0x2000B000, + NEON_XTN = NEON2RegMiscFixed | 0x00012000, + NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000, + NEON_SHLL = NEON2RegMiscFixed | 0x20013000, + NEON_SQXTN = NEON2RegMiscFixed | 0x00014000, + NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit, + + NEON2RegMiscOpcode = 0x0001F000, + NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode, + NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode, + NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode, + NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode, + + // These instructions use only one bit of the size field. The other bit is + // used to distinguish between instructions. 
+ NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000, + NEON_FABS = NEON2RegMiscFixed | 0x0080F000, + NEON_FNEG = NEON2RegMiscFixed | 0x2080F000, + NEON_FCVTN = NEON2RegMiscFixed | 0x00016000, + NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000, + NEON_FCVTL = NEON2RegMiscFixed | 0x00017000, + NEON_FRINTN = NEON2RegMiscFixed | 0x00018000, + NEON_FRINTA = NEON2RegMiscFixed | 0x20018000, + NEON_FRINTP = NEON2RegMiscFixed | 0x00818000, + NEON_FRINTM = NEON2RegMiscFixed | 0x00019000, + NEON_FRINTX = NEON2RegMiscFixed | 0x20019000, + NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000, + NEON_FRINTI = NEON2RegMiscFixed | 0x20819000, + NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000, + NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit, + NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000, + NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit, + NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000, + NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit, + NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000, + NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit, + NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000, + NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit, + NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000, + NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000, + NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit, + NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000, + NEON_URECPE = NEON2RegMiscFixed | 0x0081C000, + NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000, + NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000, + NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000, + NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000, + NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000, + NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000, + NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000, + + NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode, + NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode +}; + +// NEON instructions with three same-type operands. 
+enum NEON3SameOp { + NEON3SameFixed = 0x0E200400, + NEON3SameFMask = 0x9F200400, + NEON3SameMask = 0xBF20FC00, + NEON3SameUBit = 0x20000000, + NEON_ADD = NEON3SameFixed | 0x00008000, + NEON_ADDP = NEON3SameFixed | 0x0000B800, + NEON_SHADD = NEON3SameFixed | 0x00000000, + NEON_SHSUB = NEON3SameFixed | 0x00002000, + NEON_SRHADD = NEON3SameFixed | 0x00001000, + NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800, + NEON_CMGE = NEON3SameFixed | 0x00003800, + NEON_CMGT = NEON3SameFixed | 0x00003000, + NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT, + NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE, + NEON_CMTST = NEON3SameFixed | 0x00008800, + NEON_MLA = NEON3SameFixed | 0x00009000, + NEON_MLS = NEON3SameFixed | 0x20009000, + NEON_MUL = NEON3SameFixed | 0x00009800, + NEON_PMUL = NEON3SameFixed | 0x20009800, + NEON_SRSHL = NEON3SameFixed | 0x00005000, + NEON_SQSHL = NEON3SameFixed | 0x00004800, + NEON_SQRSHL = NEON3SameFixed | 0x00005800, + NEON_SSHL = NEON3SameFixed | 0x00004000, + NEON_SMAX = NEON3SameFixed | 0x00006000, + NEON_SMAXP = NEON3SameFixed | 0x0000A000, + NEON_SMIN = NEON3SameFixed | 0x00006800, + NEON_SMINP = NEON3SameFixed | 0x0000A800, + NEON_SABD = NEON3SameFixed | 0x00007000, + NEON_SABA = NEON3SameFixed | 0x00007800, + NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD, + NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA, + NEON_SQADD = NEON3SameFixed | 0x00000800, + NEON_SQSUB = NEON3SameFixed | 0x00002800, + NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000, + NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD, + NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB, + NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD, + NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX, + NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP, + NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN, + NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP, + NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL, + NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD, + NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL, + NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL, + NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB, + NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL, + NEON_SQDMULH = NEON3SameFixed | 0x0000B000, + NEON_SQRDMULH = NEON3SameFixed | 0x2000B000, + + // NEON floating point instructions with three same-type operands. 
+ NEON3SameFPFixed = NEON3SameFixed | 0x0000C000, + NEON3SameFPFMask = NEON3SameFMask | 0x0000C000, + NEON3SameFPMask = NEON3SameMask | 0x00800000, + NEON_FADD = NEON3SameFixed | 0x0000D000, + NEON_FSUB = NEON3SameFixed | 0x0080D000, + NEON_FMUL = NEON3SameFixed | 0x2000D800, + NEON_FDIV = NEON3SameFixed | 0x2000F800, + NEON_FMAX = NEON3SameFixed | 0x0000F000, + NEON_FMAXNM = NEON3SameFixed | 0x0000C000, + NEON_FMAXP = NEON3SameFixed | 0x2000F000, + NEON_FMAXNMP = NEON3SameFixed | 0x2000C000, + NEON_FMIN = NEON3SameFixed | 0x0080F000, + NEON_FMINNM = NEON3SameFixed | 0x0080C000, + NEON_FMINP = NEON3SameFixed | 0x2080F000, + NEON_FMINNMP = NEON3SameFixed | 0x2080C000, + NEON_FMLA = NEON3SameFixed | 0x0000C800, + NEON_FMLS = NEON3SameFixed | 0x0080C800, + NEON_FMULX = NEON3SameFixed | 0x0000D800, + NEON_FRECPS = NEON3SameFixed | 0x0000F800, + NEON_FRSQRTS = NEON3SameFixed | 0x0080F800, + NEON_FABD = NEON3SameFixed | 0x2080D000, + NEON_FADDP = NEON3SameFixed | 0x2000D000, + NEON_FCMEQ = NEON3SameFixed | 0x0000E000, + NEON_FCMGE = NEON3SameFixed | 0x2000E000, + NEON_FCMGT = NEON3SameFixed | 0x2080E000, + NEON_FACGE = NEON3SameFixed | 0x2000E800, + NEON_FACGT = NEON3SameFixed | 0x2080E800, + + // NEON logical instructions with three same-type operands. + NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800, + NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800, + NEON3SameLogicalMask = 0xBFE0FC00, + NEON3SameLogicalFormatMask = NEON_Q, + NEON_AND = NEON3SameLogicalFixed | 0x00000000, + NEON_ORR = NEON3SameLogicalFixed | 0x00A00000, + NEON_ORN = NEON3SameLogicalFixed | 0x00C00000, + NEON_EOR = NEON3SameLogicalFixed | 0x20000000, + NEON_BIC = NEON3SameLogicalFixed | 0x00400000, + NEON_BIF = NEON3SameLogicalFixed | 0x20C00000, + NEON_BIT = NEON3SameLogicalFixed | 0x20800000, + NEON_BSL = NEON3SameLogicalFixed | 0x20400000 +}; + +// NEON instructions with three different-type operands. 
+enum NEON3DifferentOp { + NEON3DifferentFixed = 0x0E200000, + NEON3DifferentFMask = 0x9F200C00, + NEON3DifferentMask = 0xFF20FC00, + NEON_ADDHN = NEON3DifferentFixed | 0x00004000, + NEON_ADDHN2 = NEON_ADDHN | NEON_Q, + NEON_PMULL = NEON3DifferentFixed | 0x0000E000, + NEON_PMULL2 = NEON_PMULL | NEON_Q, + NEON_RADDHN = NEON3DifferentFixed | 0x20004000, + NEON_RADDHN2 = NEON_RADDHN | NEON_Q, + NEON_RSUBHN = NEON3DifferentFixed | 0x20006000, + NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q, + NEON_SABAL = NEON3DifferentFixed | 0x00005000, + NEON_SABAL2 = NEON_SABAL | NEON_Q, + NEON_SABDL = NEON3DifferentFixed | 0x00007000, + NEON_SABDL2 = NEON_SABDL | NEON_Q, + NEON_SADDL = NEON3DifferentFixed | 0x00000000, + NEON_SADDL2 = NEON_SADDL | NEON_Q, + NEON_SADDW = NEON3DifferentFixed | 0x00001000, + NEON_SADDW2 = NEON_SADDW | NEON_Q, + NEON_SMLAL = NEON3DifferentFixed | 0x00008000, + NEON_SMLAL2 = NEON_SMLAL | NEON_Q, + NEON_SMLSL = NEON3DifferentFixed | 0x0000A000, + NEON_SMLSL2 = NEON_SMLSL | NEON_Q, + NEON_SMULL = NEON3DifferentFixed | 0x0000C000, + NEON_SMULL2 = NEON_SMULL | NEON_Q, + NEON_SSUBL = NEON3DifferentFixed | 0x00002000, + NEON_SSUBL2 = NEON_SSUBL | NEON_Q, + NEON_SSUBW = NEON3DifferentFixed | 0x00003000, + NEON_SSUBW2 = NEON_SSUBW | NEON_Q, + NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000, + NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q, + NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000, + NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q, + NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000, + NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q, + NEON_SUBHN = NEON3DifferentFixed | 0x00006000, + NEON_SUBHN2 = NEON_SUBHN | NEON_Q, + NEON_UABAL = NEON_SABAL | NEON3SameUBit, + NEON_UABAL2 = NEON_UABAL | NEON_Q, + NEON_UABDL = NEON_SABDL | NEON3SameUBit, + NEON_UABDL2 = NEON_UABDL | NEON_Q, + NEON_UADDL = NEON_SADDL | NEON3SameUBit, + NEON_UADDL2 = NEON_UADDL | NEON_Q, + NEON_UADDW = NEON_SADDW | NEON3SameUBit, + NEON_UADDW2 = NEON_UADDW | NEON_Q, + NEON_UMLAL = NEON_SMLAL | NEON3SameUBit, + NEON_UMLAL2 = NEON_UMLAL | NEON_Q, + NEON_UMLSL = NEON_SMLSL | NEON3SameUBit, + NEON_UMLSL2 = NEON_UMLSL | NEON_Q, + NEON_UMULL = NEON_SMULL | NEON3SameUBit, + NEON_UMULL2 = NEON_UMULL | NEON_Q, + NEON_USUBL = NEON_SSUBL | NEON3SameUBit, + NEON_USUBL2 = NEON_USUBL | NEON_Q, + NEON_USUBW = NEON_SSUBW | NEON3SameUBit, + NEON_USUBW2 = NEON_USUBW | NEON_Q +}; + +// NEON instructions operating across vectors. +enum NEONAcrossLanesOp { + NEONAcrossLanesFixed = 0x0E300800, + NEONAcrossLanesFMask = 0x9F3E0C00, + NEONAcrossLanesMask = 0xBF3FFC00, + NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000, + NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000, + NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000, + NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000, + NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000, + NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000, + NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000, + + // NEON floating point across instructions. + NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x0000C000, + NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x0000C000, + NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x00800000, + + NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000, + NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000, + NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000, + NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000 +}; + +// NEON instructions with indexed element operand. 
+enum NEONByIndexedElementOp { + NEONByIndexedElementFixed = 0x0F000000, + NEONByIndexedElementFMask = 0x9F000400, + NEONByIndexedElementMask = 0xBF00F400, + NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000, + NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000, + NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000, + NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000, + NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000, + NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000, + NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000, + NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000, + NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000, + NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000, + NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000, + NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000, + NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000, + NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000, + + // Floating point instructions. + NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000, + NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000, + NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000, + NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000, + NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000, + NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000 +}; + +// NEON modified immediate. +enum NEONModifiedImmediateOp { + NEONModifiedImmediateFixed = 0x0F000400, + NEONModifiedImmediateFMask = 0x9FF80400, + NEONModifiedImmediateOpBit = 0x20000000, + NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000, + NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000, + NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000, + NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000 +}; + +// NEON extract. +enum NEONExtractOp { + NEONExtractFixed = 0x2E000000, + NEONExtractFMask = 0xBF208400, + NEONExtractMask = 0xBFE08400, + NEON_EXT = NEONExtractFixed | 0x00000000 +}; + +enum NEONLoadStoreMultiOp { + NEONLoadStoreMultiL = 0x00400000, + NEONLoadStoreMulti1_1v = 0x00007000, + NEONLoadStoreMulti1_2v = 0x0000A000, + NEONLoadStoreMulti1_3v = 0x00006000, + NEONLoadStoreMulti1_4v = 0x00002000, + NEONLoadStoreMulti2 = 0x00008000, + NEONLoadStoreMulti3 = 0x00004000, + NEONLoadStoreMulti4 = 0x00000000 +}; + +// NEON load/store multiple structures. 
+enum NEONLoadStoreMultiStructOp { + NEONLoadStoreMultiStructFixed = 0x0C000000, + NEONLoadStoreMultiStructFMask = 0xBFBF0000, + NEONLoadStoreMultiStructMask = 0xBFFFF000, + NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed, + NEONLoadStoreMultiStructLoad = + NEONLoadStoreMultiStructFixed | NEONLoadStoreMultiL, + NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v, + NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v, + NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v, + NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v, + NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2, + NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3, + NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4, + NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v, + NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v, + NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v, + NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v, + NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2, + NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3, + NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4 +}; + +// NEON load/store multiple structures with post-index addressing. +enum NEONLoadStoreMultiStructPostIndexOp { + NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000, + NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000, + NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000, + NEONLoadStoreMultiStructPostIndex = 0x00800000, + NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex, + NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex, + NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex, + NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex, + NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex +}; + +enum NEONLoadStoreSingleOp { + NEONLoadStoreSingle1 = 0x00000000, + NEONLoadStoreSingle2 = 0x00200000, + NEONLoadStoreSingle3 = 0x00002000, + NEONLoadStoreSingle4 = 0x00202000, + NEONLoadStoreSingleL = 0x00400000, + NEONLoadStoreSingle_b = 0x00000000, + NEONLoadStoreSingle_h = 0x00004000, + NEONLoadStoreSingle_s = 0x00008000, + NEONLoadStoreSingle_d = 0x00008400, + NEONLoadStoreSingleAllLanes = 0x0000C000, + NEONLoadStoreSingleLenMask = 0x00202000 +}; + +// NEON load/store single structure. 
+enum NEONLoadStoreSingleStructOp { + NEONLoadStoreSingleStructFixed = 0x0D000000, + NEONLoadStoreSingleStructFMask = 0xBF9F0000, + NEONLoadStoreSingleStructMask = 0xBFFFE000, + NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructLoad = + NEONLoadStoreSingleStructFixed | NEONLoadStoreSingleL, + NEONLoadStoreSingleStructLoad1 = + NEONLoadStoreSingle1 | NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad2 = + NEONLoadStoreSingle2 | NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad3 = + NEONLoadStoreSingle3 | NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad4 = + NEONLoadStoreSingle4 | NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructStore1 = + NEONLoadStoreSingle1 | NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore2 = + NEONLoadStoreSingle2 | NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore3 = + NEONLoadStoreSingle3 | NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore4 = + NEONLoadStoreSingle4 | NEONLoadStoreSingleStructFixed, + NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b, + NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h, + NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s, + NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d, + NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes, + NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b, + NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h, + NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s, + NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d, + + NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b, + NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h, + NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s, + NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d, + NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes, + NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b, + NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h, + NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s, + NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d, + + NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b, + NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h, + NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s, + NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d, + NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes, + NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b, + NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h, + NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s, + NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d, + + NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b, + NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h, + NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s, + NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d, + NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes, + NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b, + NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h, + NEON_ST4_s = 
NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s, + NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d +}; + +// NEON load/store single structure with post-index addressing. +enum NEONLoadStoreSingleStructPostIndexOp { + NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000, + NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000, + NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000, + NEONLoadStoreSingleStructPostIndex = 0x00800000, + NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex +}; + +// NEON register copy. 
+enum NEONCopyOp { + NEONCopyFixed = 0x0E000400, + NEONCopyFMask = 0x9FE08400, + NEONCopyMask = 0x3FE08400, + NEONCopyInsElementMask = NEONCopyMask | 0x40000000, + NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800, + NEONCopyDupElementMask = NEONCopyMask | 0x20007800, + NEONCopyDupGeneralMask = NEONCopyDupElementMask, + NEONCopyUmovMask = NEONCopyMask | 0x20007800, + NEONCopySmovMask = NEONCopyMask | 0x20007800, + NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000, + NEON_INS_GENERAL = NEONCopyFixed | 0x40001800, + NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000, + NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800, + NEON_SMOV = NEONCopyFixed | 0x00002800, + NEON_UMOV = NEONCopyFixed | 0x00003800 +}; + +// NEON scalar instructions with indexed element operand. +enum NEONScalarByIndexedElementOp { + NEONScalarByIndexedElementFixed = 0x5F000000, + NEONScalarByIndexedElementFMask = 0xDF000400, + NEONScalarByIndexedElementMask = 0xFF00F400, + NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement, + NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement, + NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement, + NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement, + NEON_SQRDMULH_byelement_scalar = + NEON_Q | NEONScalar | NEON_SQRDMULH_byelement, + + // Floating point instructions. + NEONScalarByIndexedElementFPFixed = + NEONScalarByIndexedElementFixed | 0x00800000, + NEONScalarByIndexedElementFPMask = + NEONScalarByIndexedElementMask | 0x00800000, + NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement, + NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement, + NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement, + NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement +}; + +// NEON shift immediate. 
+enum NEONShiftImmediateOp { + NEONShiftImmediateFixed = 0x0F000400, + NEONShiftImmediateFMask = 0x9F800400, + NEONShiftImmediateMask = 0xBF80FC00, + NEONShiftImmediateUBit = 0x20000000, + NEON_SHL = NEONShiftImmediateFixed | 0x00005000, + NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000, + NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000, + NEON_SLI = NEONShiftImmediateFixed | 0x20005000, + NEON_SRI = NEONShiftImmediateFixed | 0x20004000, + NEON_SHRN = NEONShiftImmediateFixed | 0x00008000, + NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800, + NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000, + NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800, + NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000, + NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800, + NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000, + NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800, + NEON_SSHR = NEONShiftImmediateFixed | 0x00000000, + NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000, + NEON_USHR = NEONShiftImmediateFixed | 0x20000000, + NEON_URSHR = NEONShiftImmediateFixed | 0x20002000, + NEON_SSRA = NEONShiftImmediateFixed | 0x00001000, + NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000, + NEON_USRA = NEONShiftImmediateFixed | 0x20001000, + NEON_URSRA = NEONShiftImmediateFixed | 0x20003000, + NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000, + NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000, + NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000, + NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800, + NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800, + NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000, + NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000 +}; + +// NEON scalar register copy. +enum NEONScalarCopyOp { + NEONScalarCopyFixed = 0x5E000400, + NEONScalarCopyFMask = 0xDFE08400, + NEONScalarCopyMask = 0xFFE0FC00, + NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT +}; + +// NEON scalar pairwise instructions. +enum NEONScalarPairwiseOp { + NEONScalarPairwiseFixed = 0x5E300800, + NEONScalarPairwiseFMask = 0xDF3E0C00, + NEONScalarPairwiseMask = 0xFFB1F800, + NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000, + NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000, + NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000, + NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000, + NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000, + NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000 +}; + +// NEON scalar shift immediate. 
+enum NEONScalarShiftImmediateOp { + NEONScalarShiftImmediateFixed = 0x5F000400, + NEONScalarShiftImmediateFMask = 0xDF800400, + NEONScalarShiftImmediateMask = 0xFF80FC00, + NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL, + NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI, + NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI, + NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR, + NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR, + NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR, + NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR, + NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA, + NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA, + NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA, + NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA, + NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN, + NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN, + NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN, + NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN, + NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN, + NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN, + NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU, + NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm, + NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm, + NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm, + NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm, + NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm, + NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm +}; + +// NEON table. +enum NEONTableOp { + NEONTableFixed = 0x0E000000, + NEONTableFMask = 0xBF208C00, + NEONTableExt = 0x00001000, + NEONTableMask = 0xBF20FC00, + NEON_TBL_1v = NEONTableFixed | 0x00000000, + NEON_TBL_2v = NEONTableFixed | 0x00002000, + NEON_TBL_3v = NEONTableFixed | 0x00004000, + NEON_TBL_4v = NEONTableFixed | 0x00006000, + NEON_TBX_1v = NEON_TBL_1v | NEONTableExt, + NEON_TBX_2v = NEON_TBL_2v | NEONTableExt, + NEON_TBX_3v = NEON_TBL_3v | NEONTableExt, + NEON_TBX_4v = NEON_TBL_4v | NEONTableExt +}; + +// NEON perm. +enum NEONPermOp { + NEONPermFixed = 0x0E000800, + NEONPermFMask = 0xBF208C00, + NEONPermMask = 0x3F20FC00, + NEON_UZP1 = NEONPermFixed | 0x00001000, + NEON_TRN1 = NEONPermFixed | 0x00002000, + NEON_ZIP1 = NEONPermFixed | 0x00003000, + NEON_UZP2 = NEONPermFixed | 0x00005000, + NEON_TRN2 = NEONPermFixed | 0x00006000, + NEON_ZIP2 = NEONPermFixed | 0x00007000 +}; + +// NEON scalar instructions with two register operands. 
+enum NEONScalar2RegMiscOp { + NEONScalar2RegMiscFixed = 0x5E200800, + NEONScalar2RegMiscFMask = 0xDF3E0C00, + NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask, + NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero, + NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero, + NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero, + NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero, + NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero, + NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS, + NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS, + NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG, + NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG, + NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN, + NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN, + NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN, + NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD, + NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD, + + NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode, + NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode, + + NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000, + NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE, + NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE, + NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF, + NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF, + NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero, + NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero, + NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero, + NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero, + NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero, + NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000, + NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS, + NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU, + NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS, + NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU, + NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS, + NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU, + NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS, + NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU, + NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS, + NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU, + NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN +}; + +// NEON scalar instructions with three same-type operands. 
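The scalar groups in this header, including NEONScalar3SameOp just below, are not encoded from scratch: each scalar encoding is the corresponding vector encoding with the Q bit and the scalar bit set, written as NEON_Q | NEONScalar | <vector op>. A small self-contained check of that relationship; the two Fixed values are quoted from this header, while the individual values of NEON_Q and NEONScalar are an assumption consistent with them, not copied from this diff:

  #include <cstdint>

  // Assumed single-bit flags (bit 30 and bit 28); together they account for
  // the 0x50000000 difference between the vector and scalar Fixed values.
  constexpr uint32_t kNEON_Q = 0x40000000;
  constexpr uint32_t kNEONScalar = 0x10000000;

  // Quoted from this header.
  constexpr uint32_t kNEONByIndexedElementFixed = 0x0F000000;
  constexpr uint32_t kNEONScalarByIndexedElementFixed = 0x5F000000;

  static_assert(kNEONScalarByIndexedElementFixed ==
                    (kNEON_Q | kNEONScalar | kNEONByIndexedElementFixed),
                "scalar group == vector group with Q and scalar bits set");
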
+enum NEONScalar3SameOp { + NEONScalar3SameFixed = 0x5E200400, + NEONScalar3SameFMask = 0xDF200400, + NEONScalar3SameMask = 0xFF20FC00, + NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD, + NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ, + NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE, + NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT, + NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI, + NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS, + NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST, + NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB, + NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD, + NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD, + NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB, + NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB, + NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL, + NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL, + NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL, + NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL, + NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL, + NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL, + NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL, + NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL, + NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH, + NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH, + + // NEON floating point scalar instructions with three same-type operands. + NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000, + NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000, + NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000, + NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE, + NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT, + NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ, + NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE, + NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT, + NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX, + NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS, + NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS, + NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD +}; + +// NEON scalar instructions with three different-type operands. +enum NEONScalar3DiffOp { + NEONScalar3DiffFixed = 0x5E200000, + NEONScalar3DiffFMask = 0xDF200C00, + NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask, + NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL, + NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL, + NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL +}; + // Unimplemented and unallocated instructions. These are defined to make fixed // bit assertion easier. 
enum UnimplementedOp { diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h index 2405f878300075..6718bd3d689994 100644 --- a/deps/v8/src/arm64/decoder-arm64-inl.h +++ b/deps/v8/src/arm64/decoder-arm64-inl.h @@ -213,6 +213,11 @@ void Decoder::DecodeLoadStore(Instruction* instr) { (instr->Bits(27, 24) == 0xC) || (instr->Bits(27, 24) == 0xD) ); + if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) { + DecodeNEONLoadStore(instr); + return; + } + if (instr->Bit(24) == 0) { if (instr->Bit(28) == 0) { if (instr->Bit(29) == 0) { @@ -226,8 +231,6 @@ void Decoder::DecodeLoadStore(Instruction* instr) { } else { V::VisitLoadStoreAcquireRelease(instr); } - } else { - DecodeAdvSIMDLoadStore(instr); } } else { if ((instr->Bits(31, 30) == 0x3) || @@ -513,16 +516,14 @@ void Decoder::DecodeFP(Instruction* instr) { (instr->Bits(27, 24) == 0xF) ); if (instr->Bit(28) == 0) { - DecodeAdvSIMDDataProcessing(instr); + DecodeNEONVectorDataProcessing(instr); } else { - if (instr->Bit(29) == 1) { + if (instr->Bits(31, 30) == 0x3) { V::VisitUnallocated(instr); + } else if (instr->Bits(31, 30) == 0x1) { + DecodeNEONScalarDataProcessing(instr); } else { - if (instr->Bits(31, 30) == 0x3) { - V::VisitUnallocated(instr); - } else if (instr->Bits(31, 30) == 0x1) { - DecodeAdvSIMDDataProcessing(instr); - } else { + if (instr->Bit(29) == 0) { if (instr->Bit(24) == 0) { if (instr->Bit(21) == 0) { if ((instr->Bit(23) == 1) || @@ -629,25 +630,190 @@ void Decoder::DecodeFP(Instruction* instr) { V::VisitFPDataProcessing3Source(instr); } } + } else { + V::VisitUnallocated(instr); } } } } - -template -void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) { - // TODO(all): Implement Advanced SIMD load/store instruction decode. 
+template +void Decoder::DecodeNEONLoadStore(Instruction* instr) { DCHECK(instr->Bits(29, 25) == 0x6); - V::VisitUnimplemented(instr); + if (instr->Bit(31) == 0) { + if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) { + V::VisitUnallocated(instr); + return; + } + + if (instr->Bit(23) == 0) { + if (instr->Bits(20, 16) == 0) { + if (instr->Bit(24) == 0) { + V::VisitNEONLoadStoreMultiStruct(instr); + } else { + V::VisitNEONLoadStoreSingleStruct(instr); + } + } else { + V::VisitUnallocated(instr); + } + } else { + if (instr->Bit(24) == 0) { + V::VisitNEONLoadStoreMultiStructPostIndex(instr); + } else { + V::VisitNEONLoadStoreSingleStructPostIndex(instr); + } + } + } else { + V::VisitUnallocated(instr); + } } +template +void Decoder::DecodeNEONVectorDataProcessing(Instruction* instr) { + DCHECK(instr->Bits(28, 25) == 0x7); + if (instr->Bit(31) == 0) { + if (instr->Bit(24) == 0) { + if (instr->Bit(21) == 0) { + if (instr->Bit(15) == 0) { + if (instr->Bit(10) == 0) { + if (instr->Bit(29) == 0) { + if (instr->Bit(11) == 0) { + V::VisitNEONTable(instr); + } else { + V::VisitNEONPerm(instr); + } + } else { + V::VisitNEONExtract(instr); + } + } else { + if (instr->Bits(23, 22) == 0) { + V::VisitNEONCopy(instr); + } else { + V::VisitUnallocated(instr); + } + } + } else { + V::VisitUnallocated(instr); + } + } else { + if (instr->Bit(10) == 0) { + if (instr->Bit(11) == 0) { + V::VisitNEON3Different(instr); + } else { + if (instr->Bits(18, 17) == 0) { + if (instr->Bit(20) == 0) { + if (instr->Bit(19) == 0) { + V::VisitNEON2RegMisc(instr); + } else { + if (instr->Bits(30, 29) == 0x2) { + V::VisitUnallocated(instr); + } else { + V::VisitUnallocated(instr); + } + } + } else { + if (instr->Bit(19) == 0) { + V::VisitNEONAcrossLanes(instr); + } else { + V::VisitUnallocated(instr); + } + } + } else { + V::VisitUnallocated(instr); + } + } + } else { + V::VisitNEON3Same(instr); + } + } + } else { + if (instr->Bit(10) == 0) { + V::VisitNEONByIndexedElement(instr); + } else { + if (instr->Bit(23) == 0) { + if (instr->Bits(22, 19) == 0) { + V::VisitNEONModifiedImmediate(instr); + } else { + V::VisitNEONShiftImmediate(instr); + } + } else { + V::VisitUnallocated(instr); + } + } + } + } else { + V::VisitUnallocated(instr); + } +} -template -void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) { - // TODO(all): Implement Advanced SIMD data processing instruction decode. 
- DCHECK(instr->Bits(27, 25) == 0x7); - V::VisitUnimplemented(instr); +template +void Decoder::DecodeNEONScalarDataProcessing(Instruction* instr) { + DCHECK(instr->Bits(28, 25) == 0xF); + if (instr->Bit(24) == 0) { + if (instr->Bit(21) == 0) { + if (instr->Bit(15) == 0) { + if (instr->Bit(10) == 0) { + if (instr->Bit(29) == 0) { + if (instr->Bit(11) == 0) { + V::VisitUnallocated(instr); + } else { + V::VisitUnallocated(instr); + } + } else { + V::VisitUnallocated(instr); + } + } else { + if (instr->Bits(23, 22) == 0) { + V::VisitNEONScalarCopy(instr); + } else { + V::VisitUnallocated(instr); + } + } + } else { + V::VisitUnallocated(instr); + } + } else { + if (instr->Bit(10) == 0) { + if (instr->Bit(11) == 0) { + V::VisitNEONScalar3Diff(instr); + } else { + if (instr->Bits(18, 17) == 0) { + if (instr->Bit(20) == 0) { + if (instr->Bit(19) == 0) { + V::VisitNEONScalar2RegMisc(instr); + } else { + if (instr->Bit(29) == 0) { + V::VisitUnallocated(instr); + } else { + V::VisitUnallocated(instr); + } + } + } else { + if (instr->Bit(19) == 0) { + V::VisitNEONScalarPairwise(instr); + } else { + V::VisitUnallocated(instr); + } + } + } else { + V::VisitUnallocated(instr); + } + } + } else { + V::VisitNEONScalar3Same(instr); + } + } + } else { + if (instr->Bit(10) == 0) { + V::VisitNEONScalarByIndexedElement(instr); + } else { + if (instr->Bit(23) == 0) { + V::VisitNEONScalarShiftImmediate(instr); + } else { + V::VisitUnallocated(instr); + } + } + } } diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h index a17b3244128c75..a89bf38980e711 100644 --- a/deps/v8/src/arm64/decoder-arm64.h +++ b/deps/v8/src/arm64/decoder-arm64.h @@ -16,50 +16,72 @@ namespace internal { // List macro containing all visitors needed by the decoder class. 
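For context on how this list is consumed: the Decode* helpers above end in V::Visit<Group>(instr) calls, and each tool (disassembler, simulator, and so on) supplies one Visit method per entry by expanding the macro defined just below. A hedged sketch of a trivial visitor follows; the exact DecoderVisitor method signature is assumed from the declarations later in this header rather than quoted verbatim, and the class is illustrative, not part of the patch:

  // Assumes decoder-arm64.h has been included and that this code sits inside
  // namespace v8::internal, like the surrounding declarations.
  class CountingVisitor : public DecoderVisitor {
   public:
  #define DECLARE(A) \
    void Visit##A(Instruction* instr) override { (void)instr; count_++; }
    VISITOR_LIST(DECLARE)
  #undef DECLARE

    int count() const { return count_; }

   private:
    int count_ = 0;
  };

Such a visitor would be registered with the DispatchingDecoderVisitor alongside the disassembler or simulator visitors.
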
-#define VISITOR_LIST(V) \ - V(PCRelAddressing) \ - V(AddSubImmediate) \ - V(LogicalImmediate) \ - V(MoveWideImmediate) \ - V(Bitfield) \ - V(Extract) \ - V(UnconditionalBranch) \ - V(UnconditionalBranchToRegister) \ - V(CompareBranch) \ - V(TestBranch) \ - V(ConditionalBranch) \ - V(System) \ - V(Exception) \ - V(LoadStorePairPostIndex) \ - V(LoadStorePairOffset) \ - V(LoadStorePairPreIndex) \ - V(LoadLiteral) \ - V(LoadStoreUnscaledOffset) \ - V(LoadStorePostIndex) \ - V(LoadStorePreIndex) \ - V(LoadStoreRegisterOffset) \ - V(LoadStoreUnsignedOffset) \ - V(LoadStoreAcquireRelease) \ - V(LogicalShifted) \ - V(AddSubShifted) \ - V(AddSubExtended) \ - V(AddSubWithCarry) \ - V(ConditionalCompareRegister) \ - V(ConditionalCompareImmediate) \ - V(ConditionalSelect) \ - V(DataProcessing1Source) \ - V(DataProcessing2Source) \ - V(DataProcessing3Source) \ - V(FPCompare) \ - V(FPConditionalCompare) \ - V(FPConditionalSelect) \ - V(FPImmediate) \ - V(FPDataProcessing1Source) \ - V(FPDataProcessing2Source) \ - V(FPDataProcessing3Source) \ - V(FPIntegerConvert) \ - V(FPFixedPointConvert) \ - V(Unallocated) \ +#define VISITOR_LIST(V) \ + V(PCRelAddressing) \ + V(AddSubImmediate) \ + V(LogicalImmediate) \ + V(MoveWideImmediate) \ + V(Bitfield) \ + V(Extract) \ + V(UnconditionalBranch) \ + V(UnconditionalBranchToRegister) \ + V(CompareBranch) \ + V(TestBranch) \ + V(ConditionalBranch) \ + V(System) \ + V(Exception) \ + V(LoadStorePairPostIndex) \ + V(LoadStorePairOffset) \ + V(LoadStorePairPreIndex) \ + V(LoadLiteral) \ + V(LoadStoreUnscaledOffset) \ + V(LoadStorePostIndex) \ + V(LoadStorePreIndex) \ + V(LoadStoreRegisterOffset) \ + V(LoadStoreUnsignedOffset) \ + V(LoadStoreAcquireRelease) \ + V(LogicalShifted) \ + V(AddSubShifted) \ + V(AddSubExtended) \ + V(AddSubWithCarry) \ + V(ConditionalCompareRegister) \ + V(ConditionalCompareImmediate) \ + V(ConditionalSelect) \ + V(DataProcessing1Source) \ + V(DataProcessing2Source) \ + V(DataProcessing3Source) \ + V(FPCompare) \ + V(FPConditionalCompare) \ + V(FPConditionalSelect) \ + V(FPImmediate) \ + V(FPDataProcessing1Source) \ + V(FPDataProcessing2Source) \ + V(FPDataProcessing3Source) \ + V(FPIntegerConvert) \ + V(FPFixedPointConvert) \ + V(NEON2RegMisc) \ + V(NEON3Different) \ + V(NEON3Same) \ + V(NEONAcrossLanes) \ + V(NEONByIndexedElement) \ + V(NEONCopy) \ + V(NEONExtract) \ + V(NEONLoadStoreMultiStruct) \ + V(NEONLoadStoreMultiStructPostIndex) \ + V(NEONLoadStoreSingleStruct) \ + V(NEONLoadStoreSingleStructPostIndex) \ + V(NEONModifiedImmediate) \ + V(NEONScalar2RegMisc) \ + V(NEONScalar3Diff) \ + V(NEONScalar3Same) \ + V(NEONScalarByIndexedElement) \ + V(NEONScalarCopy) \ + V(NEONScalarPairwise) \ + V(NEONScalarShiftImmediate) \ + V(NEONShiftImmediate) \ + V(NEONTable) \ + V(NEONPerm) \ + V(Unallocated) \ V(Unimplemented) // The Visitor interface. Disassembler and simulator (and other tools) @@ -109,6 +131,8 @@ class DispatchingDecoderVisitor : public DecoderVisitor { // stored by the decoder. void RemoveVisitor(DecoderVisitor* visitor); + void VisitNEONShiftImmediate(const Instruction* instr); + #define DECLARE(A) void Visit##A(Instruction* instr); VISITOR_LIST(DECLARE) #undef DECLARE @@ -173,12 +197,17 @@ class Decoder : public V { // Decode the Advanced SIMD (NEON) load/store part of the instruction tree, // and call the corresponding visitors. // On entry, instruction bits 29:25 = 0x6. 
- void DecodeAdvSIMDLoadStore(Instruction* instr); + void DecodeNEONLoadStore(Instruction* instr); // Decode the Advanced SIMD (NEON) data processing part of the instruction // tree, and call the corresponding visitors. // On entry, instruction bits 27:25 = 0x7. - void DecodeAdvSIMDDataProcessing(Instruction* instr); + void DecodeNEONVectorDataProcessing(Instruction* instr); + + // Decode the Advanced SIMD (NEON) scalar data processing part of the + // instruction tree, and call the corresponding visitors. + // On entry, instruction bits 28:25 = 0xF. + void DecodeNEONScalarDataProcessing(Instruction* instr); }; diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc index a178e1d95effdf..dac144d3d1f2b8 100644 --- a/deps/v8/src/arm64/deoptimizer-arm64.cc +++ b/deps/v8/src/arm64/deoptimizer-arm64.cc @@ -87,26 +87,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { } -void Deoptimizer::SetPlatformCompiledStubRegisters( - FrameDescription* output_frame, CodeStubDescriptor* descriptor) { - ApiFunction function(descriptor->deoptimization_handler()); - ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); - intptr_t handler = reinterpret_cast(xref.address()); - int params = descriptor->GetHandlerParameterCount(); - output_frame->SetRegister(x0.code(), params); - output_frame->SetRegister(x1.code(), handler); -} - - -void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { - for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) { - Float64 double_value = input_->GetDoubleRegister(i); - output_frame->SetDoubleRegister(i, double_value); - } -} - - - #define __ masm()-> void Deoptimizer::TableEntryGenerator::Generate() { @@ -118,13 +98,13 @@ void Deoptimizer::TableEntryGenerator::Generate() { // Save all allocatable double registers. CPURegList saved_double_registers( - CPURegister::kFPRegister, kDRegSizeInBits, + CPURegister::kVRegister, kDRegSizeInBits, RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()); __ PushCPURegList(saved_double_registers); // Save all allocatable float registers. 
CPURegList saved_float_registers( - CPURegister::kFPRegister, kSRegSizeInBits, + CPURegister::kVRegister, kSRegSizeInBits, RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask()); __ PushCPURegList(saved_float_registers); @@ -133,7 +113,8 @@ void Deoptimizer::TableEntryGenerator::Generate() { saved_registers.Combine(fp); __ PushCPURegList(saved_registers); - __ Mov(x3, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); + __ Mov(x3, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, + isolate()))); __ Str(fp, MemOperand(x3)); const int kSavedRegistersAreaSize = diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc index e3ef4595d87fa2..288cfe4705424b 100644 --- a/deps/v8/src/arm64/disasm-arm64.cc +++ b/deps/v8/src/arm64/disasm-arm64.cc @@ -11,6 +11,7 @@ #include "src/arm64/decoder-arm64-inl.h" #include "src/arm64/disasm-arm64.h" +#include "src/arm64/utils-arm64.h" #include "src/base/platform/platform.h" #include "src/disasm.h" #include "src/macro-assembler.h" @@ -94,9 +95,9 @@ void DisassemblingDecoder::VisitAddSubShifted(Instruction* instr) { bool rd_is_zr = RdIsZROrSP(instr); bool rn_is_zr = RnIsZROrSP(instr); const char *mnemonic = ""; - const char *form = "'Rd, 'Rn, 'Rm'HDP"; - const char *form_cmp = "'Rn, 'Rm'HDP"; - const char *form_neg = "'Rd, 'Rm'HDP"; + const char* form = "'Rd, 'Rn, 'Rm'NDP"; + const char* form_cmp = "'Rn, 'Rm'NDP"; + const char* form_neg = "'Rd, 'Rm'NDP"; switch (instr->Mask(AddSubShiftedMask)) { case ADD_w_shift: @@ -286,7 +287,7 @@ void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) { bool rd_is_zr = RdIsZROrSP(instr); bool rn_is_zr = RnIsZROrSP(instr); const char *mnemonic = ""; - const char *form = "'Rd, 'Rn, 'Rm'HLo"; + const char* form = "'Rd, 'Rn, 'Rm'NLo"; switch (instr->Mask(LogicalShiftedMask)) { case AND_w: @@ -304,7 +305,7 @@ void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) { mnemonic = "ands"; if (rd_is_zr) { mnemonic = "tst"; - form = "'Rn, 'Rm'HLo"; + form = "'Rn, 'Rm'NLo"; } break; } @@ -322,7 +323,7 @@ void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) { mnemonic = "orn"; if (rn_is_zr) { mnemonic = "mvn"; - form = "'Rd, 'Rm'HLo"; + form = "'Rd, 'Rm'NLo"; } break; } @@ -527,7 +528,9 @@ void DisassemblingDecoder::VisitPCRelAddressing(Instruction* instr) { void DisassemblingDecoder::VisitConditionalBranch(Instruction* instr) { switch (instr->Mask(ConditionalBranchMask)) { - case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break; + case B_cond: + Format(instr, "b.'CBrn", "'TImmCond"); + break; default: UNREACHABLE(); } } @@ -556,7 +559,7 @@ void DisassemblingDecoder::VisitUnconditionalBranchToRegister( void DisassemblingDecoder::VisitUnconditionalBranch(Instruction* instr) { const char *mnemonic = ""; - const char *form = "'BImmUncn"; + const char* form = "'TImmUncn"; switch (instr->Mask(UnconditionalBranchMask)) { case B: mnemonic = "b"; break; @@ -689,7 +692,7 @@ void DisassemblingDecoder::VisitDataProcessing3Source(Instruction* instr) { void DisassemblingDecoder::VisitCompareBranch(Instruction* instr) { const char *mnemonic = ""; - const char *form = "'Rt, 'BImmCmpa"; + const char* form = "'Rt, 'TImmCmpa"; switch (instr->Mask(CompareBranchMask)) { case CBZ_w: @@ -708,7 +711,7 @@ void DisassemblingDecoder::VisitTestBranch(Instruction* instr) { // disassembled as Wt, otherwise Xt. 
As the top bit of the immediate is // encoded in bit 31 of the instruction, we can reuse the Rt form, which // uses bit 31 (normally "sf") to choose the register size. - const char *form = "'Rt, 'IS, 'BImmTest"; + const char* form = "'Rt, 'IS, 'TImmTest"; switch (instr->Mask(TestBranchMask)) { case TBZ: mnemonic = "tbz"; break; @@ -738,25 +741,30 @@ void DisassemblingDecoder::VisitMoveWideImmediate(Instruction* instr) { Format(instr, mnemonic, form); } - -#define LOAD_STORE_LIST(V) \ - V(STRB_w, "strb", "'Wt") \ - V(STRH_w, "strh", "'Wt") \ - V(STR_w, "str", "'Wt") \ - V(STR_x, "str", "'Xt") \ - V(LDRB_w, "ldrb", "'Wt") \ - V(LDRH_w, "ldrh", "'Wt") \ - V(LDR_w, "ldr", "'Wt") \ - V(LDR_x, "ldr", "'Xt") \ - V(LDRSB_x, "ldrsb", "'Xt") \ - V(LDRSH_x, "ldrsh", "'Xt") \ - V(LDRSW_x, "ldrsw", "'Xt") \ - V(LDRSB_w, "ldrsb", "'Wt") \ - V(LDRSH_w, "ldrsh", "'Wt") \ - V(STR_s, "str", "'St") \ - V(STR_d, "str", "'Dt") \ - V(LDR_s, "ldr", "'St") \ - V(LDR_d, "ldr", "'Dt") +#define LOAD_STORE_LIST(V) \ + V(STRB_w, "strb", "'Wt") \ + V(STRH_w, "strh", "'Wt") \ + V(STR_w, "str", "'Wt") \ + V(STR_x, "str", "'Xt") \ + V(LDRB_w, "ldrb", "'Wt") \ + V(LDRH_w, "ldrh", "'Wt") \ + V(LDR_w, "ldr", "'Wt") \ + V(LDR_x, "ldr", "'Xt") \ + V(LDRSB_x, "ldrsb", "'Xt") \ + V(LDRSH_x, "ldrsh", "'Xt") \ + V(LDRSW_x, "ldrsw", "'Xt") \ + V(LDRSB_w, "ldrsb", "'Wt") \ + V(LDRSH_w, "ldrsh", "'Wt") \ + V(STR_b, "str", "'Bt") \ + V(STR_h, "str", "'Ht") \ + V(STR_s, "str", "'St") \ + V(STR_d, "str", "'Dt") \ + V(LDR_b, "ldr", "'Bt") \ + V(LDR_h, "ldr", "'Ht") \ + V(LDR_s, "ldr", "'St") \ + V(LDR_d, "ldr", "'Dt") \ + V(STR_q, "str", "'Qt") \ + V(LDR_q, "ldr", "'Qt") void DisassemblingDecoder::VisitLoadStorePreIndex(Instruction* instr) { const char *mnemonic = "unimplemented"; @@ -861,17 +869,18 @@ void DisassemblingDecoder::VisitLoadLiteral(Instruction* instr) { Format(instr, mnemonic, form); } - #define LOAD_STORE_PAIR_LIST(V) \ - V(STP_w, "stp", "'Wt, 'Wt2", "4") \ - V(LDP_w, "ldp", "'Wt, 'Wt2", "4") \ - V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \ - V(STP_x, "stp", "'Xt, 'Xt2", "8") \ - V(LDP_x, "ldp", "'Xt, 'Xt2", "8") \ - V(STP_s, "stp", "'St, 'St2", "4") \ - V(LDP_s, "ldp", "'St, 'St2", "4") \ - V(STP_d, "stp", "'Dt, 'Dt2", "8") \ - V(LDP_d, "ldp", "'Dt, 'Dt2", "8") + V(STP_w, "stp", "'Wt, 'Wt2", "2") \ + V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \ + V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \ + V(STP_x, "stp", "'Xt, 'Xt2", "3") \ + V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \ + V(STP_s, "stp", "'St, 'St2", "2") \ + V(LDP_s, "ldp", "'St, 'St2", "2") \ + V(STP_d, "stp", "'Dt, 'Dt2", "3") \ + V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \ + V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \ + V(STP_q, "stp", "'Qt, 'Qt2", "4") void DisassemblingDecoder::VisitLoadStorePairPostIndex(Instruction* instr) { const char *mnemonic = "unimplemented"; @@ -1010,6 +1019,22 @@ void DisassemblingDecoder::VisitFPDataProcessing1Source(Instruction* instr) { #undef FORMAT case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break; case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break; + case FCVT_hs: + mnemonic = "fcvt"; + form = "'Hd, 'Sn"; + break; + case FCVT_sh: + mnemonic = "fcvt"; + form = "'Sd, 'Hn"; + break; + case FCVT_dh: + mnemonic = "fcvt"; + form = "'Dd, 'Hn"; + break; + case FCVT_hd: + mnemonic = "fcvt"; + form = "'Hd, 'Dn"; + break; default: form = "(FPDataProcessing1Source)"; } Format(instr, mnemonic, form); @@ -1083,6 +1108,14 @@ void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) { case FMOV_xd: mnemonic = "fmov"; form = form_rf; break; case FMOV_sw: case 
FMOV_dx: mnemonic = "fmov"; form = form_fr; break; + case FMOV_d1_x: + mnemonic = "fmov"; + form = "'Vd.D[1], 'Rn"; + break; + case FMOV_x_d1: + mnemonic = "fmov"; + form = "'Rd, 'Vn.D[1]"; + break; case FCVTAS_ws: case FCVTAS_xs: case FCVTAS_wd: @@ -1115,6 +1148,20 @@ void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) { case FCVTZS_wd: case FCVTZS_xs: case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break; + case FCVTPU_xd: + case FCVTPU_ws: + case FCVTPU_wd: + case FCVTPU_xs: + mnemonic = "fcvtpu"; + form = form_rf; + break; + case FCVTPS_xd: + case FCVTPS_wd: + case FCVTPS_xs: + case FCVTPS_ws: + mnemonic = "fcvtps"; + form = form_rf; + break; case SCVTF_sw: case SCVTF_sx: case SCVTF_dw: @@ -1234,159 +1281,2290 @@ void DisassemblingDecoder::VisitException(Instruction* instr) { Format(instr, mnemonic, form); } +void DisassemblingDecoder::VisitNEON3Same(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); -void DisassemblingDecoder::VisitUnimplemented(Instruction* instr) { - Format(instr, "unimplemented", "(Unimplemented)"); + if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { + switch (instr->Mask(NEON3SameLogicalMask)) { + case NEON_AND: + mnemonic = "and"; + break; + case NEON_ORR: + mnemonic = "orr"; + if (instr->Rm() == instr->Rn()) { + mnemonic = "mov"; + form = "'Vd.%s, 'Vn.%s"; + } + break; + case NEON_ORN: + mnemonic = "orn"; + break; + case NEON_EOR: + mnemonic = "eor"; + break; + case NEON_BIC: + mnemonic = "bic"; + break; + case NEON_BIF: + mnemonic = "bif"; + break; + case NEON_BIT: + mnemonic = "bit"; + break; + case NEON_BSL: + mnemonic = "bsl"; + break; + default: + form = "(NEON3Same)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + } else { + static const char* mnemonics[] = { + "shadd", "uhadd", "shadd", "uhadd", + "sqadd", "uqadd", "sqadd", "uqadd", + "srhadd", "urhadd", "srhadd", "urhadd", + NULL, NULL, NULL, + NULL, // Handled by logical cases above. + "shsub", "uhsub", "shsub", "uhsub", + "sqsub", "uqsub", "sqsub", "uqsub", + "cmgt", "cmhi", "cmgt", "cmhi", + "cmge", "cmhs", "cmge", "cmhs", + "sshl", "ushl", "sshl", "ushl", + "sqshl", "uqshl", "sqshl", "uqshl", + "srshl", "urshl", "srshl", "urshl", + "sqrshl", "uqrshl", "sqrshl", "uqrshl", + "smax", "umax", "smax", "umax", + "smin", "umin", "smin", "umin", + "sabd", "uabd", "sabd", "uabd", + "saba", "uaba", "saba", "uaba", + "add", "sub", "add", "sub", + "cmtst", "cmeq", "cmtst", "cmeq", + "mla", "mls", "mla", "mls", + "mul", "pmul", "mul", "pmul", + "smaxp", "umaxp", "smaxp", "umaxp", + "sminp", "uminp", "sminp", "uminp", + "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh", + "addp", "unallocated", "addp", "unallocated", + "fmaxnm", "fmaxnmp", "fminnm", "fminnmp", + "fmla", "unallocated", "fmls", "unallocated", + "fadd", "faddp", "fsub", "fabd", + "fmulx", "fmul", "unallocated", "unallocated", + "fcmeq", "fcmge", "unallocated", "fcmgt", + "unallocated", "facge", "unallocated", "facgt", + "fmax", "fmaxp", "fmin", "fminp", + "frecps", "fdiv", "frsqrts", "unallocated"}; + + // Operation is determined by the opcode bits (15-11), the top bit of + // size (23) and the U bit (29). + unsigned index = + (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) | instr->Bit(29); + DCHECK_LT(index, arraysize(mnemonics)); + mnemonic = mnemonics[index]; + // Assert that index is not one of the previously handled logical + // instructions. 
+ DCHECK_NOT_NULL(mnemonic); + + if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPFormatMap()); + } + } + Format(instr, mnemonic, nfd.Substitute(form)); } +void DisassemblingDecoder::VisitNEON2RegMisc(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "'Vd.%s, 'Vn.%s"; + const char* form_cmp_zero = "'Vd.%s, 'Vn.%s, #0"; + const char* form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0"; + NEONFormatDecoder nfd(instr); -void DisassemblingDecoder::VisitUnallocated(Instruction* instr) { - Format(instr, "unallocated", "(Unallocated)"); -} - + static const NEONFormatMap map_lp_ta = { + {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}}; -void DisassemblingDecoder::ProcessOutput(Instruction* /*instr*/) { - // The base disasm does nothing more than disassembling into a buffer. -} + static const NEONFormatMap map_cvt_ta = {{22}, {NF_4S, NF_2D}}; + static const NEONFormatMap map_cvt_tb = {{22, 30}, + {NF_4H, NF_8H, NF_2S, NF_4S}}; -void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic, - const char* format) { - // TODO(mcapewel) don't think I can use the instr address here - there needs - // to be a base address too - DCHECK(mnemonic != NULL); - ResetOutput(); - Substitute(instr, mnemonic); - if (format != NULL) { - buffer_[buffer_pos_++] = ' '; - Substitute(instr, format); + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_REV64: + mnemonic = "rev64"; + break; + case NEON_REV32: + mnemonic = "rev32"; + break; + case NEON_REV16: + mnemonic = "rev16"; + break; + case NEON_SADDLP: + mnemonic = "saddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADDLP: + mnemonic = "uaddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SUQADD: + mnemonic = "suqadd"; + break; + case NEON_USQADD: + mnemonic = "usqadd"; + break; + case NEON_CLS: + mnemonic = "cls"; + break; + case NEON_CLZ: + mnemonic = "clz"; + break; + case NEON_CNT: + mnemonic = "cnt"; + break; + case NEON_SADALP: + mnemonic = "sadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADALP: + mnemonic = "uadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SQABS: + mnemonic = "sqabs"; + break; + case NEON_SQNEG: + mnemonic = "sqneg"; + break; + case NEON_CMGT_zero: + mnemonic = "cmgt"; + form = form_cmp_zero; + break; + case NEON_CMGE_zero: + mnemonic = "cmge"; + form = form_cmp_zero; + break; + case NEON_CMEQ_zero: + mnemonic = "cmeq"; + form = form_cmp_zero; + break; + case NEON_CMLE_zero: + mnemonic = "cmle"; + form = form_cmp_zero; + break; + case NEON_CMLT_zero: + mnemonic = "cmlt"; + form = form_cmp_zero; + break; + case NEON_ABS: + mnemonic = "abs"; + break; + case NEON_NEG: + mnemonic = "neg"; + break; + case NEON_RBIT_NOT: + switch (instr->FPType()) { + case 0: + mnemonic = "mvn"; + break; + case 1: + mnemonic = "rbit"; + break; + default: + form = "(NEON2RegMisc)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + break; + } + } else { + // These instructions all use a one bit size field, except XTN, SQXTUN, + // SHLL, SQXTN and UQXTN, which use a two bit size field. + nfd.SetFormatMaps(nfd.FPFormatMap()); + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: + mnemonic = "fabs"; + break; + case NEON_FNEG: + mnemonic = "fneg"; + break; + case NEON_FCVTN: + mnemonic = instr->Mask(NEON_Q) ? 
"fcvtn2" : "fcvtn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTXN: + mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTL: + mnemonic = instr->Mask(NEON_Q) ? "fcvtl2" : "fcvtl"; + nfd.SetFormatMap(0, &map_cvt_ta); + nfd.SetFormatMap(1, &map_cvt_tb); + break; + case NEON_FRINTN: + mnemonic = "frintn"; + break; + case NEON_FRINTA: + mnemonic = "frinta"; + break; + case NEON_FRINTP: + mnemonic = "frintp"; + break; + case NEON_FRINTM: + mnemonic = "frintm"; + break; + case NEON_FRINTX: + mnemonic = "frintx"; + break; + case NEON_FRINTZ: + mnemonic = "frintz"; + break; + case NEON_FRINTI: + mnemonic = "frinti"; + break; + case NEON_FCVTNS: + mnemonic = "fcvtns"; + break; + case NEON_FCVTNU: + mnemonic = "fcvtnu"; + break; + case NEON_FCVTPS: + mnemonic = "fcvtps"; + break; + case NEON_FCVTPU: + mnemonic = "fcvtpu"; + break; + case NEON_FCVTMS: + mnemonic = "fcvtms"; + break; + case NEON_FCVTMU: + mnemonic = "fcvtmu"; + break; + case NEON_FCVTZS: + mnemonic = "fcvtzs"; + break; + case NEON_FCVTZU: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTAS: + mnemonic = "fcvtas"; + break; + case NEON_FCVTAU: + mnemonic = "fcvtau"; + break; + case NEON_FSQRT: + mnemonic = "fsqrt"; + break; + case NEON_SCVTF: + mnemonic = "scvtf"; + break; + case NEON_UCVTF: + mnemonic = "ucvtf"; + break; + case NEON_URSQRTE: + mnemonic = "ursqrte"; + break; + case NEON_URECPE: + mnemonic = "urecpe"; + break; + case NEON_FRSQRTE: + mnemonic = "frsqrte"; + break; + case NEON_FRECPE: + mnemonic = "frecpe"; + break; + case NEON_FCMGT_zero: + mnemonic = "fcmgt"; + form = form_fcmp_zero; + break; + case NEON_FCMGE_zero: + mnemonic = "fcmge"; + form = form_fcmp_zero; + break; + case NEON_FCMEQ_zero: + mnemonic = "fcmeq"; + form = form_fcmp_zero; + break; + case NEON_FCMLE_zero: + mnemonic = "fcmle"; + form = form_fcmp_zero; + break; + case NEON_FCMLT_zero: + mnemonic = "fcmlt"; + form = form_fcmp_zero; + break; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: + mnemonic = "xtn"; + break; + case NEON_SQXTN: + mnemonic = "sqxtn"; + break; + case NEON_UQXTN: + mnemonic = "uqxtn"; + break; + case NEON_SQXTUN: + mnemonic = "sqxtun"; + break; + case NEON_SHLL: + mnemonic = "shll"; + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(1, nfd.IntegerFormatMap()); + switch (instr->NEONSize()) { + case 0: + form = "'Vd.%s, 'Vn.%s, #8"; + break; + case 1: + form = "'Vd.%s, 'Vn.%s, #16"; + break; + case 2: + form = "'Vd.%s, 'Vn.%s, #32"; + break; + default: + Format(instr, "unallocated", "(NEON2RegMisc)"); + return; + } + } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + return; + } else { + form = "(NEON2RegMisc)"; + } + } } - buffer_[buffer_pos_] = 0; - ProcessOutput(instr); + Format(instr, mnemonic, nfd.Substitute(form)); } +void DisassemblingDecoder::VisitNEON3Different(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; -void DisassemblingDecoder::Substitute(Instruction* instr, const char* string) { - char chr = *string++; - while (chr != '\0') { - if (chr == '\'') { - string += SubstituteField(instr, string); - } else { - buffer_[buffer_pos_++] = chr; - 
} - chr = *string++; + NEONFormatDecoder nfd(instr); + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + + // Ignore the Q bit. Appending a "2" suffix is handled later. + switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) { + case NEON_PMULL: + mnemonic = "pmull"; + break; + case NEON_SABAL: + mnemonic = "sabal"; + break; + case NEON_SABDL: + mnemonic = "sabdl"; + break; + case NEON_SADDL: + mnemonic = "saddl"; + break; + case NEON_SMLAL: + mnemonic = "smlal"; + break; + case NEON_SMLSL: + mnemonic = "smlsl"; + break; + case NEON_SMULL: + mnemonic = "smull"; + break; + case NEON_SSUBL: + mnemonic = "ssubl"; + break; + case NEON_SQDMLAL: + mnemonic = "sqdmlal"; + break; + case NEON_SQDMLSL: + mnemonic = "sqdmlsl"; + break; + case NEON_SQDMULL: + mnemonic = "sqdmull"; + break; + case NEON_UABAL: + mnemonic = "uabal"; + break; + case NEON_UABDL: + mnemonic = "uabdl"; + break; + case NEON_UADDL: + mnemonic = "uaddl"; + break; + case NEON_UMLAL: + mnemonic = "umlal"; + break; + case NEON_UMLSL: + mnemonic = "umlsl"; + break; + case NEON_UMULL: + mnemonic = "umull"; + break; + case NEON_USUBL: + mnemonic = "usubl"; + break; + case NEON_SADDW: + mnemonic = "saddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_SSUBW: + mnemonic = "ssubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_UADDW: + mnemonic = "uaddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_USUBW: + mnemonic = "usubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_ADDHN: + mnemonic = "addhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RADDHN: + mnemonic = "raddhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RSUBHN: + mnemonic = "rsubhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_SUBHN: + mnemonic = "subhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + default: + form = "(NEON3Different)"; } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); } +void DisassemblingDecoder::VisitNEONAcrossLanes(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "%sd, 'Vn.%s"; -int DisassemblingDecoder::SubstituteField(Instruction* instr, - const char* format) { - switch (format[0]) { - case 'R': // Register. X or W, selected by sf bit. - case 'F': // FP Register. S or D, selected by type field. 
- case 'W': - case 'X': - case 'S': - case 'D': return SubstituteRegisterField(instr, format); - case 'I': return SubstituteImmediateField(instr, format); - case 'L': return SubstituteLiteralField(instr, format); - case 'H': return SubstituteShiftField(instr, format); - case 'P': return SubstitutePrefetchField(instr, format); - case 'C': return SubstituteConditionField(instr, format); - case 'E': return SubstituteExtendField(instr, format); - case 'A': return SubstitutePCRelAddressField(instr, format); - case 'B': return SubstituteBranchTargetField(instr, format); - case 'O': return SubstituteLSRegOffsetField(instr, format); - case 'M': return SubstituteBarrierField(instr, format); - default: { - UNREACHABLE(); - return 1; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(), + NEONFormatDecoder::IntegerFormatMap()); + + if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + nfd.SetFormatMap(1, nfd.FPFormatMap()); + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: + mnemonic = "fmaxv"; + break; + case NEON_FMINV: + mnemonic = "fminv"; + break; + case NEON_FMAXNMV: + mnemonic = "fmaxnmv"; + break; + case NEON_FMINNMV: + mnemonic = "fminnmv"; + break; + default: + form = "(NEONAcrossLanes)"; + break; + } + } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) { + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: + mnemonic = "addv"; + break; + case NEON_SMAXV: + mnemonic = "smaxv"; + break; + case NEON_SMINV: + mnemonic = "sminv"; + break; + case NEON_UMAXV: + mnemonic = "umaxv"; + break; + case NEON_UMINV: + mnemonic = "uminv"; + break; + case NEON_SADDLV: + mnemonic = "saddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + case NEON_UADDLV: + mnemonic = "uaddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + default: + form = "(NEONAcrossLanes)"; + break; } } + Format(instr, mnemonic, + nfd.Substitute(form, NEONFormatDecoder::kPlaceholder, + NEONFormatDecoder::kFormat)); } +void DisassemblingDecoder::VisitNEONByIndexedElement(Instruction* instr) { + const char* mnemonic = "unimplemented"; + bool l_instr = false; + bool fp_instr = false; -int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr, - const char* format) { - unsigned reg_num = 0; - unsigned field_len = 2; - switch (format[1]) { - case 'd': reg_num = instr->Rd(); break; - case 'n': reg_num = instr->Rn(); break; - case 'm': reg_num = instr->Rm(); break; - case 'a': reg_num = instr->Ra(); break; - case 't': { - if (format[2] == '2') { - reg_num = instr->Rt2(); - field_len = 3; - } else { - reg_num = instr->Rt(); - } + const char* form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]"; + + static const NEONFormatMap map_ta = {{23, 22}, {NF_UNDEF, NF_4S, NF_2D}}; + NEONFormatDecoder nfd(instr, &map_ta, NEONFormatDecoder::IntegerFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_SMULL_byelement: + mnemonic = "smull"; + l_instr = true; break; - } - case 's': - reg_num = instr->Rs(); + case NEON_UMULL_byelement: + mnemonic = "umull"; + l_instr = true; break; - default: UNREACHABLE(); - } - - // Increase field length for registers tagged as stack. 
- if (format[2] == 's') { - field_len = 3; + case NEON_SMLAL_byelement: + mnemonic = "smlal"; + l_instr = true; + break; + case NEON_UMLAL_byelement: + mnemonic = "umlal"; + l_instr = true; + break; + case NEON_SMLSL_byelement: + mnemonic = "smlsl"; + l_instr = true; + break; + case NEON_UMLSL_byelement: + mnemonic = "umlsl"; + l_instr = true; + break; + case NEON_SQDMULL_byelement: + mnemonic = "sqdmull"; + l_instr = true; + break; + case NEON_SQDMLAL_byelement: + mnemonic = "sqdmlal"; + l_instr = true; + break; + case NEON_SQDMLSL_byelement: + mnemonic = "sqdmlsl"; + l_instr = true; + break; + case NEON_MUL_byelement: + mnemonic = "mul"; + break; + case NEON_MLA_byelement: + mnemonic = "mla"; + break; + case NEON_MLS_byelement: + mnemonic = "mls"; + break; + case NEON_SQDMULH_byelement: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_byelement: + mnemonic = "sqrdmulh"; + break; + default: + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_byelement: + mnemonic = "fmul"; + fp_instr = true; + break; + case NEON_FMLA_byelement: + mnemonic = "fmla"; + fp_instr = true; + break; + case NEON_FMLS_byelement: + mnemonic = "fmls"; + fp_instr = true; + break; + case NEON_FMULX_byelement: + mnemonic = "fmulx"; + fp_instr = true; + break; + } } - char reg_type; - if (format[0] == 'R') { - // Register type is R: use sf bit to choose X and W. - reg_type = instr->SixtyFourBits() ? 'x' : 'w'; - } else if (format[0] == 'F') { - // Floating-point register: use type field to choose S or D. - reg_type = ((instr->FPType() & 1) == 0) ? 's' : 'd'; + if (l_instr) { + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + } else if (fp_instr) { + nfd.SetFormatMap(0, nfd.FPFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); } else { - // Register type is specified. Make it lower case. - reg_type = format[0] + 0x20; + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); } +} - if ((reg_num != kZeroRegCode) || (reg_type == 's') || (reg_type == 'd')) { - // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31. 
- - // Filter special registers - if ((reg_type == 'x') && (reg_num == 27)) { - AppendToOutput("cp"); - } else if ((reg_type == 'x') && (reg_num == 28)) { - AppendToOutput("jssp"); - } else if ((reg_type == 'x') && (reg_num == 29)) { - AppendToOutput("fp"); - } else if ((reg_type == 'x') && (reg_num == 30)) { - AppendToOutput("lr"); +void DisassemblingDecoder::VisitNEONCopy(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "(NEONCopy)"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(), + NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]"; + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Vd.%s['IVInsIndex1], 'Xn"; } else { - AppendToOutput("%c%d", reg_type, reg_num); + form = "'Vd.%s['IVInsIndex1], 'Wn"; + } + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) { + mnemonic = "mov"; + } else { + mnemonic = "umov"; + } + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Xd, 'Vn.%s['IVInsIndex1]"; + } else { + form = "'Wd, 'Vn.%s['IVInsIndex1]"; + } + } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) { + mnemonic = "smov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + form = "'Rdq, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + mnemonic = "dup"; + form = "'Vd.%s, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + mnemonic = "dup"; + if (nfd.GetVectorFormat() == kFormat2D) { + form = "'Vd.%s, 'Xn"; + } else { + form = "'Vd.%s, 'Wn"; } - } else if (format[2] == 's') { - // Disassemble w31/x31 as stack pointer wcsp/csp. - AppendToOutput("%s", (reg_type == 'w') ? "wcsp" : "csp"); - } else { - // Disassemble w31/x31 as zero register wzr/xzr. 
- AppendToOutput("%czr", reg_type); } + Format(instr, mnemonic, nfd.Substitute(form)); +} - return field_len; +void DisassemblingDecoder::VisitNEONExtract(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "(NEONExtract)"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + mnemonic = "ext"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract"; + } + Format(instr, mnemonic, nfd.Substitute(form)); } +void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) { + const char* mnemonic = NULL; + const char* form = NULL; + const char* form_1v = "{'Vt.%1$s}, ['Xns]"; + const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]"; + const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]"; + const char* form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructMask)) { + case NEON_LD1_1v: + mnemonic = "ld1"; + form = form_1v; + break; + case NEON_LD1_2v: + mnemonic = "ld1"; + form = form_2v; + break; + case NEON_LD1_3v: + mnemonic = "ld1"; + form = form_3v; + break; + case NEON_LD1_4v: + mnemonic = "ld1"; + form = form_4v; + break; + case NEON_LD2: + mnemonic = "ld2"; + form = form_2v; + break; + case NEON_LD3: + mnemonic = "ld3"; + form = form_3v; + break; + case NEON_LD4: + mnemonic = "ld4"; + form = form_4v; + break; + case NEON_ST1_1v: + mnemonic = "st1"; + form = form_1v; + break; + case NEON_ST1_2v: + mnemonic = "st1"; + form = form_2v; + break; + case NEON_ST1_3v: + mnemonic = "st1"; + form = form_3v; + break; + case NEON_ST1_4v: + mnemonic = "st1"; + form = form_4v; + break; + case NEON_ST2: + mnemonic = "st2"; + form = form_2v; + break; + case NEON_ST3: + mnemonic = "st3"; + form = form_3v; + break; + case NEON_ST4: + mnemonic = "st4"; + form = form_4v; + break; + default: + break; + } -int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr, - const char* format) { - DCHECK(format[0] == 'I'); + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreMultiStructMask)) { + case NEON_LD2: + case NEON_LD3: + case NEON_LD4: + case NEON_ST2: + case NEON_ST3: + case NEON_ST4: + // LD[2-4] and ST[2-4] cannot use .1d format. + allocated = (instr->NEONQ() != 0) || (instr->NEONLSSize() != 3); + break; + default: + break; + } + if (allocated) { + DCHECK_NOT_NULL(mnemonic); + DCHECK_NOT_NULL(form); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreMultiStruct)"; + } - switch (format[1]) { - case 'M': { // IMoveImm or IMoveLSL. 
- if (format[5] == 'I') { - uint64_t imm = static_cast(instr->ImmMoveWide()) - << (16 * instr->ShiftMoveWide()); - AppendToOutput("#0x%" PRIx64, imm); - } else { - DCHECK(format[5] == 'L'); - AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide()); + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex( + Instruction* instr) { + const char* mnemonic = NULL; + const char* form = NULL; + const char* form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1"; + const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2"; + const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3"; + const char* form_4v = + "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD1_1v_post: + mnemonic = "ld1"; + form = form_1v; + break; + case NEON_LD1_2v_post: + mnemonic = "ld1"; + form = form_2v; + break; + case NEON_LD1_3v_post: + mnemonic = "ld1"; + form = form_3v; + break; + case NEON_LD1_4v_post: + mnemonic = "ld1"; + form = form_4v; + break; + case NEON_LD2_post: + mnemonic = "ld2"; + form = form_2v; + break; + case NEON_LD3_post: + mnemonic = "ld3"; + form = form_3v; + break; + case NEON_LD4_post: + mnemonic = "ld4"; + form = form_4v; + break; + case NEON_ST1_1v_post: + mnemonic = "st1"; + form = form_1v; + break; + case NEON_ST1_2v_post: + mnemonic = "st1"; + form = form_2v; + break; + case NEON_ST1_3v_post: + mnemonic = "st1"; + form = form_3v; + break; + case NEON_ST1_4v_post: + mnemonic = "st1"; + form = form_4v; + break; + case NEON_ST2_post: + mnemonic = "st2"; + form = form_2v; + break; + case NEON_ST3_post: + mnemonic = "st3"; + form = form_3v; + break; + case NEON_ST4_post: + mnemonic = "st4"; + form = form_4v; + break; + default: + break; + } + + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD2_post: + case NEON_LD3_post: + case NEON_LD4_post: + case NEON_ST2_post: + case NEON_ST3_post: + case NEON_ST4_post: + // LD[2-4] and ST[2-4] cannot use .1d format. + allocated = (instr->NEONQ() != 0) || (instr->NEONLSSize() != 3); + break; + default: + break; + } + if (allocated) { + DCHECK_NOT_NULL(mnemonic); + DCHECK_NOT_NULL(form); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreMultiStructPostIndex)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) { + const char* mnemonic = NULL; + const char* form = NULL; + + const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns]"; + const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns]"; + const char* form_1s = "{'Vt.s}['IVLSLane2], ['Xns]"; + const char* form_1d = "{'Vt.d}['IVLSLane3], ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructMask)) { + case NEON_LD1_b: + mnemonic = "ld1"; + form = form_1b; + break; + case NEON_LD1_h: + mnemonic = "ld1"; + form = form_1h; + break; + case NEON_LD1_s: + mnemonic = "ld1"; + static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d, + "LSB of size distinguishes S and D registers."); + form = ((instr->NEONLSSize() & 1) == 0) ? 
form_1s : form_1d; + break; + case NEON_ST1_b: + mnemonic = "st1"; + form = form_1b; + break; + case NEON_ST1_h: + mnemonic = "st1"; + form = form_1h; + break; + case NEON_ST1_s: + mnemonic = "st1"; + static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d, + "LSB of size distinguishes S and D registers."); + form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns]"; + break; + case NEON_LD2_b: + case NEON_ST2_b: + mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD2_h: + case NEON_ST2_h: + mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD2_s: + case NEON_ST2_s: + static_assert((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d, + "LSB of size distinguishes S and D registers."); + static_assert((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d, + "LSB of size distinguishes S and D registers."); + mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2"; + if ((instr->NEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD2R: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns]"; + break; + case NEON_LD3_b: + case NEON_ST3_b: + mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD3_h: + case NEON_ST3_h: + mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD3_s: + case NEON_ST3_s: + mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3"; + if ((instr->NEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD3R: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]"; + break; + case NEON_LD4_b: + case NEON_ST4_b: + mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD4_h: + case NEON_ST4_h: + mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD4_s: + case NEON_ST4_s: + static_assert((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d, + "LSB of size distinguishes S and D registers."); + static_assert((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d, + "LSB of size distinguishes S and D registers."); + mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4"; + if ((instr->NEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD4R: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + break; + default: + break; + } + + // Work out unallocated encodings. 
+ bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreSingleStructMask)) { + case NEON_LD1_h: + case NEON_LD2_h: + case NEON_LD3_h: + case NEON_LD4_h: + case NEON_ST1_h: + case NEON_ST2_h: + case NEON_ST3_h: + case NEON_ST4_h: + DCHECK(allocated); + allocated = ((instr->NEONLSSize() & 1) == 0); + break; + case NEON_LD1_s: + case NEON_LD2_s: + case NEON_LD3_s: + case NEON_LD4_s: + case NEON_ST1_s: + case NEON_ST2_s: + case NEON_ST3_s: + case NEON_ST4_s: + DCHECK(allocated); + allocated = (instr->NEONLSSize() <= 1) && + ((instr->NEONLSSize() == 0) || (instr->NEONS() == 0)); + break; + case NEON_LD1R: + case NEON_LD2R: + case NEON_LD3R: + case NEON_LD4R: + DCHECK(allocated); + allocated = (instr->NEONS() == 0); + break; + default: + break; + } + if (allocated) { + DCHECK_NOT_NULL(mnemonic); + DCHECK_NOT_NULL(form); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreSingleStruct)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void DisassemblingDecoder::VisitNEONLoadStoreSingleStructPostIndex( + Instruction* instr) { + const char* mnemonic = NULL; + const char* form = NULL; + + const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1"; + const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2"; + const char* form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4"; + const char* form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_b_post: + mnemonic = "ld1"; + form = form_1b; + break; + case NEON_LD1_h_post: + mnemonic = "ld1"; + form = form_1h; + break; + case NEON_LD1_s_post: + mnemonic = "ld1"; + static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d, + "LSB of size distinguishes S and D registers."); + form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_ST1_b_post: + mnemonic = "st1"; + form = form_1b; + break; + case NEON_ST1_h_post: + mnemonic = "st1"; + form = form_1h; + break; + case NEON_ST1_s_post: + mnemonic = "st1"; + static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d, + "LSB of size distinguishes S and D registers."); + form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R_post: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns], 'Xmz1"; + break; + case NEON_LD2_b_post: + case NEON_ST2_b_post: + mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2"; + break; + case NEON_ST2_h_post: + case NEON_LD2_h_post: + mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4"; + break; + case NEON_LD2_s_post: + case NEON_ST2_s_post: + mnemonic = (instr->NEONLoad() == 1) ? "ld2" : "st2"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8"; + else + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16"; + break; + case NEON_LD2R_post: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2"; + break; + case NEON_LD3_b_post: + case NEON_ST3_b_post: + mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3"; + break; + case NEON_LD3_h_post: + case NEON_ST3_h_post: + mnemonic = (instr->NEONLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6"; + break; + case NEON_LD3_s_post: + case NEON_ST3_s_post: + mnemonic = (instr->NEONLoad() == 1) ? 
"ld3" : "st3"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmb24"; + break; + case NEON_LD3R_post: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3"; + break; + case NEON_LD4_b_post: + case NEON_ST4_b_post: + mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4"; + break; + case NEON_LD4_h_post: + case NEON_ST4_h_post: + mnemonic = (instr->NEONLoad()) == 1 ? "ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8"; + break; + case NEON_LD4_s_post: + case NEON_ST4_s_post: + mnemonic = (instr->NEONLoad() == 1) ? "ld4" : "st4"; + if ((instr->NEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32"; + break; + case NEON_LD4R_post: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4"; + break; + default: + break; + } + + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_h_post: + case NEON_LD2_h_post: + case NEON_LD3_h_post: + case NEON_LD4_h_post: + case NEON_ST1_h_post: + case NEON_ST2_h_post: + case NEON_ST3_h_post: + case NEON_ST4_h_post: + DCHECK(allocated); + allocated = ((instr->NEONLSSize() & 1) == 0); + break; + case NEON_LD1_s_post: + case NEON_LD2_s_post: + case NEON_LD3_s_post: + case NEON_LD4_s_post: + case NEON_ST1_s_post: + case NEON_ST2_s_post: + case NEON_ST3_s_post: + case NEON_ST4_s_post: + DCHECK(allocated); + allocated = (instr->NEONLSSize() <= 1) && + ((instr->NEONLSSize() == 0) || (instr->NEONS() == 0)); + break; + case NEON_LD1R_post: + case NEON_LD2R_post: + case NEON_LD3R_post: + case NEON_LD4R_post: + DCHECK(allocated); + allocated = (instr->NEONS() == 0); + break; + default: + break; + } + if (allocated) { + DCHECK_NOT_NULL(mnemonic); + DCHECK_NOT_NULL(form); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreSingleStructPostIndex)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void DisassemblingDecoder::VisitNEONModifiedImmediate(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1"; + + int cmode = instr->NEONCmode(); + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int q = instr->NEONQ(); + int op = instr->NEONModImmOp(); + + static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}}; + static const NEONFormatMap map_h = {{30}, {NF_4H, NF_8H}}; + static const NEONFormatMap map_s = {{30}, {NF_2S, NF_4S}}; + NEONFormatDecoder nfd(instr, &map_b); + + if (cmode_3 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? "bic" : "orr"; + } + nfd.SetFormatMap(0, &map_s); + } else { // cmode<3> == '1'. + if (cmode_2 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? "bic" : "orr"; + } + nfd.SetFormatMap(0, &map_h); + } else { // cmode<2> == '1'. + if (cmode_1 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2"; + nfd.SetFormatMap(0, &map_s); + } else { // cmode<1> == '1'. 
+ if (cmode_0 == 0) { + mnemonic = "movi"; + if (op == 0) { + form = "'Vt.%s, 'IVMIImm8"; + } else { + form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm"; + } + } else { // cmode<0> == '1' + mnemonic = "fmov"; + if (op == 0) { + form = "'Vt.%s, 'IVMIImmFPSingle"; + nfd.SetFormatMap(0, &map_s); + } else { + if (q == 1) { + form = "'Vt.2d, 'IVMIImmFPDouble"; + } else { + mnemonic = "unallocated"; + form = "(NEONModifiedImmediate)"; + } + } + } + } + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void DisassemblingDecoder::VisitNEONPerm(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); + + switch (instr->Mask(NEONPermMask)) { + case NEON_TRN1: + mnemonic = "trn1"; + break; + case NEON_TRN2: + mnemonic = "trn2"; + break; + case NEON_UZP1: + mnemonic = "uzp1"; + break; + case NEON_UZP2: + mnemonic = "uzp2"; + break; + case NEON_ZIP1: + mnemonic = "zip1"; + break; + case NEON_ZIP2: + mnemonic = "zip2"; + break; + default: + form = "(NEONPerm)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void DisassemblingDecoder::VisitNEONScalar2RegMisc(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "%sd, %sn"; + const char* form_0 = "%sd, %sn, #0"; + const char* form_fp0 = "%sd, %sn, #0.0"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_CMGT_zero_scalar: + mnemonic = "cmgt"; + form = form_0; + break; + case NEON_CMGE_zero_scalar: + mnemonic = "cmge"; + form = form_0; + break; + case NEON_CMLE_zero_scalar: + mnemonic = "cmle"; + form = form_0; + break; + case NEON_CMLT_zero_scalar: + mnemonic = "cmlt"; + form = form_0; + break; + case NEON_CMEQ_zero_scalar: + mnemonic = "cmeq"; + form = form_0; + break; + case NEON_NEG_scalar: + mnemonic = "neg"; + break; + case NEON_SQNEG_scalar: + mnemonic = "sqneg"; + break; + case NEON_ABS_scalar: + mnemonic = "abs"; + break; + case NEON_SQABS_scalar: + mnemonic = "sqabs"; + break; + case NEON_SUQADD_scalar: + mnemonic = "suqadd"; + break; + case NEON_USQADD_scalar: + mnemonic = "usqadd"; + break; + default: + form = "(NEONScalar2RegMisc)"; + } + } else { + // These instructions all use a one bit size field, except SQXTUN, SQXTN + // and UQXTN, which use a two bit size field. 
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRSQRTE_scalar: + mnemonic = "frsqrte"; + break; + case NEON_FRECPE_scalar: + mnemonic = "frecpe"; + break; + case NEON_SCVTF_scalar: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_scalar: + mnemonic = "ucvtf"; + break; + case NEON_FCMGT_zero_scalar: + mnemonic = "fcmgt"; + form = form_fp0; + break; + case NEON_FCMGE_zero_scalar: + mnemonic = "fcmge"; + form = form_fp0; + break; + case NEON_FCMLE_zero_scalar: + mnemonic = "fcmle"; + form = form_fp0; + break; + case NEON_FCMLT_zero_scalar: + mnemonic = "fcmlt"; + form = form_fp0; + break; + case NEON_FCMEQ_zero_scalar: + mnemonic = "fcmeq"; + form = form_fp0; + break; + case NEON_FRECPX_scalar: + mnemonic = "frecpx"; + break; + case NEON_FCVTNS_scalar: + mnemonic = "fcvtns"; + break; + case NEON_FCVTNU_scalar: + mnemonic = "fcvtnu"; + break; + case NEON_FCVTPS_scalar: + mnemonic = "fcvtps"; + break; + case NEON_FCVTPU_scalar: + mnemonic = "fcvtpu"; + break; + case NEON_FCVTMS_scalar: + mnemonic = "fcvtms"; + break; + case NEON_FCVTMU_scalar: + mnemonic = "fcvtmu"; + break; + case NEON_FCVTZS_scalar: + mnemonic = "fcvtzs"; + break; + case NEON_FCVTZU_scalar: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTAS_scalar: + mnemonic = "fcvtas"; + break; + case NEON_FCVTAU_scalar: + mnemonic = "fcvtau"; + break; + case NEON_FCVTXN_scalar: + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + mnemonic = "fcvtxn"; + break; + default: + nfd.SetFormatMap(0, nfd.ScalarFormatMap()); + nfd.SetFormatMap(1, nfd.LongScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_SQXTN_scalar: + mnemonic = "sqxtn"; + break; + case NEON_UQXTN_scalar: + mnemonic = "uqxtn"; + break; + case NEON_SQXTUN_scalar: + mnemonic = "sqxtun"; + break; + default: + form = "(NEONScalar2RegMisc)"; + } + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void DisassemblingDecoder::VisitNEONScalar3Diff(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar: + mnemonic = "sqdmlal"; + break; + case NEON_SQDMLSL_scalar: + mnemonic = "sqdmlsl"; + break; + case NEON_SQDMULL_scalar: + mnemonic = "sqdmull"; + break; + default: + form = "(NEONScalar3Diff)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void DisassemblingDecoder::VisitNEONScalar3Same(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FACGE_scalar: + mnemonic = "facge"; + break; + case NEON_FACGT_scalar: + mnemonic = "facgt"; + break; + case NEON_FCMEQ_scalar: + mnemonic = "fcmeq"; + break; + case NEON_FCMGE_scalar: + mnemonic = "fcmge"; + break; + case NEON_FCMGT_scalar: + mnemonic = "fcmgt"; + break; + case NEON_FMULX_scalar: + mnemonic = "fmulx"; + break; + case NEON_FRECPS_scalar: + mnemonic = "frecps"; + break; + case NEON_FRSQRTS_scalar: + mnemonic = "frsqrts"; + break; + case NEON_FABD_scalar: + mnemonic = "fabd"; + break; + default: + form = "(NEONScalar3Same)"; + } + } else { + switch 
(instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: + mnemonic = "add"; + break; + case NEON_SUB_scalar: + mnemonic = "sub"; + break; + case NEON_CMEQ_scalar: + mnemonic = "cmeq"; + break; + case NEON_CMGE_scalar: + mnemonic = "cmge"; + break; + case NEON_CMGT_scalar: + mnemonic = "cmgt"; + break; + case NEON_CMHI_scalar: + mnemonic = "cmhi"; + break; + case NEON_CMHS_scalar: + mnemonic = "cmhs"; + break; + case NEON_CMTST_scalar: + mnemonic = "cmtst"; + break; + case NEON_UQADD_scalar: + mnemonic = "uqadd"; + break; + case NEON_SQADD_scalar: + mnemonic = "sqadd"; + break; + case NEON_UQSUB_scalar: + mnemonic = "uqsub"; + break; + case NEON_SQSUB_scalar: + mnemonic = "sqsub"; + break; + case NEON_USHL_scalar: + mnemonic = "ushl"; + break; + case NEON_SSHL_scalar: + mnemonic = "sshl"; + break; + case NEON_UQSHL_scalar: + mnemonic = "uqshl"; + break; + case NEON_SQSHL_scalar: + mnemonic = "sqshl"; + break; + case NEON_URSHL_scalar: + mnemonic = "urshl"; + break; + case NEON_SRSHL_scalar: + mnemonic = "srshl"; + break; + case NEON_UQRSHL_scalar: + mnemonic = "uqrshl"; + break; + case NEON_SQRSHL_scalar: + mnemonic = "sqrshl"; + break; + case NEON_SQDMULH_scalar: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_scalar: + mnemonic = "sqrdmulh"; + break; + default: + form = "(NEONScalar3Same)"; + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void DisassemblingDecoder::VisitNEONScalarByIndexedElement(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "%sd, %sn, 'Ve.%s['IVByElemIndex]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + bool long_instr = false; + + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQDMULL_byelement_scalar: + mnemonic = "sqdmull"; + long_instr = true; + break; + case NEON_SQDMLAL_byelement_scalar: + mnemonic = "sqdmlal"; + long_instr = true; + break; + case NEON_SQDMLSL_byelement_scalar: + mnemonic = "sqdmlsl"; + long_instr = true; + break; + case NEON_SQDMULH_byelement_scalar: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_byelement_scalar: + mnemonic = "sqrdmulh"; + break; + default: + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMUL_byelement_scalar: + mnemonic = "fmul"; + break; + case NEON_FMLA_byelement_scalar: + mnemonic = "fmla"; + break; + case NEON_FMLS_byelement_scalar: + mnemonic = "fmls"; + break; + case NEON_FMULX_byelement_scalar: + mnemonic = "fmulx"; + break; + default: + form = "(NEONScalarByIndexedElement)"; + } + } + + if (long_instr) { + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + } + + Format(instr, mnemonic, + nfd.Substitute(form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat)); +} + +void DisassemblingDecoder::VisitNEONScalarCopy(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "(NEONScalarCopy)"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { + mnemonic = "mov"; + form = "%sd, 'Vn.%s['IVInsIndex1]"; + } + + Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat)); +} + +void DisassemblingDecoder::VisitNEONScalarPairwise(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "%sd, 'Vn.%s"; + NEONFormatMap map = {{22}, {NF_2S, NF_2D}}; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map); + + switch 
(instr->Mask(NEONScalarPairwiseMask)) { + case NEON_ADDP_scalar: + mnemonic = "addp"; + break; + case NEON_FADDP_scalar: + mnemonic = "faddp"; + break; + case NEON_FMAXP_scalar: + mnemonic = "fmaxp"; + break; + case NEON_FMAXNMP_scalar: + mnemonic = "fmaxnmp"; + break; + case NEON_FMINP_scalar: + mnemonic = "fminp"; + break; + case NEON_FMINNMP_scalar: + mnemonic = "fminnmp"; + break; + default: + form = "(NEONScalarPairwise)"; + } + Format(instr, mnemonic, + nfd.Substitute(form, NEONFormatDecoder::kPlaceholder, + NEONFormatDecoder::kFormat)); +} + +void DisassemblingDecoder::VisitNEONScalarShiftImmediate(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "%sd, %sn, 'Is1"; + const char* form_2 = "%sd, %sn, 'Is2"; + + static const NEONFormatMap map_shift = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, NF_D, NF_D, NF_D, + NF_D, NF_D, NF_D, NF_D, NF_D}}; + static const NEONFormatMap map_shift_narrow = { + {21, 20, 19}, {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}}; + NEONFormatDecoder nfd(instr, &map_shift); + + if (instr->ImmNEONImmh()) { // immh has to be non-zero. + switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_FCVTZU_imm_scalar: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTZS_imm_scalar: + mnemonic = "fcvtzs"; + break; + case NEON_SCVTF_imm_scalar: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_imm_scalar: + mnemonic = "ucvtf"; + break; + case NEON_SRI_scalar: + mnemonic = "sri"; + break; + case NEON_SSHR_scalar: + mnemonic = "sshr"; + break; + case NEON_USHR_scalar: + mnemonic = "ushr"; + break; + case NEON_SRSHR_scalar: + mnemonic = "srshr"; + break; + case NEON_URSHR_scalar: + mnemonic = "urshr"; + break; + case NEON_SSRA_scalar: + mnemonic = "ssra"; + break; + case NEON_USRA_scalar: + mnemonic = "usra"; + break; + case NEON_SRSRA_scalar: + mnemonic = "srsra"; + break; + case NEON_URSRA_scalar: + mnemonic = "ursra"; + break; + case NEON_SHL_scalar: + mnemonic = "shl"; + form = form_2; + break; + case NEON_SLI_scalar: + mnemonic = "sli"; + form = form_2; + break; + case NEON_SQSHLU_scalar: + mnemonic = "sqshlu"; + form = form_2; + break; + case NEON_SQSHL_imm_scalar: + mnemonic = "sqshl"; + form = form_2; + break; + case NEON_UQSHL_imm_scalar: + mnemonic = "uqshl"; + form = form_2; + break; + case NEON_UQSHRN_scalar: + mnemonic = "uqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_UQRSHRN_scalar: + mnemonic = "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRN_scalar: + mnemonic = "sqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRN_scalar: + mnemonic = "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRUN_scalar: + mnemonic = "sqshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRUN_scalar: + mnemonic = "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + default: + form = "(NEONScalarShiftImmediate)"; + } + } else { + form = "(NEONScalarShiftImmediate)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void DisassemblingDecoder::VisitNEONShiftImmediate(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "'Vd.%s, 'Vn.%s, 'Is1"; + const char* form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2"; + const char* form_xtl = "'Vd.%s, 'Vn.%s"; + + // 0001->8H, 001x->4S, 01xx->2D, all others undefined. 
+ static const NEONFormatMap map_shift_ta = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}}; + + // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, + // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. + static const NEONFormatMap map_shift_tb = { + {22, 21, 20, 19, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H, + NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}}; + + NEONFormatDecoder nfd(instr, &map_shift_tb); + + if (instr->ImmNEONImmh()) { // immh has to be non-zero. + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SQSHLU: + mnemonic = "sqshlu"; + form = form_shift_2; + break; + case NEON_SQSHL_imm: + mnemonic = "sqshl"; + form = form_shift_2; + break; + case NEON_UQSHL_imm: + mnemonic = "uqshl"; + form = form_shift_2; + break; + case NEON_SHL: + mnemonic = "shl"; + form = form_shift_2; + break; + case NEON_SLI: + mnemonic = "sli"; + form = form_shift_2; + break; + case NEON_SCVTF_imm: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_imm: + mnemonic = "ucvtf"; + break; + case NEON_FCVTZU_imm: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTZS_imm: + mnemonic = "fcvtzs"; + break; + case NEON_SRI: + mnemonic = "sri"; + break; + case NEON_SSHR: + mnemonic = "sshr"; + break; + case NEON_USHR: + mnemonic = "ushr"; + break; + case NEON_SRSHR: + mnemonic = "srshr"; + break; + case NEON_URSHR: + mnemonic = "urshr"; + break; + case NEON_SSRA: + mnemonic = "ssra"; + break; + case NEON_USRA: + mnemonic = "usra"; + break; + case NEON_SRSRA: + mnemonic = "srsra"; + break; + case NEON_URSRA: + mnemonic = "ursra"; + break; + case NEON_SHRN: + mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_RSHRN: + mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SSHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->ImmNEONImmb() == 0 && + CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // sxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl"; + } else { // sshll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll"; + } + break; + case NEON_USHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->ImmNEONImmb() == 0 && + CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // uxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl"; + } else { // ushll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? 
"ushll2" : "ushll"; + } + break; + default: + form = "(NEONShiftImmediate)"; + } + } else { + form = "(NEONShiftImmediate)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void DisassemblingDecoder::VisitNEONTable(Instruction* instr) { + const char* mnemonic = "unimplemented"; + const char* form = "(NEONTable)"; + const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s"; + const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s"; + const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + const char form_4v[] = + "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}}; + NEONFormatDecoder nfd(instr, &map_b); + + switch (instr->Mask(NEONTableMask)) { + case NEON_TBL_1v: + mnemonic = "tbl"; + form = form_1v; + break; + case NEON_TBL_2v: + mnemonic = "tbl"; + form = form_2v; + break; + case NEON_TBL_3v: + mnemonic = "tbl"; + form = form_3v; + break; + case NEON_TBL_4v: + mnemonic = "tbl"; + form = form_4v; + break; + case NEON_TBX_1v: + mnemonic = "tbx"; + form = form_1v; + break; + case NEON_TBX_2v: + mnemonic = "tbx"; + form = form_2v; + break; + case NEON_TBX_3v: + mnemonic = "tbx"; + form = form_3v; + break; + case NEON_TBX_4v: + mnemonic = "tbx"; + form = form_4v; + break; + default: + break; + } + + char re_form[sizeof(form_4v)]; + int reg_num = instr->Rn(); + snprintf(re_form, sizeof(re_form), form, (reg_num + 1) % kNumberOfVRegisters, + (reg_num + 2) % kNumberOfVRegisters, + (reg_num + 3) % kNumberOfVRegisters); + + Format(instr, mnemonic, nfd.Substitute(re_form)); +} + +void DisassemblingDecoder::VisitUnimplemented(Instruction* instr) { + Format(instr, "unimplemented", "(Unimplemented)"); +} + +void DisassemblingDecoder::VisitUnallocated(Instruction* instr) { + Format(instr, "unallocated", "(Unallocated)"); +} + +void DisassemblingDecoder::ProcessOutput(Instruction* /*instr*/) { + // The base disasm does nothing more than disassembling into a buffer. +} + +void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) { + DCHECK(reg.IsValid()); + char reg_char; + + if (reg.IsRegister()) { + reg_char = reg.Is64Bits() ? 'x' : 'w'; + } else { + DCHECK(reg.IsVRegister()); + switch (reg.SizeInBits()) { + case kBRegSizeInBits: + reg_char = 'b'; + break; + case kHRegSizeInBits: + reg_char = 'h'; + break; + case kSRegSizeInBits: + reg_char = 's'; + break; + case kDRegSizeInBits: + reg_char = 'd'; + break; + default: + DCHECK(reg.Is128Bits()); + reg_char = 'q'; + } + } + + if (reg.IsVRegister() || !(reg.Aliases(csp) || reg.Aliases(xzr))) { + // Filter special registers + if (reg.IsX() && (reg.code() == 27)) { + AppendToOutput("cp"); + } else if (reg.IsX() && (reg.code() == 28)) { + AppendToOutput("jssp"); + } else if (reg.IsX() && (reg.code() == 29)) { + AppendToOutput("fp"); + } else if (reg.IsX() && (reg.code() == 30)) { + AppendToOutput("lr"); + } else { + // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31. + AppendToOutput("%c%d", reg_char, reg.code()); + } + } else if (reg.Aliases(csp)) { + // Disassemble w31/x31 as stack pointer wcsp/csp. + AppendToOutput("%s", reg.Is64Bits() ? "csp" : "wcsp"); + } else { + // Disassemble w31/x31 as zero register wzr/xzr. 
+ AppendToOutput("%czr", reg_char); + } +} + +void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic, + const char* format) { + // TODO(mcapewel) don't think I can use the instr address here - there needs + // to be a base address too + DCHECK(mnemonic != NULL); + ResetOutput(); + Substitute(instr, mnemonic); + if (format != NULL) { + buffer_[buffer_pos_++] = ' '; + Substitute(instr, format); + } + buffer_[buffer_pos_] = 0; + ProcessOutput(instr); +} + +void DisassemblingDecoder::Substitute(Instruction* instr, const char* string) { + char chr = *string++; + while (chr != '\0') { + if (chr == '\'') { + string += SubstituteField(instr, string); + } else { + buffer_[buffer_pos_++] = chr; + } + chr = *string++; + } +} + +int DisassemblingDecoder::SubstituteField(Instruction* instr, + const char* format) { + switch (format[0]) { + // NB. The remaining substitution prefix characters are: GJKUZ. + case 'R': // Register. X or W, selected by sf bit. + case 'F': // FP register. S or D, selected by type field. + case 'V': // Vector register, V, vector format. + case 'W': + case 'X': + case 'B': + case 'H': + case 'S': + case 'D': + case 'Q': + return SubstituteRegisterField(instr, format); + case 'I': + return SubstituteImmediateField(instr, format); + case 'L': + return SubstituteLiteralField(instr, format); + case 'N': + return SubstituteShiftField(instr, format); + case 'P': + return SubstitutePrefetchField(instr, format); + case 'C': + return SubstituteConditionField(instr, format); + case 'E': + return SubstituteExtendField(instr, format); + case 'A': + return SubstitutePCRelAddressField(instr, format); + case 'T': + return SubstituteBranchTargetField(instr, format); + case 'O': + return SubstituteLSRegOffsetField(instr, format); + case 'M': + return SubstituteBarrierField(instr, format); + default: + UNREACHABLE(); + } +} + +int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr, + const char* format) { + char reg_prefix = format[0]; + unsigned reg_num = 0; + unsigned field_len = 2; + + switch (format[1]) { + case 'd': + reg_num = instr->Rd(); + if (format[2] == 'q') { + reg_prefix = instr->NEONQ() ? 'X' : 'W'; + field_len = 3; + } + break; + case 'n': + reg_num = instr->Rn(); + break; + case 'm': + reg_num = instr->Rm(); + switch (format[2]) { + // Handle registers tagged with b (bytes), z (instruction), or + // r (registers), used for address updates in + // NEON load/store instructions. + case 'r': + case 'b': + case 'z': { + field_len = 3; + char* eimm; + int imm = static_cast(strtol(&format[3], &eimm, 10)); + field_len += eimm - &format[3]; + if (reg_num == 31) { + switch (format[2]) { + case 'z': + imm *= (1 << instr->NEONLSSize()); + break; + case 'r': + imm *= (instr->NEONQ() == 0) ? kDRegSize : kQRegSize; + break; + case 'b': + break; + } + AppendToOutput("#%d", imm); + return field_len; + } + break; + } + } + break; + case 'e': + // This is register Rm, but using a 4-bit specifier. Used in NEON + // by-element instructions. + reg_num = (instr->Rm() & 0xf); + break; + case 'a': + reg_num = instr->Ra(); + break; + case 't': + reg_num = instr->Rt(); + if (format[0] == 'V') { + if ((format[2] >= '2') && (format[2] <= '4')) { + // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4. + reg_num = (reg_num + format[2] - '1') % 32; + field_len = 3; + } + } else { + if (format[2] == '2') { + // Handle register specifier Rt2. 
+ reg_num = instr->Rt2(); + field_len = 3; + } + } + break; + case 's': + reg_num = instr->Rs(); + break; + default: + UNREACHABLE(); + } + + // Increase field length for registers tagged as stack. + if (format[2] == 's') { + field_len = 3; + } + + CPURegister::RegisterType reg_type; + unsigned reg_size; + + if (reg_prefix == 'R') { + reg_prefix = instr->SixtyFourBits() ? 'X' : 'W'; + } else if (reg_prefix == 'F') { + reg_prefix = ((instr->FPType() & 1) == 0) ? 'S' : 'D'; + } + + switch (reg_prefix) { + case 'W': + reg_type = CPURegister::kRegister; + reg_size = kWRegSizeInBits; + break; + case 'X': + reg_type = CPURegister::kRegister; + reg_size = kXRegSizeInBits; + break; + case 'B': + reg_type = CPURegister::kVRegister; + reg_size = kBRegSizeInBits; + break; + case 'H': + reg_type = CPURegister::kVRegister; + reg_size = kHRegSizeInBits; + break; + case 'S': + reg_type = CPURegister::kVRegister; + reg_size = kSRegSizeInBits; + break; + case 'D': + reg_type = CPURegister::kVRegister; + reg_size = kDRegSizeInBits; + break; + case 'Q': + reg_type = CPURegister::kVRegister; + reg_size = kQRegSizeInBits; + break; + case 'V': + AppendToOutput("v%d", reg_num); + return field_len; + default: + UNREACHABLE(); + reg_type = CPURegister::kRegister; + reg_size = kXRegSizeInBits; + } + + if ((reg_type == CPURegister::kRegister) && (reg_num == kZeroRegCode) && + (format[2] == 's')) { + reg_num = kSPRegInternalCode; + } + + AppendRegisterNameToOutput(CPURegister::Create(reg_num, reg_size, reg_type)); + + return field_len; +} + +int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr, + const char* format) { + DCHECK(format[0] == 'I'); + + switch (format[1]) { + case 'M': { // IMoveImm or IMoveLSL. + if (format[5] == 'I' || format[5] == 'N') { + uint64_t imm = static_cast(instr->ImmMoveWide()) + << (16 * instr->ShiftMoveWide()); + if (format[5] == 'N') imm = ~imm; + if (!instr->SixtyFourBits()) imm &= UINT64_C(0xffffffff); + AppendToOutput("#0x%" PRIx64, imm); + } else { + DCHECK(format[5] == 'L'); + AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide()); if (instr->ShiftMoveWide() > 0) { AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide()); } @@ -1409,15 +3587,15 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr, case 'P': { // ILPx - Immediate Load/Store Pair, x = access size. if (instr->ImmLSPair() != 0) { // format[3] is the scale value. Convert to a number. - int scale = format[3] - 0x30; + int scale = 1 << (format[3] - '0'); AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale); } return 4; } case 'U': { // ILU - Immediate Load/Store Unsigned. if (instr->ImmLSUnsigned() != 0) { - AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() - << instr->SizeLS()); + int shift = instr->SizeLS(); + AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift); } return 3; } @@ -1473,13 +3651,120 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr, instr->ImmTestBranchBit40()); return 2; } + case 's': { // Is - Shift (immediate). + switch (format[2]) { + case '1': { // Is1 - SSHR. + int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh()); + shift -= instr->ImmNEONImmhImmb(); + AppendToOutput("#%d", shift); + return 3; + } + case '2': { // Is2 - SLI. + int shift = instr->ImmNEONImmhImmb(); + shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh()); + AppendToOutput("#%d", shift); + return 3; + } + default: { + UNIMPLEMENTED(); + return 0; + } + } + } case 'D': { // IDebug - HLT and BRK instructions. 
AppendToOutput("#0x%x", instr->ImmException()); return 6; } + case 'V': { // Immediate Vector. + switch (format[2]) { + case 'E': { // IVExtract. + AppendToOutput("#%" PRId64, instr->ImmNEONExt()); + return 9; + } + case 'B': { // IVByElemIndex. + int vm_index = (instr->NEONH() << 1) | instr->NEONL(); + if (instr->NEONSize() == 1) { + vm_index = (vm_index << 1) | instr->NEONM(); + } + AppendToOutput("%d", vm_index); + return strlen("IVByElemIndex"); + } + case 'I': { // INS element. + if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) { + unsigned rd_index, rn_index; + unsigned imm5 = instr->ImmNEON5(); + unsigned imm4 = instr->ImmNEON4(); + int tz = CountTrailingZeros(imm5, 32); + if (tz <= 3) { // Defined for 0 <= tz <= 3 only. + rd_index = imm5 >> (tz + 1); + rn_index = imm4 >> tz; + if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) { + AppendToOutput("%d", rd_index); + return strlen("IVInsIndex1"); + } else if (strncmp(format, "IVInsIndex2", + strlen("IVInsIndex2")) == 0) { + AppendToOutput("%d", rn_index); + return strlen("IVInsIndex2"); + } + } + return 0; + } + } + case 'L': { // IVLSLane[0123] - suffix indicates access size shift. + AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0')); + return 9; + } + case 'M': { // Modified Immediate cases. + if (strncmp(format, "IVMIImmFPSingle", strlen("IVMIImmFPSingle")) == + 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(), + instr->ImmNEONFP32()); + return strlen("IVMIImmFPSingle"); + } else if (strncmp(format, "IVMIImmFPDouble", + strlen("IVMIImmFPDouble")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(), + instr->ImmNEONFP64()); + return strlen("IVMIImmFPDouble"); + } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) { + uint64_t imm8 = instr->ImmNEONabcdefgh(); + AppendToOutput("#0x%" PRIx64, imm8); + return strlen("IVMIImm8"); + } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) { + uint64_t imm8 = instr->ImmNEONabcdefgh(); + uint64_t imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + AppendToOutput("#0x%" PRIx64, imm); + return strlen("IVMIImm"); + } else if (strncmp(format, "IVMIShiftAmt1", + strlen("IVMIShiftAmt1")) == 0) { + int cmode = instr->NEONCmode(); + int shift_amount = 8 * ((cmode >> 1) & 3); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt1"); + } else if (strncmp(format, "IVMIShiftAmt2", + strlen("IVMIShiftAmt2")) == 0) { + int cmode = instr->NEONCmode(); + int shift_amount = 8 << (cmode & 1); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt2"); + } else { + UNIMPLEMENTED(); + return 0; + } + } + default: { + UNIMPLEMENTED(); + return 0; + } + } + } default: { + printf("%s", format); UNREACHABLE(); - return 0; } } } @@ -1515,7 +3800,6 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr, } default: { UNREACHABLE(); - return 0; } } } @@ -1542,14 +3826,14 @@ int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr, int DisassemblingDecoder::SubstituteShiftField(Instruction* instr, const char* format) { - DCHECK(format[0] == 'H'); - DCHECK(instr->ShiftDP() <= 0x3); + DCHECK_EQ(format[0], 'N'); + DCHECK_LE(instr->ShiftDP(), 0x3); switch (format[1]) { - case 'D': { // HDP. + case 'D': { // NDP. DCHECK(instr->ShiftDP() != ROR); } // Fall through. - case 'L': { // HLo. + case 'L': { // NLo. 
if (instr->ImmDPShift() != 0) { const char* shift_type[] = {"lsl", "lsr", "asr", "ror"}; AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()], @@ -1559,7 +3843,6 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr, } default: UNREACHABLE(); - return 0; } } @@ -1608,17 +3891,17 @@ int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr, int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr, const char* format) { - DCHECK(strncmp(format, "BImm", 4) == 0); + DCHECK_EQ(strncmp(format, "TImm", 4), 0); int64_t offset = 0; switch (format[5]) { - // BImmUncn - unconditional branch immediate. + // TImmUncn - unconditional branch immediate. case 'n': offset = instr->ImmUncondBranch(); break; - // BImmCond - conditional branch immediate. + // TImmCond - conditional branch immediate. case 'o': offset = instr->ImmCondBranch(); break; - // BImmCmpa - compare and branch immediate. + // TImmCmpa - compare and branch immediate. case 'm': offset = instr->ImmCmpBranch(); break; - // BImmTest - test and branch immediate. + // TImmTest - test and branch immediate. case 'e': offset = instr->ImmTestBranch(); break; default: UNREACHABLE(); } diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/arm64/disasm-arm64.h index 4b477bc438e8f1..c12d53b7e60734 100644 --- a/deps/v8/src/arm64/disasm-arm64.h +++ b/deps/v8/src/arm64/disasm-arm64.h @@ -5,6 +5,7 @@ #ifndef V8_ARM64_DISASM_ARM64_H #define V8_ARM64_DISASM_ARM64_H +#include "src/arm64/assembler-arm64.h" #include "src/arm64/decoder-arm64.h" #include "src/arm64/instructions-arm64.h" #include "src/globals.h" @@ -29,6 +30,13 @@ class DisassemblingDecoder : public DecoderVisitor { protected: virtual void ProcessOutput(Instruction* instr); + // Default output functions. The functions below implement a default way of + // printing elements in the disassembly. A sub-class can override these to + // customize the disassembly output. + + // Prints the name of a register. 
+ virtual void AppendRegisterNameToOutput(const CPURegister& reg); + void Format(Instruction* instr, const char* mnemonic, const char* format); void Substitute(Instruction* instr, const char* string); int SubstituteField(Instruction* instr, const char* format); diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc index bf2fde119e8a67..68e8d757c8834c 100644 --- a/deps/v8/src/arm64/frames-arm64.cc +++ b/deps/v8/src/arm64/frames-arm64.cc @@ -19,15 +19,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; } Register JavaScriptFrame::context_register() { return cp; } Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); - return no_reg; -} - - -Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } -Register StubFailureTrampolineFrame::context_register() { return cp; } -Register StubFailureTrampolineFrame::constant_pool_pointer_register() { - UNREACHABLE(); - return no_reg; } diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc index 4b419d6dbdb152..f4dbd75533bd6d 100644 --- a/deps/v8/src/arm64/instructions-arm64.cc +++ b/deps/v8/src/arm64/instructions-arm64.cc @@ -21,7 +21,7 @@ bool Instruction::IsLoad() const { if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { return Mask(LoadStorePairLBit) != 0; } else { - LoadStoreOp op = static_cast(Mask(LoadStoreOpMask)); + LoadStoreOp op = static_cast(Mask(LoadStoreMask)); switch (op) { case LDRB_w: case LDRH_w: @@ -32,8 +32,12 @@ bool Instruction::IsLoad() const { case LDRSH_w: case LDRSH_x: case LDRSW_x: + case LDR_b: + case LDR_h: case LDR_s: - case LDR_d: return true; + case LDR_d: + case LDR_q: + return true; default: return false; } } @@ -48,14 +52,18 @@ bool Instruction::IsStore() const { if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { return Mask(LoadStorePairLBit) == 0; } else { - LoadStoreOp op = static_cast(Mask(LoadStoreOpMask)); + LoadStoreOp op = static_cast(Mask(LoadStoreMask)); switch (op) { case STRB_w: case STRH_w: case STR_w: case STR_x: + case STR_b: + case STR_h: case STR_s: - case STR_d: return true; + case STR_d: + case STR_q: + return true; default: return false; } } @@ -136,46 +144,50 @@ uint64_t Instruction::ImmLogical() { } } UNREACHABLE(); - return 0; } - -float Instruction::ImmFP32() { - // ImmFP: abcdefgh (8 bits) - // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits) - // where B is b ^ 1 - uint32_t bits = ImmFP(); - uint32_t bit7 = (bits >> 7) & 0x1; - uint32_t bit6 = (bits >> 6) & 0x1; - uint32_t bit5_to_0 = bits & 0x3f; - uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19); - - return rawbits_to_float(result); +uint32_t Instruction::ImmNEONabcdefgh() const { + return ImmNEONabc() << 5 | ImmNEONdefgh(); } +float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); } + +double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); } -double Instruction::ImmFP64() { - // ImmFP: abcdefgh (8 bits) - // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 - // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits) - // where B is b ^ 1 - uint32_t bits = ImmFP(); - uint64_t bit7 = (bits >> 7) & 0x1; - uint64_t bit6 = (bits >> 6) & 0x1; - uint64_t bit5_to_0 = bits & 0x3f; - uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48); +float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); } - return rawbits_to_double(result); +double Instruction::ImmNEONFP64() const { + return Imm8ToFP64(ImmNEONabcdefgh()); } +unsigned 
CalcLSDataSize(LoadStoreOp op) { + DCHECK_EQ(static_cast(LSSize_offset + LSSize_width), + kInstructionSize * 8); + unsigned size = static_cast(op) >> LSSize_offset; + if ((op & LSVector_mask) != 0) { + // Vector register memory operations encode the access size in the "size" + // and "opc" fields. + if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) { + size = kQRegSizeLog2; + } + } + return size; +} -LSDataSize CalcLSPairDataSize(LoadStorePairOp op) { +unsigned CalcLSPairDataSize(LoadStorePairOp op) { + static_assert(kXRegSize == kDRegSize, "X and D registers must be same size."); + static_assert(kWRegSize == kSRegSize, "W and S registers must be same size."); switch (op) { + case STP_q: + case LDP_q: + return kQRegSizeLog2; case STP_x: case LDP_x: case STP_d: - case LDP_d: return LSDoubleWord; - default: return LSWord; + case LDP_d: + return kXRegSizeLog2; + default: + return kWRegSizeLog2; } } @@ -334,7 +346,405 @@ uint64_t InstructionSequence::InlineData() const { return payload; } +VectorFormat VectorFormatHalfWidth(VectorFormat vform) { + DCHECK(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D || + vform == kFormatH || vform == kFormatS || vform == kFormatD); + switch (vform) { + case kFormat8H: + return kFormat8B; + case kFormat4S: + return kFormat4H; + case kFormat2D: + return kFormat2S; + case kFormatH: + return kFormatB; + case kFormatS: + return kFormatH; + case kFormatD: + return kFormatS; + default: + UNREACHABLE(); + } +} + +VectorFormat VectorFormatDoubleWidth(VectorFormat vform) { + DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S || + vform == kFormatB || vform == kFormatH || vform == kFormatS); + switch (vform) { + case kFormat8B: + return kFormat8H; + case kFormat4H: + return kFormat4S; + case kFormat2S: + return kFormat2D; + case kFormatB: + return kFormatH; + case kFormatH: + return kFormatS; + case kFormatS: + return kFormatD; + default: + UNREACHABLE(); + } +} + +VectorFormat VectorFormatFillQ(VectorFormat vform) { + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return kFormat16B; + case kFormatH: + case kFormat4H: + case kFormat8H: + return kFormat8H; + case kFormatS: + case kFormat2S: + case kFormat4S: + return kFormat4S; + case kFormatD: + case kFormat1D: + case kFormat2D: + return kFormat2D; + default: + UNREACHABLE(); + } +} + +VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) { + switch (vform) { + case kFormat4H: + return kFormat8B; + case kFormat8H: + return kFormat16B; + case kFormat2S: + return kFormat4H; + case kFormat4S: + return kFormat8H; + case kFormat1D: + return kFormat2S; + case kFormat2D: + return kFormat4S; + default: + UNREACHABLE(); + } +} + +VectorFormat VectorFormatDoubleLanes(VectorFormat vform) { + DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S); + switch (vform) { + case kFormat8B: + return kFormat16B; + case kFormat4H: + return kFormat8H; + case kFormat2S: + return kFormat4S; + default: + UNREACHABLE(); + } +} + +VectorFormat VectorFormatHalfLanes(VectorFormat vform) { + DCHECK(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S); + switch (vform) { + case kFormat16B: + return kFormat8B; + case kFormat8H: + return kFormat4H; + case kFormat4S: + return kFormat2S; + default: + UNREACHABLE(); + } +} + +VectorFormat ScalarFormatFromLaneSize(int laneSize) { + switch (laneSize) { + case 8: + return kFormatB; + case 16: + return kFormatH; + case 32: + return kFormatS; + case 64: + return kFormatD; + default: + UNREACHABLE(); 
+ } +} + +VectorFormat ScalarFormatFromFormat(VectorFormat vform) { + return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform)); +} + +unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) { + return RegisterSizeInBitsFromFormat(vform) / 8; +} + +unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) { + DCHECK_NE(vform, kFormatUndefined); + switch (vform) { + case kFormatB: + return kBRegSizeInBits; + case kFormatH: + return kHRegSizeInBits; + case kFormatS: + return kSRegSizeInBits; + case kFormatD: + return kDRegSizeInBits; + case kFormat8B: + case kFormat4H: + case kFormat2S: + case kFormat1D: + return kDRegSizeInBits; + default: + return kQRegSizeInBits; + } +} + +unsigned LaneSizeInBitsFromFormat(VectorFormat vform) { + DCHECK_NE(vform, kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 8; + case kFormatH: + case kFormat4H: + case kFormat8H: + return 16; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 32; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 64; + default: + UNREACHABLE(); + } +} + +int LaneSizeInBytesFromFormat(VectorFormat vform) { + return LaneSizeInBitsFromFormat(vform) / 8; +} + +int LaneSizeInBytesLog2FromFormat(VectorFormat vform) { + DCHECK_NE(vform, kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 0; + case kFormatH: + case kFormat4H: + case kFormat8H: + return 1; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 2; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 3; + default: + UNREACHABLE(); + } +} + +int LaneCountFromFormat(VectorFormat vform) { + DCHECK_NE(vform, kFormatUndefined); + switch (vform) { + case kFormat16B: + return 16; + case kFormat8B: + case kFormat8H: + return 8; + case kFormat4H: + case kFormat4S: + return 4; + case kFormat2S: + case kFormat2D: + return 2; + case kFormat1D: + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: + return 1; + default: + UNREACHABLE(); + } +} + +int MaxLaneCountFromFormat(VectorFormat vform) { + DCHECK_NE(vform, kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 16; + case kFormatH: + case kFormat4H: + case kFormat8H: + return 8; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 4; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 2; + default: + UNREACHABLE(); + } +} + +// Does 'vform' indicate a vector format or a scalar format? 
+bool IsVectorFormat(VectorFormat vform) { + DCHECK_NE(vform, kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: + return false; + default: + return true; + } +} + +int64_t MaxIntFromFormat(VectorFormat vform) { + return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} + +int64_t MinIntFromFormat(VectorFormat vform) { + return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform)); +} + +uint64_t MaxUintFromFormat(VectorFormat vform) { + return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} + +NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(IntegerFormatMap()); +} + +NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(format); +} + +NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format0, + const NEONFormatMap* format1) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(format0, format1); +} + +NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format0, + const NEONFormatMap* format1, + const NEONFormatMap* format2) { + instrbits_ = instr->InstructionBits(); + SetFormatMaps(format0, format1, format2); +} + +void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0, + const NEONFormatMap* format1, + const NEONFormatMap* format2) { + DCHECK_NOT_NULL(format0); + formats_[0] = format0; + formats_[1] = (format1 == NULL) ? formats_[0] : format1; + formats_[2] = (format2 == NULL) ? formats_[1] : format2; +} + +void NEONFormatDecoder::SetFormatMap(unsigned index, + const NEONFormatMap* format) { + DCHECK_LT(index, arraysize(formats_)); + DCHECK_NOT_NULL(format); + formats_[index] = format; +} +const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) { + return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder); +} + +const char* NEONFormatDecoder::Substitute(const char* string, + SubstitutionMode mode0, + SubstitutionMode mode1, + SubstitutionMode mode2) { + snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0), + GetSubstitute(1, mode1), GetSubstitute(2, mode2)); + return form_buffer_; +} + +const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) { + if ((instrbits_ & NEON_Q) != 0) { + snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic); + return mne_buffer_; + } + return mnemonic; +} + +VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) { + return GetVectorFormat(formats_[format_index]); +} + +VectorFormat NEONFormatDecoder::GetVectorFormat( + const NEONFormatMap* format_map) { + static const VectorFormat vform[] = { + kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H, + kFormat2S, kFormat4S, kFormat1D, kFormat2D, kFormatB, + kFormatH, kFormatS, kFormatD}; + DCHECK_LT(GetNEONFormat(format_map), arraysize(vform)); + return vform[GetNEONFormat(format_map)]; +} + +const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) { + if (mode == kFormat) { + return NEONFormatAsString(GetNEONFormat(formats_[index])); + } + DCHECK_EQ(mode, kPlaceholder); + return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index])); +} + +NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) { + return format_map->map[PickBits(format_map->bits)]; +} + +const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) { + static const char* formats[] = 
{"undefined", "8b", "16b", "4h", "8h", + "2s", "4s", "1d", "2d", "b", + "h", "s", "d"}; + DCHECK_LT(format, arraysize(formats)); + return formats[format]; +} + +const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) { + DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) || + (format == NF_D) || (format == NF_UNDEF)); + static const char* formats[] = { + "undefined", "undefined", "undefined", "undefined", "undefined", + "undefined", "undefined", "undefined", "undefined", "'B", + "'H", "'S", "'D"}; + return formats[format]; +} + +uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) { + uint8_t result = 0; + for (unsigned b = 0; b < kNEONFormatMaxBits; b++) { + if (bits[b] == 0) break; + result <<= 1; + result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1; + } + return result; +} } // namespace internal } // namespace v8 diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h index 6110a14722bb30..b6b38166bfb153 100644 --- a/deps/v8/src/arm64/instructions-arm64.h +++ b/deps/v8/src/arm64/instructions-arm64.h @@ -23,13 +23,17 @@ typedef uint32_t Instr; // symbol is defined as uint32_t/uint64_t initialized with the desired bit // pattern. Otherwise, the same symbol is declared as an external float/double. #if defined(ARM64_DEFINE_FP_STATICS) +#define DEFINE_FLOAT16(name, value) extern const uint16_t name = value #define DEFINE_FLOAT(name, value) extern const uint32_t name = value #define DEFINE_DOUBLE(name, value) extern const uint64_t name = value #else +#define DEFINE_FLOAT16(name, value) extern const float16 name #define DEFINE_FLOAT(name, value) extern const float name #define DEFINE_DOUBLE(name, value) extern const double name #endif // defined(ARM64_DEFINE_FP_STATICS) +DEFINE_FLOAT16(kFP16PositiveInfinity, 0x7c00); +DEFINE_FLOAT16(kFP16NegativeInfinity, 0xfc00); DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000); DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000); DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL); @@ -47,19 +51,14 @@ DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001); // The default NaN values (for FPCR.DN=1). DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL); DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000); +DEFINE_FLOAT16(kFP16DefaultNaN, 0x7e00); +#undef DEFINE_FLOAT16 #undef DEFINE_FLOAT #undef DEFINE_DOUBLE - -enum LSDataSize { - LSByte = 0, - LSHalfword = 1, - LSWord = 2, - LSDoubleWord = 3 -}; - -LSDataSize CalcLSPairDataSize(LoadStorePairOp op); +unsigned CalcLSDataSize(LoadStoreOp op); +unsigned CalcLSPairDataSize(LoadStorePairOp op); enum ImmBranchType { UnknownBranchType = 0, @@ -82,9 +81,10 @@ enum FPRounding { FPNegativeInfinity = 0x2, FPZero = 0x3, - // The final rounding mode is only available when explicitly specified by the - // instruction (such as with fcvta). It cannot be set in FPCR. - FPTieAway + // The final rounding modes are only available when explicitly specified by + // the instruction (such as with fcvta). They cannot be set in FPCR. 
+  FPTieAway,
+  FPRoundOdd
 };
 
 enum Reg31Mode {
@@ -152,14 +152,29 @@ class Instruction {
   }
 
   uint64_t ImmLogical();
+  unsigned ImmNEONabcdefgh() const;
   float ImmFP32();
   double ImmFP64();
+  float ImmNEONFP32() const;
+  double ImmNEONFP64() const;
 
-  LSDataSize SizeLSPair() const {
+  unsigned SizeLS() const {
+    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
+  }
+
+  unsigned SizeLSPair() const {
     return CalcLSPairDataSize(
         static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
   }
 
+  int NEONLSIndex(int access_size_shift) const {
+    int q = NEONQ();
+    int s = NEONS();
+    int size = NEONLSSize();
+    int index = (q << 3) | (s << 2) | size;
+    return index >> access_size_shift;
+  }
+
   // Helpers.
   bool IsCondBranchImm() const {
     return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
@@ -181,6 +196,33 @@ class Instruction {
     return BranchType() != UnknownBranchType;
   }
 
+  static float Imm8ToFP32(uint32_t imm8) {
+    // Imm8: abcdefgh (8 bits)
+    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+    // where B is b ^ 1
+    uint32_t bits = imm8;
+    uint32_t bit7 = (bits >> 7) & 0x1;
+    uint32_t bit6 = (bits >> 6) & 0x1;
+    uint32_t bit5_to_0 = bits & 0x3f;
+    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+    return bit_cast<float>(result);
+  }
+
+  static double Imm8ToFP64(uint32_t imm8) {
+    // Imm8: abcdefgh (8 bits)
+    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+    // where B is b ^ 1
+    uint32_t bits = imm8;
+    uint64_t bit7 = (bits >> 7) & 0x1;
+    uint64_t bit6 = (bits >> 6) & 0x1;
+    uint64_t bit5_to_0 = bits & 0x3f;
+    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+    return bit_cast<double>(result);
+  }
+
   bool IsLdrLiteral() const {
     return Mask(LoadLiteralFMask) == LoadLiteralFixed;
   }
@@ -300,7 +342,6 @@ class Instruction {
         return ImmTestBranch_width;
       default:
         UNREACHABLE();
-        return 0;
     }
   }
@@ -417,6 +458,48 @@ class Instruction {
   void SetBranchImmTarget(Instruction* target);
 };
 
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+  kFormatUndefined = 0xffffffff,
+  kFormat8B = NEON_8B,
+  kFormat16B = NEON_16B,
+  kFormat4H = NEON_4H,
+  kFormat8H = NEON_8H,
+  kFormat2S = NEON_2S,
+  kFormat4S = NEON_4S,
+  kFormat1D = NEON_1D,
+  kFormat2D = NEON_2D,
+
+  // Scalar formats. We add the scalar bit to distinguish between scalar and
+  // vector enumerations; the bit is always set in the encoding of scalar ops
+  // and always clear for vector ops. Although kFormatD and kFormat1D appear
+  // to be the same, their meaning is subtly different. The first is a scalar
+  // operation, the second a vector operation that only affects one lane.
+ kFormatB = NEON_B | NEONScalar, + kFormatH = NEON_H | NEONScalar, + kFormatS = NEON_S | NEONScalar, + kFormatD = NEON_D | NEONScalar +}; + +VectorFormat VectorFormatHalfWidth(VectorFormat vform); +VectorFormat VectorFormatDoubleWidth(VectorFormat vform); +VectorFormat VectorFormatDoubleLanes(VectorFormat vform); +VectorFormat VectorFormatHalfLanes(VectorFormat vform); +VectorFormat ScalarFormatFromLaneSize(int lanesize); +VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform); +VectorFormat VectorFormatFillQ(VectorFormat vform); +VectorFormat ScalarFormatFromFormat(VectorFormat vform); +unsigned RegisterSizeInBitsFromFormat(VectorFormat vform); +unsigned RegisterSizeInBytesFromFormat(VectorFormat vform); +int LaneSizeInBytesFromFormat(VectorFormat vform); +unsigned LaneSizeInBitsFromFormat(VectorFormat vform); +int LaneSizeInBytesLog2FromFormat(VectorFormat vform); +int LaneCountFromFormat(VectorFormat vform); +int MaxLaneCountFromFormat(VectorFormat vform); +bool IsVectorFormat(VectorFormat vform); +int64_t MaxIntFromFormat(VectorFormat vform); +int64_t MinIntFromFormat(VectorFormat vform); +uint64_t MaxUintFromFormat(VectorFormat vform); // Where Instruction looks at instructions generated by the Assembler, // InstructionSequence looks at instructions sequences generated by the @@ -504,7 +587,7 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize; // // For example: // -// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS); +// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS); // will print the registers and fp registers only once. // // __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM); @@ -517,24 +600,201 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize; // stops tracing the registers. const unsigned kDebuggerTracingDirectivesMask = 3 << 6; enum DebugParameters { - NO_PARAM = 0, - BREAK = 1 << 0, - LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code. - LOG_REGS = 1 << 2, // Log general purpose registers. - LOG_FP_REGS = 1 << 3, // Log floating-point registers. - LOG_SYS_REGS = 1 << 4, // Log the status flags. - LOG_WRITE = 1 << 5, // Log any memory write. - - LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS, - LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE, + NO_PARAM = 0, + BREAK = 1 << 0, + LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code. + LOG_REGS = 1 << 2, // Log general purpose registers. + LOG_VREGS = 1 << 3, // Log NEON and floating-point registers. + LOG_SYS_REGS = 1 << 4, // Log the status flags. + LOG_WRITE = 1 << 5, // Log any memory write. + + LOG_NONE = 0, + LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS, + LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE, // Trace control. - TRACE_ENABLE = 1 << 6, - TRACE_DISABLE = 2 << 6, + TRACE_ENABLE = 1 << 6, + TRACE_DISABLE = 2 << 6, TRACE_OVERRIDE = 3 << 6 }; +enum NEONFormat { + NF_UNDEF = 0, + NF_8B = 1, + NF_16B = 2, + NF_4H = 3, + NF_8H = 4, + NF_2S = 5, + NF_4S = 6, + NF_1D = 7, + NF_2D = 8, + NF_B = 9, + NF_H = 10, + NF_S = 11, + NF_D = 12 +}; + +static const unsigned kNEONFormatMaxBits = 6; +struct NEONFormatMap { + // The bit positions in the instruction to consider. + uint8_t bits[kNEONFormatMaxBits]; + + // Mapping from concatenated bits to format. + NEONFormat map[1 << kNEONFormatMaxBits]; +}; + +class NEONFormatDecoder { + public: + enum SubstitutionMode { kPlaceholder, kFormat }; + + // Construct a format decoder with increasingly specific format maps for each + // substitution. 
If no format map is specified, the default is the integer
+  // format map.
+  explicit NEONFormatDecoder(const Instruction* instr);
+  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
+  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
+                    const NEONFormatMap* format1);
+  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
+                    const NEONFormatMap* format1, const NEONFormatMap* format2);
+
+  // Set the format mapping for all or individual substitutions.
+  void SetFormatMaps(const NEONFormatMap* format0,
+                     const NEONFormatMap* format1 = NULL,
+                     const NEONFormatMap* format2 = NULL);
+  void SetFormatMap(unsigned index, const NEONFormatMap* format);
+
+  // Substitute %s in the input string with the placeholder string for each
+  // register, ie. "'B", "'H", etc.
+  const char* SubstitutePlaceholders(const char* string);
+
+  // Substitute %s in the input string with a new string based on the
+  // substitution mode.
+  const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
+                         SubstitutionMode mode1 = kFormat,
+                         SubstitutionMode mode2 = kFormat);
+
+  // Append a "2" to a mnemonic string based on the state of the Q bit.
+  const char* Mnemonic(const char* mnemonic);
+
+  VectorFormat GetVectorFormat(int format_index = 0);
+  VectorFormat GetVectorFormat(const NEONFormatMap* format_map);
+
+  // Built in mappings for common cases.
+
+  // The integer format map uses three bits (Q, size<1:0>) to encode the
+  // "standard" set of NEON integer vector formats.
+  static const NEONFormatMap* IntegerFormatMap() {
+    static const NEONFormatMap map = {
+        {23, 22, 30},
+        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
+    return &map;
+  }
+
+  // The long integer format map uses two bits (size<1:0>) to encode the
+  // long set of NEON integer vector formats. These are used in narrow, wide
+  // and long operations.
+  static const NEONFormatMap* LongIntegerFormatMap() {
+    static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
+    return &map;
+  }
+
+  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
+  // formats: NF_2S, NF_4S, NF_2D.
+  static const NEONFormatMap* FPFormatMap() {
+    // The FP format map assumes two bits (Q, size<0>) are used to encode the
+    // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
+    static const NEONFormatMap map = {{22, 30},
+                                      {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
+    return &map;
+  }
+
+  // The load/store format map uses three bits (Q, 11, 10) to encode the
+  // set of NEON vector formats.
+  static const NEONFormatMap* LoadStoreFormatMap() {
+    static const NEONFormatMap map = {
+        {11, 10, 30},
+        {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
+    return &map;
+  }
+
+  // The logical format map uses one bit (Q) to encode the NEON vector format:
+  // NF_8B, NF_16B.
+  static const NEONFormatMap* LogicalFormatMap() {
+    static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
+    return &map;
+  }
+
+  // The triangular format map uses between two and five bits to encode the NEON
+  // vector format:
+  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
+  // x1000->2S, x1001->4S, 10001->2D, all others undefined.
+  static const NEONFormatMap* TriangularFormatMap() {
+    static const NEONFormatMap map = {
+        {19, 18, 17, 16, 30},
+        {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+         NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+         NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
+         NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
+    return &map;
+  }
+
+  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
+  // formats: NF_B, NF_H, NF_S, NF_D.
+  static const NEONFormatMap* ScalarFormatMap() {
+    static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
+    return &map;
+  }
+
+  // The long scalar format map uses two bits (size<1:0>) to encode the longer
+  // NEON scalar formats: NF_H, NF_S, NF_D.
+  static const NEONFormatMap* LongScalarFormatMap() {
+    static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
+    return &map;
+  }
+
+  // The FP scalar format map assumes one bit (size<0>) is used to encode the
+  // NEON FP scalar formats: NF_S, NF_D.
+  static const NEONFormatMap* FPScalarFormatMap() {
+    static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
+    return &map;
+  }
+
+  // The triangular scalar format map uses between one and four bits to encode
+  // the NEON FP scalar formats:
+  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
+  static const NEONFormatMap* TriangularScalarFormatMap() {
+    static const NEONFormatMap map = {
+        {19, 18, 17, 16},
+        {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
+         NF_B, NF_S, NF_B, NF_H, NF_B}};
+    return &map;
+  }
+
+ private:
+  // Get a pointer to a string that represents the format or placeholder for
+  // the specified substitution index, based on the format map and instruction.
+  const char* GetSubstitute(int index, SubstitutionMode mode);
+
+  // Get the NEONFormat enumerated value for bits obtained from the
+  // instruction based on the specified format mapping.
+  NEONFormat GetNEONFormat(const NEONFormatMap* format_map);
+
+  // Convert a NEONFormat into a string.
+  static const char* NEONFormatAsString(NEONFormat format);
+
+  // Convert a NEONFormat into a register placeholder string.
+  static const char* NEONFormatAsPlaceholder(NEONFormat format);
+
+  // Select bits from instrbits_ defined by the bits array, concatenate them,
+  // and return the value.
+  uint8_t PickBits(const uint8_t bits[]);
+
+  Instr instrbits_;
+  const NEONFormatMap* formats_[3];
+  char form_buffer_[64];
+  char mne_buffer_[16];
+};
 
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index c6e27f8ee32e65..2ed67ba57c2594 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -377,7 +377,7 @@ void Instrument::InstrumentLoadStore(Instruction* instr) {
   static Counter* load_fp_counter = GetCounter("Load FP");
   static Counter* store_fp_counter = GetCounter("Store FP");
 
-  switch (instr->Mask(LoadStoreOpMask)) {
+  switch (instr->Mask(LoadStoreMask)) {
     case STRB_w:  // Fall through.
     case STRH_w:  // Fall through.
     case STR_w:   // Fall through.
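As an illustrative aside (not part of the patch), the format-map machinery added above can be exercised standalone: the bit positions listed in NEONFormatMap::bits are concatenated most-significant-first, the same way NEONFormatDecoder::PickBits does it, and the resulting value indexes NEONFormatMap::map. In the sketch below the simplified types, the kNames table, the main() harness, and the sample instruction word are assumptions made for the example only; they mirror, but are not, the declarations in instructions-arm64.h.

#include <cstdint>
#include <cstdio>

// Simplified mirrors of the declarations in instructions-arm64.h.
enum NEONFormat { NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D };
static const unsigned kNEONFormatMaxBits = 6;
struct NEONFormatMap {
  uint8_t bits[kNEONFormatMaxBits];        // Instruction bit positions, MSB first.
  NEONFormat map[1 << kNEONFormatMaxBits]; // Concatenated bits -> format.
};

// Same selection logic as NEONFormatDecoder::PickBits: walk the bit list until
// the 0 terminator and concatenate the selected instruction bits.
uint8_t PickBits(uint32_t instrbits, const uint8_t bits[]) {
  uint8_t result = 0;
  for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
    if (bits[b] == 0) break;
    result <<= 1;
    result |= ((instrbits & (1u << bits[b])) == 0) ? 0 : 1;
  }
  return result;
}

int main() {
  // The integer format map from above: bits {23, 22, 30}, i.e. size<1:0> and Q.
  static const NEONFormatMap kIntegerMap = {
      {23, 22, 30},
      {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
  static const char* const kNames[] = {"undefined", "8b", "16b", "4h", "8h",
                                       "2s", "4s", "1d", "2d"};
  // Hypothetical instruction word with size<1:0> = 01 (bit 22) and Q = 1
  // (bit 30): the concatenated index is 0b011 = 3, which maps to the 8H format.
  uint32_t instrbits = (1u << 22) | (1u << 30);
  uint8_t index = PickBits(instrbits, kIntegerMap.bits);
  printf("index %d -> %s\n", index, kNames[kIntegerMap.map[index]]);
  return 0;
}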
@@ -595,6 +595,159 @@ void Instrument::VisitFPFixedPointConvert(Instruction* instr) { counter->Increment(); } +void Instrument::VisitNEON2RegMisc(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEON3Different(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEON3Same(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONAcrossLanes(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONByIndexedElement(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONCopy(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONExtract(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONLoadStoreMultiStruct(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONLoadStoreSingleStruct(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONModifiedImmediate(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONPerm(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONScalar2RegMisc(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONScalar3Diff(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONScalar3Same(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONScalarByIndexedElement(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONScalarCopy(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONScalarPairwise(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONScalarShiftImmediate(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONShiftImmediate(Instruction* instr) { + USE(instr); + Update(); + static 
Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + +void Instrument::VisitNEONTable(Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} void Instrument::VisitUnallocated(Instruction* instr) { Update(); diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc index 887adddf29829c..29078ed5d2efaf 100644 --- a/deps/v8/src/arm64/interface-descriptors-arm64.cc +++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc @@ -49,6 +49,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return x5; } const Register StringCompareDescriptor::LeftRegister() { return x1; } const Register StringCompareDescriptor::RightRegister() { return x0; } +const Register StringConcatDescriptor::ArgumentsCountRegister() { return x0; } + const Register ApiGetterDescriptor::HolderRegister() { return x0; } const Register ApiGetterDescriptor::CallbackRegister() { return x3; } @@ -174,6 +176,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void CallVarargsDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x2 : arguments list (FixedArray) + // x4 : arguments list length (untagged) + Register registers[] = {x1, x0, x2, x4}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + void CallForwardVarargsDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // x1: target @@ -183,6 +195,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void CallWithSpreadDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x2 : the object to spread + Register registers[] = {x1, x0, x2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + +void CallWithArrayLikeDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x1 : the target to call + // x2 : the arguments list + Register registers[] = {x1, x2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + +void ConstructVarargsDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x3 : the new target + // x2 : arguments list (FixedArray) + // x4 : arguments list length (untagged) + Register registers[] = {x1, x3, x0, x2, x4}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // x3: new target @@ -193,6 +233,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } +void ConstructWithSpreadDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x3 : the new target + // x2 : the object to spread + Register registers[] = {x1, x3, x0, x2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + +void 
ConstructWithArrayLikeDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // x1 : the target to call + // x3 : the new target + // x2 : the arguments list + Register registers[] = {x1, x3, x2}; + data->InitializePlatformSpecific(arraysize(registers), registers); +} + void ConstructStubDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // x3: new target @@ -407,8 +466,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific( Register registers[] = { x0, // the value to pass to the generator x1, // the JSGeneratorObject to resume - x2, // the resume mode (tagged) - x3 // SuspendFlags (tagged) + x2 // the resume mode (tagged) }; data->InitializePlatformSpecific(arraysize(registers), registers); } diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h index e2fbc8f4af9bcf..2815f31881b71d 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h @@ -35,36 +35,28 @@ MemOperand UntagSmiMemOperand(Register object, int offset) { return MemOperand(object, offset + (kSmiShift / kBitsPerByte)); } - -void MacroAssembler::And(const Register& rd, - const Register& rn, +void TurboAssembler::And(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, AND); } - -void MacroAssembler::Ands(const Register& rd, - const Register& rn, +void TurboAssembler::Ands(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ANDS); } - -void MacroAssembler::Tst(const Register& rn, - const Operand& operand) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Tst(const Register& rn, const Operand& operand) { + DCHECK(allow_macro_instructions()); LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS); } - -void MacroAssembler::Bic(const Register& rd, - const Register& rn, +void TurboAssembler::Bic(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, BIC); } @@ -73,53 +65,42 @@ void MacroAssembler::Bic(const Register& rd, void MacroAssembler::Bics(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, BICS); } - -void MacroAssembler::Orr(const Register& rd, - const Register& rn, +void TurboAssembler::Orr(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORR); } - -void MacroAssembler::Orn(const Register& rd, - const Register& rn, +void TurboAssembler::Orn(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORN); } - -void MacroAssembler::Eor(const Register& rd, - const Register& rn, +void TurboAssembler::Eor(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EOR); } - -void 
MacroAssembler::Eon(const Register& rd, - const Register& rn, +void TurboAssembler::Eon(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EON); } - -void MacroAssembler::Ccmp(const Register& rn, - const Operand& operand, - StatusFlags nzcv, - Condition cond) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Ccmp(const Register& rn, const Operand& operand, + StatusFlags nzcv, Condition cond) { + DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN); } else { @@ -132,7 +113,7 @@ void MacroAssembler::Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP); } else { @@ -140,11 +121,9 @@ void MacroAssembler::Ccmn(const Register& rn, } } - -void MacroAssembler::Add(const Register& rd, - const Register& rn, +void TurboAssembler::Add(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && IsImmAddSub(-operand.ImmediateValue())) { AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB); @@ -153,10 +132,9 @@ void MacroAssembler::Add(const Register& rd, } } -void MacroAssembler::Adds(const Register& rd, - const Register& rn, +void TurboAssembler::Adds(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && IsImmAddSub(-operand.ImmediateValue())) { AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB); @@ -165,11 +143,9 @@ void MacroAssembler::Adds(const Register& rd, } } - -void MacroAssembler::Sub(const Register& rd, - const Register& rn, +void TurboAssembler::Sub(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && IsImmAddSub(-operand.ImmediateValue())) { AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD); @@ -178,11 +154,9 @@ void MacroAssembler::Sub(const Register& rd, } } - -void MacroAssembler::Subs(const Register& rd, - const Register& rn, +void TurboAssembler::Subs(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && IsImmAddSub(-operand.ImmediateValue())) { AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD); @@ -191,22 +165,18 @@ void MacroAssembler::Subs(const Register& rd, } } - -void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Cmn(const Register& rn, const Operand& operand) { + DCHECK(allow_macro_instructions()); Adds(AppropriateZeroRegFor(rn), rn, operand); } - -void MacroAssembler::Cmp(const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Cmp(const Register& rn, const Operand& operand) { + DCHECK(allow_macro_instructions()); 
Subs(AppropriateZeroRegFor(rn), rn, operand); } - -void MacroAssembler::Neg(const Register& rd, - const Operand& operand) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Neg(const Register& rd, const Operand& operand) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); if (operand.IsImmediate()) { Mov(rd, -operand.ImmediateValue()); @@ -215,18 +185,14 @@ void MacroAssembler::Neg(const Register& rd, } } - -void MacroAssembler::Negs(const Register& rd, - const Operand& operand) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Negs(const Register& rd, const Operand& operand) { + DCHECK(allow_macro_instructions()); Subs(rd, AppropriateZeroRegFor(rd), operand); } - -void MacroAssembler::Adc(const Register& rd, - const Register& rn, +void TurboAssembler::Adc(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC); } @@ -235,7 +201,7 @@ void MacroAssembler::Adc(const Register& rd, void MacroAssembler::Adcs(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC); } @@ -244,7 +210,7 @@ void MacroAssembler::Adcs(const Register& rd, void MacroAssembler::Sbc(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC); } @@ -253,7 +219,7 @@ void MacroAssembler::Sbc(const Register& rd, void MacroAssembler::Sbcs(const Register& rd, const Register& rn, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC); } @@ -261,7 +227,7 @@ void MacroAssembler::Sbcs(const Register& rd, void MacroAssembler::Ngc(const Register& rd, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); Register zr = AppropriateZeroRegFor(rd); Sbc(rd, zr, operand); @@ -270,41 +236,38 @@ void MacroAssembler::Ngc(const Register& rd, void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); Register zr = AppropriateZeroRegFor(rd); Sbcs(rd, zr, operand); } - -void MacroAssembler::Mvn(const Register& rd, uint64_t imm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Mvn(const Register& rd, uint64_t imm) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); Mov(rd, ~imm); } - -#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ -void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ - DCHECK(allow_macro_instructions_); \ - LoadStoreMacro(REG, addr, OP); \ -} +#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ + void TurboAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ + DCHECK(allow_macro_instructions()); \ + LoadStoreMacro(REG, addr, OP); \ + } LS_MACRO_LIST(DEFINE_FUNCTION) #undef DEFINE_FUNCTION - #define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ - void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \ + void TurboAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \ const MemOperand& addr) { \ - DCHECK(allow_macro_instructions_); \ + DCHECK(allow_macro_instructions()); \ 
LoadStorePairMacro(REG, REG2, addr, OP); \ } LSPAIR_MACRO_LIST(DEFINE_FUNCTION) #undef DEFINE_FUNCTION #define DECLARE_FUNCTION(FN, OP) \ - void MacroAssembler::FN(const Register& rt, const Register& rn) { \ - DCHECK(allow_macro_instructions_); \ + void TurboAssembler::FN(const Register& rt, const Register& rn) { \ + DCHECK(allow_macro_instructions()); \ OP(rt, rn); \ } LDA_STL_MACRO_LIST(DECLARE_FUNCTION) @@ -313,47 +276,39 @@ LDA_STL_MACRO_LIST(DECLARE_FUNCTION) #define DECLARE_FUNCTION(FN, OP) \ void MacroAssembler::FN(const Register& rs, const Register& rt, \ const Register& rn) { \ - DCHECK(allow_macro_instructions_); \ + DCHECK(allow_macro_instructions()); \ OP(rs, rt, rn); \ } STLX_MACRO_LIST(DECLARE_FUNCTION) #undef DECLARE_FUNCTION -void MacroAssembler::Asr(const Register& rd, - const Register& rn, +void TurboAssembler::Asr(const Register& rd, const Register& rn, unsigned shift) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); asr(rd, rn, shift); } - -void MacroAssembler::Asr(const Register& rd, - const Register& rn, +void TurboAssembler::Asr(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); asrv(rd, rn, rm); } - -void MacroAssembler::B(Label* label) { +void TurboAssembler::B(Label* label) { b(label); CheckVeneerPool(false, false); } - -void MacroAssembler::B(Condition cond, Label* label) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::B(Condition cond, Label* label) { + DCHECK(allow_macro_instructions()); B(label, cond); } - -void MacroAssembler::Bfi(const Register& rd, - const Register& rn, - unsigned lsb, +void TurboAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); bfi(rd, rn, lsb, width); } @@ -363,40 +318,35 @@ void MacroAssembler::Bfxil(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); bfxil(rd, rn, lsb, width); } - -void MacroAssembler::Bind(Label* label) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Bind(Label* label) { + DCHECK(allow_macro_instructions()); bind(label); } - -void MacroAssembler::Bl(Label* label) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Bl(Label* label) { + DCHECK(allow_macro_instructions()); bl(label); } - -void MacroAssembler::Blr(const Register& xn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Blr(const Register& xn) { + DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); blr(xn); } - -void MacroAssembler::Br(const Register& xn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Br(const Register& xn) { + DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); br(xn); } - -void MacroAssembler::Brk(int code) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Brk(int code) { + DCHECK(allow_macro_instructions()); brk(code); } @@ -404,7 +354,7 @@ void MacroAssembler::Brk(int code) { void MacroAssembler::Cinc(const Register& rd, const Register& rn, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); cinc(rd, rn, cond); @@ -414,31 +364,27 @@ void MacroAssembler::Cinc(const Register& rd, void MacroAssembler::Cinv(const Register& rd, const Register& rn, Condition cond) 
{ - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); cinv(rd, rn, cond); } - -void MacroAssembler::Cls(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Cls(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); cls(rd, rn); } - -void MacroAssembler::Clz(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Clz(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); clz(rd, rn); } - -void MacroAssembler::Cneg(const Register& rd, - const Register& rn, +void TurboAssembler::Cneg(const Register& rd, const Register& rn, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); cneg(rd, rn, cond); @@ -449,7 +395,7 @@ void MacroAssembler::Cneg(const Register& rd, // due to the truncation side-effect when used on W registers. void MacroAssembler::CzeroX(const Register& rd, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsSP() && rd.Is64Bits()); DCHECK((cond != al) && (cond != nv)); csel(rd, xzr, rd, cond); @@ -461,7 +407,7 @@ void MacroAssembler::CzeroX(const Register& rd, void MacroAssembler::CmovX(const Register& rd, const Register& rn, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsSP()); DCHECK(rd.Is64Bits() && rn.Is64Bits()); DCHECK((cond != al) && (cond != nv)); @@ -470,9 +416,8 @@ void MacroAssembler::CmovX(const Register& rd, } } - -void MacroAssembler::Cset(const Register& rd, Condition cond) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Cset(const Register& rd, Condition cond) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); cset(rd, cond); @@ -480,18 +425,15 @@ void MacroAssembler::Cset(const Register& rd, Condition cond) { void MacroAssembler::Csetm(const Register& rd, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); csetm(rd, cond); } - -void MacroAssembler::Csinc(const Register& rd, - const Register& rn, - const Register& rm, - Condition cond) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Csinc(const Register& rd, const Register& rn, + const Register& rm, Condition cond) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); csinc(rd, rn, rm, cond); @@ -502,7 +444,7 @@ void MacroAssembler::Csinv(const Register& rd, const Register& rn, const Register& rm, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); csinv(rd, rn, rm, cond); @@ -513,7 +455,7 @@ void MacroAssembler::Csneg(const Register& rd, const Register& rn, const Register& rm, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); csneg(rd, rn, rm, cond); @@ -521,19 +463,18 @@ void MacroAssembler::Csneg(const Register& rd, void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); dmb(domain, type); } void MacroAssembler::Dsb(BarrierDomain 
domain, BarrierType type) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); dsb(domain, type); } - -void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Debug(const char* message, uint32_t code, Instr params) { + DCHECK(allow_macro_instructions()); debug(message, code, params); } @@ -542,47 +483,39 @@ void MacroAssembler::Extr(const Register& rd, const Register& rn, const Register& rm, unsigned lsb) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); extr(rd, rn, rm, lsb); } - -void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fabs(const VRegister& fd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); fabs(fd, fn); } - -void MacroAssembler::Fadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fadd(fd, fn, fm); } - -void MacroAssembler::Fccmp(const FPRegister& fn, - const FPRegister& fm, - StatusFlags nzcv, - Condition cond) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fccmp(const VRegister& fn, const VRegister& fm, + StatusFlags nzcv, Condition cond) { + DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); fccmp(fn, fm, nzcv, cond); } - -void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcmp(const VRegister& fn, const VRegister& fm) { + DCHECK(allow_macro_instructions()); fcmp(fn, fm); } - -void MacroAssembler::Fcmp(const FPRegister& fn, double value) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcmp(const VRegister& fn, double value) { + DCHECK(allow_macro_instructions()); if (value != 0.0) { UseScratchRegisterScope temps(this); - FPRegister tmp = temps.AcquireSameSizeAs(fn); + VRegister tmp = temps.AcquireSameSizeAs(fn); Fmov(tmp, value); fcmp(fn, tmp); } else { @@ -590,364 +523,281 @@ void MacroAssembler::Fcmp(const FPRegister& fn, double value) { } } - -void MacroAssembler::Fcsel(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - Condition cond) { - DCHECK(allow_macro_instructions_); +void MacroAssembler::Fcsel(const VRegister& fd, const VRegister& fn, + const VRegister& fm, Condition cond) { + DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); fcsel(fd, fn, fm, cond); } - -void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvt(const VRegister& fd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); fcvt(fd, fn); } - -void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtas(const Register& rd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtas(rd, fn); } - -void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtau(const Register& rd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtau(rd, fn); } - -void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtms(const Register& rd, 
const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtms(rd, fn); } - -void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtmu(const Register& rd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtmu(rd, fn); } - -void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtns(const Register& rd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtns(rd, fn); } - -void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtnu(const Register& rd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtnu(rd, fn); } - -void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtzs(const Register& rd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtzs(rd, fn); } -void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fcvtzu(const Register& rd, const VRegister& fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtzu(rd, fn); } - -void MacroAssembler::Fdiv(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fdiv(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fdiv(fd, fn, fm); } - -void MacroAssembler::Fmadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - DCHECK(allow_macro_instructions_); +void MacroAssembler::Fmadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + DCHECK(allow_macro_instructions()); fmadd(fd, fn, fm, fa); } - -void MacroAssembler::Fmax(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fmax(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fmax(fd, fn, fm); } - -void MacroAssembler::Fmaxnm(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void MacroAssembler::Fmaxnm(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fmaxnm(fd, fn, fm); } - -void MacroAssembler::Fmin(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fmin(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fmin(fd, fn, fm); } - -void MacroAssembler::Fminnm(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void MacroAssembler::Fminnm(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fminnm(fd, fn, fm); } - -void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fmov(VRegister fd, VRegister fn) { + DCHECK(allow_macro_instructions()); // Only emit an instruction if fd and fn are different, and they are both D // registers. 
fmov(s0, s0) is not a no-op because it clears the top word of
   // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
-  // top of q0, but FPRegister does not currently support Q registers.
+  // top of q0, but VRegister does not currently support Q registers.
   if (!fd.Is(fn) || !fd.Is64Bits()) {
     fmov(fd, fn);
   }
 }
 
-
-void MacroAssembler::Fmov(FPRegister fd, Register rn) {
-  DCHECK(allow_macro_instructions_);
+void TurboAssembler::Fmov(VRegister fd, Register rn) {
+  DCHECK(allow_macro_instructions());
   fmov(fd, rn);
 }
 
+void TurboAssembler::Fmov(VRegister vd, double imm) {
+  DCHECK(allow_macro_instructions());
 
-void MacroAssembler::Fmov(FPRegister fd, double imm) {
-  DCHECK(allow_macro_instructions_);
-  if (fd.Is32Bits()) {
-    Fmov(fd, static_cast<float>(imm));
+  if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+    Fmov(vd, static_cast<float>(imm));
     return;
   }
 
-  DCHECK(fd.Is64Bits());
+  DCHECK(vd.Is1D() || vd.Is2D());
   if (IsImmFP64(imm)) {
-    fmov(fd, imm);
-  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
-    fmov(fd, xzr);
+    fmov(vd, imm);
   } else {
-    Ldr(fd, imm);
+    uint64_t bits = bit_cast<uint64_t>(imm);
+    if (vd.IsScalar()) {
+      if (bits == 0) {
+        fmov(vd, xzr);
+      } else {
+        Ldr(vd, imm);
+      }
+    } else {
+      // TODO(all): consider NEON support for load literal.
+      Movi(vd, bits);
+    }
   }
 }
 
-
-void MacroAssembler::Fmov(FPRegister fd, float imm) {
-  DCHECK(allow_macro_instructions_);
-  if (fd.Is64Bits()) {
-    Fmov(fd, static_cast<double>(imm));
+void TurboAssembler::Fmov(VRegister vd, float imm) {
+  DCHECK(allow_macro_instructions());
+  if (vd.Is1D() || vd.Is2D()) {
+    Fmov(vd, static_cast<double>(imm));
     return;
   }
 
-  DCHECK(fd.Is32Bits());
+  DCHECK(vd.Is1S() || vd.Is2S() || vd.Is4S());
   if (IsImmFP32(imm)) {
-    fmov(fd, imm);
-  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
-    fmov(fd, wzr);
+    fmov(vd, imm);
   } else {
-    UseScratchRegisterScope temps(this);
-    Register tmp = temps.AcquireW();
-    // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
-    Mov(tmp, float_to_rawbits(imm));
-    Fmov(fd, tmp);
+    uint32_t bits = bit_cast<uint32_t>(imm);
+    if (vd.IsScalar()) {
+      if (bits == 0) {
+        fmov(vd, wzr);
+      } else {
+        UseScratchRegisterScope temps(this);
+        Register tmp = temps.AcquireW();
+        // TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
+        Mov(tmp, bit_cast<uint32_t>(imm));
+        Fmov(vd, tmp);
+      }
+    } else {
+      // TODO(all): consider NEON support for load literal.
+ Movi(vd, bits); + } } } - -void MacroAssembler::Fmov(Register rd, FPRegister fn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fmov(Register rd, VRegister fn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fmov(rd, fn); } - -void MacroAssembler::Fmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - DCHECK(allow_macro_instructions_); +void MacroAssembler::Fmsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + DCHECK(allow_macro_instructions()); fmsub(fd, fn, fm, fa); } - -void MacroAssembler::Fmul(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fmul(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fmul(fd, fn, fm); } - -void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); - fneg(fd, fn); -} - - -void MacroAssembler::Fnmadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - DCHECK(allow_macro_instructions_); +void MacroAssembler::Fnmadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + DCHECK(allow_macro_instructions()); fnmadd(fd, fn, fm, fa); } - -void MacroAssembler::Fnmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa) { - DCHECK(allow_macro_instructions_); +void MacroAssembler::Fnmsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa) { + DCHECK(allow_macro_instructions()); fnmsub(fd, fn, fm, fa); } - -void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); - frinta(fd, fn); -} - - -void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); - frintm(fd, fn); -} - - -void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); - frintn(fd, fn); -} - - -void MacroAssembler::Frintp(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); - frintp(fd, fn); -} - - -void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); - frintz(fd, fn); -} - - -void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) { - DCHECK(allow_macro_instructions_); - fsqrt(fd, fn); -} - - -void MacroAssembler::Fsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Fsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm) { + DCHECK(allow_macro_instructions()); fsub(fd, fn, fm); } void MacroAssembler::Hint(SystemHint code) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); hint(code); } void MacroAssembler::Hlt(int code) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); hlt(code); } void MacroAssembler::Isb() { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); isb(); } - -void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) { - DCHECK(allow_macro_instructions_); - ldr(rt, imm); +void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) { + DCHECK(allow_macro_instructions()); + ldr(rt, operand); } - -void MacroAssembler::Ldr(const CPURegister& rt, double imm) { - 
DCHECK(allow_macro_instructions_);
+void TurboAssembler::Ldr(const CPURegister& rt, double imm) {
+  DCHECK(allow_macro_instructions());
   DCHECK(rt.Is64Bits());
-  ldr(rt, Immediate(double_to_rawbits(imm)));
+  ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
 }
 
-
-void MacroAssembler::Lsl(const Register& rd,
-                         const Register& rn,
+void TurboAssembler::Lsl(const Register& rd, const Register& rn,
                          unsigned shift) {
-  DCHECK(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
   lsl(rd, rn, shift);
 }
 
-
-void MacroAssembler::Lsl(const Register& rd,
-                         const Register& rn,
+void TurboAssembler::Lsl(const Register& rd, const Register& rn,
                          const Register& rm) {
-  DCHECK(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
   lslv(rd, rn, rm);
 }
 
-
-void MacroAssembler::Lsr(const Register& rd,
-                         const Register& rn,
+void TurboAssembler::Lsr(const Register& rd, const Register& rn,
                          unsigned shift) {
-  DCHECK(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
   lsr(rd, rn, shift);
 }
 
-
-void MacroAssembler::Lsr(const Register& rd,
-                         const Register& rn,
+void TurboAssembler::Lsr(const Register& rd, const Register& rn,
                          const Register& rm) {
-  DCHECK(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
   lsrv(rd, rn, rm);
 }
 
-
-void MacroAssembler::Madd(const Register& rd,
-                          const Register& rn,
-                          const Register& rm,
-                          const Register& ra) {
-  DCHECK(allow_macro_instructions_);
+void TurboAssembler::Madd(const Register& rd, const Register& rn,
+                          const Register& rm, const Register& ra) {
+  DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
   madd(rd, rn, rm, ra);
 }
 
-
-void MacroAssembler::Mneg(const Register& rd,
-                          const Register& rn,
+void TurboAssembler::Mneg(const Register& rd, const Register& rn,
                           const Register& rm) {
-  DCHECK(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
   mneg(rd, rn, rm);
 }
 
-
-void MacroAssembler::Mov(const Register& rd, const Register& rn) {
-  DCHECK(allow_macro_instructions_);
+void TurboAssembler::Mov(const Register& rd, const Register& rn) {
+  DCHECK(allow_macro_instructions());
   DCHECK(!rd.IsZero());
   // Emit a register move only if the registers are distinct, or if they are
   // not X registers.
Note that mov(w0, w0) is not a no-op because it clears @@ -959,53 +809,45 @@ void MacroAssembler::Mov(const Register& rd, const Register& rn) { void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); movk(rd, imm, shift); } - -void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Mrs(const Register& rt, SystemRegister sysreg) { + DCHECK(allow_macro_instructions()); DCHECK(!rt.IsZero()); mrs(rt, sysreg); } void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); msr(sysreg, rt); } - -void MacroAssembler::Msub(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Msub(const Register& rd, const Register& rn, + const Register& rm, const Register& ra) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); msub(rd, rn, rm, ra); } - -void MacroAssembler::Mul(const Register& rd, - const Register& rn, +void TurboAssembler::Mul(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); mul(rd, rn, rm); } - -void MacroAssembler::Rbit(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Rbit(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rbit(rd, rn); } - -void MacroAssembler::Ret(const Register& xn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Ret(const Register& xn) { + DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); ret(xn); CheckVeneerPool(false, false); @@ -1013,39 +855,33 @@ void MacroAssembler::Ret(const Register& xn) { void MacroAssembler::Rev(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rev(rd, rn); } - -void MacroAssembler::Rev16(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Rev16(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rev16(rd, rn); } - -void MacroAssembler::Rev32(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Rev32(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rev32(rd, rn); } - -void MacroAssembler::Ror(const Register& rd, - const Register& rs, +void TurboAssembler::Ror(const Register& rd, const Register& rs, unsigned shift) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ror(rd, rs, shift); } - -void MacroAssembler::Ror(const Register& rd, - const Register& rn, +void TurboAssembler::Ror(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rorv(rd, rn, rm); } @@ -1055,34 +891,27 @@ void MacroAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sbfiz(rd, rn, lsb, width); } - -void MacroAssembler::Sbfx(const Register& rd, - const Register& rn, - unsigned lsb, +void TurboAssembler::Sbfx(const 
Register& rd, const Register& rn, unsigned lsb, unsigned width) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sbfx(rd, rn, lsb, width); } - -void MacroAssembler::Scvtf(const FPRegister& fd, - const Register& rn, +void TurboAssembler::Scvtf(const VRegister& fd, const Register& rn, unsigned fbits) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); scvtf(fd, rn, fbits); } - -void MacroAssembler::Sdiv(const Register& rd, - const Register& rn, +void TurboAssembler::Sdiv(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sdiv(rd, rn, rm); } @@ -1092,7 +921,7 @@ void MacroAssembler::Smaddl(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smaddl(rd, rn, rm, ra); } @@ -1102,16 +931,14 @@ void MacroAssembler::Smsubl(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smsubl(rd, rn, rm, ra); } - -void MacroAssembler::Smull(const Register& rd, - const Register& rn, +void TurboAssembler::Smull(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smull(rd, rn, rm); } @@ -1120,73 +947,59 @@ void MacroAssembler::Smull(const Register& rd, void MacroAssembler::Smulh(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smulh(rd, rn, rm); } - -void MacroAssembler::Umull(const Register& rd, const Register& rn, +void TurboAssembler::Umull(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); umaddl(rd, rn, rm, xzr); } - -void MacroAssembler::Sxtb(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Sxtb(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxtb(rd, rn); } - -void MacroAssembler::Sxth(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Sxth(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxth(rd, rn); } - -void MacroAssembler::Sxtw(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Sxtw(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxtw(rd, rn); } - -void MacroAssembler::Ubfiz(const Register& rd, - const Register& rn, - unsigned lsb, +void TurboAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ubfiz(rd, rn, lsb, width); } - -void MacroAssembler::Ubfx(const Register& rd, - const Register& rn, - unsigned lsb, +void TurboAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ubfx(rd, rn, lsb, width); } - -void MacroAssembler::Ucvtf(const FPRegister& fd, - const Register& rn, +void 
TurboAssembler::Ucvtf(const VRegister& fd, const Register& rn, unsigned fbits) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); ucvtf(fd, rn, fbits); } - -void MacroAssembler::Udiv(const Register& rd, - const Register& rn, +void TurboAssembler::Udiv(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); udiv(rd, rn, rm); } @@ -1196,7 +1009,7 @@ void MacroAssembler::Umaddl(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); umaddl(rd, rn, rm, ra); } @@ -1206,28 +1019,25 @@ void MacroAssembler::Umsubl(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); umsubl(rd, rn, rm, ra); } - -void MacroAssembler::Uxtb(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Uxtb(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxtb(rd, rn); } - -void MacroAssembler::Uxth(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Uxth(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxth(rd, rn); } - -void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Uxtw(const Register& rd, const Register& rn) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxtw(rd, rn); } @@ -1236,13 +1046,13 @@ void MacroAssembler::AlignAndSetCSPForFrame() { int sp_alignment = ActivationFrameAlignment(); // AAPCS64 mandates at least 16-byte alignment. 
DCHECK(sp_alignment >= 16); - DCHECK(base::bits::IsPowerOfTwo32(sp_alignment)); + DCHECK(base::bits::IsPowerOfTwo(sp_alignment)); Bic(csp, StackPointer(), sp_alignment - 1); SetStackPointer(csp); } -void MacroAssembler::BumpSystemStackPointer(const Operand& space) { - DCHECK(!csp.Is(sp_)); +void TurboAssembler::BumpSystemStackPointer(const Operand& space) { + DCHECK(!csp.Is(StackPointer())); if (!TmpList()->IsEmpty()) { Sub(csp, StackPointer(), space); } else { @@ -1276,18 +1086,16 @@ void MacroAssembler::BumpSystemStackPointer(const Operand& space) { AssertStackConsistency(); } - -void MacroAssembler::SyncSystemStackPointer() { +void TurboAssembler::SyncSystemStackPointer() { DCHECK(emit_debug_code()); - DCHECK(!csp.Is(sp_)); + DCHECK(!csp.Is(StackPointer())); { InstructionAccurateScope scope(this); mov(csp, StackPointer()); } AssertStackConsistency(); } - -void MacroAssembler::InitializeRootRegister() { +void TurboAssembler::InitializeRootRegister() { ExternalReference roots_array_start = ExternalReference::roots_array_start(isolate()); Mov(root, Operand(roots_array_start)); @@ -1304,8 +1112,7 @@ void MacroAssembler::SmiTag(Register dst, Register src) { void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); } - -void MacroAssembler::SmiUntag(Register dst, Register src) { +void TurboAssembler::SmiUntag(Register dst, Register src) { STATIC_ASSERT(kXRegSizeInBits == static_cast(kSmiShift + kSmiValueSize)); DCHECK(dst.Is64Bits() && src.Is64Bits()); @@ -1315,12 +1122,9 @@ void MacroAssembler::SmiUntag(Register dst, Register src) { Asr(dst, src, kSmiShift); } +void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } -void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } - - -void MacroAssembler::SmiUntagToDouble(FPRegister dst, - Register src, +void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src, UntagMode mode) { DCHECK(dst.Is64Bits() && src.Is64Bits()); if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) { @@ -1329,9 +1133,7 @@ void MacroAssembler::SmiUntagToDouble(FPRegister dst, Scvtf(dst, src, kSmiShift); } - -void MacroAssembler::SmiUntagToFloat(FPRegister dst, - Register src, +void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src, UntagMode mode) { DCHECK(dst.Is32Bits() && src.Is64Bits()); if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) { @@ -1356,9 +1158,7 @@ void MacroAssembler::SmiTagAndPush(Register src1, Register src2) { Push(src1.W(), wzr, src2.W(), wzr); } - -void MacroAssembler::JumpIfSmi(Register value, - Label* smi_label, +void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, Label* not_smi_label) { STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0)); // Check if the tag bit is set. 
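// Illustrative sketch, not part of the patch: the Smi tag/untag arithmetic that
// SmiTag/SmiUntag and JumpIfSmi above rely on, written as plain C++. It assumes
// the layout implied by the STATIC_ASSERTs in this hunk (kSmiTag == 0,
// kSmiTagSize == 1, kSmiShift + kSmiValueSize == kXRegSizeInBits) with the
// usual arm64 value of kSmiShift == 32.
#include <cstdint>

constexpr int kSketchSmiShift = 32;  // assumed value of kSmiShift

inline int64_t SmiTagSketch(int32_t value) {
  // The payload lives in the upper word; the low (tag) bit stays 0.
  return static_cast<int64_t>(value) << kSketchSmiShift;
}

inline int32_t SmiUntagSketch(int64_t smi) {
  // Mirrors Asr(dst, src, kSmiShift): an arithmetic shift right recovers the
  // signed payload.
  return static_cast<int32_t>(smi >> kSketchSmiShift);
}

inline bool IsSmiSketch(int64_t tagged_word) {
  // Mirrors the JumpIfSmi test: a clear tag bit means Smi, a set bit means
  // heap object.
  return (tagged_word & 1) == 0;
}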
@@ -1442,7 +1242,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) { Bic(untagged_obj, obj, kHeapObjectTag); } -void MacroAssembler::jmp(Label* L) { B(L); } +void TurboAssembler::jmp(Label* L) { B(L); } void MacroAssembler::IsObjectJSStringType(Register object, Register type, @@ -1463,17 +1263,29 @@ void MacroAssembler::IsObjectJSStringType(Register object, } } - -void MacroAssembler::Push(Handle handle) { +void TurboAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register tmp = temps.AcquireX(); Mov(tmp, Operand(handle)); Push(tmp); } -void MacroAssembler::Push(Smi* smi) { Push(Handle(smi, isolate())); } +void TurboAssembler::Push(Smi* smi) { + UseScratchRegisterScope temps(this); + Register tmp = temps.AcquireX(); + Mov(tmp, Operand(smi)); + Push(tmp); +} + +void MacroAssembler::PushObject(Handle handle) { + if (handle->IsHeapObject()) { + Push(Handle::cast(handle)); + } else { + Push(Smi::cast(*handle)); + } +} -void MacroAssembler::Claim(int64_t count, uint64_t unit_size) { +void TurboAssembler::Claim(int64_t count, uint64_t unit_size) { DCHECK(count >= 0); uint64_t size = count * unit_size; @@ -1490,10 +1302,9 @@ void MacroAssembler::Claim(int64_t count, uint64_t unit_size) { Sub(StackPointer(), StackPointer(), size); } - -void MacroAssembler::Claim(const Register& count, uint64_t unit_size) { +void TurboAssembler::Claim(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; - DCHECK(base::bits::IsPowerOfTwo64(unit_size)); + DCHECK(base::bits::IsPowerOfTwo(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); const Operand size(count, LSL, shift); @@ -1512,7 +1323,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) { void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) { - DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size)); + DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; const Operand size(count_smi, (shift >= 0) ? (LSL) : (LSR), @@ -1529,8 +1340,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) { Sub(StackPointer(), StackPointer(), size); } - -void MacroAssembler::Drop(int64_t count, uint64_t unit_size) { +void TurboAssembler::Drop(int64_t count, uint64_t unit_size) { DCHECK(count >= 0); uint64_t size = count * unit_size; @@ -1550,10 +1360,9 @@ void MacroAssembler::Drop(int64_t count, uint64_t unit_size) { } } - -void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { +void TurboAssembler::Drop(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; - DCHECK(base::bits::IsPowerOfTwo64(unit_size)); + DCHECK(base::bits::IsPowerOfTwo(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); const Operand size(count, LSL, shift); @@ -1575,7 +1384,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) { - DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size)); + DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size)); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; const Operand size(count_smi, (shift >= 0) ? 
(LSL) : (LSR), @@ -1613,8 +1422,7 @@ void MacroAssembler::CompareAndBranch(const Register& lhs, } } - -void MacroAssembler::TestAndBranchIfAnySet(const Register& reg, +void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); @@ -1627,8 +1435,7 @@ void MacroAssembler::TestAndBranchIfAnySet(const Register& reg, } } - -void MacroAssembler::TestAndBranchIfAllClear(const Register& reg, +void TurboAssembler::TestAndBranchIfAllClear(const Register& reg, const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc index 2282c941baf0ad..acecfb950c7930 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/arm64/macro-assembler-arm64.cc @@ -27,38 +27,16 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size, CodeObjectRequired create_code_object) - : Assembler(isolate, buffer, buffer_size), - generating_stub_(false), -#if DEBUG - allow_macro_instructions_(true), -#endif - has_frame_(false), - isolate_(isolate), - use_real_aborts_(true), - sp_(jssp), - tmp_list_(DefaultTmpList()), - fptmp_list_(DefaultFPTmpList()) { - if (create_code_object == CodeObjectRequired::kYes) { - code_object_ = - Handle::New(isolate_->heap()->undefined_value(), isolate_); - } -} - - -CPURegList MacroAssembler::DefaultTmpList() { - return CPURegList(ip0, ip1); -} + : TurboAssembler(isolate, buffer, buffer_size, create_code_object) {} +CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); } -CPURegList MacroAssembler::DefaultFPTmpList() { +CPURegList TurboAssembler::DefaultFPTmpList() { return CPURegList(fp_scratch1, fp_scratch2); } - -void MacroAssembler::LogicalMacro(const Register& rd, - const Register& rn, - const Operand& operand, - LogicalOp op) { +void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn, + const Operand& operand, LogicalOp op) { UseScratchRegisterScope temps(this); if (operand.NeedsRelocation(this)) { @@ -165,9 +143,8 @@ void MacroAssembler::LogicalMacro(const Register& rd, } } - -void MacroAssembler::Mov(const Register& rd, uint64_t imm) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Mov(const Register& rd, uint64_t imm) { + DCHECK(allow_macro_instructions()); DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); DCHECK(!rd.IsZero()); @@ -244,11 +221,9 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) { } } - -void MacroAssembler::Mov(const Register& rd, - const Operand& operand, +void TurboAssembler::Mov(const Register& rd, const Operand& operand, DiscardMoveMode discard_mode) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); // Provide a swap register for instructions that need to write into the @@ -257,7 +232,7 @@ void MacroAssembler::Mov(const Register& rd, Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; if (operand.NeedsRelocation(this)) { - Ldr(dst, operand.immediate()); + Ldr(dst, operand); } else if (operand.IsImmediate()) { // Call the macro assembler for generic immediates. @@ -300,9 +275,174 @@ void MacroAssembler::Mov(const Register& rd, } } +void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { + DCHECK(is_uint16(imm)); + int byte1 = (imm & 0xff); + int byte2 = ((imm >> 8) & 0xff); + if (byte1 == byte2) { + movi(vd.Is64Bits() ? 
vd.V8B() : vd.V16B(), byte1); + } else if (byte1 == 0) { + movi(vd, byte2, LSL, 8); + } else if (byte2 == 0) { + movi(vd, byte1); + } else if (byte1 == 0xff) { + mvni(vd, ~byte2 & 0xff, LSL, 8); + } else if (byte2 == 0xff) { + mvni(vd, ~byte1 & 0xff); + } else { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + movz(temp, imm); + dup(vd, temp); + } +} + +void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { + DCHECK(is_uint32(imm)); -void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { - DCHECK(allow_macro_instructions_); + uint8_t bytes[sizeof(imm)]; + memcpy(bytes, &imm, sizeof(imm)); + + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 4; ++i) { + if ((bytes[i] != 0) && (bytes[i] != 0xff)) { + all0orff = false; + break; + } + } + + if (all0orff == true) { + movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm)); + return; + } + } + + // Of the 4 bytes, only one byte is non-zero. + for (int i = 0; i < 4; i++) { + if ((imm & (0xff << (i * 8))) == imm) { + movi(vd, bytes[i], LSL, i * 8); + return; + } + } + + // Of the 4 bytes, only one byte is not 0xff. + for (int i = 0; i < 4; i++) { + uint32_t mask = ~(0xff << (i * 8)); + if ((imm & mask) == mask) { + mvni(vd, ~bytes[i] & 0xff, LSL, i * 8); + return; + } + } + + // Immediate is of the form 0x00MMFFFF. + if ((imm & 0xff00ffff) == 0x0000ffff) { + movi(vd, bytes[2], MSL, 16); + return; + } + + // Immediate is of the form 0x0000MMFF. + if ((imm & 0xffff00ff) == 0x000000ff) { + movi(vd, bytes[1], MSL, 8); + return; + } + + // Immediate is of the form 0xFFMM0000. + if ((imm & 0xff00ffff) == 0xff000000) { + mvni(vd, ~bytes[2] & 0xff, MSL, 16); + return; + } + // Immediate is of the form 0xFFFFMM00. + if ((imm & 0xffff00ff) == 0xffff0000) { + mvni(vd, ~bytes[1] & 0xff, MSL, 8); + return; + } + + // Top and bottom 16-bits are equal. + if (((imm >> 16) & 0xffff) == (imm & 0xffff)) { + Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff); + return; + } + + // Default case. + { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + Mov(temp, imm); + dup(vd, temp); + } +} + +void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 8; ++i) { + int byteval = (imm >> (i * 8)) & 0xff; + if (byteval != 0 && byteval != 0xff) { + all0orff = false; + break; + } + } + if (all0orff == true) { + movi(vd, imm); + return; + } + } + + // Top and bottom 32-bits are equal. + if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) { + Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff); + return; + } + + // Default case. + { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + Mov(temp, imm); + if (vd.Is1D()) { + mov(vd.D(), 0, temp); + } else { + dup(vd.V2D(), temp); + } + } +} + +void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, + int shift_amount) { + DCHECK(allow_macro_instructions()); + if (shift_amount != 0 || shift != LSL) { + movi(vd, imm, shift, shift_amount); + } else if (vd.Is8B() || vd.Is16B()) { + // 8-bit immediate. + DCHECK(is_uint8(imm)); + movi(vd, imm); + } else if (vd.Is4H() || vd.Is8H()) { + // 16-bit immediate. + Movi16bitHelper(vd, imm); + } else if (vd.Is2S() || vd.Is4S()) { + // 32-bit immediate. + Movi32bitHelper(vd, imm); + } else { + // 64-bit immediate. 
+ Movi64bitHelper(vd, imm); + } +} + +void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { + // TODO(all): Move 128-bit values in a more efficient way. + DCHECK(vd.Is128Bits()); + UseScratchRegisterScope temps(this); + Movi(vd.V2D(), lo); + Register temp = temps.AcquireX(); + Mov(temp, hi); + Ins(vd.V2D(), 1, temp); +} + +void TurboAssembler::Mvn(const Register& rd, const Operand& operand) { + DCHECK(allow_macro_instructions()); if (operand.NeedsRelocation(this)) { Ldr(rd, operand.immediate()); @@ -324,8 +464,7 @@ void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { } } - -unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { +unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { DCHECK((reg_size % 8) == 0); int count = 0; for (unsigned i = 0; i < (reg_size / 16); i++) { @@ -340,7 +479,7 @@ unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { // The movz instruction can generate immediates containing an arbitrary 16-bit // half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. -bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { +bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); } @@ -348,15 +487,13 @@ bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { // The movn instruction can generate immediates containing an arbitrary 16-bit // half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff. -bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { +bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { return IsImmMovz(~imm, reg_size); } - -void MacroAssembler::ConditionalCompareMacro(const Register& rn, +void TurboAssembler::ConditionalCompareMacro(const Register& rn, const Operand& operand, - StatusFlags nzcv, - Condition cond, + StatusFlags nzcv, Condition cond, ConditionalCompareOp op) { DCHECK((cond != al) && (cond != nv)); if (operand.NeedsRelocation(this)) { @@ -387,7 +524,7 @@ void MacroAssembler::Csel(const Register& rd, const Register& rn, const Operand& operand, Condition cond) { - DCHECK(allow_macro_instructions_); + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); if (operand.IsImmediate()) { @@ -419,8 +556,7 @@ void MacroAssembler::Csel(const Register& rd, } } - -bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst, +bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst, int64_t imm) { unsigned n, imm_s, imm_r; int reg_size = dst.SizeInBits(); @@ -442,7 +578,7 @@ bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst, return false; } -Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst, +Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst, int64_t imm, PreShiftImmMode mode) { int reg_size = dst.SizeInBits(); @@ -485,11 +621,8 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst, return Operand(dst); } - -void MacroAssembler::AddSubMacro(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, +void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn, + const Operand& operand, FlagsUpdate S, AddSubOp op) { if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && !operand.NeedsRelocation(this) && (S == LeaveFlags)) { @@ -534,11 +667,9 
@@ void MacroAssembler::AddSubMacro(const Register& rd, } } - -void MacroAssembler::AddSubWithCarryMacro(const Register& rd, +void TurboAssembler::AddSubWithCarryMacro(const Register& rd, const Register& rn, - const Operand& operand, - FlagsUpdate S, + const Operand& operand, FlagsUpdate S, AddSubWithCarryOp op) { DCHECK(rd.SizeInBits() == rn.SizeInBits()); UseScratchRegisterScope temps(this); @@ -585,12 +716,10 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd, } } - -void MacroAssembler::LoadStoreMacro(const CPURegister& rt, - const MemOperand& addr, - LoadStoreOp op) { +void TurboAssembler::LoadStoreMacro(const CPURegister& rt, + const MemOperand& addr, LoadStoreOp op) { int64_t offset = addr.offset(); - LSDataSize size = CalcLSDataSize(op); + unsigned size = CalcLSDataSize(op); // Check if an immediate offset fits in the immediate field of the // appropriate instruction. If not, emit two instructions to perform @@ -617,7 +746,7 @@ void MacroAssembler::LoadStoreMacro(const CPURegister& rt, } } -void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, +void TurboAssembler::LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairOp op) { @@ -625,7 +754,7 @@ void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, DCHECK(!addr.IsRegisterOffset()); int64_t offset = addr.offset(); - LSDataSize size = CalcLSPairDataSize(op); + unsigned size = CalcLSPairDataSize(op); // Check if the offset fits in the immediate field of the appropriate // instruction. If not, emit two instructions to perform the operation. @@ -695,9 +824,8 @@ void MacroAssembler::Store(const Register& rt, } } - -bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( - Label *label, ImmBranchType b_type) { +bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch( + Label* label, ImmBranchType b_type) { bool need_longer_range = false; // There are two situations in which we care about the offset being out of // range: @@ -721,9 +849,8 @@ bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( return need_longer_range; } - -void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { + DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); if (hint == kAdrNear) { @@ -756,8 +883,7 @@ void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { } } - -void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { +void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) { DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) && (bit == -1 || type >= kBranchTypeFirstUsingBit)); if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { @@ -776,9 +902,8 @@ void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { } } - -void MacroAssembler::B(Label* label, Condition cond) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::B(Label* label, Condition cond) { + DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); Label done; @@ -794,9 +919,8 @@ void MacroAssembler::B(Label* label, Condition cond) { bind(&done); } - -void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { + DCHECK(allow_macro_instructions()); Label done; bool need_extra_instructions = @@ -811,9 
+935,8 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { bind(&done); } - -void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { + DCHECK(allow_macro_instructions()); Label done; bool need_extra_instructions = @@ -828,9 +951,8 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { bind(&done); } - -void MacroAssembler::Cbnz(const Register& rt, Label* label) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Cbnz(const Register& rt, Label* label) { + DCHECK(allow_macro_instructions()); Label done; bool need_extra_instructions = @@ -845,9 +967,8 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) { bind(&done); } - -void MacroAssembler::Cbz(const Register& rt, Label* label) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Cbz(const Register& rt, Label* label) { + DCHECK(allow_macro_instructions()); Label done; bool need_extra_instructions = @@ -865,11 +986,9 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) { // Pseudo-instructions. - -void MacroAssembler::Abs(const Register& rd, const Register& rm, - Label* is_not_representable, - Label* is_representable) { - DCHECK(allow_macro_instructions_); +void TurboAssembler::Abs(const Register& rd, const Register& rm, + Label* is_not_representable, Label* is_representable) { + DCHECK(allow_macro_instructions()); DCHECK(AreSameSizeAndType(rd, rm)); Cmp(rm, 1); @@ -891,8 +1010,7 @@ void MacroAssembler::Abs(const Register& rd, const Register& rm, // Abstracted stack operations. - -void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, +void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3) { DCHECK(AreSameSizeAndType(src0, src1, src2, src3)); @@ -903,8 +1021,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, PushHelper(count, size, src0, src1, src2, src3); } - -void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, +void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3, const CPURegister& src4, const CPURegister& src5, const CPURegister& src6, const CPURegister& src7) { @@ -918,8 +1035,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, PushHelper(count - 4, size, src4, src5, src6, src7); } - -void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, +void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3) { // It is not valid to pop into the same register more than once in one // instruction, not even into the zero register. 
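// Illustrative sketch, not part of the patch: the range check behind the
// "inverted branch over an unconditional B" pattern used by B(cond), Tbz/Tbnz
// and Cbz/Cbnz above. Conditional and compare-and-branch forms encode a signed
// 19-bit word offset (about +/-1 MiB), test-and-branch forms only 14 bits, and
// a plain B encodes 26 bits (about +/-128 MiB), which is why the long form is
// emitted when NeedExtraInstructionsOrRegisterBranch() reports that the short
// form may not reach the label.
#include <cstdint>

constexpr int64_t kSketchInstrSize = 4;  // every A64 instruction is 4 bytes

inline bool ImmBranchReaches(int64_t byte_offset, int offset_bits) {
  // Branch immediates are word offsets, sign-extended from offset_bits bits.
  if (byte_offset % kSketchInstrSize != 0) return false;
  int64_t words = byte_offset / kSketchInstrSize;
  int64_t limit = int64_t{1} << (offset_bits - 1);
  return words >= -limit && words < limit;
}

inline bool CondOrCompareBranchReaches(int64_t off) {
  return ImmBranchReaches(off, 19);  // b.cond / cbz / cbnz
}
inline bool TestBranchReaches(int64_t off) {
  return ImmBranchReaches(off, 14);  // tbz / tbnz
}
inline bool UncondBranchReaches(int64_t off) {
  return ImmBranchReaches(off, 26);  // b
}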
@@ -934,8 +1050,7 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, PopPostamble(count, size); } - -void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, +void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3, const CPURegister& dst4, const CPURegister& dst5, const CPURegister& dst6, const CPURegister& dst7) { @@ -953,8 +1068,7 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, PopPostamble(count, size); } - -void MacroAssembler::Push(const Register& src0, const FPRegister& src1) { +void TurboAssembler::Push(const Register& src0, const VRegister& src1) { int size = src0.SizeInBytes() + src1.SizeInBytes(); PushPreamble(size); @@ -1016,8 +1130,7 @@ void MacroAssembler::PushPopQueue::PopQueued() { queued_.clear(); } - -void MacroAssembler::PushCPURegList(CPURegList registers) { +void TurboAssembler::PushCPURegList(CPURegList registers) { int size = registers.RegisterSizeInBytes(); PushPreamble(registers.Count(), size); @@ -1035,8 +1148,7 @@ void MacroAssembler::PushCPURegList(CPURegList registers) { } } - -void MacroAssembler::PopCPURegList(CPURegList registers) { +void TurboAssembler::PopCPURegList(CPURegList registers) { int size = registers.RegisterSizeInBytes(); // Pop up to four registers at a time because if the current stack pointer is @@ -1138,9 +1250,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { } } - -void MacroAssembler::PushHelper(int count, int size, - const CPURegister& src0, +void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3) { @@ -1178,11 +1288,8 @@ void MacroAssembler::PushHelper(int count, int size, } } - -void MacroAssembler::PopHelper(int count, int size, - const CPURegister& dst0, - const CPURegister& dst1, - const CPURegister& dst2, +void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0, + const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3) { // Ensure that we don't unintentially modify scratch or debug registers. 
InstructionAccurateScope scope(this); @@ -1219,8 +1326,7 @@ void MacroAssembler::PopHelper(int count, int size, } } - -void MacroAssembler::PushPreamble(Operand total_size) { +void TurboAssembler::PushPreamble(Operand total_size) { if (csp.Is(StackPointer())) { // If the current stack pointer is csp, then it must be aligned to 16 bytes // on entry and the total size of the specified registers must also be a @@ -1239,8 +1345,7 @@ void MacroAssembler::PushPreamble(Operand total_size) { } } - -void MacroAssembler::PopPostamble(Operand total_size) { +void TurboAssembler::PopPostamble(Operand total_size) { if (csp.Is(StackPointer())) { // If the current stack pointer is csp, then it must be aligned to 16 bytes // on entry and the total size of the specified registers must also be a @@ -1259,14 +1364,14 @@ void MacroAssembler::PopPostamble(Operand total_size) { } } -void MacroAssembler::PushPreamble(int count, int size) { +void TurboAssembler::PushPreamble(int count, int size) { PushPreamble(count * size); } -void MacroAssembler::PopPostamble(int count, int size) { +void TurboAssembler::PopPostamble(int count, int size) { PopPostamble(count * size); } -void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { +void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { if (offset.IsImmediate()) { DCHECK(offset.ImmediateValue() >= 0); } else if (emit_debug_code()) { @@ -1289,9 +1394,7 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) { Ldr(dst, MemOperand(StackPointer(), offset)); } - -void MacroAssembler::PokePair(const CPURegister& src1, - const CPURegister& src2, +void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2, int offset) { DCHECK(AreSameSizeAndType(src1, src2)); DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0)); @@ -1355,8 +1458,7 @@ void MacroAssembler::PopCalleeSavedRegisters() { ldp(d14, d15, tos); } - -void MacroAssembler::AssertStackConsistency() { +void TurboAssembler::AssertStackConsistency() { // Avoid emitting code when !use_real_abort() since non-real aborts cause too // much code to be generated. if (emit_debug_code() && use_real_aborts()) { @@ -1388,7 +1490,7 @@ void MacroAssembler::AssertStackConsistency() { } } -void MacroAssembler::AssertCspAligned() { +void TurboAssembler::AssertCspAligned() { if (emit_debug_code() && use_real_aborts()) { // TODO(titzer): use a real assert for alignment check? UseScratchRegisterScope scope(this); @@ -1397,7 +1499,7 @@ void MacroAssembler::AssertCspAligned() { } } -void MacroAssembler::AssertFPCRState(Register fpcr) { +void TurboAssembler::AssertFPCRState(Register fpcr) { if (emit_debug_code()) { Label unexpected_mode, done; UseScratchRegisterScope temps(this); @@ -1421,9 +1523,8 @@ void MacroAssembler::AssertFPCRState(Register fpcr) { } } - -void MacroAssembler::CanonicalizeNaN(const FPRegister& dst, - const FPRegister& src) { +void TurboAssembler::CanonicalizeNaN(const VRegister& dst, + const VRegister& src) { AssertFPCRState(); // Subtracting 0.0 preserves all inputs except for signalling NaNs, which @@ -1432,8 +1533,7 @@ void MacroAssembler::CanonicalizeNaN(const FPRegister& dst, Fsub(dst, src, fp_zero); } - -void MacroAssembler::LoadRoot(CPURegister destination, +void TurboAssembler::LoadRoot(CPURegister destination, Heap::RootListIndex index) { // TODO(jbramley): Most root values are constants, and can be synthesized // without a load. Refer to the ARM back end for details. 
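// Illustrative sketch, not part of the patch: the addressing used by LoadRoot
// above and StoreRoot below. The dedicated root register holds
// roots_array_start (see InitializeRootRegister earlier in this file), and a
// root is accessed at root + index * kPointerSize; kPointerSizeLog2 is assumed
// to be 3 (8-byte pointers) here.
#include <cstdint>

constexpr int kSketchPointerSizeLog2 = 3;  // assumption: 64-bit pointers

inline uintptr_t RootSlotAddress(uintptr_t roots_array_start, int index) {
  // Mirrors MemOperand(root, index << kPointerSizeLog2).
  return roots_array_start +
         (static_cast<uintptr_t>(index) << kSketchPointerSizeLog2);
}

inline uintptr_t LoadRootSketch(const uintptr_t* roots_array, int index) {
  // The same computation expressed as plain array indexing.
  return roots_array[index];
}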
@@ -1447,35 +1547,18 @@ void MacroAssembler::StoreRoot(Register source, Str(source, MemOperand(root, index << kPointerSizeLog2)); } - -void MacroAssembler::LoadTrueFalseRoots(Register true_root, - Register false_root) { - STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex); - Ldp(true_root, false_root, - MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2)); -} - - -void MacroAssembler::LoadHeapObject(Register result, - Handle object) { - Mov(result, Operand(object)); -} - void MacroAssembler::LoadObject(Register result, Handle object) { AllowDeferredHandleDereference heap_object_check; if (object->IsHeapObject()) { - LoadHeapObject(result, Handle::cast(object)); + Move(result, Handle::cast(object)); } else { - DCHECK(object->IsSmi()); - Mov(result, Operand(object)); + Mov(result, Operand(Smi::cast(*object))); } } -void MacroAssembler::Move(Register dst, Register src) { Mov(dst, src); } -void MacroAssembler::Move(Register dst, Handle x) { - LoadObject(dst, x); -} -void MacroAssembler::Move(Register dst, Smi* src) { Mov(dst, src); } +void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); } +void TurboAssembler::Move(Register dst, Handle x) { Mov(dst, x); } +void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); } void MacroAssembler::LoadInstanceDescriptors(Register map, Register descriptors) { @@ -1496,12 +1579,6 @@ void MacroAssembler::EnumLengthUntagged(Register dst, Register map) { } -void MacroAssembler::EnumLengthSmi(Register dst, Register map) { - EnumLengthUntagged(dst, map); - SmiTag(dst, dst); -} - - void MacroAssembler::LoadAccessor(Register dst, Register holder, int accessor_index, AccessorComponent accessor) { @@ -1570,51 +1647,6 @@ void MacroAssembler::CheckEnumCache(Register object, Register scratch0, B(ne, &next); } - -void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver, - Register scratch1, - Register scratch2, - Label* no_memento_found) { - Label map_check; - Label top_check; - ExternalReference new_space_allocation_top_adr = - ExternalReference::new_space_allocation_top_address(isolate()); - const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag; - const int kMementoLastWordOffset = - kMementoMapOffset + AllocationMemento::kSize - kPointerSize; - - // Bail out if the object is not in new space. - JumpIfNotInNewSpace(receiver, no_memento_found); - Add(scratch1, receiver, kMementoLastWordOffset); - // If the object is in new space, we need to check whether it is on the same - // page as the current top. - Mov(scratch2, new_space_allocation_top_adr); - Ldr(scratch2, MemOperand(scratch2)); - Eor(scratch2, scratch1, scratch2); - Tst(scratch2, ~Page::kPageAlignmentMask); - B(eq, &top_check); - // The object is on a different page than allocation top. Bail out if the - // object sits on the page boundary as no memento can follow and we cannot - // touch the memory following it. - Eor(scratch2, scratch1, receiver); - Tst(scratch2, ~Page::kPageAlignmentMask); - B(ne, no_memento_found); - // Continue with the actual map check. - jmp(&map_check); - // If top is on the same page as the current object, we need to check whether - // we are below top. - bind(&top_check); - Mov(scratch2, new_space_allocation_top_adr); - Ldr(scratch2, MemOperand(scratch2)); - Cmp(scratch1, scratch2); - B(ge, no_memento_found); - // Memento map check. 
- bind(&map_check); - Ldr(scratch1, MemOperand(receiver, kMementoMapOffset)); - Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map())); -} - - void MacroAssembler::InNewSpace(Register object, Condition cond, Label* branch) { @@ -1624,8 +1656,7 @@ void MacroAssembler::InNewSpace(Register object, MemoryChunk::kIsInNewSpaceMask, cond, branch); } - -void MacroAssembler::AssertSmi(Register object, BailoutReason reason) { +void TurboAssembler::AssertSmi(Register object, BailoutReason reason) { if (emit_debug_code()) { STATIC_ASSERT(kSmiTag == 0); Tst(object, kSmiTagMask); @@ -1642,6 +1673,17 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) { } } +void MacroAssembler::AssertFixedArray(Register object) { + if (emit_debug_code()) { + AssertNotSmi(object, kOperandIsASmiAndNotAFixedArray); + + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + + CompareObjectType(object, temp, temp, FIXED_ARRAY_TYPE); + Check(eq, kOperandIsNotAFixedArray); + } +} void MacroAssembler::AssertFunction(Register object) { if (emit_debug_code()) { @@ -1668,8 +1710,7 @@ void MacroAssembler::AssertBoundFunction(Register object) { } } -void MacroAssembler::AssertGeneratorObject(Register object, Register flags) { - // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h +void MacroAssembler::AssertGeneratorObject(Register object) { if (!emit_debug_code()) return; AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject); @@ -1681,16 +1722,11 @@ void MacroAssembler::AssertGeneratorObject(Register object, Register flags) { // Load instance type Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset)); - Label async, do_check; - STATIC_ASSERT(static_cast(SuspendFlags::kGeneratorTypeMask) == 4); - DCHECK(!temp.is(flags)); - B(&async, reg_bit_set, flags, 2); - + Label do_check; // Check if JSGeneratorObject Cmp(temp, JS_GENERATOR_OBJECT_TYPE); - jmp(&do_check); + B(eq, &do_check); - bind(&async); // Check if JSAsyncGeneratorObject Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE); @@ -1712,8 +1748,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, } } - -void MacroAssembler::AssertPositiveOrZero(Register value) { +void TurboAssembler::AssertPositiveOrZero(Register value) { if (emit_debug_code()) { Label done; int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit; @@ -1723,16 +1758,42 @@ void MacroAssembler::AssertPositiveOrZero(Register value) { } } -void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { +void TurboAssembler::CallStubDelayed(CodeStub* stub) { DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. - Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); + BlockPoolsScope scope(this); +#ifdef DEBUG + Label start_call; + Bind(&start_call); +#endif + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + Ldr(temp, Operand::EmbeddedCode(stub)); + Blr(temp); +#ifdef DEBUG + AssertSizeOfCodeGeneratedSince(&start_call, kCallSizeWithRelocation); +#endif } +void MacroAssembler::CallStub(CodeStub* stub) { + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET); +} void MacroAssembler::TailCallStub(CodeStub* stub) { Jump(stub->GetCode(), RelocInfo::CODE_TARGET); } +void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, + SaveFPRegsMode save_doubles) { + const Runtime::Function* f = Runtime::FunctionForId(fid); + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. + Mov(x0, f->nargs); + Mov(x1, ExternalReference(f, isolate())); + CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles)); +} void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, @@ -1783,7 +1844,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { JumpToExternalReference(ExternalReference(fid, isolate())); } -int MacroAssembler::ActivationFrameAlignment() { +int TurboAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_ARM64 // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -1799,14 +1860,12 @@ int MacroAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_ARM64 } - -void MacroAssembler::CallCFunction(ExternalReference function, +void TurboAssembler::CallCFunction(ExternalReference function, int num_of_reg_args) { CallCFunction(function, num_of_reg_args, 0); } - -void MacroAssembler::CallCFunction(ExternalReference function, +void TurboAssembler::CallCFunction(ExternalReference function, int num_of_reg_args, int num_of_double_args) { UseScratchRegisterScope temps(this); @@ -1817,8 +1876,7 @@ void MacroAssembler::CallCFunction(ExternalReference function, static const int kRegisterPassedArguments = 8; -void MacroAssembler::CallCFunction(Register function, - int num_of_reg_args, +void TurboAssembler::CallCFunction(Register function, int num_of_reg_args, int num_of_double_args) { DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters); DCHECK(has_frame()); @@ -1903,13 +1961,9 @@ void MacroAssembler::CallCFunction(Register function, } } +void TurboAssembler::Jump(Register target) { Br(target); } -void MacroAssembler::Jump(Register target) { - Br(target); -} - - -void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond) { if (cond == nv) return; UseScratchRegisterScope temps(this); @@ -1921,23 +1975,19 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Bind(&done); } - -void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, +void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(reinterpret_cast(target), rmode, cond); } - -void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); - AllowDeferredHandleDereference embedding_raw_address; - Jump(reinterpret_cast(code.location()), rmode, cond); + Jump(reinterpret_cast(code.address()), rmode, cond); } - -void MacroAssembler::Call(Register target) { +void TurboAssembler::Call(Register target) { BlockPoolsScope scope(this); #ifdef DEBUG Label start_call; @@ -1951,8 +2001,7 @@ void MacroAssembler::Call(Register target) { #endif } - -void MacroAssembler::Call(Label* target) { +void TurboAssembler::Call(Label* target) { BlockPoolsScope scope(this); #ifdef DEBUG Label start_call; @@ -1966,10 
+2015,9 @@ void MacroAssembler::Call(Label* target) { #endif } - -// MacroAssembler::CallSize is sensitive to changes in this function, as it +// TurboAssembler::CallSize is sensitive to changes in this function, as it // requires to know how many instructions are used to branch to the target. -void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { +void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { BlockPoolsScope scope(this); #ifdef DEBUG Label start_call; @@ -1999,43 +2047,31 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { #endif } - -void MacroAssembler::Call(Handle code, - RelocInfo::Mode rmode, - TypeFeedbackId ast_id) { +void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode) { #ifdef DEBUG Label start_call; Bind(&start_call); #endif - if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) { - SetRecordedAstId(ast_id); - rmode = RelocInfo::CODE_TARGET_WITH_ID; - } - - AllowDeferredHandleDereference embedding_raw_address; - Call(reinterpret_cast
(code.location()), rmode); + Call(code.address(), rmode); #ifdef DEBUG // Check the size of the code generated. - AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id)); + AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode)); #endif } - -int MacroAssembler::CallSize(Register target) { +int TurboAssembler::CallSize(Register target) { USE(target); return kInstructionSize; } - -int MacroAssembler::CallSize(Label* target) { +int TurboAssembler::CallSize(Label* target) { USE(target); return kInstructionSize; } - -int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) { +int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) { USE(target); // Addresses always have 64 bits, so we shouldn't encounter NONE32. @@ -2048,12 +2084,8 @@ int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) { } } - -int MacroAssembler::CallSize(Handle code, - RelocInfo::Mode rmode, - TypeFeedbackId ast_id) { +int TurboAssembler::CallSize(Handle code, RelocInfo::Mode rmode) { USE(code); - USE(ast_id); // Addresses always have 64 bits, so we shouldn't encounter NONE32. DCHECK(rmode != RelocInfo::NONE32); @@ -2100,10 +2132,8 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object, JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number); } - -void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, - FPRegister value, - FPRegister scratch_d, +void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value, + VRegister scratch_d, Label* on_successful_conversion, Label* on_failed_conversion) { // Convert to an int and back again, then compare with the original value. @@ -2119,101 +2149,6 @@ void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, } } - -void MacroAssembler::TestForMinusZero(DoubleRegister input) { - UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); - // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will - // cause overflow. - Fmov(temp, input); - Cmp(temp, 1); -} - - -void MacroAssembler::JumpIfMinusZero(DoubleRegister input, - Label* on_negative_zero) { - TestForMinusZero(input); - B(vs, on_negative_zero); -} - - -void MacroAssembler::JumpIfMinusZero(Register input, - Label* on_negative_zero) { - DCHECK(input.Is64Bits()); - // Floating point value is in an integer register. Detect -0.0 by subtracting - // 1 (cmp), which will cause overflow. - Cmp(input, 1); - B(vs, on_negative_zero); -} - - -void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { - // Clamp the value to [0..255]. - Cmp(input.W(), Operand(input.W(), UXTB)); - // If input < input & 0xff, it must be < 0, so saturate to 0. - Csel(output.W(), wzr, input.W(), lt); - // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255. - Csel(output.W(), output.W(), 255, le); -} - - -void MacroAssembler::ClampInt32ToUint8(Register in_out) { - ClampInt32ToUint8(in_out, in_out); -} - - -void MacroAssembler::ClampDoubleToUint8(Register output, - DoubleRegister input, - DoubleRegister dbl_scratch) { - // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types: - // - Inputs lower than 0 (including -infinity) produce 0. - // - Inputs higher than 255 (including +infinity) produce 255. - // Also, it seems that PIXEL types use round-to-nearest rather than - // round-towards-zero. - - // Squash +infinity before the conversion, since Fcvtnu will normally - // convert it to 0. 
- Fmov(dbl_scratch, 255); - Fmin(dbl_scratch, dbl_scratch, input); - - // Convert double to unsigned integer. Values less than zero become zero. - // Values greater than 255 have already been clamped to 255. - Fcvtnu(output, dbl_scratch); -} - -void MacroAssembler::InitializeFieldsWithFiller(Register current_address, - Register end_address, - Register filler) { - DCHECK(!current_address.Is(csp)); - UseScratchRegisterScope temps(this); - Register distance_in_words = temps.AcquireX(); - Label done; - - // Calculate the distance. If it's <= zero then there's nothing to do. - Subs(distance_in_words, end_address, current_address); - B(le, &done); - - // There's at least one field to fill, so do this unconditionally. - Str(filler, MemOperand(current_address)); - - // If the distance_in_words consists of odd number of words we advance - // start_address by one word, otherwise the pairs loop will ovwerite the - // field that was stored above. - And(distance_in_words, distance_in_words, kPointerSize); - Add(current_address, current_address, distance_in_words); - - // Store filler to memory in pairs. - Label loop, entry; - B(&entry); - Bind(&loop); - Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex)); - Bind(&entry); - Cmp(current_address, end_address); - B(lo, &loop); - - Bind(&done); -} - void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte( Register first, Register second, Register scratch1, Register scratch2, Label* failure) { @@ -2243,7 +2178,7 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type, B(ne, not_unique_name); } -void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, +void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count, Register caller_args_count_reg, Register scratch0, Register scratch1) { #if DEBUG @@ -2529,8 +2464,7 @@ void MacroAssembler::InvokeFunction(Handle function, InvokeFunction(x1, expected, actual, flag, call_wrapper); } - -void MacroAssembler::TryConvertDoubleToInt64(Register result, +void TurboAssembler::TryConvertDoubleToInt64(Register result, DoubleRegister double_input, Label* done) { // Try to convert with an FPU convert instruction. It's trivial to compute @@ -2554,9 +2488,8 @@ void MacroAssembler::TryConvertDoubleToInt64(Register result, B(vc, done); } - -void MacroAssembler::TruncateDoubleToI(Register result, - DoubleRegister double_input) { +void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result, + DoubleRegister double_input) { Label done; // Try to convert the double to an int64. If successful, the bottom 32 bits @@ -2577,13 +2510,11 @@ void MacroAssembler::TruncateDoubleToI(Register result, // If we fell through then inline version didn't succeed - call stub instead. Push(lr, double_input); - DoubleToIStub stub(isolate(), - jssp, - result, - 0, - true, // is_truncating - true); // skip_fastpath - CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber + auto stub = new (zone) DoubleToIStub(nullptr, jssp, result, 0, + true, // is_truncating + true); // skip_fastpath + // DoubleToIStub preserves any registers it needs to clobber. + CallStubDelayed(stub); DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes()); Pop(xzr, lr); // xzr to drop the double input on the stack. 
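// Illustrative sketch, not part of the patch: an approximation in plain C++ of
// the fast path that TryConvertDoubleToInt64/TruncateDoubleToIDelayed above
// implement. The inline path keeps the fcvtzs result only when the truncation
// did not overflow (the generated code checks the overflow flag; this sketch
// checks the value range instead); otherwise the delayed DoubleToIStub slow
// path computes the JS ToInt32 result.
#include <cstdint>

inline bool TryTruncateDoubleToInt64(double input, int64_t* result) {
  const double kTwoTo63 = 9223372036854775808.0;  // 2^63, exactly representable
  // NaN fails both comparisons, so it also falls through to the slow path.
  if (!(input >= -kTwoTo63 && input < kTwoTo63)) return false;
  *result = static_cast<int64_t>(input);
  return true;
}

inline bool TruncateDoubleToInt32Fast(double input, int32_t* result) {
  int64_t as_int64;
  if (!TryTruncateDoubleToInt64(input, &as_int64)) return false;
  // As the comment in the hunk above says, the bottom 32 bits of the truncated
  // int64 are the wanted int32 result.
  *result = static_cast<int32_t>(as_int64);
  return true;
}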
@@ -2600,45 +2531,7 @@ void MacroAssembler::TruncateDoubleToI(Register result, Uxtw(result.W(), result.W()); } - -void MacroAssembler::TruncateHeapNumberToI(Register result, - Register object) { - Label done; - DCHECK(!result.is(object)); - DCHECK(jssp.Is(StackPointer())); - - Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); - - // Try to convert the double to an int64. If successful, the bottom 32 bits - // contain our truncated int32 result. - TryConvertDoubleToInt64(result, fp_scratch, &done); - - // If we fell through then inline version didn't succeed - call stub instead. - Push(lr); - DoubleToIStub stub(isolate(), - object, - result, - HeapNumber::kValueOffset - kHeapObjectTag, - true, // is_truncating - true); // skip_fastpath - CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber - Pop(lr); - - Bind(&done); -} - -void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) { - UseScratchRegisterScope temps(this); - frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp; - Register temp = temps.AcquireX(); - Mov(temp, StackFrame::TypeToMarker(type)); - Push(lr, fp); - Mov(fp, StackPointer()); - Claim(frame_slots); - str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset)); -} - -void MacroAssembler::Prologue(bool code_pre_aging) { +void TurboAssembler::Prologue(bool code_pre_aging) { if (code_pre_aging) { Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); __ EmitCodeAgeSequence(stub); @@ -2653,15 +2546,7 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) { Ldr(vector, FieldMemOperand(vector, Cell::kValueOffset)); } - -void MacroAssembler::EnterFrame(StackFrame::Type type, - bool load_constant_pool_pointer_reg) { - // Out-of-line constant pool not implemented on arm64. - UNREACHABLE(); -} - - -void MacroAssembler::EnterFrame(StackFrame::Type type) { +void TurboAssembler::EnterFrame(StackFrame::Type type) { UseScratchRegisterScope temps(this); Register type_reg = temps.AcquireX(); Register code_reg = temps.AcquireX(); @@ -2700,8 +2585,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) { } } - -void MacroAssembler::LeaveFrame(StackFrame::Type type) { +void TurboAssembler::LeaveFrame(StackFrame::Type type) { if (type == StackFrame::WASM_COMPILED) { DCHECK(csp.Is(StackPointer())); Mov(csp, fp); @@ -2719,14 +2603,14 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { void MacroAssembler::ExitFramePreserveFPRegs() { - PushCPURegList(kCallerSavedFP); + PushCPURegList(kCallerSavedV); } void MacroAssembler::ExitFrameRestoreFPRegs() { // Read the registers from the stack without popping them. The stack pointer // will be reset as part of the unwinding process. - CPURegList saved_fp_regs = kCallerSavedFP; + CPURegList saved_fp_regs = kCallerSavedV; DCHECK(saved_fp_regs.Count() % 2 == 0); int offset = ExitFrameConstants::kLastExitFrameField; @@ -2778,11 +2662,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch, STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset); // Save the frame pointer and context pointer in the top frame. 
- Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, + Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()))); Str(fp, MemOperand(scratch)); - Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, - isolate()))); + Mov(scratch, + Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); Str(cp, MemOperand(scratch)); STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField); @@ -2838,19 +2722,19 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles, // Restore the context pointer from the top frame. if (restore_context) { - Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, + Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); Ldr(cp, MemOperand(scratch)); } if (emit_debug_code()) { // Also emit debug code to clear the cp in the top frame. - Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, + Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate()))); Str(xzr, MemOperand(scratch)); } // Clear the frame pointer from the top frame. - Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, + Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()))); Str(xzr, MemOperand(scratch)); @@ -2865,16 +2749,6 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles, } -void MacroAssembler::SetCounter(StatsCounter* counter, int value, - Register scratch1, Register scratch2) { - if (FLAG_native_code_counters && counter->Enabled()) { - Mov(scratch1, value); - Mov(scratch2, ExternalReference(counter)); - Str(scratch1.W(), MemOperand(scratch2)); - } -} - - void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2) { DCHECK(value != 0); @@ -2929,7 +2803,7 @@ void MacroAssembler::PushStackHandler() { // (See JSEntryStub::GenerateBody().) // Link the current handler as the next handler. - Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); + Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate())); Ldr(x10, MemOperand(x11)); Push(x10); @@ -2941,7 +2815,7 @@ void MacroAssembler::PushStackHandler() { void MacroAssembler::PopStackHandler() { STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); Pop(x10); - Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); + Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate())); Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes); Str(x10, MemOperand(x11)); } @@ -2954,7 +2828,6 @@ void MacroAssembler::Allocate(int object_size, Label* gc_required, AllocationFlags flags) { DCHECK(object_size <= kMaxRegularHeapObjectSize); - DCHECK((flags & ALLOCATION_FOLDED) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -3018,10 +2891,7 @@ void MacroAssembler::Allocate(int object_size, Ccmp(result_end, alloc_limit, NoFlag, cc); B(hi, gc_required); - if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { - // The top pointer is not updated for allocation folding dominators. - Str(result_end, MemOperand(top_address)); - } + Str(result_end, MemOperand(top_address)); // Tag the object. 
ObjectTag(result, result); @@ -3100,83 +2970,9 @@ void MacroAssembler::Allocate(Register object_size, Register result, Ccmp(result_end, alloc_limit, NoFlag, cc); B(hi, gc_required); - if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { - // The top pointer is not updated for allocation folding dominators. - Str(result_end, MemOperand(top_address)); - } - - // Tag the object. - ObjectTag(result, result); -} - -void MacroAssembler::FastAllocate(int object_size, Register result, - Register scratch1, Register scratch2, - AllocationFlags flags) { - DCHECK(object_size <= kMaxRegularHeapObjectSize); - - DCHECK(!AreAliased(result, scratch1, scratch2)); - DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); - - // Make object size into bytes. - if ((flags & SIZE_IN_WORDS) != 0) { - object_size *= kPointerSize; - } - DCHECK(0 == (object_size & kObjectAlignmentMask)); - - ExternalReference heap_allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), flags); - - // Set up allocation top address and allocation limit registers. - Register top_address = scratch1; - Register result_end = scratch2; - Mov(top_address, Operand(heap_allocation_top)); - Ldr(result, MemOperand(top_address)); - - // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have - // the same alignment on ARM64. - STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); - - // Calculate new top and write it back. - Adds(result_end, result, object_size); - Str(result_end, MemOperand(top_address)); - - ObjectTag(result, result); -} - -void MacroAssembler::FastAllocate(Register object_size, Register result, - Register result_end, Register scratch, - AllocationFlags flags) { - // |object_size| and |result_end| may overlap, other registers must not. - DCHECK(!AreAliased(object_size, result, scratch)); - DCHECK(!AreAliased(result_end, result, scratch)); - DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() && - result_end.Is64Bits()); - - ExternalReference heap_allocation_top = - AllocationUtils::GetAllocationTopReference(isolate(), flags); - - // Set up allocation top address and allocation limit registers. - Register top_address = scratch; - Mov(top_address, heap_allocation_top); - Ldr(result, MemOperand(top_address)); - - // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have - // the same alignment on ARM64. - STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); - - // Calculate new top and write it back. - if ((flags & SIZE_IN_WORDS) != 0) { - Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2)); - } else { - Adds(result_end, result, object_size); - } Str(result_end, MemOperand(top_address)); - if (emit_debug_code()) { - Tst(result_end, kObjectAlignmentMask); - Check(eq, kUnalignedAllocationInNewSpace); - } - + // Tag the object. ObjectTag(result, result); } @@ -3205,7 +3001,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, if (!heap_number_map.IsValid()) { // If we have a valid value register, use the same type of register to store // the map so we can use STP to store both in one instruction. 
- if (value.IsValid() && value.IsFPRegister()) { + if (value.IsValid() && value.IsVRegister()) { heap_number_map = temps.AcquireD(); } else { heap_number_map = scratch1; @@ -3214,7 +3010,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, } if (emit_debug_code()) { Register map; - if (heap_number_map.IsFPRegister()) { + if (heap_number_map.IsVRegister()) { map = scratch1; Fmov(map, DoubleRegister(heap_number_map)); } else { @@ -3265,7 +3061,7 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor, LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); - Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); + Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOrHashOffset)); Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); Str(value, FieldMemOperand(result, JSValue::kValueOffset)); STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); @@ -3373,16 +3169,6 @@ void MacroAssembler::LoadWeakValue(Register value, Handle cell, JumpIfSmi(value, miss); } - -void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { - UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); - Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); - Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); - Tst(temp, mask); -} - - void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) { // Load the map's "bit field 2". __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset)); @@ -3477,43 +3263,10 @@ void MacroAssembler::TestAndSplit(const Register& reg, } } -bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { - return has_frame_ || !stub->SometimesSetsUpAFrame(); -} - -void MacroAssembler::EmitSeqStringSetCharCheck( - Register string, - Register index, - SeqStringSetCharCheckIndexType index_type, - Register scratch, - uint32_t encoding_mask) { - DCHECK(!AreAliased(string, index, scratch)); - - if (index_type == kIndexIsSmi) { - AssertSmi(index); - } - - // Check that string is an object. - AssertNotSmi(string, kNonObject); - - // Check that string has an appropriate map. - Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); - Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - - And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask); - Cmp(scratch, encoding_mask); - Check(eq, kUnexpectedStringType); - - Ldr(scratch, FieldMemOperand(string, String::kLengthOffset)); - Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); - Check(lt, kIndexIsTooLarge); - - DCHECK_EQ(static_cast(0), Smi::kZero); - Cmp(index, 0); - Check(ge, kIndexIsNegative); +bool TurboAssembler::AllowThisStubCall(CodeStub* stub) { + return has_frame() || !stub->SometimesSetsUpAFrame(); } - // Compute the hash code from the untagged key. 
This must be kept in sync with // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in // code-stub-hydrogen.cc @@ -3672,22 +3425,6 @@ void MacroAssembler::PushSafepointRegisters() { PushXRegList(kSafepointSavedRegisters); } - -void MacroAssembler::PushSafepointRegistersAndDoubles() { - PushSafepointRegisters(); - PushCPURegList(CPURegList( - CPURegister::kFPRegister, kDRegSizeInBits, - RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask())); -} - - -void MacroAssembler::PopSafepointRegistersAndDoubles() { - PopCPURegList(CPURegList( - CPURegister::kFPRegister, kDRegSizeInBits, - RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask())); - PopSafepointRegisters(); -} - void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize); } @@ -3722,7 +3459,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { } else { // This register has no safepoint register slot. UNREACHABLE(); - return -1; } } @@ -3738,19 +3474,16 @@ void MacroAssembler::CheckPageFlag(const Register& object, } } -void MacroAssembler::CheckPageFlagSet(const Register& object, - const Register& scratch, - int mask, +void TurboAssembler::CheckPageFlagSet(const Register& object, + const Register& scratch, int mask, Label* if_any_set) { And(scratch, object, ~Page::kPageAlignmentMask); Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); TestAndBranchIfAnySet(scratch, mask, if_any_set); } - -void MacroAssembler::CheckPageFlagClear(const Register& object, - const Register& scratch, - int mask, +void TurboAssembler::CheckPageFlagClear(const Register& object, + const Register& scratch, int mask, Label* if_all_clear) { And(scratch, object, ~Page::kPageAlignmentMask); Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); @@ -4068,22 +3801,12 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch, Tbz(load_scratch, 0, value_is_white); } - -void MacroAssembler::Assert(Condition cond, BailoutReason reason) { +void TurboAssembler::Assert(Condition cond, BailoutReason reason) { if (emit_debug_code()) { Check(cond, reason); } } - - -void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) { - if (emit_debug_code()) { - CheckRegisterIsClear(reg, reason); - } -} - - void MacroAssembler::AssertRegisterIsRoot(Register reg, Heap::RootListIndex index, BailoutReason reason) { @@ -4093,23 +3816,7 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg, } } - - -void MacroAssembler::AssertIsString(const Register& object) { - if (emit_debug_code()) { - UseScratchRegisterScope temps(this); - Register temp = temps.AcquireX(); - STATIC_ASSERT(kSmiTag == 0); - Tst(object, kSmiTagMask); - Check(ne, kOperandIsNotAString); - Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); - CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); - Check(lo, kOperandIsNotAString); - } -} - - -void MacroAssembler::Check(Condition cond, BailoutReason reason) { +void TurboAssembler::Check(Condition cond, BailoutReason reason) { Label ok; B(cond, &ok); Abort(reason); @@ -4117,17 +3824,7 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) { Bind(&ok); } - -void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) { - Label ok; - Cbz(reg, &ok); - Abort(reason); - // Will not return here. 
- Bind(&ok); -} - - -void MacroAssembler::Abort(BailoutReason reason) { +void TurboAssembler::Abort(BailoutReason reason) { #ifdef DEBUG RecordComment("Abort message: "); RecordComment(GetBailoutReason(reason)); @@ -4154,9 +3851,6 @@ void MacroAssembler::Abort(BailoutReason reason) { // Avoid infinite recursion; Push contains some assertions that use Abort. NoUseRealAbortsScope no_real_aborts(this); - // Check if Abort() has already been initialized. - DCHECK(isolate()->builtins()->Abort()->IsHeapObject()); - Move(x1, Smi::FromInt(static_cast(reason))); if (!has_frame_) { @@ -4235,7 +3929,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format, static const CPURegList kPCSVarargs = CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count); static const CPURegList kPCSVarargsFP = - CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1); + CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, arg_count - 1); // We can use caller-saved registers as scratch values, except for the // arguments and the PCS registers where they might need to go. @@ -4244,7 +3938,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format, tmp_list.Remove(kPCSVarargs); tmp_list.Remove(arg0, arg1, arg2, arg3); - CPURegList fp_tmp_list = kCallerSavedFP; + CPURegList fp_tmp_list = kCallerSavedV; fp_tmp_list.Remove(kPCSVarargsFP); fp_tmp_list.Remove(arg0, arg1, arg2, arg3); @@ -4269,7 +3963,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format, // We might only need a W register here. We need to know the size of the // argument so we can properly encode it for the simulator call. if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); - } else if (args[i].IsFPRegister()) { + } else if (args[i].IsVRegister()) { // In C, floats are always cast to doubles for varargs calls. pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); } else { @@ -4291,8 +3985,8 @@ void MacroAssembler::PrintfNoPreserve(const char * format, Mov(new_arg, old_arg); args[i] = new_arg; } else { - FPRegister old_arg = FPRegister(args[i]); - FPRegister new_arg = temps.AcquireSameSizeAs(old_arg); + VRegister old_arg = VRegister(args[i]); + VRegister new_arg = temps.AcquireSameSizeAs(old_arg); Fmov(new_arg, old_arg); args[i] = new_arg; } @@ -4306,11 +4000,11 @@ void MacroAssembler::PrintfNoPreserve(const char * format, if (pcs[i].IsRegister()) { Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg); } else { - DCHECK(pcs[i].IsFPRegister()); + DCHECK(pcs[i].IsVRegister()); if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) { - Fmov(FPRegister(pcs[i]), FPRegister(args[i])); + Fmov(VRegister(pcs[i]), VRegister(args[i])); } else { - Fcvt(FPRegister(pcs[i]), FPRegister(args[i])); + Fcvt(VRegister(pcs[i]), VRegister(args[i])); } } } @@ -4343,11 +4037,10 @@ void MacroAssembler::PrintfNoPreserve(const char * format, CallPrintf(arg_count, pcs); } - -void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) { - // A call to printf needs special handling for the simulator, since the system - // printf function will use a different instruction set and the procedure-call - // standard will not be compatible. +void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) { +// A call to printf needs special handling for the simulator, since the system +// printf function will use a different instruction set and the procedure-call +// standard will not be compatible. 
#ifdef USE_SIMULATOR { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize); hlt(kImmExceptionIsPrintf); @@ -4398,11 +4091,11 @@ void MacroAssembler::Printf(const char * format, // If csp is the stack pointer, PushCPURegList asserts that the size of each // list is a multiple of 16 bytes. PushCPURegList(kCallerSaved); - PushCPURegList(kCallerSavedFP); + PushCPURegList(kCallerSavedV); // We can use caller-saved registers as scratch values (except for argN). CPURegList tmp_list = kCallerSaved; - CPURegList fp_tmp_list = kCallerSavedFP; + CPURegList fp_tmp_list = kCallerSavedV; tmp_list.Remove(arg0, arg1, arg2, arg3); fp_tmp_list.Remove(arg0, arg1, arg2, arg3); TmpList()->set_list(tmp_list.list()); @@ -4421,7 +4114,7 @@ void MacroAssembler::Printf(const char * format, // to PrintfNoPreserve as an argument. Register arg_sp = temps.AcquireX(); Add(arg_sp, StackPointer(), - kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes()); + kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes()); if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits()); if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits()); if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits()); @@ -4445,15 +4138,14 @@ void MacroAssembler::Printf(const char * format, } } - PopCPURegList(kCallerSavedFP); + PopCPURegList(kCallerSavedV); PopCPURegList(kCallerSaved); TmpList()->set_list(old_tmp_list); FPTmpList()->set_list(old_fp_tmp_list); } - -void MacroAssembler::EmitFrameSetupForCodeAgePatching() { +void TurboAssembler::EmitFrameSetupForCodeAgePatching() { // TODO(jbramley): Other architectures use the internal memcpy to copy the // sequence. If this is a performance bottleneck, we should consider caching // the sequence and copying it in the same way. 
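One detail worth spelling out from the Printf plumbing above: PrintfNoPreserve widens every floating-point argument into a D register because of C's default argument promotions, under which a float passed through a variadic "..." parameter is always converted to double (the in-code comment "In C, floats are always cast to doubles for varargs calls" is stating exactly this). A tiny standalone illustration in plain C++, unrelated to the V8 code itself:

#include <cstdio>

void PromotionDemo() {
  float f = 1.5f;
  // In a variadic call, f undergoes default argument promotion to double,
  // so printf receives a 64-bit value and "%g" (a double conversion) is correct.
  std::printf("%g\n", f);
}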
@@ -4463,9 +4155,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching() { EmitFrameSetupForCodeAgePatching(this); } - - -void MacroAssembler::EmitCodeAgeSequence(Code* stub) { +void TurboAssembler::EmitCodeAgeSequence(Code* stub) { InstructionAccurateScope scope(this, kNoCodeAgeSequenceLength / kInstructionSize); DCHECK(jssp.Is(StackPointer())); @@ -4476,8 +4166,7 @@ void MacroAssembler::EmitCodeAgeSequence(Code* stub) { #undef __ #define __ assm-> - -void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { +void TurboAssembler::EmitFrameSetupForCodeAgePatching(Assembler* assm) { Label start; __ bind(&start); @@ -4494,9 +4183,7 @@ void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength); } - -void MacroAssembler::EmitCodeAgeSequence(Assembler * assm, - Code * stub) { +void TurboAssembler::EmitCodeAgeSequence(Assembler* assm, Code* stub) { Label start; __ bind(&start); // When the stub is called, the sequence is replaced with the young sequence @@ -4526,25 +4213,6 @@ bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) { return is_young; } - -void MacroAssembler::TruncatingDiv(Register result, - Register dividend, - int32_t divisor) { - DCHECK(!AreAliased(result, dividend)); - DCHECK(result.Is32Bits() && dividend.Is32Bits()); - base::MagicNumbersForDivision mag = - base::SignedDivisionByConstant(static_cast(divisor)); - Mov(result, mag.multiplier); - Smull(result.X(), dividend, result); - Asr(result.X(), result.X(), 32); - bool neg = (mag.multiplier & (static_cast(1) << 31)) != 0; - if (divisor > 0 && neg) Add(result, result, dividend); - if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend); - if (mag.shift > 0) Asr(result, result, mag.shift); - Add(result, result, Operand(dividend, LSR, 31)); -} - - #undef __ @@ -4559,10 +4227,9 @@ Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) { return Register::Create(code, reg.SizeInBits()); } - -FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { +VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) { int code = AcquireNextAvailable(availablefp_).code(); - return FPRegister::Create(code, reg.SizeInBits()); + return VRegister::Create(code, reg.SizeInBits()); } diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h index 6c77dd5b0110b5..12f7516f6b3fa8 100644 --- a/deps/v8/src/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/arm64/macro-assembler-arm64.h @@ -52,15 +52,15 @@ namespace internal { #define kRuntimeCallFunctionRegister x1 #define kRuntimeCallArgCountRegister x0 -#define LS_MACRO_LIST(V) \ - V(Ldrb, Register&, rt, LDRB_w) \ - V(Strb, Register&, rt, STRB_w) \ - V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \ - V(Ldrh, Register&, rt, LDRH_w) \ - V(Strh, Register&, rt, STRH_w) \ - V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \ - V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \ - V(Str, CPURegister&, rt, StoreOpFor(rt)) \ +#define LS_MACRO_LIST(V) \ + V(Ldrb, Register&, rt, LDRB_w) \ + V(Strb, Register&, rt, STRB_w) \ + V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \ + V(Ldrh, Register&, rt, LDRH_w) \ + V(Strh, Register&, rt, STRH_w) \ + V(Ldrsh, Register&, rt, rt.Is64Bits() ? 
LDRSH_x : LDRSH_w) \ + V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \ + V(Str, CPURegister&, rt, StoreOpFor(rt)) \ V(Ldrsw, Register&, rt, LDRSW_x) #define LSPAIR_MACRO_LIST(V) \ @@ -177,159 +177,949 @@ enum PreShiftImmMode { kAnyShift // Allow any pre-shift. }; -class MacroAssembler : public Assembler { +class TurboAssembler : public Assembler { public: - MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size, - CodeObjectRequired create_code_object); + TurboAssembler(Isolate* isolate, void* buffer, int buffer_size, + CodeObjectRequired create_code_object) + : Assembler(isolate, buffer, buffer_size), + isolate_(isolate), +#if DEBUG + allow_macro_instructions_(true), +#endif + tmp_list_(DefaultTmpList()), + fptmp_list_(DefaultFPTmpList()), + sp_(jssp), + use_real_aborts_(true) { + if (create_code_object == CodeObjectRequired::kYes) { + code_object_ = + Handle::New(isolate->heap()->undefined_value(), isolate); + } + } + + // The Abort method should call a V8 runtime function, but the CallRuntime + // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will + // use a simpler abort mechanism that doesn't depend on CEntryStub. + // + // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is + // being generated. + bool use_real_aborts() const { return use_real_aborts_; } + + class NoUseRealAbortsScope { + public: + explicit NoUseRealAbortsScope(TurboAssembler* tasm) + : saved_(tasm->use_real_aborts_), tasm_(tasm) { + tasm_->use_real_aborts_ = false; + } + ~NoUseRealAbortsScope() { tasm_->use_real_aborts_ = saved_; } + + private: + bool saved_; + TurboAssembler* tasm_; + }; + + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() const { return has_frame_; } + + Isolate* isolate() const { return isolate_; } + + Handle CodeObject() { + DCHECK(!code_object_.is_null()); + return code_object_; + } + +#if DEBUG + void set_allow_macro_instructions(bool value) { + allow_macro_instructions_ = value; + } + bool allow_macro_instructions() const { return allow_macro_instructions_; } +#endif + + // Set the current stack pointer, but don't generate any code. + inline void SetStackPointer(const Register& stack_pointer) { + DCHECK(!TmpList()->IncludesAliasOf(stack_pointer)); + sp_ = stack_pointer; + } + + // Activation support. + void EnterFrame(StackFrame::Type type); + void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { + // Out-of-line constant pool not implemented on arm64. + UNREACHABLE(); + } + void LeaveFrame(StackFrame::Type type); + + inline void InitializeRootRegister(); + + void Mov(const Register& rd, const Operand& operand, + DiscardMoveMode discard_mode = kDontDiscardForSameWReg); + void Mov(const Register& rd, uint64_t imm); + inline void Mov(const Register& rd, const Register& rm); + void Mov(const VRegister& vd, int vd_index, const VRegister& vn, + int vn_index) { + DCHECK(allow_macro_instructions()); + mov(vd, vd_index, vn, vn_index); + } + void Mov(const VRegister& vd, const VRegister& vn, int index) { + DCHECK(allow_macro_instructions()); + mov(vd, vn, index); + } + void Mov(const VRegister& vd, int vd_index, const Register& rn) { + DCHECK(allow_macro_instructions()); + mov(vd, vd_index, rn); + } + void Mov(const Register& rd, const VRegister& vn, int vn_index) { + DCHECK(allow_macro_instructions()); + mov(rd, vn, vn_index); + } + + // This is required for compatibility with architecture independent code. + // Remove if not needed. 
+ void Move(Register dst, Register src); + void Move(Register dst, Handle x); + void Move(Register dst, Smi* src); + +// NEON by element instructions. +#define NEON_BYELEMENT_MACRO_LIST(V) \ + V(fmla, Fmla) \ + V(fmls, Fmls) \ + V(fmul, Fmul) \ + V(fmulx, Fmulx) \ + V(mul, Mul) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(sqdmulh, Sqdmulh) \ + V(sqrdmulh, Sqrdmulh) \ + V(sqdmull, Sqdmull) \ + V(sqdmull2, Sqdmull2) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(smull, Smull) \ + V(smull2, Smull2) \ + V(smlal, Smlal) \ + V(smlal2, Smlal2) \ + V(smlsl, Smlsl) \ + V(smlsl2, Smlsl2) \ + V(umull, Umull) \ + V(umull2, Umull2) \ + V(umlal, Umlal) \ + V(umlal2, Umlal2) \ + V(umlsl, Umlsl) \ + V(umlsl2, Umlsl2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \ + int vm_index) { \ + DCHECK(allow_macro_instructions()); \ + ASM(vd, vn, vm, vm_index); \ + } + NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON 2 vector register instructions. +#define NEON_2VREG_MACRO_LIST(V) \ + V(abs, Abs) \ + V(addp, Addp) \ + V(addv, Addv) \ + V(cls, Cls) \ + V(clz, Clz) \ + V(cnt, Cnt) \ + V(faddp, Faddp) \ + V(fcvtas, Fcvtas) \ + V(fcvtau, Fcvtau) \ + V(fcvtms, Fcvtms) \ + V(fcvtmu, Fcvtmu) \ + V(fcvtns, Fcvtns) \ + V(fcvtnu, Fcvtnu) \ + V(fcvtps, Fcvtps) \ + V(fcvtpu, Fcvtpu) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxnmv, Fmaxnmv) \ + V(fmaxp, Fmaxp) \ + V(fmaxv, Fmaxv) \ + V(fminnmp, Fminnmp) \ + V(fminnmv, Fminnmv) \ + V(fminp, Fminp) \ + V(fminv, Fminv) \ + V(fneg, Fneg) \ + V(frecpe, Frecpe) \ + V(frecpx, Frecpx) \ + V(frinta, Frinta) \ + V(frinti, Frinti) \ + V(frintm, Frintm) \ + V(frintn, Frintn) \ + V(frintp, Frintp) \ + V(frintx, Frintx) \ + V(frintz, Frintz) \ + V(frsqrte, Frsqrte) \ + V(fsqrt, Fsqrt) \ + V(mov, Mov) \ + V(mvn, Mvn) \ + V(neg, Neg) \ + V(not_, Not) \ + V(rbit, Rbit) \ + V(rev16, Rev16) \ + V(rev32, Rev32) \ + V(rev64, Rev64) \ + V(sadalp, Sadalp) \ + V(saddlp, Saddlp) \ + V(saddlv, Saddlv) \ + V(smaxv, Smaxv) \ + V(sminv, Sminv) \ + V(sqabs, Sqabs) \ + V(sqneg, Sqneg) \ + V(sqxtn2, Sqxtn2) \ + V(sqxtn, Sqxtn) \ + V(sqxtun2, Sqxtun2) \ + V(sqxtun, Sqxtun) \ + V(suqadd, Suqadd) \ + V(sxtl2, Sxtl2) \ + V(sxtl, Sxtl) \ + V(uadalp, Uadalp) \ + V(uaddlp, Uaddlp) \ + V(uaddlv, Uaddlv) \ + V(umaxv, Umaxv) \ + V(uminv, Uminv) \ + V(uqxtn2, Uqxtn2) \ + V(uqxtn, Uqxtn) \ + V(urecpe, Urecpe) \ + V(ursqrte, Ursqrte) \ + V(usqadd, Usqadd) \ + V(uxtl2, Uxtl2) \ + V(uxtl, Uxtl) \ + V(xtn2, Xtn2) \ + V(xtn, Xtn) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn) { \ + DCHECK(allow_macro_instructions()); \ + ASM(vd, vn); \ + } + NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC +#undef NEON_2VREG_MACRO_LIST + +// NEON 2 vector register with immediate instructions. +#define NEON_2VREG_FPIMM_MACRO_LIST(V) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fcmle, Fcmle) \ + V(fcmlt, Fcmlt) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, double imm) { \ + DCHECK(allow_macro_instructions()); \ + ASM(vd, vn, imm); \ + } + NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON 3 vector register instructions. 
+#define NEON_3VREG_MACRO_LIST(V) \ + V(add, Add) \ + V(addhn2, Addhn2) \ + V(addhn, Addhn) \ + V(addp, Addp) \ + V(and_, And) \ + V(bic, Bic) \ + V(bif, Bif) \ + V(bit, Bit) \ + V(bsl, Bsl) \ + V(cmeq, Cmeq) \ + V(cmge, Cmge) \ + V(cmgt, Cmgt) \ + V(cmhi, Cmhi) \ + V(cmhs, Cmhs) \ + V(cmtst, Cmtst) \ + V(eor, Eor) \ + V(fabd, Fabd) \ + V(facge, Facge) \ + V(facgt, Facgt) \ + V(faddp, Faddp) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxp, Fmaxp) \ + V(fminnmp, Fminnmp) \ + V(fminp, Fminp) \ + V(fmla, Fmla) \ + V(fmls, Fmls) \ + V(fmulx, Fmulx) \ + V(frecps, Frecps) \ + V(frsqrts, Frsqrts) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(mul, Mul) \ + V(orn, Orn) \ + V(orr, Orr) \ + V(pmull2, Pmull2) \ + V(pmull, Pmull) \ + V(pmul, Pmul) \ + V(raddhn2, Raddhn2) \ + V(raddhn, Raddhn) \ + V(rsubhn2, Rsubhn2) \ + V(rsubhn, Rsubhn) \ + V(sabal2, Sabal2) \ + V(sabal, Sabal) \ + V(saba, Saba) \ + V(sabdl2, Sabdl2) \ + V(sabdl, Sabdl) \ + V(sabd, Sabd) \ + V(saddl2, Saddl2) \ + V(saddl, Saddl) \ + V(saddw2, Saddw2) \ + V(saddw, Saddw) \ + V(shadd, Shadd) \ + V(shsub, Shsub) \ + V(smaxp, Smaxp) \ + V(smax, Smax) \ + V(sminp, Sminp) \ + V(smin, Smin) \ + V(smlal2, Smlal2) \ + V(smlal, Smlal) \ + V(smlsl2, Smlsl2) \ + V(smlsl, Smlsl) \ + V(smull2, Smull2) \ + V(smull, Smull) \ + V(sqadd, Sqadd) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmulh, Sqdmulh) \ + V(sqdmull2, Sqdmull2) \ + V(sqdmull, Sqdmull) \ + V(sqrdmulh, Sqrdmulh) \ + V(sqrshl, Sqrshl) \ + V(sqshl, Sqshl) \ + V(sqsub, Sqsub) \ + V(srhadd, Srhadd) \ + V(srshl, Srshl) \ + V(sshl, Sshl) \ + V(ssubl2, Ssubl2) \ + V(ssubl, Ssubl) \ + V(ssubw2, Ssubw2) \ + V(ssubw, Ssubw) \ + V(subhn2, Subhn2) \ + V(subhn, Subhn) \ + V(sub, Sub) \ + V(trn1, Trn1) \ + V(trn2, Trn2) \ + V(uabal2, Uabal2) \ + V(uabal, Uabal) \ + V(uaba, Uaba) \ + V(uabdl2, Uabdl2) \ + V(uabdl, Uabdl) \ + V(uabd, Uabd) \ + V(uaddl2, Uaddl2) \ + V(uaddl, Uaddl) \ + V(uaddw2, Uaddw2) \ + V(uaddw, Uaddw) \ + V(uhadd, Uhadd) \ + V(uhsub, Uhsub) \ + V(umaxp, Umaxp) \ + V(umax, Umax) \ + V(uminp, Uminp) \ + V(umin, Umin) \ + V(umlal2, Umlal2) \ + V(umlal, Umlal) \ + V(umlsl2, Umlsl2) \ + V(umlsl, Umlsl) \ + V(umull2, Umull2) \ + V(umull, Umull) \ + V(uqadd, Uqadd) \ + V(uqrshl, Uqrshl) \ + V(uqshl, Uqshl) \ + V(uqsub, Uqsub) \ + V(urhadd, Urhadd) \ + V(urshl, Urshl) \ + V(ushl, Ushl) \ + V(usubl2, Usubl2) \ + V(usubl, Usubl) \ + V(usubw2, Usubw2) \ + V(usubw, Usubw) \ + V(uzp1, Uzp1) \ + V(uzp2, Uzp2) \ + V(zip1, Zip1) \ + V(zip2, Zip2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \ + DCHECK(allow_macro_instructions()); \ + ASM(vd, vn, vm); \ + } + NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + + void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) { + DCHECK(allow_macro_instructions()); + bic(vd, imm8, left_shift); + } + + // This is required for compatibility in architecture independent code. 
+ inline void jmp(Label* L); + + void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1); + inline void B(Label* label); + inline void B(Condition cond, Label* label); + void B(Label* label, Condition cond); + + void Tbnz(const Register& rt, unsigned bit_pos, Label* label); + void Tbz(const Register& rt, unsigned bit_pos, Label* label); + + void Cbnz(const Register& rt, Label* label); + void Cbz(const Register& rt, Label* label); + + bool AllowThisStubCall(CodeStub* stub); + void CallStubDelayed(CodeStub* stub); + void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); + + // Removes current frame and its arguments from the stack preserving + // the arguments and a return address pushed to the stack for the next call. + // Both |callee_args_count| and |caller_args_count_reg| do not include + // receiver. |callee_args_count| is not modified, |caller_args_count_reg| + // is trashed. + void PrepareForTailCall(const ParameterCount& callee_args_count, + Register caller_args_count_reg, Register scratch0, + Register scratch1); + + inline void SmiUntag(Register dst, Register src); + inline void SmiUntag(Register smi); + + // Calls Abort(msg) if the condition cond is not satisfied. + // Use --debug_code to enable. + void Assert(Condition cond, BailoutReason reason); + + void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi); + + // Like Assert(), but always enabled. + void Check(Condition cond, BailoutReason reason); + + inline void Debug(const char* message, uint32_t code, Instr params = BREAK); + + // Print a message to stderr and abort execution. + void Abort(BailoutReason reason); + + // If emit_debug_code() is true, emit a run-time check to ensure that + // StackPointer() does not point below the system stack pointer. + // + // Whilst it is architecturally legal for StackPointer() to point below csp, + // it can be evidence of a potential bug because the ABI forbids accesses + // below csp. + // + // If StackPointer() is the system stack pointer (csp), then csp will be + // dereferenced to cause the processor (or simulator) to abort if it is not + // properly aligned. + // + // If emit_debug_code() is false, this emits no code. + void AssertStackConsistency(); + + // Remaining instructions are simple pass-through calls to the assembler. + inline void Asr(const Register& rd, const Register& rn, unsigned shift); + inline void Asr(const Register& rd, const Register& rn, const Register& rm); + + // Try to move an immediate into the destination register in a single + // instruction. Returns true for success, and updates the contents of dst. + // Returns false, otherwise. + bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); + + inline void Bind(Label* label); + + static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size); + + CPURegList* TmpList() { return &tmp_list_; } + CPURegList* FPTmpList() { return &fptmp_list_; } + + static CPURegList DefaultTmpList(); + static CPURegList DefaultFPTmpList(); + + // Return the current stack pointer, as set by SetStackPointer. + inline const Register& StackPointer() const { return sp_; } + + // Move macros. 
+ inline void Mvn(const Register& rd, uint64_t imm); + void Mvn(const Register& rd, const Operand& operand); + static bool IsImmMovn(uint64_t imm, unsigned reg_size); + static bool IsImmMovz(uint64_t imm, unsigned reg_size); + + void LogicalMacro(const Register& rd, const Register& rn, + const Operand& operand, LogicalOp op); + void AddSubMacro(const Register& rd, const Register& rn, + const Operand& operand, FlagsUpdate S, AddSubOp op); + inline void Orr(const Register& rd, const Register& rn, + const Operand& operand); + void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) { + DCHECK(allow_macro_instructions()); + orr(vd, imm8, left_shift); + } + inline void Orn(const Register& rd, const Register& rn, + const Operand& operand); + inline void Eor(const Register& rd, const Register& rn, + const Operand& operand); + inline void Eon(const Register& rd, const Register& rn, + const Operand& operand); + inline void And(const Register& rd, const Register& rn, + const Operand& operand); + inline void Ands(const Register& rd, const Register& rn, + const Operand& operand); + inline void Tst(const Register& rn, const Operand& operand); + inline void Bic(const Register& rd, const Register& rn, + const Operand& operand); + inline void Blr(const Register& xn); + inline void Cmp(const Register& rn, const Operand& operand); + inline void Subs(const Register& rd, const Register& rn, + const Operand& operand); + + // Emits a runtime assert that the CSP is aligned. + void AssertCspAligned(); + + // Load a literal from the inline constant pool. + inline void Ldr(const CPURegister& rt, const Operand& imm); + // Helper function for double immediate. + inline void Ldr(const CPURegister& rt, double imm); + + // Claim or drop stack space without actually accessing memory. + // + // In debug mode, both of these will write invalid data into the claimed or + // dropped space. + // + // If the current stack pointer (according to StackPointer()) is csp, then it + // must be aligned to 16 bytes and the size claimed or dropped must be a + // multiple of 16 bytes. + // + // Note that unit_size must be specified in bytes. For variants which take a + // Register count, the unit size must be a power of two. + inline void Claim(int64_t count, uint64_t unit_size = kXRegSize); + inline void Claim(const Register& count, uint64_t unit_size = kXRegSize); + inline void Drop(int64_t count, uint64_t unit_size = kXRegSize); + inline void Drop(const Register& count, uint64_t unit_size = kXRegSize); + + // Re-synchronizes the system stack pointer (csp) with the current stack + // pointer (according to StackPointer()). + // + // This method asserts that StackPointer() is not csp, since the call does + // not make sense in that context. + inline void SyncSystemStackPointer(); + + // Push the system stack pointer (csp) down to allow the same to be done to + // the current stack pointer (according to StackPointer()). This must be + // called _before_ accessing the memory. + // + // This is necessary when pushing or otherwise adding things to the stack, to + // satisfy the AAPCS64 constraint that the memory below the system stack + // pointer is not accessed. The amount pushed will be increased as necessary + // to ensure csp remains aligned to 16 bytes. + // + // This method asserts that StackPointer() is not csp, since the call does + // not make sense in that context. + inline void BumpSystemStackPointer(const Operand& space); + + // Add and sub macros. 
+ inline void Add(const Register& rd, const Register& rn, + const Operand& operand); + inline void Adds(const Register& rd, const Register& rn, + const Operand& operand); + inline void Sub(const Register& rd, const Register& rn, + const Operand& operand); + + // Abort execution if argument is not a positive or zero integer, enabled via + // --debug-code. + void AssertPositiveOrZero(Register value); + +#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \ + inline void FN(const REGTYPE REG, const MemOperand& addr); + LS_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + // Push or pop up to 4 registers of the same width to or from the stack, + // using the current stack pointer as set by SetStackPointer. + // + // If an argument register is 'NoReg', all further arguments are also assumed + // to be 'NoReg', and are thus not pushed or popped. + // + // Arguments are ordered such that "Push(a, b);" is functionally equivalent + // to "Push(a); Push(b);". + // + // It is valid to push the same register more than once, and there is no + // restriction on the order in which registers are specified. + // + // It is not valid to pop into the same register more than once in one + // operation, not even into the zero register. + // + // If the current stack pointer (as set by SetStackPointer) is csp, then it + // must be aligned to 16 bytes on entry and the total size of the specified + // registers must also be a multiple of 16 bytes. + // + // Even if the current stack pointer is not the system stack pointer (csp), + // Push (and derived methods) will still modify the system stack pointer in + // order to comply with ABI rules about accessing memory below the system + // stack pointer. + // + // Other than the registers passed into Pop, the stack pointer and (possibly) + // the system stack pointer, these methods do not modify any other registers. + void Push(const CPURegister& src0, const CPURegister& src1 = NoReg, + const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg); + void Push(const CPURegister& src0, const CPURegister& src1, + const CPURegister& src2, const CPURegister& src3, + const CPURegister& src4, const CPURegister& src5 = NoReg, + const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg); + void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg, + const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg); + void Pop(const CPURegister& dst0, const CPURegister& dst1, + const CPURegister& dst2, const CPURegister& dst3, + const CPURegister& dst4, const CPURegister& dst5 = NoReg, + const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg); + void Push(const Register& src0, const VRegister& src1); + + // This is a convenience method for pushing a single Handle. + inline void Push(Handle object); + inline void Push(Smi* smi); + + // Aliases of Push and Pop, required for V8 compatibility. + inline void push(Register src) { Push(src); } + inline void pop(Register dst) { Pop(dst); } + + // Alternative forms of Push and Pop, taking a RegList or CPURegList that + // specifies the registers that are to be pushed or popped. Higher-numbered + // registers are associated with higher memory addresses (as in the A32 push + // and pop instructions). + // + // (Push|Pop)SizeRegList allow you to specify the register size as a + // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and + // kSRegSizeInBits are supported. + // + // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred. 
+ void PushCPURegList(CPURegList registers); + void PopCPURegList(CPURegList registers); + + // Move an immediate into register dst, and return an Operand object for use + // with a subsequent instruction that accepts a shift. The value moved into + // dst is not necessarily equal to imm; it may have had a shifting operation + // applied to it that will be subsequently undone by the shift applied in the + // Operand. + Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm, + PreShiftImmMode mode); + + void CheckPageFlagSet(const Register& object, const Register& scratch, + int mask, Label* if_any_set); + + void CheckPageFlagClear(const Register& object, const Register& scratch, + int mask, Label* if_all_clear); + + // Perform necessary maintenance operations before a push or after a pop. + // + // Note that size is specified in bytes. + void PushPreamble(Operand total_size); + void PopPostamble(Operand total_size); + + void PushPreamble(int count, int size); + void PopPostamble(int count, int size); + + // Test the bits of register defined by bit_pattern, and branch if ANY of + // those bits are set. May corrupt the status flags. + inline void TestAndBranchIfAnySet(const Register& reg, + const uint64_t bit_pattern, Label* label); + + // Test the bits of register defined by bit_pattern, and branch if ALL of + // those bits are clear (ie. not set.) May corrupt the status flags. + inline void TestAndBranchIfAllClear(const Register& reg, + const uint64_t bit_pattern, Label* label); + + inline void Brk(int code); + + inline void JumpIfSmi(Register value, Label* smi_label, + Label* not_smi_label = NULL); + + inline void Fmov(VRegister fd, VRegister fn); + inline void Fmov(VRegister fd, Register rn); + // Provide explicit double and float interfaces for FP immediate moves, rather + // than relying on implicit C++ casts. This allows signalling NaNs to be + // preserved when the immediate matches the format of fd. Most systems convert + // signalling NaNs to quiet NaNs when converting between float and double. + inline void Fmov(VRegister fd, double imm); + inline void Fmov(VRegister fd, float imm); + // Provide a template to allow other types to be converted automatically. + template + void Fmov(VRegister fd, T imm) { + DCHECK(allow_macro_instructions()); + Fmov(fd, static_cast(imm)); + } + inline void Fmov(Register rd, VRegister fn); + + void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL, + int shift_amount = 0); + void Movi(const VRegister& vd, uint64_t hi, uint64_t lo); + + void Jump(Register target); + void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); + void Jump(Handle code, RelocInfo::Mode rmode, Condition cond = al); + void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); + + void Call(Register target); + void Call(Label* target); + void Call(Address target, RelocInfo::Mode rmode); + void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET); + + // For every Call variant, there is a matching CallSize function that returns + // the size (in bytes) of the call sequence. + static int CallSize(Register target); + static int CallSize(Label* target); + static int CallSize(Address target, RelocInfo::Mode rmode); + static int CallSize(Handle code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET); + + // Calls a C function. 
+ // The called function is not allowed to trigger a + // garbage collection, since that might move the code and invalidate the + // return address (unless this is somehow accounted for by the called + // function). + void CallCFunction(ExternalReference function, int num_reg_arguments); + void CallCFunction(ExternalReference function, int num_reg_arguments, + int num_double_arguments); + void CallCFunction(Register function, int num_reg_arguments, + int num_double_arguments); + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. + void TruncateDoubleToIDelayed(Zone* zone, Register result, + DoubleRegister double_input); + + inline void Mul(const Register& rd, const Register& rn, const Register& rm); + + inline void Fcvtzs(const Register& rd, const VRegister& fn); + void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) { + DCHECK(allow_macro_instructions()); + fcvtzs(vd, vn, fbits); + } + + inline void Fcvtzu(const Register& rd, const VRegister& fn); + void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) { + DCHECK(allow_macro_instructions()); + fcvtzu(vd, vn, fbits); + } + + inline void Madd(const Register& rd, const Register& rn, const Register& rm, + const Register& ra); + inline void Mneg(const Register& rd, const Register& rn, const Register& rm); + inline void Sdiv(const Register& rd, const Register& rn, const Register& rm); + inline void Udiv(const Register& rd, const Register& rn, const Register& rm); + inline void Msub(const Register& rd, const Register& rn, const Register& rm, + const Register& ra); + + inline void Lsl(const Register& rd, const Register& rn, unsigned shift); + inline void Lsl(const Register& rd, const Register& rn, const Register& rm); + inline void Umull(const Register& rd, const Register& rn, const Register& rm); + inline void Smull(const Register& rd, const Register& rn, const Register& rm); + + inline void Sxtb(const Register& rd, const Register& rn); + inline void Sxth(const Register& rd, const Register& rn); + inline void Sxtw(const Register& rd, const Register& rn); + inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb, + unsigned width); + inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb, + unsigned width); + inline void Lsr(const Register& rd, const Register& rn, unsigned shift); + inline void Lsr(const Register& rd, const Register& rn, const Register& rm); + inline void Ror(const Register& rd, const Register& rs, unsigned shift); + inline void Ror(const Register& rd, const Register& rn, const Register& rm); + inline void Cmn(const Register& rn, const Operand& operand); + inline void Fadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Fcmp(const VRegister& fn, const VRegister& fm); + inline void Fcmp(const VRegister& fn, double value); + inline void Fabs(const VRegister& fd, const VRegister& fn); + inline void Fmul(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Fsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Fdiv(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Fmax(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Fmin(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Rbit(const Register& rd, const Register& rn); + + enum AdrHint { + // The target must be within the 
immediate range of adr. + kAdrNear, + // The target may be outside of the immediate range of adr. Additional + // instructions may be emitted. + kAdrFar + }; + void Adr(const Register& rd, Label* label, AdrHint = kAdrNear); + + // Add/sub with carry macros. + inline void Adc(const Register& rd, const Register& rn, + const Operand& operand); + + // Conditional macros. + inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv, + Condition cond); + + inline void Clz(const Register& rd, const Register& rn); + + // Poke 'src' onto the stack. The offset is in bytes. + // + // If the current stack pointer (according to StackPointer()) is csp, then + // csp must be aligned to 16 bytes. + void Poke(const CPURegister& src, const Operand& offset); - Isolate* isolate() const { return isolate_; } + // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent + // with 'src2' at a higher address than 'src1'. The offset is in bytes. + // + // If the current stack pointer (according to StackPointer()) is csp, then + // csp must be aligned to 16 bytes. + void PokePair(const CPURegister& src1, const CPURegister& src2, int offset); - Handle CodeObject() { - DCHECK(!code_object_.is_null()); - return code_object_; - } + inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb, + unsigned width); - // Instruction set functions ------------------------------------------------ - // Logical macros. - inline void And(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Ands(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Bic(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Bics(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Orr(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Orn(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Eor(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Eon(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Tst(const Register& rn, const Operand& operand); - void LogicalMacro(const Register& rd, - const Register& rn, - const Operand& operand, - LogicalOp op); + inline void Bfi(const Register& rd, const Register& rn, unsigned lsb, + unsigned width); - // Add and sub macros. 
- inline void Add(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Adds(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Sub(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Subs(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Cmn(const Register& rn, const Operand& operand); - inline void Cmp(const Register& rn, const Operand& operand); - inline void Neg(const Register& rd, - const Operand& operand); - inline void Negs(const Register& rd, - const Operand& operand); + inline void Scvtf(const VRegister& fd, const Register& rn, + unsigned fbits = 0); + void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) { + DCHECK(allow_macro_instructions()); + scvtf(vd, vn, fbits); + } + inline void Ucvtf(const VRegister& fd, const Register& rn, + unsigned fbits = 0); + void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) { + DCHECK(allow_macro_instructions()); + ucvtf(vd, vn, fbits); + } - void AddSubMacro(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, - AddSubOp op); + void AssertFPCRState(Register fpcr = NoReg); + void CanonicalizeNaN(const VRegister& dst, const VRegister& src); + void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); } - // Add/sub with carry macros. - inline void Adc(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Adcs(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Sbc(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Sbcs(const Register& rd, - const Register& rn, - const Operand& operand); - inline void Ngc(const Register& rd, - const Operand& operand); - inline void Ngcs(const Register& rd, - const Operand& operand); - void AddSubWithCarryMacro(const Register& rd, - const Register& rn, - const Operand& operand, - FlagsUpdate S, - AddSubWithCarryOp op); + inline void Cset(const Register& rd, Condition cond); + inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv, + Condition cond); + inline void Csinc(const Register& rd, const Register& rn, const Register& rm, + Condition cond); - // Move macros. - void Mov(const Register& rd, - const Operand& operand, - DiscardMoveMode discard_mode = kDontDiscardForSameWReg); - void Mov(const Register& rd, uint64_t imm); - inline void Mvn(const Register& rd, uint64_t imm); - void Mvn(const Register& rd, const Operand& operand); - static bool IsImmMovn(uint64_t imm, unsigned reg_size); - static bool IsImmMovz(uint64_t imm, unsigned reg_size); - static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size); + inline void Fcvt(const VRegister& fd, const VRegister& fn); - // Try to move an immediate into the destination register in a single - // instruction. Returns true for success, and updates the contents of dst. - // Returns false, otherwise. - bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); + int ActivationFrameAlignment(); - // Move an immediate into register dst, and return an Operand object for use - // with a subsequent instruction that accepts a shift. The value moved into - // dst is not necessarily equal to imm; it may have had a shifting operation - // applied to it that will be subsequently undone by the shift applied in the - // Operand. 
- Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm, - PreShiftImmMode mode); + void Ins(const VRegister& vd, int vd_index, const VRegister& vn, + int vn_index) { + DCHECK(allow_macro_instructions()); + ins(vd, vd_index, vn, vn_index); + } + void Ins(const VRegister& vd, int vd_index, const Register& rn) { + DCHECK(allow_macro_instructions()); + ins(vd, vd_index, rn); + } - // Conditional macros. - inline void Ccmp(const Register& rn, - const Operand& operand, - StatusFlags nzcv, - Condition cond); - inline void Ccmn(const Register& rn, - const Operand& operand, - StatusFlags nzcv, - Condition cond); - void ConditionalCompareMacro(const Register& rn, - const Operand& operand, - StatusFlags nzcv, - Condition cond, - ConditionalCompareOp op); - void Csel(const Register& rd, - const Register& rn, - const Operand& operand, - Condition cond); + inline void Bl(Label* label); + inline void Br(const Register& xn); - // Load/store macros. -#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \ - inline void FN(const REGTYPE REG, const MemOperand& addr); - LS_MACRO_LIST(DECLARE_FUNCTION) -#undef DECLARE_FUNCTION + inline void Uxtb(const Register& rd, const Register& rn); + inline void Uxth(const Register& rd, const Register& rn); + inline void Uxtw(const Register& rd, const Register& rn); - void LoadStoreMacro(const CPURegister& rt, - const MemOperand& addr, - LoadStoreOp op); + void Dup(const VRegister& vd, const VRegister& vn, int index) { + DCHECK(allow_macro_instructions()); + dup(vd, vn, index); + } + void Dup(const VRegister& vd, const Register& rn) { + DCHECK(allow_macro_instructions()); + dup(vd, rn); + } #define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr); LSPAIR_MACRO_LIST(DECLARE_FUNCTION) #undef DECLARE_FUNCTION - void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, - const MemOperand& addr, LoadStorePairOp op); +#define NEON_2VREG_SHIFT_MACRO_LIST(V) \ + V(rshrn, Rshrn) \ + V(rshrn2, Rshrn2) \ + V(shl, Shl) \ + V(shll, Shll) \ + V(shll2, Shll2) \ + V(shrn, Shrn) \ + V(shrn2, Shrn2) \ + V(sli, Sli) \ + V(sqrshrn, Sqrshrn) \ + V(sqrshrn2, Sqrshrn2) \ + V(sqrshrun, Sqrshrun) \ + V(sqrshrun2, Sqrshrun2) \ + V(sqshl, Sqshl) \ + V(sqshlu, Sqshlu) \ + V(sqshrn, Sqshrn) \ + V(sqshrn2, Sqshrn2) \ + V(sqshrun, Sqshrun) \ + V(sqshrun2, Sqshrun2) \ + V(sri, Sri) \ + V(srshr, Srshr) \ + V(srsra, Srsra) \ + V(sshll, Sshll) \ + V(sshll2, Sshll2) \ + V(sshr, Sshr) \ + V(ssra, Ssra) \ + V(uqrshrn, Uqrshrn) \ + V(uqrshrn2, Uqrshrn2) \ + V(uqshl, Uqshl) \ + V(uqshrn, Uqshrn) \ + V(uqshrn2, Uqshrn2) \ + V(urshr, Urshr) \ + V(ursra, Ursra) \ + V(ushll, Ushll) \ + V(ushll2, Ushll2) \ + V(ushr, Ushr) \ + V(usra, Usra) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, int shift) { \ + DCHECK(allow_macro_instructions()); \ + ASM(vd, vn, shift); \ + } + NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + + void Umov(const Register& rd, const VRegister& vn, int vn_index) { + DCHECK(allow_macro_instructions()); + umov(rd, vn, vn_index); + } + void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbl(vd, vn, vm); + } + void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbl(vd, vn, vn2, vm); + } + void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const 
VRegister& vn3, const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbl(vd, vn, vn2, vn3, vm); + } + void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vn3, const VRegister& vn4, const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbl(vd, vn, vn2, vn3, vn4, vm); + } + void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm, + int index) { + DCHECK(allow_macro_instructions()); + ext(vd, vn, vm, index); + } + + void Smov(const Register& rd, const VRegister& vn, int vn_index) { + DCHECK(allow_macro_instructions()); + smov(rd, vn, vn_index); + } // Load-acquire/store-release macros. #define DECLARE_FUNCTION(FN, OP) \ @@ -337,6 +1127,197 @@ class MacroAssembler : public Assembler { LDA_STL_MACRO_LIST(DECLARE_FUNCTION) #undef DECLARE_FUNCTION + // Load an object from the root table. + void LoadRoot(CPURegister destination, Heap::RootListIndex index); + + inline void Ret(const Register& xn = lr); + + // Perform a conversion from a double to a signed int64. If the input fits in + // range of the 64-bit result, execution branches to done. Otherwise, + // execution falls through, and the sign of the result can be used to + // determine if overflow was towards positive or negative infinity. + // + // On successful conversion, the least significant 32 bits of the result are + // equivalent to the ECMA-262 operation "ToInt32". + // + // Only public for the test code in test-code-stubs-arm64.cc. + void TryConvertDoubleToInt64(Register result, DoubleRegister input, + Label* done); + + inline void Mrs(const Register& rt, SystemRegister sysreg); + + // Generates function prologue code. + void Prologue(bool code_pre_aging); + + // Code ageing support functions. + + // Code ageing on ARM64 works similarly to on ARM. When V8 wants to mark a + // function as old, it replaces some of the function prologue (generated by + // FullCodeGenerator::Generate) with a call to a special stub (ultimately + // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the + // function prologue to its initial young state (indicating that it has been + // recently run) and continues. A young function is therefore one which has a + // normal frame setup sequence, and an old function has a code age sequence + // which calls a code ageing stub. + + // Set up a basic stack frame for young code (or code exempt from ageing) with + // type FUNCTION. It may be patched later for code ageing support. This is + // done by to Code::PatchPlatformCodeAge and EmitCodeAgeSequence. + // + // This function takes an Assembler so it can be called from either a + // MacroAssembler or a PatchingAssembler context. + static void EmitFrameSetupForCodeAgePatching(Assembler* assm); + + // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context. + void EmitFrameSetupForCodeAgePatching(); + + // Emit a code age sequence that calls the relevant code age stub. The code + // generated by this sequence is expected to replace the code generated by + // EmitFrameSetupForCodeAgePatching, and represents an old function. + // + // If stub is NULL, this function generates the code age sequence but omits + // the stub address that is normally embedded in the instruction stream. This + // can be used by debug code to verify code age sequences. + static void EmitCodeAgeSequence(Assembler* assm, Code* stub); + + // Call EmitCodeAgeSequence from a MacroAssembler context. 
+  void EmitCodeAgeSequence(Code* stub);
+
+  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
+    DCHECK(allow_macro_instructions());
+    cmgt(vd, vn, imm);
+  }
+  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
+    DCHECK(allow_macro_instructions());
+    cmge(vd, vn, imm);
+  }
+  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
+    DCHECK(allow_macro_instructions());
+    cmeq(vd, vn, imm);
+  }
+
+  inline void Neg(const Register& rd, const Operand& operand);
+  inline void Negs(const Register& rd, const Operand& operand);
+
+  // Compute rd = abs(rm).
+  // This function clobbers the condition flags. On output the overflow flag is
+  // set iff the negation overflowed.
+  //
+  // If rm is the minimum representable value, the result is not representable.
+  // Handlers for each case can be specified using the relevant labels.
+  void Abs(const Register& rd, const Register& rm,
+           Label* is_not_representable = NULL, Label* is_representable = NULL);
+
+  inline void Cls(const Register& rd, const Register& rn);
+  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
+  inline void Rev16(const Register& rd, const Register& rn);
+  inline void Rev32(const Register& rd, const Register& rn);
+  inline void Fcvtns(const Register& rd, const VRegister& fn);
+  inline void Fcvtnu(const Register& rd, const VRegister& fn);
+  inline void Fcvtms(const Register& rd, const VRegister& fn);
+  inline void Fcvtmu(const Register& rd, const VRegister& fn);
+  inline void Fcvtas(const Register& rd, const VRegister& fn);
+  inline void Fcvtau(const Register& rd, const VRegister& fn);
+
+ protected:
+  // The actual Push and Pop implementations. These don't generate any code
+  // other than that required for the push or pop. This allows
+  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+  // block of registers.
+  //
+  // Note that size is per register, and is specified in bytes.
+  void PushHelper(int count, int size, const CPURegister& src0,
+                  const CPURegister& src1, const CPURegister& src2,
+                  const CPURegister& src3);
+  void PopHelper(int count, int size, const CPURegister& dst0,
+                 const CPURegister& dst1, const CPURegister& dst2,
+                 const CPURegister& dst3);
+
+  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
+                               StatusFlags nzcv, Condition cond,
+                               ConditionalCompareOp op);
+
+  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
+                            const Operand& operand, FlagsUpdate S,
+                            AddSubWithCarryOp op);
+
+  // Call Printf. On a native build, a simple call will be generated, but if the
+  // simulator is being used then a suitable pseudo-instruction is used. The
+  // arguments and stack (csp) must be prepared by the caller as for a normal
+  // AAPCS64 call to 'printf'.
+  //
+  // The 'args' argument should point to an array of variable arguments in their
+  // proper PCS registers (and in calling order). The argument registers can
+  // have mixed types. The format string (x0) should not be included.
+  void CallPrintf(int arg_count = 0, const CPURegister* args = NULL);
+
+ private:
+  bool has_frame_ = false;
+  Isolate* const isolate_;
+#if DEBUG
+  // Tell whether any of the macro instruction can be used. When false the
+  // MacroAssembler will assert if a method which can emit a variable number
+  // of instructions is called.
+  bool allow_macro_instructions_;
+#endif
+  // This handle will be patched with the code object on installation.
+  Handle<Code> code_object_;
+
+  // Scratch registers available for use by the MacroAssembler.
+  CPURegList tmp_list_;
+  CPURegList fptmp_list_;
+
+  // The register to use as a stack pointer for stack operations.
+  Register sp_;
+
+  bool use_real_aborts_;
+
+  // Helps resolve branching to labels potentially out of range.
+  // If the label is not bound, it registers the information necessary to later
+  // be able to emit a veneer for this branch if necessary.
+  // If the label is bound, it returns true if the label (or the previous link
+  // in the label chain) is out of range. In that case the caller is responsible
+  // for generating appropriate code.
+  // Otherwise it returns false.
+  // This function also checks whether veneers need to be emitted.
+  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
+                                             ImmBranchType branch_type);
+
+  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
+                      LoadStoreOp op);
+
+  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
+                          const MemOperand& addr, LoadStorePairOp op);
+};
+
+class MacroAssembler : public TurboAssembler {
+ public:
+  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
+                 CodeObjectRequired create_code_object);
+
+  // Instruction set functions ------------------------------------------------
+  // Logical macros.
+  inline void Bics(const Register& rd, const Register& rn,
+                   const Operand& operand);
+
+  inline void Adcs(const Register& rd, const Register& rn,
+                   const Operand& operand);
+  inline void Sbc(const Register& rd, const Register& rn,
+                  const Operand& operand);
+  inline void Sbcs(const Register& rd, const Register& rn,
+                   const Operand& operand);
+  inline void Ngc(const Register& rd, const Operand& operand);
+  inline void Ngcs(const Register& rd, const Operand& operand);
+
+  inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
+                   Condition cond);
+  void Csel(const Register& rd, const Register& rn, const Operand& operand,
+            Condition cond);
+
 #define DECLARE_FUNCTION(FN, OP) \
   inline void FN(const Register& rs, const Register& rt, const Register& rn);
   STLX_MACRO_LIST(DECLARE_FUNCTION)
 #undef DECLARE_FUNCTION
@@ -344,315 +1325,244 @@ class MacroAssembler : public Assembler {
   // V8-specific load/store helpers.
   void Load(const Register& rt, const MemOperand& addr, Representation r);
-  void Store(const Register& rt, const MemOperand& addr, Representation r);
-
-  enum AdrHint {
-    // The target must be within the immediate range of adr.
-    kAdrNear,
-    // The target may be outside of the immediate range of adr. Additional
-    // instructions may be emitted.
-    kAdrFar
-  };
-  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);
-
-  // Remaining instructions are simple pass-through calls to the assembler.
-  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
-  inline void Asr(const Register& rd, const Register& rn, const Register& rm);
+  void Store(const Register& rt, const MemOperand& addr, Representation r);
   // Branch type inversion relies on these relations.
- STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) && + STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) && (reg_bit_clear == (reg_bit_set ^ 1)) && - (always == (never ^ 1))); - - void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1); + (always == (never ^ 1))); - inline void B(Label* label); - inline void B(Condition cond, Label* label); - void B(Label* label, Condition cond); - inline void Bfi(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width); - inline void Bfxil(const Register& rd, - const Register& rn, - unsigned lsb, + inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb, unsigned width); - inline void Bind(Label* label); - inline void Bl(Label* label); - inline void Blr(const Register& xn); - inline void Br(const Register& xn); - inline void Brk(int code); - void Cbnz(const Register& rt, Label* label); - void Cbz(const Register& rt, Label* label); inline void Cinc(const Register& rd, const Register& rn, Condition cond); inline void Cinv(const Register& rd, const Register& rn, Condition cond); - inline void Cls(const Register& rd, const Register& rn); - inline void Clz(const Register& rd, const Register& rn); - inline void Cneg(const Register& rd, const Register& rn, Condition cond); inline void CzeroX(const Register& rd, Condition cond); inline void CmovX(const Register& rd, const Register& rn, Condition cond); - inline void Cset(const Register& rd, Condition cond); inline void Csetm(const Register& rd, Condition cond); - inline void Csinc(const Register& rd, - const Register& rn, - const Register& rm, + inline void Csinv(const Register& rd, const Register& rn, const Register& rm, Condition cond); - inline void Csinv(const Register& rd, - const Register& rn, - const Register& rm, - Condition cond); - inline void Csneg(const Register& rd, - const Register& rn, - const Register& rm, + inline void Csneg(const Register& rd, const Register& rn, const Register& rm, Condition cond); inline void Dmb(BarrierDomain domain, BarrierType type); inline void Dsb(BarrierDomain domain, BarrierType type); - inline void Debug(const char* message, uint32_t code, Instr params = BREAK); - inline void Extr(const Register& rd, - const Register& rn, - const Register& rm, + inline void Extr(const Register& rd, const Register& rn, const Register& rm, unsigned lsb); - inline void Fabs(const FPRegister& fd, const FPRegister& fn); - inline void Fadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); - inline void Fccmp(const FPRegister& fn, - const FPRegister& fm, - StatusFlags nzcv, - Condition cond); - inline void Fcmp(const FPRegister& fn, const FPRegister& fm); - inline void Fcmp(const FPRegister& fn, double value); - inline void Fcsel(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - Condition cond); - inline void Fcvt(const FPRegister& fd, const FPRegister& fn); - inline void Fcvtas(const Register& rd, const FPRegister& fn); - inline void Fcvtau(const Register& rd, const FPRegister& fn); - inline void Fcvtms(const Register& rd, const FPRegister& fn); - inline void Fcvtmu(const Register& rd, const FPRegister& fn); - inline void Fcvtns(const Register& rd, const FPRegister& fn); - inline void Fcvtnu(const Register& rd, const FPRegister& fn); - inline void Fcvtzs(const Register& rd, const FPRegister& fn); - inline void Fcvtzu(const Register& rd, const FPRegister& fn); - inline void Fdiv(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); - inline void Fmadd(const FPRegister& fd, - const 
FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); - inline void Fmax(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); - inline void Fmaxnm(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); - inline void Fmin(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); - inline void Fminnm(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); - inline void Fmov(FPRegister fd, FPRegister fn); - inline void Fmov(FPRegister fd, Register rn); - // Provide explicit double and float interfaces for FP immediate moves, rather - // than relying on implicit C++ casts. This allows signalling NaNs to be - // preserved when the immediate matches the format of fd. Most systems convert - // signalling NaNs to quiet NaNs when converting between float and double. - inline void Fmov(FPRegister fd, double imm); - inline void Fmov(FPRegister fd, float imm); - // Provide a template to allow other types to be converted automatically. - template - void Fmov(FPRegister fd, T imm) { - DCHECK(allow_macro_instructions_); - Fmov(fd, static_cast(imm)); + inline void Fcsel(const VRegister& fd, const VRegister& fn, + const VRegister& fm, Condition cond); + void Fcvtl(const VRegister& vd, const VRegister& vn) { + DCHECK(allow_macro_instructions()); + fcvtl(vd, vn); + } + void Fcvtl2(const VRegister& vd, const VRegister& vn) { + DCHECK(allow_macro_instructions()); + fcvtl2(vd, vn); + } + void Fcvtn(const VRegister& vd, const VRegister& vn) { + DCHECK(allow_macro_instructions()); + fcvtn(vd, vn); + } + void Fcvtn2(const VRegister& vd, const VRegister& vn) { + DCHECK(allow_macro_instructions()); + fcvtn2(vd, vn); + } + void Fcvtxn(const VRegister& vd, const VRegister& vn) { + DCHECK(allow_macro_instructions()); + fcvtxn(vd, vn); } - inline void Fmov(Register rd, FPRegister fn); - inline void Fmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); - inline void Fmul(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); - inline void Fneg(const FPRegister& fd, const FPRegister& fn); - inline void Fnmadd(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); - inline void Fnmsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm, - const FPRegister& fa); - inline void Frinta(const FPRegister& fd, const FPRegister& fn); - inline void Frintm(const FPRegister& fd, const FPRegister& fn); - inline void Frintn(const FPRegister& fd, const FPRegister& fn); - inline void Frintp(const FPRegister& fd, const FPRegister& fn); - inline void Frintz(const FPRegister& fd, const FPRegister& fn); - inline void Fsqrt(const FPRegister& fd, const FPRegister& fn); - inline void Fsub(const FPRegister& fd, - const FPRegister& fn, - const FPRegister& fm); + void Fcvtxn2(const VRegister& vd, const VRegister& vn) { + DCHECK(allow_macro_instructions()); + fcvtxn2(vd, vn); + } + inline void Fmadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa); + inline void Fmaxnm(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Fminnm(const VRegister& fd, const VRegister& fn, + const VRegister& fm); + inline void Fmsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa); + inline void Fnmadd(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const VRegister& fa); + inline void Fnmsub(const VRegister& fd, const VRegister& fn, + const VRegister& fm, const 
VRegister& fa); inline void Hint(SystemHint code); inline void Hlt(int code); inline void Isb(); - inline void Ldnp(const CPURegister& rt, - const CPURegister& rt2, + inline void Ldnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& src); - // Load a literal from the inline constant pool. - inline void Ldr(const CPURegister& rt, const Immediate& imm); - // Helper function for double immediate. - inline void Ldr(const CPURegister& rt, double imm); - inline void Lsl(const Register& rd, const Register& rn, unsigned shift); - inline void Lsl(const Register& rd, const Register& rn, const Register& rm); - inline void Lsr(const Register& rd, const Register& rn, unsigned shift); - inline void Lsr(const Register& rd, const Register& rn, const Register& rm); - inline void Madd(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - inline void Mneg(const Register& rd, const Register& rn, const Register& rm); - inline void Mov(const Register& rd, const Register& rm); inline void Movk(const Register& rd, uint64_t imm, int shift = -1); - inline void Mrs(const Register& rt, SystemRegister sysreg); inline void Msr(SystemRegister sysreg, const Register& rt); - inline void Msub(const Register& rd, - const Register& rn, - const Register& rm, - const Register& ra); - inline void Mul(const Register& rd, const Register& rn, const Register& rm); inline void Nop() { nop(); } - inline void Rbit(const Register& rd, const Register& rn); - inline void Ret(const Register& xn = lr); + void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL, + const int shift_amount = 0) { + DCHECK(allow_macro_instructions()); + mvni(vd, imm8, shift, shift_amount); + } inline void Rev(const Register& rd, const Register& rn); - inline void Rev16(const Register& rd, const Register& rn); - inline void Rev32(const Register& rd, const Register& rn); - inline void Ror(const Register& rd, const Register& rs, unsigned shift); - inline void Ror(const Register& rd, const Register& rn, const Register& rm); - inline void Sbfiz(const Register& rd, - const Register& rn, - unsigned lsb, + inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb, unsigned width); - inline void Sbfx(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width); - inline void Scvtf(const FPRegister& fd, - const Register& rn, - unsigned fbits = 0); - inline void Sdiv(const Register& rd, const Register& rn, const Register& rm); - inline void Smaddl(const Register& rd, - const Register& rn, - const Register& rm, + inline void Smaddl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); - inline void Smsubl(const Register& rd, - const Register& rn, - const Register& rm, + inline void Smsubl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); - inline void Smull(const Register& rd, - const Register& rn, - const Register& rm); - inline void Smulh(const Register& rd, - const Register& rn, - const Register& rm); - inline void Umull(const Register& rd, const Register& rn, const Register& rm); - inline void Stnp(const CPURegister& rt, - const CPURegister& rt2, + inline void Smulh(const Register& rd, const Register& rn, const Register& rm); + inline void Stnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& dst); - inline void Sxtb(const Register& rd, const Register& rn); - inline void Sxth(const Register& rd, const Register& rn); - inline void Sxtw(const Register& rd, const Register& rn); - void Tbnz(const Register& rt, 
unsigned bit_pos, Label* label); - void Tbz(const Register& rt, unsigned bit_pos, Label* label); - inline void Ubfiz(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width); - inline void Ubfx(const Register& rd, - const Register& rn, - unsigned lsb, - unsigned width); - inline void Ucvtf(const FPRegister& fd, - const Register& rn, - unsigned fbits = 0); - inline void Udiv(const Register& rd, const Register& rn, const Register& rm); - inline void Umaddl(const Register& rd, - const Register& rn, - const Register& rm, + inline void Umaddl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); - inline void Umsubl(const Register& rd, - const Register& rn, - const Register& rm, + inline void Umsubl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); - inline void Uxtb(const Register& rd, const Register& rn); - inline void Uxth(const Register& rd, const Register& rn); - inline void Uxtw(const Register& rd, const Register& rn); - - // Pseudo-instructions ------------------------------------------------------ - // Compute rd = abs(rm). - // This function clobbers the condition flags. On output the overflow flag is - // set iff the negation overflowed. - // - // If rm is the minimum representable value, the result is not representable. - // Handlers for each case can be specified using the relevant labels. - void Abs(const Register& rd, const Register& rm, - Label * is_not_representable = NULL, - Label * is_representable = NULL); + void Cmle(const VRegister& vd, const VRegister& vn, int imm) { + DCHECK(allow_macro_instructions()); + cmle(vd, vn, imm); + } + void Cmlt(const VRegister& vd, const VRegister& vn, int imm) { + DCHECK(allow_macro_instructions()); + cmlt(vd, vn, imm); + } - // Push or pop up to 4 registers of the same width to or from the stack, - // using the current stack pointer as set by SetStackPointer. - // - // If an argument register is 'NoReg', all further arguments are also assumed - // to be 'NoReg', and are thus not pushed or popped. - // - // Arguments are ordered such that "Push(a, b);" is functionally equivalent - // to "Push(a); Push(b);". - // - // It is valid to push the same register more than once, and there is no - // restriction on the order in which registers are specified. - // - // It is not valid to pop into the same register more than once in one - // operation, not even into the zero register. - // - // If the current stack pointer (as set by SetStackPointer) is csp, then it - // must be aligned to 16 bytes on entry and the total size of the specified - // registers must also be a multiple of 16 bytes. - // - // Even if the current stack pointer is not the system stack pointer (csp), - // Push (and derived methods) will still modify the system stack pointer in - // order to comply with ABI rules about accessing memory below the system - // stack pointer. - // - // Other than the registers passed into Pop, the stack pointer and (possibly) - // the system stack pointer, these methods do not modify any other registers. 
- void Push(const CPURegister& src0, const CPURegister& src1 = NoReg, - const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg); - void Push(const CPURegister& src0, const CPURegister& src1, - const CPURegister& src2, const CPURegister& src3, - const CPURegister& src4, const CPURegister& src5 = NoReg, - const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg); - void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg, - const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg); - void Pop(const CPURegister& dst0, const CPURegister& dst1, - const CPURegister& dst2, const CPURegister& dst3, - const CPURegister& dst4, const CPURegister& dst5 = NoReg, - const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg); - void Push(const Register& src0, const FPRegister& src1); + void Ld1(const VRegister& vt, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld1(vt, src); + } + void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld1(vt, vt2, src); + } + void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld1(vt, vt2, vt3, src); + } + void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld1(vt, vt2, vt3, vt4, src); + } + void Ld1(const VRegister& vt, int lane, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld1(vt, lane, src); + } + void Ld1r(const VRegister& vt, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld1r(vt, src); + } + void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld2(vt, vt2, src); + } + void Ld2(const VRegister& vt, const VRegister& vt2, int lane, + const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld2(vt, vt2, lane, src); + } + void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld2r(vt, vt2, src); + } + void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld3(vt, vt2, vt3, src); + } + void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + int lane, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld3(vt, vt2, vt3, lane, src); + } + void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld3r(vt, vt2, vt3, src); + } + void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld4(vt, vt2, vt3, vt4, src); + } + void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, int lane, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld4(vt, vt2, vt3, vt4, lane, src); + } + void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& src) { + DCHECK(allow_macro_instructions()); + ld4r(vt, vt2, vt3, vt4, src); + } + void St1(const VRegister& vt, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st1(vt, dst); + } + void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st1(vt, vt2, dst); + } + void 
St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st1(vt, vt2, vt3, dst); + } + void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st1(vt, vt2, vt3, vt4, dst); + } + void St1(const VRegister& vt, int lane, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st1(vt, lane, dst); + } + void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st2(vt, vt2, dst); + } + void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st3(vt, vt2, vt3, dst); + } + void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st4(vt, vt2, vt3, vt4, dst); + } + void St2(const VRegister& vt, const VRegister& vt2, int lane, + const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st2(vt, vt2, lane, dst); + } + void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + int lane, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st3(vt, vt2, vt3, lane, dst); + } + void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3, + const VRegister& vt4, int lane, const MemOperand& dst) { + DCHECK(allow_macro_instructions()); + st4(vt, vt2, vt3, vt4, lane, dst); + } + void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbx(vd, vn, vm); + } + void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbx(vd, vn, vn2, vm); + } + void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vn3, const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbx(vd, vn, vn2, vn3, vm); + } + void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2, + const VRegister& vn3, const VRegister& vn4, const VRegister& vm) { + DCHECK(allow_macro_instructions()); + tbx(vd, vn, vn2, vn3, vn4, vm); + } - // Alternative forms of Push and Pop, taking a RegList or CPURegList that - // specifies the registers that are to be pushed or popped. Higher-numbered - // registers are associated with higher memory addresses (as in the A32 push - // and pop instructions). - // - // (Push|Pop)SizeRegList allow you to specify the register size as a - // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and - // kSRegSizeInBits are supported. - // - // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred. 
-  void PushCPURegList(CPURegList registers);
-  void PopCPURegList(CPURegList registers);
+  void LoadObject(Register result, Handle<Object> object);
   inline void PushSizeRegList(RegList registers, unsigned reg_size,
       CPURegister::RegisterType type = CPURegister::kRegister) {
@@ -675,33 +1585,23 @@ class MacroAssembler : public Assembler {
     PopSizeRegList(regs, kWRegSizeInBits);
   }
   inline void PushDRegList(RegList regs) {
-    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
   }
   inline void PopDRegList(RegList regs) {
-    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
+    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
   }
   inline void PushSRegList(RegList regs) {
-    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopSRegList(RegList regs) {
-    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
+    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }
   // Push the specified register 'count' times.
   void PushMultipleTimes(CPURegister src, Register count);
   void PushMultipleTimes(CPURegister src, int count);
-  // This is a convenience method for pushing a single Handle<Object>.
-  inline void Push(Handle<Object> handle);
-  inline void Push(Smi* smi);
-
-  // Aliases of Push and Pop, required for V8 compatibility.
-  inline void push(Register src) {
-    Push(src);
-  }
-  inline void pop(Register dst) {
-    Pop(dst);
-  }
+  inline void PushObject(Handle<Object> handle);
   // Sometimes callers need to push or pop multiple registers in a way that is
   // difficult to structure efficiently for fixed Push or Pop calls. This scope
@@ -736,25 +1636,12 @@ class MacroAssembler : public Assembler {
     std::vector<CPURegister> queued_;
   };
-  // Poke 'src' onto the stack. The offset is in bytes.
-  //
-  // If the current stack pointer (according to StackPointer()) is csp, then
-  // csp must be aligned to 16 bytes.
-  void Poke(const CPURegister& src, const Operand& offset);
-
   // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
   //
   // If the current stack pointer (according to StackPointer()) is csp, then
   // csp must be aligned to 16 bytes.
   void Peek(const CPURegister& dst, const Operand& offset);
-  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
-  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
-  //
-  // If the current stack pointer (according to StackPointer()) is csp, then
-  // csp must be aligned to 16 bytes.
-  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
-
   // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
   // values peeked will be adjacent, with the value in 'dst2' being from a
   // higher address than 'dst1'. The offset is in bytes.
@@ -775,24 +1662,6 @@ class MacroAssembler : public Assembler {
     UNIMPLEMENTED();
   }
-  // Claim or drop stack space without actually accessing memory.
-  //
-  // In debug mode, both of these will write invalid data into the claimed or
-  // dropped space.
-  //
-  // If the current stack pointer (according to StackPointer()) is csp, then it
-  // must be aligned to 16 bytes and the size claimed or dropped must be a
-  // multiple of 16 bytes.
-  //
-  // Note that unit_size must be specified in bytes. For variants which take a
-  // Register count, the unit size must be a power of two.
- inline void Claim(int64_t count, uint64_t unit_size = kXRegSize); - inline void Claim(const Register& count, - uint64_t unit_size = kXRegSize); - inline void Drop(int64_t count, uint64_t unit_size = kXRegSize); - inline void Drop(const Register& count, - uint64_t unit_size = kXRegSize); - // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a // register. inline void ClaimBySMI(const Register& count_smi, @@ -807,18 +1676,6 @@ class MacroAssembler : public Assembler { Condition cond, Label* label); - // Test the bits of register defined by bit_pattern, and branch if ANY of - // those bits are set. May corrupt the status flags. - inline void TestAndBranchIfAnySet(const Register& reg, - const uint64_t bit_pattern, - Label* label); - - // Test the bits of register defined by bit_pattern, and branch if ALL of - // those bits are clear (ie. not set.) May corrupt the status flags. - inline void TestAndBranchIfAllClear(const Register& reg, - const uint64_t bit_pattern, - Label* label); - // Insert one or more instructions into the instruction stream that encode // some caller-defined data. The instructions used will be executable with no // side effects. @@ -836,23 +1693,6 @@ class MacroAssembler : public Assembler { // it will be encoded in the event marker. inline void AnnotateInstrumentation(const char* marker_name); - // If emit_debug_code() is true, emit a run-time check to ensure that - // StackPointer() does not point below the system stack pointer. - // - // Whilst it is architecturally legal for StackPointer() to point below csp, - // it can be evidence of a potential bug because the ABI forbids accesses - // below csp. - // - // If StackPointer() is the system stack pointer (csp), then csp will be - // dereferenced to cause the processor (or simulator) to abort if it is not - // properly aligned. - // - // If emit_debug_code() is false, this emits no code. - void AssertStackConsistency(); - - // Emits a runtime assert that the CSP is aligned. - void AssertCspAligned(); - // Preserve the callee-saved registers (as defined by AAPCS64). // // Higher-numbered registers are pushed before lower-numbered registers, and @@ -870,87 +1710,31 @@ class MacroAssembler : public Assembler { // Restore the callee-saved registers (as defined by AAPCS64). // - // Higher-numbered registers are popped after lower-numbered registers, and - // thus come from higher addresses. - // Floating-point registers are popped after general-purpose registers, and - // thus come from higher addresses. - // - // This method must not be called unless the current stack pointer (as set by - // SetStackPointer) is the system stack pointer (csp), and is aligned to - // ActivationFrameAlignment(). - void PopCalleeSavedRegisters(); - - // Set the current stack pointer, but don't generate any code. - inline void SetStackPointer(const Register& stack_pointer) { - DCHECK(!TmpList()->IncludesAliasOf(stack_pointer)); - sp_ = stack_pointer; - } - - // Return the current stack pointer, as set by SetStackPointer. - inline const Register& StackPointer() const { - return sp_; - } - - // Align csp for a frame, as per ActivationFrameAlignment, and make it the - // current stack pointer. - inline void AlignAndSetCSPForFrame(); - - // Push the system stack pointer (csp) down to allow the same to be done to - // the current stack pointer (according to StackPointer()). This must be - // called _before_ accessing the memory. 
- // - // This is necessary when pushing or otherwise adding things to the stack, to - // satisfy the AAPCS64 constraint that the memory below the system stack - // pointer is not accessed. The amount pushed will be increased as necessary - // to ensure csp remains aligned to 16 bytes. - // - // This method asserts that StackPointer() is not csp, since the call does - // not make sense in that context. - inline void BumpSystemStackPointer(const Operand& space); - - // Re-synchronizes the system stack pointer (csp) with the current stack - // pointer (according to StackPointer()). + // Higher-numbered registers are popped after lower-numbered registers, and + // thus come from higher addresses. + // Floating-point registers are popped after general-purpose registers, and + // thus come from higher addresses. // - // This method asserts that StackPointer() is not csp, since the call does - // not make sense in that context. - inline void SyncSystemStackPointer(); + // This method must not be called unless the current stack pointer (as set by + // SetStackPointer) is the system stack pointer (csp), and is aligned to + // ActivationFrameAlignment(). + void PopCalleeSavedRegisters(); - // Helpers ------------------------------------------------------------------ - // Root register. - inline void InitializeRootRegister(); + // Align csp for a frame, as per ActivationFrameAlignment, and make it the + // current stack pointer. + inline void AlignAndSetCSPForFrame(); - void AssertFPCRState(Register fpcr = NoReg); - void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src); - void CanonicalizeNaN(const FPRegister& reg) { - CanonicalizeNaN(reg, reg); - } + // Helpers ------------------------------------------------------------------ - // Load an object from the root table. - void LoadRoot(CPURegister destination, - Heap::RootListIndex index); // Store an object to the root table. void StoreRoot(Register source, Heap::RootListIndex index); - // Load both TrueValue and FalseValue roots. - void LoadTrueFalseRoots(Register true_root, Register false_root); - - void LoadHeapObject(Register dst, Handle object); - - void LoadObject(Register result, Handle object); - static int SafepointRegisterStackIndex(int reg_code); - // This is required for compatibility with architecture independant code. - // Remove if not needed. - void Move(Register dst, Register src); - void Move(Register dst, Handle x); - void Move(Register dst, Smi* src); - void LoadInstanceDescriptors(Register map, Register descriptors); void EnumLengthUntagged(Register dst, Register map); - void EnumLengthSmi(Register dst, Register map); void NumberOfOwnDescriptors(Register dst, Register map); void LoadAccessor(Register dst, Register holder, int accessor_index, AccessorComponent accessor); @@ -971,22 +1755,15 @@ class MacroAssembler : public Assembler { inline void SmiTag(Register dst, Register src); inline void SmiTag(Register smi); - inline void SmiUntag(Register dst, Register src); - inline void SmiUntag(Register smi); - inline void SmiUntagToDouble(FPRegister dst, - Register src, + inline void SmiUntagToDouble(VRegister dst, Register src, UntagMode mode = kNotSpeculativeUntag); - inline void SmiUntagToFloat(FPRegister dst, - Register src, + inline void SmiUntagToFloat(VRegister dst, Register src, UntagMode mode = kNotSpeculativeUntag); // Tag and push in one step. 
inline void SmiTagAndPush(Register src); inline void SmiTagAndPush(Register src1, Register src2); - inline void JumpIfSmi(Register value, - Label* smi_label, - Label* not_smi_label = NULL); inline void JumpIfNotSmi(Register value, Label* not_smi_label); inline void JumpIfBothSmi(Register value1, Register value2, @@ -1005,17 +1782,19 @@ class MacroAssembler : public Assembler { // Abort execution if argument is a smi, enabled via --debug-code. void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi); - void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi); inline void ObjectTag(Register tagged_obj, Register obj); inline void ObjectUntag(Register untagged_obj, Register obj); + // Abort execution if argument is not a FixedArray, enabled via --debug-code. + void AssertFixedArray(Register object); + // Abort execution if argument is not a JSFunction, enabled via --debug-code. void AssertFunction(Register object); - // Abort execution if argument is not a JSGeneratorObject, + // Abort execution if argument is not a JSGeneratorObject (or subclass), // enabled via --debug-code. - void AssertGeneratorObject(Register object, Register suspend_flags); + void AssertGeneratorObject(Register object); // Abort execution if argument is not a JSBoundFunction, // enabled via --debug-code. @@ -1025,58 +1804,18 @@ class MacroAssembler : public Assembler { // via --debug-code. void AssertUndefinedOrAllocationSite(Register object, Register scratch); - // Abort execution if argument is not a positive or zero integer, enabled via - // --debug-code. - void AssertPositiveOrZero(Register value); - void JumpIfHeapNumber(Register object, Label* on_heap_number, SmiCheckType smi_check_type = DONT_DO_SMI_CHECK); void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number, SmiCheckType smi_check_type = DONT_DO_SMI_CHECK); - // Sets the vs flag if the input is -0.0. - void TestForMinusZero(DoubleRegister input); - - // Jump to label if the input double register contains -0.0. - void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero); - - // Jump to label if the input integer register contains the double precision - // floating point representation of -0.0. - void JumpIfMinusZero(Register input, Label* on_negative_zero); - - // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in - // output. - void ClampInt32ToUint8(Register in_out); - void ClampInt32ToUint8(Register output, Register input); - - // Saturate a double in input to an unsigned 8-bit integer in output. - void ClampDoubleToUint8(Register output, - DoubleRegister input, - DoubleRegister dbl_scratch); - - // Try to represent a double as a signed 32-bit int. - // This succeeds if the result compares equal to the input, so inputs of -0.0 - // are represented as 0 and handled as a success. - // - // On output the Z flag is set if the operation was successful. - void TryRepresentDoubleAsInt32(Register as_int, - FPRegister value, - FPRegister scratch_d, - Label* on_successful_conversion = NULL, - Label* on_failed_conversion = NULL) { - DCHECK(as_int.Is32Bits()); - TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion, - on_failed_conversion); - } - // Try to represent a double as a signed 64-bit int. // This succeeds if the result compares equal to the input, so inputs of -0.0 // are represented as 0 and handled as a success. // // On output the Z flag is set if the operation was successful. 
- void TryRepresentDoubleAsInt64(Register as_int, - FPRegister value, - FPRegister scratch_d, + void TryRepresentDoubleAsInt64(Register as_int, VRegister value, + VRegister scratch_d, Label* on_successful_conversion = NULL, Label* on_failed_conversion = NULL) { DCHECK(as_int.Is64Bits()); @@ -1084,14 +1823,6 @@ class MacroAssembler : public Assembler { on_failed_conversion); } - // ---- Object Utilities ---- - - // Initialize fields with filler values. Fields starting at |current_address| - // not including |end_address| are overwritten with the value in |filler|. At - // the end the loop, |current_address| takes the value of |end_address|. - void InitializeFieldsWithFiller(Register current_address, - Register end_address, Register filler); - // ---- String Utilities ---- // Checks if both instance types are sequential one-byte strings and jumps to @@ -1104,10 +1835,7 @@ class MacroAssembler : public Assembler { // ---- Calling / Jumping helpers ---- - // This is required for compatibility in architecture indepenedant code. - inline void jmp(Label* L); - - void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None()); + void CallStub(CodeStub* stub); void TailCallStub(CodeStub* stub); void CallRuntime(const Runtime::Function* f, @@ -1134,22 +1862,6 @@ class MacroAssembler : public Assembler { void TailCallRuntime(Runtime::FunctionId fid); - int ActivationFrameAlignment(); - - // Calls a C function. - // The called function is not allowed to trigger a - // garbage collection, since that might move the code and invalidate the - // return address (unless this is somehow accounted for by the called - // function). - void CallCFunction(ExternalReference function, - int num_reg_arguments); - void CallCFunction(ExternalReference function, - int num_reg_arguments, - int num_double_arguments); - void CallCFunction(Register function, - int num_reg_arguments, - int num_double_arguments); - // Jump to a runtime routine. void JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame = false); @@ -1159,36 +1871,6 @@ class MacroAssembler : public Assembler { int num_arguments); - void Jump(Register target); - void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); - void Jump(Handle code, RelocInfo::Mode rmode, Condition cond = al); - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); - - void Call(Register target); - void Call(Label* target); - void Call(Address target, RelocInfo::Mode rmode); - void Call(Handle code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - TypeFeedbackId ast_id = TypeFeedbackId::None()); - - // For every Call variant, there is a matching CallSize function that returns - // the size (in bytes) of the call sequence. - static int CallSize(Register target); - static int CallSize(Label* target); - static int CallSize(Address target, RelocInfo::Mode rmode); - static int CallSize(Handle code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - TypeFeedbackId ast_id = TypeFeedbackId::None()); - - // Removes current frame and its arguments from the stack preserving - // the arguments and a return address pushed to the stack for the next call. - // Both |callee_args_count| and |caller_args_count_reg| do not include - // receiver. |callee_args_count| is not modified, |caller_args_count_reg| - // is trashed. - void PrepareForTailCall(const ParameterCount& callee_args_count, - Register caller_args_count_reg, Register scratch0, - Register scratch1); - // Registers used through the invocation chain are hard-coded. 
// We force passing the parameters to ensure the contracts are correctly // honoured by the caller. @@ -1229,70 +1911,8 @@ class MacroAssembler : public Assembler { InvokeFlag flag, const CallWrapper& call_wrapper); - - // ---- Floating point helpers ---- - - // Perform a conversion from a double to a signed int64. If the input fits in - // range of the 64-bit result, execution branches to done. Otherwise, - // execution falls through, and the sign of the result can be used to - // determine if overflow was towards positive or negative infinity. - // - // On successful conversion, the least significant 32 bits of the result are - // equivalent to the ECMA-262 operation "ToInt32". - // - // Only public for the test code in test-code-stubs-arm64.cc. - void TryConvertDoubleToInt64(Register result, - DoubleRegister input, - Label* done); - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. - // Exits with 'result' holding the answer. - void TruncateDoubleToI(Register result, DoubleRegister double_input); - - // Performs a truncating conversion of a heap number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' - // must be different registers. Exits with 'result' holding the answer. - void TruncateHeapNumberToI(Register result, Register object); - - // Converts the smi or heap number in object to an int32 using the rules - // for ToInt32 as described in ECMAScript 9.5.: the value is truncated - // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be - // different registers. - void TruncateNumberToI(Register object, - Register result, - Register heap_number_map, - Label* not_int32); - // ---- Code generation helpers ---- - void set_generating_stub(bool value) { generating_stub_ = value; } - bool generating_stub() const { return generating_stub_; } -#if DEBUG - void set_allow_macro_instructions(bool value) { - allow_macro_instructions_ = value; - } - bool allow_macro_instructions() const { return allow_macro_instructions_; } -#endif - bool use_real_aborts() const { return use_real_aborts_; } - void set_has_frame(bool value) { has_frame_ = value; } - bool has_frame() const { return has_frame_; } - bool AllowThisStubCall(CodeStub* stub); - - class NoUseRealAbortsScope { - public: - explicit NoUseRealAbortsScope(MacroAssembler* masm) : - saved_(masm->use_real_aborts_), masm_(masm) { - masm_->use_real_aborts_ = false; - } - ~NoUseRealAbortsScope() { - masm_->use_real_aborts_ = saved_; - } - private: - bool saved_; - MacroAssembler* masm_; - }; - // Frame restart support void MaybeDropFrames(); @@ -1325,25 +1945,14 @@ class MacroAssembler : public Assembler { Label* gc_required, AllocationFlags flags); - // FastAllocate is right now only used for folded allocations. It just - // increments the top pointer without checking against limit. This can only - // be done if it was proved earlier that the allocation will succeed. - void FastAllocate(Register object_size, Register result, Register result_end, - Register scratch, AllocationFlags flags); - - void FastAllocate(int object_size, Register result, Register scratch1, - Register scratch2, AllocationFlags flags); - // Allocates a heap number or jumps to the gc_required label if the young // space is full and a scavenge is needed. // All registers are clobbered. // If no heap_number_map register is provided, the function will take care of // loading it. 
- void AllocateHeapNumber(Register result, - Label* gc_required, - Register scratch1, - Register scratch2, - CPURegister value = NoFPReg, + void AllocateHeapNumber(Register result, Label* gc_required, + Register scratch1, Register scratch2, + CPURegister value = NoVReg, CPURegister heap_number_map = NoReg, MutableMode mode = IMMUTABLE); @@ -1444,10 +2053,6 @@ class MacroAssembler : public Assembler { // miss label if the weak cell was cleared. void LoadWeakValue(Register value, Handle cell, Label* miss); - // Test the bitfield of the heap object map with mask and set the condition - // flags. The object register is preserved. - void TestMapBitfield(Register object, uint64_t mask); - // Load the elements kind field from a map, and return it in the result // register. void LoadElementsKindFromMap(Register result, Register map); @@ -1497,12 +2102,6 @@ class MacroAssembler : public Assembler { // --------------------------------------------------------------------------- // Inline caching support. - void EmitSeqStringSetCharCheck(Register string, - Register index, - SeqStringSetCharCheckIndexType index_type, - Register scratch, - uint32_t encoding_mask); - // Hash the interger value in 'key' register. // It uses the same algorithm as ComputeIntegerHash in utils.h. void GetNumberHash(Register key, Register scratch); @@ -1513,11 +2112,6 @@ class MacroAssembler : public Assembler { // Load the type feedback vector from a JavaScript frame. void EmitLoadFeedbackVector(Register vector); - // Activation support. - void EnterFrame(StackFrame::Type type); - void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg); - void LeaveFrame(StackFrame::Type type); - void EnterBuiltinFrame(Register context, Register target, Register argc); void LeaveBuiltinFrame(Register context, Register target, Register argc); @@ -1526,27 +2120,12 @@ class MacroAssembler : public Assembler { Register scratch2, Register scratch3, Register scratch4, Label* call_runtime); - // AllocationMemento support. Arrays may have an associated - // AllocationMemento object that can be checked for in order to pretransition - // to another type. - // On entry, receiver should point to the array object. - // If allocation info is present, the Z flag is set (so that the eq - // condition will pass). - void TestJSArrayForAllocationMemento(Register receiver, - Register scratch1, - Register scratch2, - Label* no_memento_found); - // The stack pointer has to switch between csp and jssp when setting up and // destroying the exit frame. Hence preserving/restoring the registers is // slightly more complicated than simple push/pop operations. void ExitFramePreserveFPRegs(); void ExitFrameRestoreFPRegs(); - // Generates function and stub prologue code. - void StubPrologue(StackFrame::Type type, int frame_slots); - void Prologue(bool code_pre_aging); - // Enter exit frame. Exit frames are used when calling C code from generated // (JavaScript) code. // @@ -1601,15 +2180,9 @@ class MacroAssembler : public Assembler { LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst); } - // Emit code for a truncating division by a constant. The dividend register is - // unchanged. Dividend and result must be different. 
- void TruncatingDiv(Register result, Register dividend, int32_t divisor); - // --------------------------------------------------------------------------- // StatsCounter support - void SetCounter(StatsCounter* counter, int value, Register scratch1, - Register scratch2); void IncrementCounter(StatsCounter* counter, int value, Register scratch1, Register scratch2); void DecrementCounter(StatsCounter* counter, int value, Register scratch1, @@ -1637,9 +2210,6 @@ class MacroAssembler : public Assembler { void PushSafepointRegisters(); void PopSafepointRegisters(); - void PushSafepointRegistersAndDoubles(); - void PopSafepointRegistersAndDoubles(); - // Store value in register src in the safepoint stack slot for register dst. void StoreToSafepointRegisterSlot(Register src, Register dst); @@ -1650,16 +2220,6 @@ class MacroAssembler : public Assembler { void CheckPageFlag(const Register& object, const Register& scratch, int mask, Condition cc, Label* condition_met); - void CheckPageFlagSet(const Register& object, - const Register& scratch, - int mask, - Label* if_any_set); - - void CheckPageFlagClear(const Register& object, - const Register& scratch, - int mask, - Label* if_all_clear); - // Check if object is in new space and jump accordingly. // Register 'object' is preserved. void JumpIfNotInNewSpace(Register object, @@ -1772,10 +2332,6 @@ class MacroAssembler : public Assembler { // --------------------------------------------------------------------------- // Debugging. - // Calls Abort(msg) if the condition cond is not satisfied. - // Use --debug_code to enable. - void Assert(Condition cond, BailoutReason reason); - void AssertRegisterIsClear(Register reg, BailoutReason reason); void AssertRegisterIsRoot( Register reg, Heap::RootListIndex index, @@ -1787,18 +2343,6 @@ class MacroAssembler : public Assembler { // If emit_debug_code() is false, this emits no code. void AssertHasValidColor(const Register& reg); - // Abort if 'object' register doesn't point to a string object. - // - // If emit_debug_code() is false, this emits no code. - void AssertIsString(const Register& object); - - // Like Assert(), but always enabled. - void Check(Condition cond, BailoutReason reason); - void CheckRegisterIsClear(Register reg, BailoutReason reason); - - // Print a message to stderr and abort execution. - void Abort(BailoutReason reason); - void LoadNativeContextSlot(int index, Register dst); // Load the initial map from the global function. The registers function and @@ -1807,16 +2351,10 @@ class MacroAssembler : public Assembler { Register map, Register scratch); - CPURegList* TmpList() { return &tmp_list_; } - CPURegList* FPTmpList() { return &fptmp_list_; } - - static CPURegList DefaultTmpList(); - static CPURegList DefaultFPTmpList(); - // Like printf, but print at run-time from generated code. // // The caller must ensure that arguments for floating-point placeholders - // (such as %e, %f or %g) are FPRegisters, and that arguments for integer + // (such as %e, %f or %g) are VRegisters, and that arguments for integer // placeholders are Registers. // // At the moment it is only possible to print the value of csp if it is the @@ -1848,78 +2386,12 @@ class MacroAssembler : public Assembler { const CPURegister& arg2 = NoCPUReg, const CPURegister& arg3 = NoCPUReg); - // Code ageing support functions. - - // Code ageing on ARM64 works similarly to on ARM. 
When V8 wants to mark a - // function as old, it replaces some of the function prologue (generated by - // FullCodeGenerator::Generate) with a call to a special stub (ultimately - // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the - // function prologue to its initial young state (indicating that it has been - // recently run) and continues. A young function is therefore one which has a - // normal frame setup sequence, and an old function has a code age sequence - // which calls a code ageing stub. - - // Set up a basic stack frame for young code (or code exempt from ageing) with - // type FUNCTION. It may be patched later for code ageing support. This is - // done by to Code::PatchPlatformCodeAge and EmitCodeAgeSequence. - // - // This function takes an Assembler so it can be called from either a - // MacroAssembler or a PatchingAssembler context. - static void EmitFrameSetupForCodeAgePatching(Assembler* assm); - - // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context. - void EmitFrameSetupForCodeAgePatching(); - - // Emit a code age sequence that calls the relevant code age stub. The code - // generated by this sequence is expected to replace the code generated by - // EmitFrameSetupForCodeAgePatching, and represents an old function. - // - // If stub is NULL, this function generates the code age sequence but omits - // the stub address that is normally embedded in the instruction stream. This - // can be used by debug code to verify code age sequences. - static void EmitCodeAgeSequence(Assembler* assm, Code* stub); - - // Call EmitCodeAgeSequence from a MacroAssembler context. - void EmitCodeAgeSequence(Code* stub); - // Return true if the sequence is a young sequence geneated by // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the // sequence is a code age sequence (emitted by EmitCodeAgeSequence). static bool IsYoungSequence(Isolate* isolate, byte* sequence); - // Perform necessary maintenance operations before a push or after a pop. - // - // Note that size is specified in bytes. - void PushPreamble(Operand total_size); - void PopPostamble(Operand total_size); - - void PushPreamble(int count, int size); - void PopPostamble(int count, int size); - private: - // The actual Push and Pop implementations. These don't generate any code - // other than that required for the push or pop. This allows - // (Push|Pop)CPURegList to bundle together run-time assertions for a large - // block of registers. - // - // Note that size is per register, and is specified in bytes. - void PushHelper(int count, int size, - const CPURegister& src0, const CPURegister& src1, - const CPURegister& src2, const CPURegister& src3); - void PopHelper(int count, int size, - const CPURegister& dst0, const CPURegister& dst1, - const CPURegister& dst2, const CPURegister& dst3); - - // Call Printf. On a native build, a simple call will be generated, but if the - // simulator is being used then a suitable pseudo-instruction is used. The - // arguments and stack (csp) must be prepared by the caller as for a normal - // AAPCS64 call to 'printf'. - // - // The 'args' argument should point to an array of variable arguments in their - // proper PCS registers (and in calling order). The argument registers can - // have mixed types. The format string (x0) should not be included. - void CallPrintf(int arg_count = 0, const CPURegister * args = NULL); - // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. 
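[Editor's note: the InNewSpace helper declared next, like the CheckPageFlag family earlier in this class, works by masking an object's address down to the page (MemoryChunk) it lives on and testing flag bits stored in the page header. A simplified stand-alone sketch of that idea; the page size and flag bit are assumed placeholders, not V8's real constants.]

#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 19;        // assumed 512 KB pages
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kInNewSpaceFlag = uintptr_t{1} << 3;   // assumed flag bit

struct PageHeader {
  uintptr_t flags;  // flag word at the start of every page in this sketch
};

bool ObjectIsInNewSpace(uintptr_t object_address) {
  // Clear the low bits of the address to reach the page header, then test it.
  auto* page =
      reinterpret_cast<const PageHeader*>(object_address & ~kPageAlignmentMask);
  return (page->flags & kInNewSpaceFlag) != 0;
}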
void InNewSpace(Register object, Condition cond, // eq for new space, ne otherwise. @@ -1934,40 +2406,11 @@ class MacroAssembler : public Assembler { // important it must be checked separately. // // On output the Z flag is set if the operation was successful. - void TryRepresentDoubleAsInt(Register as_int, - FPRegister value, - FPRegister scratch_d, + void TryRepresentDoubleAsInt(Register as_int, VRegister value, + VRegister scratch_d, Label* on_successful_conversion = NULL, Label* on_failed_conversion = NULL); - bool generating_stub_; -#if DEBUG - // Tell whether any of the macro instruction can be used. When false the - // MacroAssembler will assert if a method which can emit a variable number - // of instructions is called. - bool allow_macro_instructions_; -#endif - bool has_frame_; - Isolate* isolate_; - - // The Abort method should call a V8 runtime function, but the CallRuntime - // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will - // use a simpler abort mechanism that doesn't depend on CEntryStub. - // - // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is - // being generated. - bool use_real_aborts_; - - // This handle will be patched with the code object on installation. - Handle code_object_; - - // The register to use as a stack pointer for stack operations. - Register sp_; - - // Scratch registers available for use by the MacroAssembler. - CPURegList tmp_list_; - CPURegList fptmp_list_; - public: // Far branches resolving. // @@ -1981,17 +2424,6 @@ class MacroAssembler : public Assembler { // branch isntructions with a range of +-128MB. If that becomes too little // (!), the mechanism can be extended to generate special veneers for really // far targets. - - // Helps resolve branching to labels potentially out of range. - // If the label is not bound, it registers the information necessary to later - // be able to emit a veneer for this branch if necessary. - // If the label is bound, it returns true if the label (or the previous link - // in the label chain) is out of range. In that case the caller is responsible - // for generating appropriate code. - // Otherwise it returns false. - // This function also checks wether veneers need to be emitted. - bool NeedExtraInstructionsOrRegisterBranch(Label *label, - ImmBranchType branch_type); }; @@ -2001,39 +2433,39 @@ class MacroAssembler : public Assembler { // emitted is what you specified when creating the scope. class InstructionAccurateScope BASE_EMBEDDED { public: - explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) - : masm_(masm) + explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0) + : tasm_(tasm) #ifdef DEBUG , size_(count * kInstructionSize) #endif { // Before blocking the const pool, see if it needs to be emitted. 
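[Editor's note: for readers following the TurboAssembler split, InstructionAccurateScope blocks the constant and veneer pools and, in debug builds, checks that the caller emitted exactly the promised number of instructions. A usage sketch, assuming this header is included and that the assembler's nop() is available:]

void EmitExactlyTwoNops(TurboAssembler* tasm) {
  // The second argument is an instruction count; pools are blocked so nothing
  // can be interleaved with the two instructions below.
  InstructionAccurateScope scope(tasm, 2);
  tasm->nop();
  tasm->nop();
  // On destruction (debug builds) the scope DCHECKs that exactly
  // 2 * kInstructionSize bytes were generated since it opened.
}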
- masm_->CheckConstPool(false, true); - masm_->CheckVeneerPool(false, true); + tasm_->CheckConstPool(false, true); + tasm_->CheckVeneerPool(false, true); - masm_->StartBlockPools(); + tasm_->StartBlockPools(); #ifdef DEBUG if (count != 0) { - masm_->bind(&start_); + tasm_->bind(&start_); } - previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); - masm_->set_allow_macro_instructions(false); + previous_allow_macro_instructions_ = tasm_->allow_macro_instructions(); + tasm_->set_allow_macro_instructions(false); #endif } ~InstructionAccurateScope() { - masm_->EndBlockPools(); + tasm_->EndBlockPools(); #ifdef DEBUG if (start_.is_bound()) { - DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_); + DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_); } - masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); + tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_); #endif } private: - MacroAssembler* masm_; + TurboAssembler* tasm_; #ifdef DEBUG size_t size_; Label start_; @@ -2041,23 +2473,24 @@ class InstructionAccurateScope BASE_EMBEDDED { #endif }; - // This scope utility allows scratch registers to be managed safely. The -// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch +// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch // registers. These registers can be allocated on demand, and will be returned // at the end of the scope. // // When the scope ends, the MacroAssembler's lists will be restored to their -// original state, even if the lists were modified by some other means. +// original state, even if the lists were modified by some other means. Note +// that this scope can be nested but the destructors need to run in the opposite +// order as the constructors. We do not have assertions for this. class UseScratchRegisterScope { public: - explicit UseScratchRegisterScope(MacroAssembler* masm) - : available_(masm->TmpList()), - availablefp_(masm->FPTmpList()), + explicit UseScratchRegisterScope(TurboAssembler* tasm) + : available_(tasm->TmpList()), + availablefp_(tasm->FPTmpList()), old_available_(available_->list()), old_availablefp_(availablefp_->list()) { - DCHECK(available_->type() == CPURegister::kRegister); - DCHECK(availablefp_->type() == CPURegister::kFPRegister); + DCHECK_EQ(available_->type(), CPURegister::kRegister); + DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister); } ~UseScratchRegisterScope(); @@ -2066,15 +2499,18 @@ class UseScratchRegisterScope { // automatically when the scope ends. Register AcquireW() { return AcquireNextAvailable(available_).W(); } Register AcquireX() { return AcquireNextAvailable(available_).X(); } - FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); } - FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); } + VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); } + VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); } + VRegister AcquireV(VectorFormat format) { + return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format); + } Register UnsafeAcquire(const Register& reg) { return Register(UnsafeAcquire(available_, reg)); } Register AcquireSameSizeAs(const Register& reg); - FPRegister AcquireSameSizeAs(const FPRegister& reg); + VRegister AcquireSameSizeAs(const VRegister& reg); private: static CPURegister AcquireNextAvailable(CPURegList* available); @@ -2083,11 +2519,11 @@ class UseScratchRegisterScope { // Available scratch registers. 
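[Editor's note: a quick illustration of how the scope above is used. Scratch registers are borrowed from the assembler's TmpList()/FPTmpList() and handed back automatically, and nested scopes must be destroyed in reverse order of construction. A sketch using only the Acquire* methods declared here, assuming this header is included:]

void UseScratchExample(TurboAssembler* tasm) {
  UseScratchRegisterScope temps(tasm);
  Register scratch = temps.AcquireX();      // 64-bit scratch from TmpList()
  VRegister fp_scratch = temps.AcquireD();  // 64-bit scratch from FPTmpList()
  // ... emit code that clobbers scratch and fp_scratch ...
  // Both registers return to the pools when `temps` goes out of scope.
}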
CPURegList* available_; // kRegister - CPURegList* availablefp_; // kFPRegister + CPURegList* availablefp_; // kVRegister // The state of the available lists at the start of this scope. RegList old_available_; // kRegister - RegList old_availablefp_; // kFPRegister + RegList old_availablefp_; // kVRegister }; MemOperand ContextMemOperand(Register context, int index = 0); diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc index fb0e614982919b..231f4efd981c67 100644 --- a/deps/v8/src/arm64/simulator-arm64.cc +++ b/deps/v8/src/arm64/simulator-arm64.cc @@ -5,6 +5,7 @@ #include #include #include +#include #if V8_TARGET_ARCH_ARM64 @@ -43,14 +44,15 @@ namespace internal { #define MAGENTA "35" #define CYAN "36" #define WHITE "37" + typedef char const * const TEXT_COLOUR; TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : ""; TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : ""; TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : ""; TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : ""; TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : ""; -TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : ""; -TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : ""; +TEXT_COLOUR clr_vreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : ""; +TEXT_COLOUR clr_vreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : ""; TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : ""; TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : ""; TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : ""; @@ -94,7 +96,6 @@ SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) { return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask); default: UNREACHABLE(); - return SimSystemRegister(); } } @@ -231,20 +232,20 @@ void Simulator::CheckPCSComplianceAndRun() { #ifdef DEBUG CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count()); - CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count()); + CHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count()); int64_t saved_registers[kNumberOfCalleeSavedRegisters]; - uint64_t saved_fpregisters[kNumberOfCalleeSavedFPRegisters]; + uint64_t saved_fpregisters[kNumberOfCalleeSavedVRegisters]; CPURegList register_list = kCalleeSaved; - CPURegList fpregister_list = kCalleeSavedFP; + CPURegList fpregister_list = kCalleeSavedV; for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { // x31 is not a caller saved register, so no need to specify if we want // the stack or zero. saved_registers[i] = xreg(register_list.PopLowestIndex().code()); } - for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) { + for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) { saved_fpregisters[i] = dreg_bits(fpregister_list.PopLowestIndex().code()); } @@ -256,11 +257,11 @@ void Simulator::CheckPCSComplianceAndRun() { CHECK_EQ(original_stack, sp()); // Check that callee-saved registers have been preserved. 
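[Editor's note: the PCS compliance check in this part of the simulator leans on the AAPCS64 rules: x19-x28 must survive a call, as must the low 64 bits of v8-v15 (d8-d15), while the frame pointer x29 and link register x30 have their own handling and everything else is caller-saved and deliberately corrupted after the call returns. Expressed as small predicates over register codes (illustrative, not the CPURegList definitions themselves):]

// True when AAPCS64 requires the general-purpose register with this code to
// be preserved across a call (frame pointer and link register excluded).
constexpr bool IsCalleeSavedX(unsigned code) { return code >= 19 && code <= 28; }

// True when the low 64 bits of the vector register must be preserved.
constexpr bool IsCalleeSavedV(unsigned code) { return code >= 8 && code <= 15; }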
register_list = kCalleeSaved; - fpregister_list = kCalleeSavedFP; + fpregister_list = kCalleeSavedV; for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) { CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code())); } - for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) { + for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) { DCHECK(saved_fpregisters[i] == dreg_bits(fpregister_list.PopLowestIndex().code())); } @@ -275,11 +276,11 @@ void Simulator::CheckPCSComplianceAndRun() { // In theory d0 to d7 can be used for return values, but V8 only uses d0 // for now . - fpregister_list = kCallerSavedFP; + fpregister_list = kCallerSavedV; fpregister_list.Remove(d0); CorruptRegisters(®ister_list, kCallerSavedRegisterCorruptionValue); - CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue); + CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue); #endif } @@ -294,7 +295,7 @@ void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) { set_xreg(code, value | code); } } else { - DCHECK(list->type() == CPURegister::kFPRegister); + DCHECK_EQ(list->type(), CPURegister::kVRegister); while (!list->IsEmpty()) { unsigned code = list->PopLowestIndex().code(); set_dreg_bits(code, value | code); @@ -306,10 +307,10 @@ void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) { void Simulator::CorruptAllCallerSavedCPURegisters() { // Corrupt alters its parameter so copy them first. CPURegList register_list = kCallerSaved; - CPURegList fpregister_list = kCallerSavedFP; + CPURegList fpregister_list = kCallerSavedV; CorruptRegisters(®ister_list, kCallerSavedRegisterCorruptionValue); - CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue); + CorruptRegisters(&fpregister_list, kCallerSavedVRegisterCorruptionValue); } #endif @@ -417,7 +418,7 @@ void Simulator::ResetState() { for (unsigned i = 0; i < kNumberOfRegisters; i++) { set_xreg(i, 0xbadbeef); } - for (unsigned i = 0; i < kNumberOfFPRegisters; i++) { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { // Set FP registers to a value that is NaN in both 32-bit and 64-bit FP. set_dreg_bits(i, 0x7ff000007f800001UL); } @@ -444,6 +445,10 @@ Simulator::~Simulator() { void Simulator::Run() { + // Flush any written registers before executing anything, so that + // manually-set registers are logged _before_ the first instruction. + LogAllWrittenRegisters(); + pc_modified_ = false; while (pc_ != kEndOfSimAddress) { ExecuteInstruction(); @@ -840,8 +845,9 @@ const char* Simulator::vreg_names[] = { const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { - STATIC_ASSERT(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1)); - DCHECK(code < kNumberOfRegisters); + static_assert(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1), + "Array must be large enough to hold all register names."); + DCHECK_LT(code, static_cast(kNumberOfRegisters)); // The modulo operator has no effect here, but it silences a broken GCC // warning about out-of-bounds array accesses. 
code %= kNumberOfRegisters; @@ -855,8 +861,9 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { - STATIC_ASSERT(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1)); - DCHECK(code < kNumberOfRegisters); + static_assert(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1), + "Array must be large enough to hold all register names."); + DCHECK_LT(code, static_cast(kNumberOfRegisters)); code %= kNumberOfRegisters; // If the code represents the stack pointer, index the name after zr. @@ -868,23 +875,70 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { const char* Simulator::SRegNameForCode(unsigned code) { - STATIC_ASSERT(arraysize(Simulator::sreg_names) == kNumberOfFPRegisters); - DCHECK(code < kNumberOfFPRegisters); - return sreg_names[code % kNumberOfFPRegisters]; + static_assert(arraysize(Simulator::sreg_names) == kNumberOfVRegisters, + "Array must be large enough to hold all register names."); + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return sreg_names[code % kNumberOfVRegisters]; } const char* Simulator::DRegNameForCode(unsigned code) { - STATIC_ASSERT(arraysize(Simulator::dreg_names) == kNumberOfFPRegisters); - DCHECK(code < kNumberOfFPRegisters); - return dreg_names[code % kNumberOfFPRegisters]; + static_assert(arraysize(Simulator::dreg_names) == kNumberOfVRegisters, + "Array must be large enough to hold all register names."); + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return dreg_names[code % kNumberOfVRegisters]; } const char* Simulator::VRegNameForCode(unsigned code) { - STATIC_ASSERT(arraysize(Simulator::vreg_names) == kNumberOfFPRegisters); - DCHECK(code < kNumberOfFPRegisters); - return vreg_names[code % kNumberOfFPRegisters]; + static_assert(arraysize(Simulator::vreg_names) == kNumberOfVRegisters, + "Array must be large enough to hold all register names."); + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + return vreg_names[code % kNumberOfVRegisters]; +} + +void LogicVRegister::ReadUintFromMem(VectorFormat vform, int index, + uint64_t addr) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + register_.Insert(index, SimMemory::Read(addr)); + break; + case 16: + register_.Insert(index, SimMemory::Read(addr)); + break; + case 32: + register_.Insert(index, SimMemory::Read(addr)); + break; + case 64: + register_.Insert(index, SimMemory::Read(addr)); + break; + default: + UNREACHABLE(); + return; + } +} + +void LogicVRegister::WriteUintToMem(VectorFormat vform, int index, + uint64_t addr) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + SimMemory::Write(addr, static_cast(Uint(vform, index))); + break; + case 16: + SimMemory::Write(addr, + static_cast(Uint(vform, index))); + break; + case 32: + SimMemory::Write(addr, + static_cast(Uint(vform, index))); + break; + case 64: + SimMemory::Write(addr, Uint(vform, index)); + break; + default: + UNREACHABLE(); + return; + } } @@ -895,7 +949,7 @@ int Simulator::CodeFromName(const char* name) { return i; } } - for (unsigned i = 0; i < kNumberOfFPRegisters; i++) { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { if ((strcmp(vreg_names[i], name) == 0) || (strcmp(dreg_names[i], name) == 0) || (strcmp(sreg_names[i], name) == 0)) { @@ -964,7 +1018,7 @@ void Simulator::AddSubWithCarry(Instruction* instr) { template T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) { - typedef typename make_unsigned::type unsignedT; + typedef 
typename std::make_unsigned::type unsignedT; if (amount == 0) { return value; @@ -1038,16 +1092,6 @@ void Simulator::Extract(Instruction* instr) { } -template<> double Simulator::FPDefaultNaN() const { - return kFP64DefaultNaN; -} - - -template<> float Simulator::FPDefaultNaN() const { - return kFP32DefaultNaN; -} - - void Simulator::FPCompare(double val0, double val1) { AssertSupportedFPCR(); @@ -1067,6 +1111,108 @@ void Simulator::FPCompare(double val0, double val1) { LogSystemRegister(NZCV); } +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize( + size_t reg_size, size_t lane_size) { + DCHECK_GE(reg_size, lane_size); + + uint32_t format = 0; + if (reg_size != lane_size) { + switch (reg_size) { + default: + UNREACHABLE(); + case kQRegSize: + format = kPrintRegAsQVector; + break; + case kDRegSize: + format = kPrintRegAsDVector; + break; + } + } + + switch (lane_size) { + default: + UNREACHABLE(); + case kQRegSize: + format |= kPrintReg1Q; + break; + case kDRegSize: + format |= kPrintReg1D; + break; + case kSRegSize: + format |= kPrintReg1S; + break; + case kHRegSize: + format |= kPrintReg1H; + break; + case kBRegSize: + format |= kPrintReg1B; + break; + } + + // These sizes would be duplicate case labels. + static_assert(kXRegSize == kDRegSize, "X and D registers must be same size."); + static_assert(kWRegSize == kSRegSize, "W and S registers must be same size."); + static_assert(kPrintXReg == kPrintReg1D, + "X and D register printing code is shared."); + static_assert(kPrintWReg == kPrintReg1S, + "W and S register printing code is shared."); + + return static_cast(format); +} + +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat( + VectorFormat vform) { + switch (vform) { + default: + UNREACHABLE(); + case kFormat16B: + return kPrintReg16B; + case kFormat8B: + return kPrintReg8B; + case kFormat8H: + return kPrintReg8H; + case kFormat4H: + return kPrintReg4H; + case kFormat4S: + return kPrintReg4S; + case kFormat2S: + return kPrintReg2S; + case kFormat2D: + return kPrintReg2D; + case kFormat1D: + return kPrintReg1D; + + case kFormatB: + return kPrintReg1B; + case kFormatH: + return kPrintReg1H; + case kFormatS: + return kPrintReg1S; + case kFormatD: + return kPrintReg1D; + } +} + +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP( + VectorFormat vform) { + switch (vform) { + default: + UNREACHABLE(); + case kFormat4S: + return kPrintReg4SFP; + case kFormat2S: + return kPrintReg2SFP; + case kFormat2D: + return kPrintReg2DFP; + case kFormat1D: + return kPrintReg1DFP; + + case kFormatS: + return kPrintReg1SFP; + case kFormatD: + return kPrintReg1DFP; + } +} void Simulator::SetBreakpoint(Instruction* location) { for (unsigned i = 0; i < breakpoints_.size(); i++) { @@ -1130,6 +1276,18 @@ void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) { } } +void Simulator::PrintWrittenRegisters() { + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + if (registers_[i].WrittenSinceLastLog()) PrintRegister(i); + } +} + +void Simulator::PrintWrittenVRegisters() { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + // At this point there is no type information, so print as a raw 1Q. 
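[Editor's note: PrintWrittenRegisters/PrintWrittenVRegisters only print registers that changed since the last log point, which relies on each register cell carrying a dirty flag. A minimal sketch of the bookkeeping implied by the WrittenSinceLastLog()/NotifyRegisterLogged() calls in this file; the field names are assumptions.]

#include <cstdint>

class SimRegisterCellSketch {
 public:
  void Write(uint64_t value) {
    value_ = value;
    written_since_last_log_ = true;  // mark dirty on every store
  }
  uint64_t Read() const { return value_; }
  bool WrittenSinceLastLog() const { return written_since_last_log_; }
  void NotifyRegisterLogged() { written_since_last_log_ = false; }

 private:
  uint64_t value_ = 0;
  bool written_since_last_log_ = false;
};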
+ if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q); + } +} void Simulator::PrintSystemRegisters() { PrintSystemRegister(NZCV); @@ -1143,58 +1301,217 @@ void Simulator::PrintRegisters() { } } - -void Simulator::PrintFPRegisters() { - for (unsigned i = 0; i < kNumberOfFPRegisters; i++) { - PrintFPRegister(i); +void Simulator::PrintVRegisters() { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + // At this point there is no type information, so print as a raw 1Q. + PrintVRegister(i, kPrintReg1Q); } } void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) { + registers_[code].NotifyRegisterLogged(); + // Don't print writes into xzr. if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) { return; } - // The template is "# x:value". - fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s\n", - clr_reg_name, XRegNameForCode(code, r31mode), - clr_reg_value, reg(code, r31mode), clr_normal); + // The template for all x and w registers: + // "# x{code}: 0x{value}" + // "# w{code}: 0x{value}" + + PrintRegisterRawHelper(code, r31mode); + fprintf(stream_, "\n"); +} + +// Print a register's name and raw value. +// +// The `bytes` and `lsb` arguments can be used to limit the bytes that are +// printed. These arguments are intended for use in cases where register hasn't +// actually been updated (such as in PrintVWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a floating-point interpretation or a memory access annotation). +void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) { + // The template for vector types: + // "# v{code}: 0xffeeddccbbaa99887766554433221100". + // An example with bytes=4 and lsb=8: + // "# v{code}: 0xbbaa9988 ". + fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code), + clr_vreg_value); + + int msb = lsb + bytes - 1; + int byte = kQRegSize - 1; + + // Print leading padding spaces. (Two spaces per byte.) + while (byte > msb) { + fprintf(stream_, " "); + byte--; + } + + // Print the specified part of the value, byte by byte. + qreg_t rawbits = qreg(code); + fprintf(stream_, "0x"); + while (byte >= lsb) { + fprintf(stream_, "%02x", rawbits.val[byte]); + byte--; + } + + // Print trailing padding spaces. + while (byte >= 0) { + fprintf(stream_, " "); + byte--; + } + fprintf(stream_, "%s", clr_normal); +} + +// Print each of the specified lanes of a register as a float or double value. +// +// The `lane_count` and `lslane` arguments can be used to limit the lanes that +// are printed. These arguments are intended for use in cases where register +// hasn't actually been updated (such as in PrintVWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a memory access annotation). +void Simulator::PrintVRegisterFPHelper(unsigned code, + unsigned lane_size_in_bytes, + int lane_count, int rightmost_lane) { + DCHECK((lane_size_in_bytes == kSRegSize) || + (lane_size_in_bytes == kDRegSize)); + + unsigned msb = (lane_count + rightmost_lane) * lane_size_in_bytes; + DCHECK_LE(msb, static_cast(kQRegSize)); + + // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register + // name is used: + // " (s{code}: {value})" + // " (d{code}: {value})" + // For vector types, "..." is used to represent one or more omitted lanes. + // " (..., {value}, {value}, ...)" + if ((lane_count == 1) && (rightmost_lane == 0)) { + const char* name = (lane_size_in_bytes == kSRegSize) + ? 
SRegNameForCode(code) + : DRegNameForCode(code); + fprintf(stream_, " (%s%s: ", clr_vreg_name, name); + } else { + if (msb < (kQRegSize - 1)) { + fprintf(stream_, " (..., "); + } else { + fprintf(stream_, " ("); + } + } + + // Print the list of values. + const char* separator = ""; + int leftmost_lane = rightmost_lane + lane_count - 1; + for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) { + double value = (lane_size_in_bytes == kSRegSize) + ? vreg(code).Get(lane) + : vreg(code).Get(lane); + fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal); + separator = ", "; + } + + if (rightmost_lane > 0) { + fprintf(stream_, ", ..."); + } + fprintf(stream_, ")"); } +// Print a register's name and raw value. +// +// Only the least-significant `size_in_bytes` bytes of the register are printed, +// but the value is aligned as if the whole register had been printed. +// +// For typical register updates, size_in_bytes should be set to kXRegSize +// -- the default -- so that the whole register is printed. Other values of +// size_in_bytes are intended for use when the register hasn't actually been +// updated (such as in PrintWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a memory access annotation). +void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode, + int size_in_bytes) { + // The template for all supported sizes. + // "# x{code}: 0xffeeddccbbaa9988" + // "# w{code}: 0xbbaa9988" + // "# w{code}<15:0>: 0x9988" + // "# w{code}<7:0>: 0x88" + unsigned padding_chars = (kXRegSize - size_in_bytes) * 2; + + const char* name = ""; + const char* suffix = ""; + switch (size_in_bytes) { + case kXRegSize: + name = XRegNameForCode(code, r31mode); + break; + case kWRegSize: + name = WRegNameForCode(code, r31mode); + break; + case 2: + name = WRegNameForCode(code, r31mode); + suffix = "<15:0>"; + padding_chars -= strlen(suffix); + break; + case 1: + name = WRegNameForCode(code, r31mode); + suffix = "<7:0>"; + padding_chars -= strlen(suffix); + break; + default: + UNREACHABLE(); + } + fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix); + + // Print leading padding spaces. + DCHECK_LT(padding_chars, kXRegSize * 2U); + for (unsigned i = 0; i < padding_chars; i++) { + putc(' ', stream_); + } + + // Print the specified bits in hexadecimal format. + uint64_t bits = reg(code, r31mode); + bits &= kXRegMask >> ((kXRegSize - size_in_bytes) * 8); + static_assert(sizeof(bits) == kXRegSize, + "X registers and uint64_t must be the same size."); -void Simulator::PrintFPRegister(unsigned code, PrintFPRegisterSizes sizes) { - // The template is "# v:bits (d:value, ...)". + int chars = size_in_bytes * 2; + fprintf(stream_, "%s0x%0*" PRIx64 "%s", clr_reg_value, chars, bits, + clr_normal); +} - DCHECK(sizes != 0); - DCHECK((sizes & kPrintAllFPRegValues) == sizes); +void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) { + vregisters_[code].NotifyRegisterLogged(); - // Print the raw bits. - fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (", - clr_fpreg_name, VRegNameForCode(code), - clr_fpreg_value, fpreg(code), clr_normal); + int lane_size_log2 = format & kPrintRegLaneSizeMask; - // Print all requested value interpretations. - bool need_separator = false; - if (sizes & kPrintDRegValue) { - fprintf(stream_, "%s%s%s: %s%g%s", - need_separator ? 
", " : "", - clr_fpreg_name, DRegNameForCode(code), - clr_fpreg_value, fpreg(code), clr_normal); - need_separator = true; + int reg_size_log2; + if (format & kPrintRegAsQVector) { + reg_size_log2 = kQRegSizeLog2; + } else if (format & kPrintRegAsDVector) { + reg_size_log2 = kDRegSizeLog2; + } else { + // Scalar types. + reg_size_log2 = lane_size_log2; } - if (sizes & kPrintSRegValue) { - fprintf(stream_, "%s%s%s: %s%g%s", - need_separator ? ", " : "", - clr_fpreg_name, SRegNameForCode(code), - clr_fpreg_value, fpreg(code), clr_normal); - need_separator = true; + int lane_count = 1 << (reg_size_log2 - lane_size_log2); + int lane_size = 1 << lane_size_log2; + + // The template for vector types: + // "# v{code}: 0x{rawbits} (..., {value}, ...)". + // The template for scalar types: + // "# v{code}: 0x{rawbits} ({reg}:{value})". + // The values in parentheses after the bit representations are floating-point + // interpretations. They are displayed only if the kPrintVRegAsFP bit is set. + + PrintVRegisterRawHelper(code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(code, lane_size, lane_count); } - // End the value list. - fprintf(stream_, ")\n"); + fprintf(stream_, "\n"); } @@ -1226,109 +1543,61 @@ void Simulator::PrintSystemRegister(SystemRegister id) { } } +void Simulator::PrintRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format) { + registers_[reg_code].NotifyRegisterLogged(); -void Simulator::PrintRead(uintptr_t address, - size_t size, - unsigned reg_code) { - USE(size); // Size is unused here. - - // The template is "# x:value <- address". - fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s", - clr_reg_name, XRegNameForCode(reg_code), - clr_reg_value, reg(reg_code), clr_normal); + USE(format); + // The template is "# {reg}: 0x{value} <- {address}". + PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister); fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", clr_memory_address, address, clr_normal); } +void Simulator::PrintVRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane) { + vregisters_[reg_code].NotifyRegisterLogged(); -void Simulator::PrintReadFP(uintptr_t address, - size_t size, - unsigned reg_code) { - // The template is "# reg:bits (reg:value) <- address". - switch (size) { - case kSRegSize: - fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%gf%s)", - clr_fpreg_name, VRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal, - clr_fpreg_name, SRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal); - break; - case kDRegSize: - fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)", - clr_fpreg_name, VRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal, - clr_fpreg_name, DRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal); - break; - default: - UNREACHABLE(); + // The template is "# v{code}: 0x{rawbits} <- address". + PrintVRegisterRawHelper(reg_code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format), + GetPrintRegLaneCount(format), lane); } - fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", clr_memory_address, address, clr_normal); } +void Simulator::PrintWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format) { + DCHECK_EQ(GetPrintRegLaneCount(format), 1U); -void Simulator::PrintWrite(uintptr_t address, - size_t size, - unsigned reg_code) { - // The template is "# reg:value -> address". 
To keep the trace tidy and - // readable, the value is aligned with the values in the register trace. - switch (size) { - case kByteSizeInBytes: - fprintf(stream_, "# %s%5s<7:0>: %s0x%02" PRIx8 "%s", - clr_reg_name, WRegNameForCode(reg_code), - clr_reg_value, reg(reg_code), clr_normal); - break; - case kHalfWordSizeInBytes: - fprintf(stream_, "# %s%5s<15:0>: %s0x%04" PRIx16 "%s", - clr_reg_name, WRegNameForCode(reg_code), - clr_reg_value, reg(reg_code), clr_normal); - break; - case kWRegSize: - fprintf(stream_, "# %s%5s: %s0x%08" PRIx32 "%s", - clr_reg_name, WRegNameForCode(reg_code), - clr_reg_value, reg(reg_code), clr_normal); - break; - case kXRegSize: - fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s", - clr_reg_name, XRegNameForCode(reg_code), - clr_reg_value, reg(reg_code), clr_normal); - break; - default: - UNREACHABLE(); - } - + // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy + // and readable, the value is aligned with the values in the register trace. + PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister, + GetPrintRegSizeInBytes(format)); fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", clr_memory_address, address, clr_normal); } - -void Simulator::PrintWriteFP(uintptr_t address, - size_t size, - unsigned reg_code) { - // The template is "# reg:bits (reg:value) -> address". To keep the trace tidy - // and readable, the value is aligned with the values in the register trace. - switch (size) { - case kSRegSize: - fprintf(stream_, "# %s%5s<31:0>: %s0x%08" PRIx32 "%s (%s%s: %s%gf%s)", - clr_fpreg_name, VRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal, - clr_fpreg_name, SRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal); - break; - case kDRegSize: - fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)", - clr_fpreg_name, VRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal, - clr_fpreg_name, DRegNameForCode(reg_code), - clr_fpreg_value, fpreg(reg_code), clr_normal); - break; - default: - UNREACHABLE(); +void Simulator::PrintVWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane) { + // The templates: + // "# v{code}: 0x{rawbits} -> {address}" + // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}". + // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}" + // Because this trace doesn't represent a change to the source register's + // value, only the relevant part of the value is printed. To keep the trace + // tidy and readable, the raw value is aligned with the other values in the + // register trace. 
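[Editor's note: the PrintRegisterFormat values used by these Print* helpers pack a lane-size log2 into the low bits plus separate "print as D vector", "print as Q vector" and "print as FP" flags; the lane count then falls out as register size divided by lane size, exactly as PrintVRegister computes it. A stand-alone sketch of that decoding; the mask and flag values are assumed for illustration, not V8's actual encoding.]

#include <cstdint>

constexpr uint32_t kLaneSizeLog2Mask = 0x7;    // lane size, as log2(bytes)
constexpr uint32_t kPrintAsDVector = 1u << 3;  // 8-byte vector
constexpr uint32_t kPrintAsQVector = 1u << 4;  // 16-byte vector
constexpr uint32_t kPrintAsFP = 1u << 5;       // also show an FP interpretation

int LaneCount(uint32_t format) {
  int lane_size_log2 = format & kLaneSizeLog2Mask;
  int reg_size_log2 = (format & kPrintAsQVector)   ? 4
                      : (format & kPrintAsDVector) ? 3
                                                   : lane_size_log2;  // scalar
  return 1 << (reg_size_log2 - lane_size_log2);
}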
+ int lane_count = GetPrintRegLaneCount(format); + int lane_size = GetPrintRegLaneSizeInBytes(format); + int reg_size = GetPrintRegSizeInBytes(format); + PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane); } - fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", clr_memory_address, address, clr_normal); } @@ -1650,13 +1919,14 @@ void Simulator::LoadStoreHelper(Instruction* instr, uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); uintptr_t stack = 0; - base::LockGuard lock_guard(&global_monitor_.Pointer()->mutex); - if (instr->IsLoad()) { - local_monitor_.NotifyLoad(address); - } else { - local_monitor_.NotifyStore(address); - global_monitor_.Pointer()->NotifyStore_Locked(address, - &global_monitor_processor_); + { + base::LockGuard lock_guard(&global_monitor_.Pointer()->mutex); + if (instr->IsLoad()) { + local_monitor_.NotifyLoad(); + } else { + local_monitor_.NotifyStore(); + global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_); + } } // Handle the writeback for stores before the store. On a CPU the writeback @@ -1674,10 +1944,10 @@ void Simulator::LoadStoreHelper(Instruction* instr, stack = sp(); } - LoadStoreOp op = static_cast(instr->Mask(LoadStoreOpMask)); + LoadStoreOp op = static_cast(instr->Mask(LoadStoreMask)); switch (op) { // Use _no_log variants to suppress the register trace (LOG_REGS, - // LOG_FP_REGS). We will print a more detailed log. + // LOG_VREGS). We will print a more detailed log. case LDRB_w: set_wreg_no_log(srcdst, MemoryRead(address)); break; case LDRH_w: set_wreg_no_log(srcdst, MemoryRead(address)); break; case LDR_w: set_wreg_no_log(srcdst, MemoryRead(address)); break; @@ -1687,33 +1957,55 @@ void Simulator::LoadStoreHelper(Instruction* instr, case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead(address)); break; case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead(address)); break; case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead(address)); break; + case LDR_b: + set_breg_no_log(srcdst, MemoryRead(address)); + break; + case LDR_h: + set_hreg_no_log(srcdst, MemoryRead(address)); + break; case LDR_s: set_sreg_no_log(srcdst, MemoryRead(address)); break; case LDR_d: set_dreg_no_log(srcdst, MemoryRead(address)); break; + case LDR_q: + set_qreg_no_log(srcdst, MemoryRead(address)); + break; case STRB_w: MemoryWrite(address, wreg(srcdst)); break; case STRH_w: MemoryWrite(address, wreg(srcdst)); break; case STR_w: MemoryWrite(address, wreg(srcdst)); break; case STR_x: MemoryWrite(address, xreg(srcdst)); break; + case STR_b: + MemoryWrite(address, breg(srcdst)); + break; + case STR_h: + MemoryWrite(address, hreg(srcdst)); + break; case STR_s: MemoryWrite(address, sreg(srcdst)); break; case STR_d: MemoryWrite(address, dreg(srcdst)); break; + case STR_q: + MemoryWrite(address, qreg(srcdst)); + break; default: UNIMPLEMENTED(); } // Print a detailed trace (including the memory address) instead of the basic // register:value trace generated by set_*reg(). 
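[Editor's note: the LDR/STR cases in this helper funnel all memory traffic through templated read/write helpers; going through memcpy keeps unaligned and type-punned accesses well defined on the host. A minimal sketch of such helpers, named after the SimMemory class used elsewhere in this file; the bodies are assumptions.]

#include <cstdint>
#include <cstring>

struct SimMemorySketch {
  template <typename T>
  static T Read(uintptr_t address) {
    T value;
    std::memcpy(&value, reinterpret_cast<const void*>(address), sizeof(T));
    return value;
  }
  template <typename T>
  static void Write(uintptr_t address, T value) {
    std::memcpy(reinterpret_cast<void*>(address), &value, sizeof(T));
  }
};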
- size_t access_size = 1 << instr->SizeLS(); + unsigned access_size = 1 << instr->SizeLS(); if (instr->IsLoad()) { if ((op == LDR_s) || (op == LDR_d)) { - LogReadFP(address, access_size, srcdst); + LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); + } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) { + LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size)); } else { - LogRead(address, access_size, srcdst); + LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size)); } } else { if ((op == STR_s) || (op == STR_d)) { - LogWriteFP(address, access_size, srcdst); + LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); + } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) { + LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size)); } else { - LogWrite(address, access_size, srcdst); + LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size)); } } @@ -1761,17 +2053,14 @@ void Simulator::LoadStorePairHelper(Instruction* instr, uintptr_t address2 = address + access_size; uintptr_t stack = 0; - base::LockGuard lock_guard(&global_monitor_.Pointer()->mutex); - if (instr->IsLoad()) { - local_monitor_.NotifyLoad(address); - local_monitor_.NotifyLoad(address2); - } else { - local_monitor_.NotifyStore(address); - local_monitor_.NotifyStore(address2); - global_monitor_.Pointer()->NotifyStore_Locked(address, - &global_monitor_processor_); - global_monitor_.Pointer()->NotifyStore_Locked(address2, - &global_monitor_processor_); + { + base::LockGuard lock_guard(&global_monitor_.Pointer()->mutex); + if (instr->IsLoad()) { + local_monitor_.NotifyLoad(); + } else { + local_monitor_.NotifyStore(); + global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_); + } } // Handle the writeback for stores before the store. On a CPU the writeback @@ -1797,61 +2086,73 @@ void Simulator::LoadStorePairHelper(Instruction* instr, switch (op) { // Use _no_log variants to suppress the register trace (LOG_REGS, - // LOG_FP_REGS). We will print a more detailed log. + // LOG_VREGS). We will print a more detailed log. 
case LDP_w: { - DCHECK(access_size == kWRegSize); + DCHECK_EQ(access_size, static_cast(kWRegSize)); set_wreg_no_log(rt, MemoryRead(address)); set_wreg_no_log(rt2, MemoryRead(address2)); break; } case LDP_s: { - DCHECK(access_size == kSRegSize); + DCHECK_EQ(access_size, static_cast(kSRegSize)); set_sreg_no_log(rt, MemoryRead(address)); set_sreg_no_log(rt2, MemoryRead(address2)); break; } case LDP_x: { - DCHECK(access_size == kXRegSize); + DCHECK_EQ(access_size, static_cast(kXRegSize)); set_xreg_no_log(rt, MemoryRead(address)); set_xreg_no_log(rt2, MemoryRead(address2)); break; } case LDP_d: { - DCHECK(access_size == kDRegSize); + DCHECK_EQ(access_size, static_cast(kDRegSize)); set_dreg_no_log(rt, MemoryRead(address)); set_dreg_no_log(rt2, MemoryRead(address2)); break; } + case LDP_q: { + DCHECK_EQ(access_size, static_cast(kQRegSize)); + set_qreg(rt, MemoryRead(address), NoRegLog); + set_qreg(rt2, MemoryRead(address2), NoRegLog); + break; + } case LDPSW_x: { - DCHECK(access_size == kWRegSize); + DCHECK_EQ(access_size, static_cast(kWRegSize)); set_xreg_no_log(rt, MemoryRead(address)); set_xreg_no_log(rt2, MemoryRead(address2)); break; } case STP_w: { - DCHECK(access_size == kWRegSize); + DCHECK_EQ(access_size, static_cast(kWRegSize)); MemoryWrite(address, wreg(rt)); MemoryWrite(address2, wreg(rt2)); break; } case STP_s: { - DCHECK(access_size == kSRegSize); + DCHECK_EQ(access_size, static_cast(kSRegSize)); MemoryWrite(address, sreg(rt)); MemoryWrite(address2, sreg(rt2)); break; } case STP_x: { - DCHECK(access_size == kXRegSize); + DCHECK_EQ(access_size, static_cast(kXRegSize)); MemoryWrite(address, xreg(rt)); MemoryWrite(address2, xreg(rt2)); break; } case STP_d: { - DCHECK(access_size == kDRegSize); + DCHECK_EQ(access_size, static_cast(kDRegSize)); MemoryWrite(address, dreg(rt)); MemoryWrite(address2, dreg(rt2)); break; } + case STP_q: { + DCHECK_EQ(access_size, static_cast(kQRegSize)); + MemoryWrite(address, qreg(rt)); + MemoryWrite(address2, qreg(rt2)); + break; + } default: UNREACHABLE(); } @@ -1859,19 +2160,25 @@ void Simulator::LoadStorePairHelper(Instruction* instr, // register:value trace generated by set_*reg(). 
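[Editor's note: the new LDP_q/STP_q cases above move whole 128-bit Q registers, which the simulator represents as a 16-byte blob (the qreg_t whose val[] bytes PrintVRegisterRawHelper prints). A reduced sketch of such a type with a lane accessor; the accessor is an assumption for illustration.]

#include <cstdint>
#include <cstring>

struct qreg_sketch_t {
  uint8_t val[16];  // 128 bits, least-significant byte first
};

template <typename T>
T Lane(const qreg_sketch_t& reg, int lane) {
  T result;
  std::memcpy(&result, reg.val + lane * sizeof(T), sizeof(T));
  return result;
}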
if (instr->IsLoad()) { if ((op == LDP_s) || (op == LDP_d)) { - LogReadFP(address, access_size, rt); - LogReadFP(address2, access_size, rt2); + LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(access_size)); + LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size)); + } else if (op == LDP_q) { + LogVRead(address, rt, GetPrintRegisterFormatForSize(access_size)); + LogVRead(address2, rt2, GetPrintRegisterFormatForSize(access_size)); } else { - LogRead(address, access_size, rt); - LogRead(address2, access_size, rt2); + LogRead(address, rt, GetPrintRegisterFormatForSize(access_size)); + LogRead(address2, rt2, GetPrintRegisterFormatForSize(access_size)); } } else { if ((op == STP_s) || (op == STP_d)) { - LogWriteFP(address, access_size, rt); - LogWriteFP(address2, access_size, rt2); + LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(access_size)); + LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(access_size)); + } else if (op == STP_q) { + LogVWrite(address, rt, GetPrintRegisterFormatForSize(access_size)); + LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size)); } else { - LogWrite(address, access_size, rt); - LogWrite(address2, access_size, rt2); + LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size)); + LogWrite(address2, rt2, GetPrintRegisterFormatForSize(access_size)); } } @@ -1897,27 +2204,29 @@ void Simulator::VisitLoadLiteral(Instruction* instr) { uintptr_t address = instr->LiteralAddress(); unsigned rt = instr->Rt(); - base::LockGuard lock_guard(&global_monitor_.Pointer()->mutex); - local_monitor_.NotifyLoad(address); + { + base::LockGuard lock_guard(&global_monitor_.Pointer()->mutex); + local_monitor_.NotifyLoad(); + } switch (instr->Mask(LoadLiteralMask)) { // Use _no_log variants to suppress the register trace (LOG_REGS, - // LOG_FP_REGS), then print a more detailed log. + // LOG_VREGS), then print a more detailed log. 
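[Editor's note: the NotifyLoad()/NotifyStore() calls in these helpers, together with the NotifyLoadExcl/NotifyStoreExcl calls in the acquire/release visitor below, model ARM's exclusive monitors: an ordinary store clears any outstanding reservation, and a store-exclusive only succeeds while the reservation is still held. A reduced local-monitor sketch; the per-address tracking shown is a simplification, and this patch in fact drops address arguments from the plain NotifyLoad/NotifyStore paths.]

#include <cstdint>

class LocalMonitorSketch {
 public:
  void NotifyLoadExcl(uintptr_t addr) {  // LDXR / LDAXR
    exclusive_ = true;
    tagged_addr_ = addr;
  }
  void NotifyLoad() {}                        // ordinary loads leave the monitor alone
  void NotifyStore() { exclusive_ = false; }  // ordinary stores clear it
  bool NotifyStoreExcl(uintptr_t addr) {      // STXR / STLXR: true means the store may proceed
    bool success = exclusive_ && (addr == tagged_addr_);
    exclusive_ = false;                       // a store-exclusive always clears the monitor
    return success;
  }

 private:
  bool exclusive_ = false;
  uintptr_t tagged_addr_ = 0;
};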
case LDR_w_lit: set_wreg_no_log(rt, MemoryRead(address)); - LogRead(address, kWRegSize, rt); + LogRead(address, rt, kPrintWReg); break; case LDR_x_lit: set_xreg_no_log(rt, MemoryRead(address)); - LogRead(address, kXRegSize, rt); + LogRead(address, rt, kPrintXReg); break; case LDR_s_lit: set_sreg_no_log(rt, MemoryRead(address)); - LogReadFP(address, kSRegSize, rt); + LogVRead(address, rt, kPrintSReg); break; case LDR_d_lit: set_dreg_no_log(rt, MemoryRead(address)); - LogReadFP(address, kDRegSize, rt); + LogVRead(address, rt, kPrintDReg); break; default: UNREACHABLE(); } @@ -1992,7 +2301,7 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) { global_monitor_.Pointer()->NotifyLoadExcl_Locked( address, &global_monitor_processor_); } else { - local_monitor_.NotifyLoad(address); + local_monitor_.NotifyLoad(); } switch (op) { case LDAR_b: @@ -2010,7 +2319,7 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) { default: UNIMPLEMENTED(); } - LogRead(address, access_size, rt); + LogRead(address, rt, GetPrintRegisterFormatForSize(access_size)); } else { if (is_exclusive) { unsigned rs = instr->Rs(); @@ -2031,15 +2340,14 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) { default: UNIMPLEMENTED(); } - LogWrite(address, access_size, rt); + LogWrite(address, rt, GetPrintRegisterFormatForSize(access_size)); set_wreg(rs, 0); } else { set_wreg(rs, 1); } } else { - local_monitor_.NotifyStore(address); - global_monitor_.Pointer()->NotifyStore_Locked(address, - &global_monitor_processor_); + local_monitor_.NotifyStore(); + global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_); switch (op) { case STLR_b: MemoryWrite(address, wreg(rt)); @@ -2210,7 +2518,7 @@ void Simulator::DataProcessing2Source(Instruction* instr) { } case UDIV_w: case UDIV_x: { - typedef typename make_unsigned::type unsignedT; + typedef typename std::make_unsigned::type unsignedT; unsignedT rn = static_cast(reg(instr->Rn())); unsignedT rm = static_cast(reg(instr->Rm())); if (rm == 0) { @@ -2315,7 +2623,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) { template void Simulator::BitfieldHelper(Instruction* instr) { - typedef typename make_unsigned::type unsignedT; + typedef typename std::make_unsigned::type unsignedT; T reg_size = sizeof(T) * 8; T R = instr->ImmR(); T S = instr->ImmS(); @@ -2528,62 +2836,22 @@ void Simulator::VisitFPFixedPointConvert(Instruction* instr) { } -int32_t Simulator::FPToInt32(double value, FPRounding rmode) { - value = FPRoundInt(value, rmode); - if (value >= kWMaxInt) { - return kWMaxInt; - } else if (value < kWMinInt) { - return kWMinInt; - } - return std::isnan(value) ? 0 : static_cast(value); -} - - -int64_t Simulator::FPToInt64(double value, FPRounding rmode) { - value = FPRoundInt(value, rmode); - if (value >= kXMaxInt) { - return kXMaxInt; - } else if (value < kXMinInt) { - return kXMinInt; - } - return std::isnan(value) ? 0 : static_cast(value); -} - - -uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) { - value = FPRoundInt(value, rmode); - if (value >= kWMaxUInt) { - return kWMaxUInt; - } else if (value < 0.0) { - return 0; - } - return std::isnan(value) ? 0 : static_cast(value); -} - - -uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) { - value = FPRoundInt(value, rmode); - if (value >= kXMaxUInt) { - return kXMaxUInt; - } else if (value < 0.0) { - return 0; - } - return std::isnan(value) ? 
0 : static_cast(value); -} - - void Simulator::VisitFPCompare(Instruction* instr) { AssertSupportedFPCR(); - unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits - : kSRegSizeInBits; - double fn_val = fpreg(reg_size, instr->Rn()); - switch (instr->Mask(FPCompareMask)) { case FCMP_s: - case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break; + FPCompare(sreg(instr->Rn()), sreg(instr->Rm())); + break; + case FCMP_d: + FPCompare(dreg(instr->Rn()), dreg(instr->Rm())); + break; case FCMP_s_zero: - case FCMP_d_zero: FPCompare(fn_val, 0.0); break; + FPCompare(sreg(instr->Rn()), 0.0f); + break; + case FCMP_d_zero: + FPCompare(dreg(instr->Rn()), 0.0); + break; default: UNIMPLEMENTED(); } } @@ -2594,13 +2862,16 @@ void Simulator::VisitFPConditionalCompare(Instruction* instr) { switch (instr->Mask(FPConditionalCompareMask)) { case FCCMP_s: + if (ConditionPassed(static_cast(instr->Condition()))) { + FPCompare(sreg(instr->Rn()), sreg(instr->Rm())); + } else { + nzcv().SetFlags(instr->Nzcv()); + LogSystemRegister(NZCV); + } + break; case FCCMP_d: { if (ConditionPassed(static_cast(instr->Condition()))) { - // If the condition passes, set the status flags to the result of - // comparing the operands. - unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits - : kSRegSizeInBits; - FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm())); + FPCompare(dreg(instr->Rn()), dreg(instr->Rm())); } else { // If the condition fails, set the status flags to the nzcv immediate. nzcv().SetFlags(instr->Nzcv()); @@ -2634,479 +2905,147 @@ void Simulator::VisitFPConditionalSelect(Instruction* instr) { void Simulator::VisitFPDataProcessing1Source(Instruction* instr) { AssertSupportedFPCR(); + FPRounding fpcr_rounding = static_cast(fpcr().RMode()); + VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS; + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + bool inexact_exception = false; + unsigned fd = instr->Rd(); unsigned fn = instr->Rn(); switch (instr->Mask(FPDataProcessing1SourceMask)) { - case FMOV_s: set_sreg(fd, sreg(fn)); break; - case FMOV_d: set_dreg(fd, dreg(fn)); break; - case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break; - case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break; - case FNEG_s: set_sreg(fd, -sreg(fn)); break; - case FNEG_d: set_dreg(fd, -dreg(fn)); break; - case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break; - case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break; - case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break; - case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break; + case FMOV_s: + set_sreg(fd, sreg(fn)); + return; + case FMOV_d: + set_dreg(fd, dreg(fn)); + return; + case FABS_s: + case FABS_d: + fabs_(vform, vreg(fd), vreg(fn)); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FNEG_s: + case FNEG_d: + fneg(vform, vreg(fd), vreg(fn)); + // Explicitly log the register update whilst we have type information. 
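[Editor's note: the FCCMP_s/FCCMP_d handling above either performs the comparison (when the condition holds) or writes the instruction's NZCV immediate straight into the flags. The same decision expressed as plain C++, using the standard ARM flag encodings for floating-point compares:]

#include <cmath>
#include <cstdint>

// Returns the NZCV nibble an FCCMP would produce (N=8, Z=4, C=2, V=1).
uint32_t FccmpFlags(double fn, double fm, bool condition_passed,
                    uint32_t nzcv_imm) {
  if (!condition_passed) return nzcv_imm;             // take the encoded flags
  if (std::isnan(fn) || std::isnan(fm)) return 0x3;   // unordered: C | V
  if (fn == fm) return 0x6;                           // equal: Z | C
  if (fn < fm) return 0x8;                            // less than: N
  return 0x2;                                         // greater than: C
}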
+ LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FCVT_ds: + set_dreg(fd, FPToDouble(sreg(fn))); + return; + case FCVT_sd: + set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); + return; + case FCVT_hs: + set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven)); + return; + case FCVT_sh: + set_sreg(fd, FPToFloat(hreg(fn))); + return; + case FCVT_dh: + set_dreg(fd, FPToDouble(FPToFloat(hreg(fn)))); + return; + case FCVT_hd: + set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven)); + return; + case FSQRT_s: + case FSQRT_d: + fsqrt(vform, rd, rn); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FRINTI_s: + case FRINTI_d: + break; // Use FPCR rounding mode. + case FRINTX_s: + case FRINTX_d: + inexact_exception = true; + break; + case FRINTA_s: + case FRINTA_d: + fpcr_rounding = FPTieAway; + break; case FRINTM_s: - set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break; case FRINTM_d: - set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break; - case FRINTP_s: - set_sreg(fd, FPRoundInt(sreg(fn), FPPositiveInfinity)); + fpcr_rounding = FPNegativeInfinity; + break; + case FRINTN_s: + case FRINTN_d: + fpcr_rounding = FPTieEven; break; + case FRINTP_s: case FRINTP_d: - set_dreg(fd, FPRoundInt(dreg(fn), FPPositiveInfinity)); - break; - case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break; - case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break; - case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break; - case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break; - case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break; - case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break; - default: UNIMPLEMENTED(); - } -} - - -// Assemble the specified IEEE-754 components into the target type and apply -// appropriate rounding. -// sign: 0 = positive, 1 = negative -// exponent: Unbiased IEEE-754 exponent. -// mantissa: The mantissa of the input. The top bit (which is not encoded for -// normal IEEE-754 values) must not be omitted. This bit has the -// value 'pow(2, exponent)'. -// -// The input value is assumed to be a normalized value. That is, the input may -// not be infinity or NaN. If the source value is subnormal, it must be -// normalized before calling this function such that the highest set bit in the -// mantissa has the value 'pow(2, exponent)'. -// -// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than -// calling a templated FPRound. -template -static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa, - FPRounding round_mode) { - DCHECK((sign == 0) || (sign == 1)); - - // Only the FPTieEven rounding mode is implemented. - DCHECK(round_mode == FPTieEven); - USE(round_mode); - - // Rounding can promote subnormals to normals, and normals to infinities. For - // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be - // encodable as a float, but rounding based on the low-order mantissa bits - // could make it overflow. With ties-to-even rounding, this value would become - // an infinity. - - // ---- Rounding Method ---- - // - // The exponent is irrelevant in the rounding operation, so we treat the - // lowest-order bit that will fit into the result ('onebit') as having - // the value '1'. Similarly, the highest-order bit that won't fit into - // the result ('halfbit') has the value '0.5'. The 'point' sits between - // 'onebit' and 'halfbit': - // - // These bits fit into the result. 
- // |---------------------| - // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - // || - // / | - // / halfbit - // onebit - // - // For subnormal outputs, the range of representable bits is smaller and - // the position of onebit and halfbit depends on the exponent of the - // input, but the method is otherwise similar. - // - // onebit(frac) - // | - // | halfbit(frac) halfbit(adjusted) - // | / / - // | | | - // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00 - // 0b00.0... -> 0b00.0... -> 0b00 - // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00 - // 0b00.1... -> 0b00.1... -> 0b01 - // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01 - // 0b01.0... -> 0b01.0... -> 0b01 - // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10 - // 0b01.1... -> 0b01.1... -> 0b10 - // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10 - // 0b10.0... -> 0b10.0... -> 0b10 - // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10 - // 0b10.1... -> 0b10.1... -> 0b11 - // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11 - // ... / | / | - // / | / | - // / | - // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / | - // - // mantissa = (mantissa >> shift) + halfbit(adjusted); - - static const int mantissa_offset = 0; - static const int exponent_offset = mantissa_offset + mbits; - static const int sign_offset = exponent_offset + ebits; - STATIC_ASSERT(sign_offset == (sizeof(T) * kByteSize - 1)); - - // Bail out early for zero inputs. - if (mantissa == 0) { - return static_cast(sign << sign_offset); - } - - // If all bits in the exponent are set, the value is infinite or NaN. - // This is true for all binary IEEE-754 formats. - static const int infinite_exponent = (1 << ebits) - 1; - static const int max_normal_exponent = infinite_exponent - 1; - - // Apply the exponent bias to encode it for the result. Doing this early makes - // it easy to detect values that will be infinite or subnormal. - exponent += max_normal_exponent >> 1; - - if (exponent > max_normal_exponent) { - // Overflow: The input is too large for the result type to represent. The - // FPTieEven rounding mode handles overflows using infinities. - exponent = infinite_exponent; - mantissa = 0; - return static_cast((sign << sign_offset) | - (exponent << exponent_offset) | - (mantissa << mantissa_offset)); - } - - // Calculate the shift required to move the top mantissa bit to the proper - // place in the destination type. - const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64); - int shift = highest_significant_bit - mbits; - - if (exponent <= 0) { - // The output will be subnormal (before rounding). - - // For subnormal outputs, the shift must be adjusted by the exponent. The +1 - // is necessary because the exponent of a subnormal value (encoded as 0) is - // the same as the exponent of the smallest normal value (encoded as 1). - shift += -exponent + 1; - - // Handle inputs that would produce a zero output. - // - // Shifts higher than highest_significant_bit+1 will always produce a zero - // result. A shift of exactly highest_significant_bit+1 might produce a - // non-zero result after rounding. - if (shift > (highest_significant_bit + 1)) { - // The result will always be +/-0.0. - return static_cast(sign << sign_offset); - } - - // Properly encode the exponent for a subnormal output. - exponent = 0; - } else { - // Clear the topmost mantissa bit, since this is not encoded in IEEE-754 - // normal values. - mantissa &= ~(1UL << highest_significant_bit); - } - - if (shift > 0) { - // We have to shift the mantissa to the right. 
Some precision is lost, so we - // need to apply rounding. - uint64_t onebit_mantissa = (mantissa >> (shift)) & 1; - uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1; - uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa); - T halfbit_adjusted = (adjusted >> (shift-1)) & 1; - - T result = - static_cast((sign << sign_offset) | (exponent << exponent_offset) | - ((mantissa >> shift) << mantissa_offset)); - - // A very large mantissa can overflow during rounding. If this happens, the - // exponent should be incremented and the mantissa set to 1.0 (encoded as - // 0). Applying halfbit_adjusted after assembling the float has the nice - // side-effect that this case is handled for free. - // - // This also handles cases where a very large finite value overflows to - // infinity, or where a very large subnormal value overflows to become - // normal. - return result + halfbit_adjusted; - } else { - // We have to shift the mantissa to the left (or not at all). The input - // mantissa is exactly representable in the output mantissa, so apply no - // rounding correction. - return static_cast((sign << sign_offset) | - (exponent << exponent_offset) | - ((mantissa << -shift) << mantissa_offset)); - } -} - - -// See FPRound for a description of this function. -static inline double FPRoundToDouble(int64_t sign, int64_t exponent, - uint64_t mantissa, FPRounding round_mode) { - int64_t bits = - FPRound(sign, - exponent, - mantissa, - round_mode); - return rawbits_to_double(bits); -} - - -// See FPRound for a description of this function. -static inline float FPRoundToFloat(int64_t sign, int64_t exponent, - uint64_t mantissa, FPRounding round_mode) { - int32_t bits = - FPRound(sign, - exponent, - mantissa, - round_mode); - return rawbits_to_float(bits); -} - - -double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) { - if (src >= 0) { - return UFixedToDouble(src, fbits, round); - } else { - // This works for all negative values, including INT64_MIN. - return -UFixedToDouble(-src, fbits, round); - } -} - - -double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) { - // An input of 0 is a special case because the result is effectively - // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. - if (src == 0) { - return 0.0; + fpcr_rounding = FPPositiveInfinity; + break; + case FRINTZ_s: + case FRINTZ_d: + fpcr_rounding = FPZero; + break; + default: + UNIMPLEMENTED(); } - // Calculate the exponent. The highest significant bit will have the value - // 2^exponent. - const int highest_significant_bit = 63 - CountLeadingZeros(src, 64); - const int64_t exponent = highest_significant_bit - fbits; - - return FPRoundToDouble(0, exponent, src, round); + // Only FRINT* instructions fall through the switch above. + frint(vform, rd, rn, fpcr_rounding, inexact_exception); + // Explicitly log the register update whilst we have type information + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); } +void Simulator::VisitFPDataProcessing2Source(Instruction* instr) { + AssertSupportedFPCR(); -float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) { - if (src >= 0) { - return UFixedToFloat(src, fbits, round); - } else { - // This works for all negative values, including INT64_MIN. 
- return -UFixedToFloat(-src, fbits, round); - } -} - - -float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) { - // An input of 0 is a special case because the result is effectively - // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. - if (src == 0) { - return 0.0f; - } - - // Calculate the exponent. The highest significant bit will have the value - // 2^exponent. - const int highest_significant_bit = 63 - CountLeadingZeros(src, 64); - const int32_t exponent = highest_significant_bit - fbits; - - return FPRoundToFloat(0, exponent, src, round); -} - + VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS; + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); -double Simulator::FPRoundInt(double value, FPRounding round_mode) { - if ((value == 0.0) || (value == kFP64PositiveInfinity) || - (value == kFP64NegativeInfinity)) { - return value; - } else if (std::isnan(value)) { - return FPProcessNaN(value); - } - - double int_result = floor(value); - double error = value - int_result; - switch (round_mode) { - case FPTieAway: { - // Take care of correctly handling the range ]-0.5, -0.0], which must - // yield -0.0. - if ((-0.5 < value) && (value < 0.0)) { - int_result = -0.0; - - } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { - // If the error is greater than 0.5, or is equal to 0.5 and the integer - // result is positive, round up. - int_result++; - } + switch (instr->Mask(FPDataProcessing2SourceMask)) { + case FADD_s: + case FADD_d: + fadd(vform, rd, rn, rm); break; - } - case FPTieEven: { - // Take care of correctly handling the range [-0.5, -0.0], which must - // yield -0.0. - if ((-0.5 <= value) && (value < 0.0)) { - int_result = -0.0; - - // If the error is greater than 0.5, or is equal to 0.5 and the integer - // result is odd, round up. - } else if ((error > 0.5) || - ((error == 0.5) && (modulo(int_result, 2) != 0))) { - int_result++; - } + case FSUB_s: + case FSUB_d: + fsub(vform, rd, rn, rm); break; - } - case FPZero: { - // If value > 0 then we take floor(value) - // otherwise, ceil(value) - if (value < 0) { - int_result = ceil(value); - } + case FMUL_s: + case FMUL_d: + fmul(vform, rd, rn, rm); break; - } - case FPNegativeInfinity: { - // We always use floor(value). + case FNMUL_s: + case FNMUL_d: + fnmul(vform, rd, rn, rm); break; - } - case FPPositiveInfinity: { - int_result = ceil(value); + case FDIV_s: + case FDIV_d: + fdiv(vform, rd, rn, rm); + break; + case FMAX_s: + case FMAX_d: + fmax(vform, rd, rn, rm); + break; + case FMIN_s: + case FMIN_d: + fmin(vform, rd, rn, rm); break; - } - default: UNIMPLEMENTED(); - } - return int_result; -} - - -double Simulator::FPToDouble(float value) { - switch (std::fpclassify(value)) { - case FP_NAN: { - if (fpcr().DN()) return kFP64DefaultNaN; - - // Convert NaNs as the processor would: - // - The sign is propagated. - // - The payload (mantissa) is transferred entirely, except that the top - // bit is forced to '1', making the result a quiet NaN. The unused - // (low-order) payload bits are set to 0. - uint32_t raw = float_to_rawbits(value); - - uint64_t sign = raw >> 31; - uint64_t exponent = (1 << 11) - 1; - uint64_t payload = unsigned_bitextract_64(21, 0, raw); - payload <<= (52 - 23); // The unused low-order bits should be 0. - payload |= (1L << 51); // Force a quiet NaN. 
- - return rawbits_to_double((sign << 63) | (exponent << 52) | payload); - } - - case FP_ZERO: - case FP_NORMAL: - case FP_SUBNORMAL: - case FP_INFINITE: { - // All other inputs are preserved in a standard cast, because every value - // representable using an IEEE-754 float is also representable using an - // IEEE-754 double. - return static_cast(value); - } - } - - UNREACHABLE(); - return static_cast(value); -} - - -float Simulator::FPToFloat(double value, FPRounding round_mode) { - // Only the FPTieEven rounding mode is implemented. - DCHECK(round_mode == FPTieEven); - USE(round_mode); - - switch (std::fpclassify(value)) { - case FP_NAN: { - if (fpcr().DN()) return kFP32DefaultNaN; - - // Convert NaNs as the processor would: - // - The sign is propagated. - // - The payload (mantissa) is transferred as much as possible, except - // that the top bit is forced to '1', making the result a quiet NaN. - uint64_t raw = double_to_rawbits(value); - - uint32_t sign = raw >> 63; - uint32_t exponent = (1 << 8) - 1; - uint32_t payload = - static_cast(unsigned_bitextract_64(50, 52 - 23, raw)); - payload |= (1 << 22); // Force a quiet NaN. - - return rawbits_to_float((sign << 31) | (exponent << 23) | payload); - } - - case FP_ZERO: - case FP_INFINITE: { - // In a C++ cast, any value representable in the target type will be - // unchanged. This is always the case for +/-0.0 and infinities. - return static_cast(value); - } - - case FP_NORMAL: - case FP_SUBNORMAL: { - // Convert double-to-float as the processor would, assuming that FPCR.FZ - // (flush-to-zero) is not set. - uint64_t raw = double_to_rawbits(value); - // Extract the IEEE-754 double components. - uint32_t sign = raw >> 63; - // Extract the exponent and remove the IEEE-754 encoding bias. - int32_t exponent = - static_cast(unsigned_bitextract_64(62, 52, raw)) - 1023; - // Extract the mantissa and add the implicit '1' bit. - uint64_t mantissa = unsigned_bitextract_64(51, 0, raw); - if (std::fpclassify(value) == FP_NORMAL) { - mantissa |= (1UL << 52); - } - return FPRoundToFloat(sign, exponent, mantissa, round_mode); - } - } - - UNREACHABLE(); - return value; -} - - -void Simulator::VisitFPDataProcessing2Source(Instruction* instr) { - AssertSupportedFPCR(); - - unsigned fd = instr->Rd(); - unsigned fn = instr->Rn(); - unsigned fm = instr->Rm(); - - // Fmaxnm and Fminnm have special NaN handling. - switch (instr->Mask(FPDataProcessing2SourceMask)) { - case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return; - case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return; - case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return; - case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return; - default: - break; // Fall through. 
- } - - if (FPProcessNaNs(instr)) return; - - switch (instr->Mask(FPDataProcessing2SourceMask)) { - case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break; - case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break; - case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break; - case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break; - case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break; - case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break; - case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break; - case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break; - case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break; - case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break; - case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break; - case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break; case FMAXNM_s: case FMAXNM_d: + fmaxnm(vform, rd, rn, rm); + break; case FMINNM_s: case FMINNM_d: - // These were handled before the standard FPProcessNaNs() stage. + fminnm(vform, rd, rn, rm); + break; + default: UNREACHABLE(); - default: UNIMPLEMENTED(); } + // Explicitly log the register update whilst we have type information. + LogVRegister(instr->Rd(), GetPrintRegisterFormatFP(vform)); } - void Simulator::VisitFPDataProcessing3Source(Instruction* instr) { AssertSupportedFPCR(); @@ -3117,10 +3056,18 @@ void Simulator::VisitFPDataProcessing3Source(Instruction* instr) { switch (instr->Mask(FPDataProcessing3SourceMask)) { // fd = fa +/- (fn * fm) - case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break; - case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break; - case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break; - case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break; + case FMADD_s: + set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); + break; + case FMSUB_s: + set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); + break; + case FMADD_d: + set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); + break; + case FMSUB_d: + set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); + break; // Negated variants of the above. case FNMADD_s: set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm))); @@ -3134,232 +3081,11 @@ void Simulator::VisitFPDataProcessing3Source(Instruction* instr) { case FNMSUB_d: set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm))); break; - default: UNIMPLEMENTED(); - } -} - - -template -T Simulator::FPAdd(T op1, T op2) { - // NaNs should be handled elsewhere. - DCHECK(!std::isnan(op1) && !std::isnan(op2)); - - if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) { - // inf + -inf returns the default NaN. - return FPDefaultNaN(); - } else { - // Other cases should be handled by standard arithmetic. - return op1 + op2; - } -} - - -template -T Simulator::FPDiv(T op1, T op2) { - // NaNs should be handled elsewhere. - DCHECK(!std::isnan(op1) && !std::isnan(op2)); - - if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { - // inf / inf and 0.0 / 0.0 return the default NaN. - return FPDefaultNaN(); - } else { - // Other cases should be handled by standard arithmetic. - return op1 / op2; - } -} - - -template -T Simulator::FPMax(T a, T b) { - // NaNs should be handled elsewhere. - DCHECK(!std::isnan(a) && !std::isnan(b)); - - if ((a == 0.0) && (b == 0.0) && - (copysign(1.0, a) != copysign(1.0, b))) { - // a and b are zero, and the sign differs: return +0.0. - return 0.0; - } else { - return (a > b) ? 
a : b; - } -} - - -template -T Simulator::FPMaxNM(T a, T b) { - if (IsQuietNaN(a) && !IsQuietNaN(b)) { - a = kFP64NegativeInfinity; - } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { - b = kFP64NegativeInfinity; - } - - T result = FPProcessNaNs(a, b); - return std::isnan(result) ? result : FPMax(a, b); -} - -template -T Simulator::FPMin(T a, T b) { - // NaNs should be handled elsewhere. - DCHECK(!std::isnan(a) && !std::isnan(b)); - - if ((a == 0.0) && (b == 0.0) && - (copysign(1.0, a) != copysign(1.0, b))) { - // a and b are zero, and the sign differs: return -0.0. - return -0.0; - } else { - return (a < b) ? a : b; - } -} - - -template -T Simulator::FPMinNM(T a, T b) { - if (IsQuietNaN(a) && !IsQuietNaN(b)) { - a = kFP64PositiveInfinity; - } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { - b = kFP64PositiveInfinity; - } - - T result = FPProcessNaNs(a, b); - return std::isnan(result) ? result : FPMin(a, b); -} - - -template -T Simulator::FPMul(T op1, T op2) { - // NaNs should be handled elsewhere. - DCHECK(!std::isnan(op1) && !std::isnan(op2)); - - if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { - // inf * 0.0 returns the default NaN. - return FPDefaultNaN(); - } else { - // Other cases should be handled by standard arithmetic. - return op1 * op2; - } -} - - -template -T Simulator::FPMulAdd(T a, T op1, T op2) { - T result = FPProcessNaNs3(a, op1, op2); - - T sign_a = copysign(1.0, a); - T sign_prod = copysign(1.0, op1) * copysign(1.0, op2); - bool isinf_prod = std::isinf(op1) || std::isinf(op2); - bool operation_generates_nan = - (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0 - (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf - (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf - - if (std::isnan(result)) { - // Generated NaNs override quiet NaNs propagated from a. - if (operation_generates_nan && IsQuietNaN(a)) { - return FPDefaultNaN(); - } else { - return result; - } - } - - // If the operation would produce a NaN, return the default NaN. - if (operation_generates_nan) { - return FPDefaultNaN(); - } - - // Work around broken fma implementations for exact zero results: The sign of - // exact 0.0 results is positive unless both a and op1 * op2 are negative. - if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { - return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0; - } - - result = FusedMultiplyAdd(op1, op2, a); - DCHECK(!std::isnan(result)); - - // Work around broken fma implementations for rounded zero results: If a is - // 0.0, the sign of the result is the sign of op1 * op2 before rounding. - if ((a == 0.0) && (result == 0.0)) { - return copysign(0.0, sign_prod); - } - - return result; -} - - -template -T Simulator::FPSqrt(T op) { - if (std::isnan(op)) { - return FPProcessNaN(op); - } else if (op < 0.0) { - return FPDefaultNaN(); - } else { - lazily_initialize_fast_sqrt(isolate_); - return fast_sqrt(op, isolate_); - } -} - - -template -T Simulator::FPSub(T op1, T op2) { - // NaNs should be handled elsewhere. - DCHECK(!std::isnan(op1) && !std::isnan(op2)); - - if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) { - // inf - inf returns the default NaN. - return FPDefaultNaN(); - } else { - // Other cases should be handled by standard arithmetic. - return op1 - op2; - } -} - - -template -T Simulator::FPProcessNaN(T op) { - DCHECK(std::isnan(op)); - return fpcr().DN() ? 
FPDefaultNaN() : ToQuietNaN(op); -} - - -template -T Simulator::FPProcessNaNs(T op1, T op2) { - if (IsSignallingNaN(op1)) { - return FPProcessNaN(op1); - } else if (IsSignallingNaN(op2)) { - return FPProcessNaN(op2); - } else if (std::isnan(op1)) { - DCHECK(IsQuietNaN(op1)); - return FPProcessNaN(op1); - } else if (std::isnan(op2)) { - DCHECK(IsQuietNaN(op2)); - return FPProcessNaN(op2); - } else { - return 0.0; - } -} - - -template -T Simulator::FPProcessNaNs3(T op1, T op2, T op3) { - if (IsSignallingNaN(op1)) { - return FPProcessNaN(op1); - } else if (IsSignallingNaN(op2)) { - return FPProcessNaN(op2); - } else if (IsSignallingNaN(op3)) { - return FPProcessNaN(op3); - } else if (std::isnan(op1)) { - DCHECK(IsQuietNaN(op1)); - return FPProcessNaN(op1); - } else if (std::isnan(op2)) { - DCHECK(IsQuietNaN(op2)); - return FPProcessNaN(op2); - } else if (std::isnan(op3)) { - DCHECK(IsQuietNaN(op3)); - return FPProcessNaN(op3); - } else { - return 0.0; + default: + UNIMPLEMENTED(); } } - bool Simulator::FPProcessNaNs(Instruction* instr) { unsigned fd = instr->Rd(); unsigned fn = instr->Rn(); @@ -3469,31 +3195,24 @@ bool Simulator::PrintValue(const char* desc) { } int i = CodeFromName(desc); - STATIC_ASSERT(kNumberOfRegisters == kNumberOfFPRegisters); - if (i < 0 || static_cast(i) >= kNumberOfFPRegisters) return false; + static_assert(kNumberOfRegisters == kNumberOfVRegisters, + "Must be same number of Registers as VRegisters."); + if (i < 0 || static_cast(i) >= kNumberOfVRegisters) return false; if (desc[0] == 'v') { PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n", - clr_fpreg_name, VRegNameForCode(i), - clr_fpreg_value, double_to_rawbits(dreg(i)), - clr_normal, - clr_fpreg_name, DRegNameForCode(i), - clr_fpreg_value, dreg(i), - clr_fpreg_name, SRegNameForCode(i), - clr_fpreg_value, sreg(i), - clr_normal); + clr_vreg_name, VRegNameForCode(i), clr_vreg_value, + bit_cast(dreg(i)), clr_normal, clr_vreg_name, + DRegNameForCode(i), clr_vreg_value, dreg(i), clr_vreg_name, + SRegNameForCode(i), clr_vreg_value, sreg(i), clr_normal); return true; } else if (desc[0] == 'd') { - PrintF(stream_, "%s %s:%s %g%s\n", - clr_fpreg_name, DRegNameForCode(i), - clr_fpreg_value, dreg(i), - clr_normal); + PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, DRegNameForCode(i), + clr_vreg_value, dreg(i), clr_normal); return true; } else if (desc[0] == 's') { - PrintF(stream_, "%s %s:%s %g%s\n", - clr_fpreg_name, SRegNameForCode(i), - clr_fpreg_value, sreg(i), - clr_normal); + PrintF(stream_, "%s %s:%s %g%s\n", clr_vreg_name, SRegNameForCode(i), + clr_vreg_value, sreg(i), clr_normal); return true; } else if (desc[0] == 'w') { PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n", @@ -3619,7 +3338,7 @@ void Simulator::Debug() { if (argc == 2) { if (strcmp(arg1, "all") == 0) { PrintRegisters(); - PrintFPRegisters(); + PrintVRegisters(); } else { if (!PrintValue(arg1)) { PrintF("%s unrecognized\n", arg1); @@ -3845,7 +3564,9 @@ void Simulator::VisitException(Instruction* instr) { set_log_parameters(log_parameters() | parameters); if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); } if (parameters & LOG_REGS) { PrintRegisters(); } - if (parameters & LOG_FP_REGS) { PrintFPRegisters(); } + if (parameters & LOG_VREGS) { + PrintVRegisters(); + } break; case TRACE_DISABLE: set_log_parameters(log_parameters() & ~parameters); @@ -3861,7 +3582,7 @@ void Simulator::VisitException(Instruction* instr) { // Print the requested information. 
if (parameters & LOG_SYS_REGS) PrintSystemRegisters(); if (parameters & LOG_REGS) PrintRegisters(); - if (parameters & LOG_FP_REGS) PrintFPRegisters(); + if (parameters & LOG_VREGS) PrintVRegisters(); } // The stop parameters are inlined in the code. Skip them: @@ -3892,85 +3613,2204 @@ void Simulator::VisitException(Instruction* instr) { } break; } - + case BRK: + base::OS::DebugBreak(); + break; default: UNIMPLEMENTED(); } } +void Simulator::VisitNEON2RegMisc(Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); -void Simulator::DoPrintf(Instruction* instr) { - DCHECK((instr->Mask(ExceptionMask) == HLT) && - (instr->ImmException() == kImmExceptionIsPrintf)); - - // Read the arguments encoded inline in the instruction stream. - uint32_t arg_count; - uint32_t arg_pattern_list; - STATIC_ASSERT(sizeof(*instr) == 1); - memcpy(&arg_count, - instr + kPrintfArgCountOffset, - sizeof(arg_count)); - memcpy(&arg_pattern_list, - instr + kPrintfArgPatternListOffset, - sizeof(arg_pattern_list)); - - DCHECK(arg_count <= kPrintfMaxArgCount); - DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0); + // Format mapping for "long pair" instructions, [su]addlp, [su]adalp. + static const NEONFormatMap map_lp = { + {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}}; + VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp); - // We need to call the host printf function with a set of arguments defined by - // arg_pattern_list. Because we don't know the types and sizes of the - // arguments, this is very difficult to do in a robust and portable way. To - // work around the problem, we pick apart the format string, and print one - // format placeholder at a time. + static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}}; + VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl); - // Allocate space for the format string. We take a copy, so we can modify it. - // Leave enough space for one extra character per expected argument (plus the - // '\0' termination). - const char * format_base = reg(0); - DCHECK(format_base != NULL); - size_t length = strlen(format_base) + 1; - char * const format = new char[length + arg_count]; + static const NEONFormatMap map_fcvtn = {{22, 30}, + {NF_4H, NF_8H, NF_2S, NF_4S}}; + VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn); - // A list of chunks, each with exactly one format placeholder. - const char * chunks[kPrintfMaxArgCount]; + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); - // Copy the format string and search for format placeholders. - uint32_t placeholder_count = 0; - char * format_scratch = format; - for (size_t i = 0; i < length; i++) { - if (format_base[i] != '%') { - *format_scratch++ = format_base[i]; - } else { - if (format_base[i + 1] == '%') { - // Ignore explicit "%%" sequences. - *format_scratch++ = format_base[i]; + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. 
+ switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_REV64: + rev64(vf, rd, rn); + break; + case NEON_REV32: + rev32(vf, rd, rn); + break; + case NEON_REV16: + rev16(vf, rd, rn); + break; + case NEON_SUQADD: + suqadd(vf, rd, rn); + break; + case NEON_USQADD: + usqadd(vf, rd, rn); + break; + case NEON_CLS: + cls(vf, rd, rn); + break; + case NEON_CLZ: + clz(vf, rd, rn); + break; + case NEON_CNT: + cnt(vf, rd, rn); + break; + case NEON_SQABS: + abs(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_SQNEG: + neg(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_CMGT_zero: + cmp(vf, rd, rn, 0, gt); + break; + case NEON_CMGE_zero: + cmp(vf, rd, rn, 0, ge); + break; + case NEON_CMEQ_zero: + cmp(vf, rd, rn, 0, eq); + break; + case NEON_CMLE_zero: + cmp(vf, rd, rn, 0, le); + break; + case NEON_CMLT_zero: + cmp(vf, rd, rn, 0, lt); + break; + case NEON_ABS: + abs(vf, rd, rn); + break; + case NEON_NEG: + neg(vf, rd, rn); + break; + case NEON_SADDLP: + saddlp(vf_lp, rd, rn); + break; + case NEON_UADDLP: + uaddlp(vf_lp, rd, rn); + break; + case NEON_SADALP: + sadalp(vf_lp, rd, rn); + break; + case NEON_UADALP: + uadalp(vf_lp, rd, rn); + break; + case NEON_RBIT_NOT: + vf = nfd.GetVectorFormat(nfd.LogicalFormatMap()); + switch (instr->FPType()) { + case 0: + not_(vf, rd, rn); + break; + case 1: + rbit(vf, rd, rn); + break; + default: + UNIMPLEMENTED(); + } + break; + } + } else { + VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap()); + FPRounding fpcr_rounding = static_cast(fpcr().RMode()); + bool inexact_exception = false; + + // These instructions all use a one bit size field, except XTN, SQXTUN, + // SHLL, SQXTN and UQXTN, which use a two bit size field. + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: + fabs_(fpf, rd, rn); + return; + case NEON_FNEG: + fneg(fpf, rd, rn); + return; + case NEON_FSQRT: + fsqrt(fpf, rd, rn); + return; + case NEON_FCVTL: + if (instr->Mask(NEON_Q)) { + fcvtl2(vf_fcvtl, rd, rn); + } else { + fcvtl(vf_fcvtl, rd, rn); + } + return; + case NEON_FCVTN: + if (instr->Mask(NEON_Q)) { + fcvtn2(vf_fcvtn, rd, rn); + } else { + fcvtn(vf_fcvtn, rd, rn); + } + return; + case NEON_FCVTXN: + if (instr->Mask(NEON_Q)) { + fcvtxn2(vf_fcvtn, rd, rn); + } else { + fcvtxn(vf_fcvtn, rd, rn); + } + return; + + // The following instructions break from the switch statement, rather + // than return. + case NEON_FRINTI: + break; // Use FPCR rounding mode. + case NEON_FRINTX: + inexact_exception = true; + break; + case NEON_FRINTA: + fpcr_rounding = FPTieAway; + break; + case NEON_FRINTM: + fpcr_rounding = FPNegativeInfinity; + break; + case NEON_FRINTN: + fpcr_rounding = FPTieEven; + break; + case NEON_FRINTP: + fpcr_rounding = FPPositiveInfinity; + break; + case NEON_FRINTZ: + fpcr_rounding = FPZero; + break; - if (placeholder_count == 0) { - // The first chunk is passed to printf using "%s", so we need to - // unescape "%%" sequences in this chunk. (Just skip the next '%'.) - i++; + // The remaining cases return to the caller. 
+ case NEON_FCVTNS: + fcvts(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTNU: + fcvtu(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTPS: + fcvts(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTPU: + fcvtu(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTMS: + fcvts(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTMU: + fcvtu(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTZS: + fcvts(fpf, rd, rn, FPZero); + return; + case NEON_FCVTZU: + fcvtu(fpf, rd, rn, FPZero); + return; + case NEON_FCVTAS: + fcvts(fpf, rd, rn, FPTieAway); + return; + case NEON_FCVTAU: + fcvtu(fpf, rd, rn, FPTieAway); + return; + case NEON_SCVTF: + scvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_UCVTF: + ucvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_URSQRTE: + ursqrte(fpf, rd, rn); + return; + case NEON_URECPE: + urecpe(fpf, rd, rn); + return; + case NEON_FRSQRTE: + frsqrte(fpf, rd, rn); + return; + case NEON_FRECPE: + frecpe(fpf, rd, rn, fpcr_rounding); + return; + case NEON_FCMGT_zero: + fcmp_zero(fpf, rd, rn, gt); + return; + case NEON_FCMGE_zero: + fcmp_zero(fpf, rd, rn, ge); + return; + case NEON_FCMEQ_zero: + fcmp_zero(fpf, rd, rn, eq); + return; + case NEON_FCMLE_zero: + fcmp_zero(fpf, rd, rn, le); + return; + case NEON_FCMLT_zero: + fcmp_zero(fpf, rd, rn, lt); + return; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: + xtn(vf, rd, rn); + return; + case NEON_SQXTN: + sqxtn(vf, rd, rn); + return; + case NEON_UQXTN: + uqxtn(vf, rd, rn); + return; + case NEON_SQXTUN: + sqxtun(vf, rd, rn); + return; + case NEON_SHLL: + vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + if (instr->Mask(NEON_Q)) { + shll2(vf, rd, rn); + } else { + shll(vf, rd, rn); + } + return; + default: + UNIMPLEMENTED(); + } } else { - // Otherwise, pass through "%%" unchanged. - *format_scratch++ = format_base[++i]; + UNIMPLEMENTED(); } - } else { - CHECK(placeholder_count < arg_count); - // Insert '\0' before placeholders, and store their locations. - *format_scratch++ = '\0'; - chunks[placeholder_count++] = format_scratch; - *format_scratch++ = format_base[i]; - } } + + // Only FRINT* instructions fall through the switch above. + frint(fpf, rd, rn, fpcr_rounding, inexact_exception); } - DCHECK(format_scratch <= (format + length + arg_count)); - CHECK(placeholder_count == arg_count); +} - // Finally, call printf with each chunk, passing the appropriate register - // argument. Normally, printf returns the number of bytes transmitted, so we - // can emulate a single printf call by adding the result from each chunk. If - // any call returns a negative (error) value, though, just return that value. 
+void Simulator::VisitNEON3Same(Instruction* instr) { + NEONFormatDecoder nfd(instr); + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); - fprintf(stream_, "%s", clr_printf); + if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap()); + switch (instr->Mask(NEON3SameLogicalMask)) { + case NEON_AND: + and_(vf, rd, rn, rm); + break; + case NEON_ORR: + orr(vf, rd, rn, rm); + break; + case NEON_ORN: + orn(vf, rd, rn, rm); + break; + case NEON_EOR: + eor(vf, rd, rn, rm); + break; + case NEON_BIC: + bic(vf, rd, rn, rm); + break; + case NEON_BIF: + bif(vf, rd, rn, rm); + break; + case NEON_BIT: + bit(vf, rd, rn, rm); + break; + case NEON_BSL: + bsl(vf, rd, rn, rm); + break; + default: + UNIMPLEMENTED(); + } + } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + switch (instr->Mask(NEON3SameFPMask)) { + case NEON_FADD: + fadd(vf, rd, rn, rm); + break; + case NEON_FSUB: + fsub(vf, rd, rn, rm); + break; + case NEON_FMUL: + fmul(vf, rd, rn, rm); + break; + case NEON_FDIV: + fdiv(vf, rd, rn, rm); + break; + case NEON_FMAX: + fmax(vf, rd, rn, rm); + break; + case NEON_FMIN: + fmin(vf, rd, rn, rm); + break; + case NEON_FMAXNM: + fmaxnm(vf, rd, rn, rm); + break; + case NEON_FMINNM: + fminnm(vf, rd, rn, rm); + break; + case NEON_FMLA: + fmla(vf, rd, rn, rm); + break; + case NEON_FMLS: + fmls(vf, rd, rn, rm); + break; + case NEON_FMULX: + fmulx(vf, rd, rn, rm); + break; + case NEON_FACGE: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGT: + fabscmp(vf, rd, rn, rm, gt); + break; + case NEON_FCMEQ: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FRECPS: + frecps(vf, rd, rn, rm); + break; + case NEON_FRSQRTS: + frsqrts(vf, rd, rn, rm); + break; + case NEON_FABD: + fabd(vf, rd, rn, rm); + break; + case NEON_FADDP: + faddp(vf, rd, rn, rm); + break; + case NEON_FMAXP: + fmaxp(vf, rd, rn, rm); + break; + case NEON_FMAXNMP: + fmaxnmp(vf, rd, rn, rm); + break; + case NEON_FMINP: + fminp(vf, rd, rn, rm); + break; + case NEON_FMINNMP: + fminnmp(vf, rd, rn, rm); + break; + default: + UNIMPLEMENTED(); + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + switch (instr->Mask(NEON3SameMask)) { + case NEON_ADD: + add(vf, rd, rn, rm); + break; + case NEON_ADDP: + addp(vf, rd, rn, rm); + break; + case NEON_CMEQ: + cmp(vf, rd, rn, rm, eq); + break; + case NEON_CMGE: + cmp(vf, rd, rn, rm, ge); + break; + case NEON_CMGT: + cmp(vf, rd, rn, rm, gt); + break; + case NEON_CMHI: + cmp(vf, rd, rn, rm, hi); + break; + case NEON_CMHS: + cmp(vf, rd, rn, rm, hs); + break; + case NEON_CMTST: + cmptst(vf, rd, rn, rm); + break; + case NEON_MLS: + mls(vf, rd, rn, rm); + break; + case NEON_MLA: + mla(vf, rd, rn, rm); + break; + case NEON_MUL: + mul(vf, rd, rn, rm); + break; + case NEON_PMUL: + pmul(vf, rd, rn, rm); + break; + case NEON_SMAX: + smax(vf, rd, rn, rm); + break; + case NEON_SMAXP: + smaxp(vf, rd, rn, rm); + break; + case NEON_SMIN: + smin(vf, rd, rn, rm); + break; + case NEON_SMINP: + sminp(vf, rd, rn, rm); + break; + case NEON_SUB: + sub(vf, rd, rn, rm); + break; + case NEON_UMAX: + umax(vf, rd, rn, rm); + break; + case NEON_UMAXP: + umaxp(vf, rd, rn, rm); + break; + case NEON_UMIN: + umin(vf, rd, rn, rm); + break; + case NEON_UMINP: + uminp(vf, rd, rn, rm); + break; + case NEON_SSHL: + 
sshl(vf, rd, rn, rm); + break; + case NEON_USHL: + ushl(vf, rd, rn, rm); + break; + case NEON_SABD: + AbsDiff(vf, rd, rn, rm, true); + break; + case NEON_UABD: + AbsDiff(vf, rd, rn, rm, false); + break; + case NEON_SABA: + saba(vf, rd, rn, rm); + break; + case NEON_UABA: + uaba(vf, rd, rn, rm); + break; + case NEON_UQADD: + add(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQADD: + add(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSUB: + sub(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSUB: + sub(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_SQDMULH: + sqdmulh(vf, rd, rn, rm); + break; + case NEON_SQRDMULH: + sqrdmulh(vf, rd, rn, rm); + break; + case NEON_UQSHL: + ushl(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSHL: + sshl(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_URSHL: + ushl(vf, rd, rn, rm).Round(vf); + break; + case NEON_SRSHL: + sshl(vf, rd, rn, rm).Round(vf); + break; + case NEON_UQRSHL: + ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + break; + case NEON_UHADD: + add(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_URHADD: + add(vf, rd, rn, rm).Uhalve(vf).Round(vf); + break; + case NEON_SHADD: + add(vf, rd, rn, rm).Halve(vf); + break; + case NEON_SRHADD: + add(vf, rd, rn, rm).Halve(vf).Round(vf); + break; + case NEON_UHSUB: + sub(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_SHSUB: + sub(vf, rd, rn, rm).Halve(vf); + break; + default: + UNIMPLEMENTED(); + } + } +} + +void Simulator::VisitNEON3Different(Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + switch (instr->Mask(NEON3DifferentMask)) { + case NEON_PMULL: + pmull(vf_l, rd, rn, rm); + break; + case NEON_PMULL2: + pmull2(vf_l, rd, rn, rm); + break; + case NEON_UADDL: + uaddl(vf_l, rd, rn, rm); + break; + case NEON_UADDL2: + uaddl2(vf_l, rd, rn, rm); + break; + case NEON_SADDL: + saddl(vf_l, rd, rn, rm); + break; + case NEON_SADDL2: + saddl2(vf_l, rd, rn, rm); + break; + case NEON_USUBL: + usubl(vf_l, rd, rn, rm); + break; + case NEON_USUBL2: + usubl2(vf_l, rd, rn, rm); + break; + case NEON_SSUBL: + ssubl(vf_l, rd, rn, rm); + break; + case NEON_SSUBL2: + ssubl2(vf_l, rd, rn, rm); + break; + case NEON_SABAL: + sabal(vf_l, rd, rn, rm); + break; + case NEON_SABAL2: + sabal2(vf_l, rd, rn, rm); + break; + case NEON_UABAL: + uabal(vf_l, rd, rn, rm); + break; + case NEON_UABAL2: + uabal2(vf_l, rd, rn, rm); + break; + case NEON_SABDL: + sabdl(vf_l, rd, rn, rm); + break; + case NEON_SABDL2: + sabdl2(vf_l, rd, rn, rm); + break; + case NEON_UABDL: + uabdl(vf_l, rd, rn, rm); + break; + case NEON_UABDL2: + uabdl2(vf_l, rd, rn, rm); + break; + case NEON_SMLAL: + smlal(vf_l, rd, rn, rm); + break; + case NEON_SMLAL2: + smlal2(vf_l, rd, rn, rm); + break; + case NEON_UMLAL: + umlal(vf_l, rd, rn, rm); + break; + case NEON_UMLAL2: + umlal2(vf_l, rd, rn, rm); + break; + case NEON_SMLSL: + smlsl(vf_l, rd, rn, rm); + break; + case NEON_SMLSL2: + smlsl2(vf_l, rd, rn, rm); + break; + case NEON_UMLSL: + umlsl(vf_l, rd, rn, rm); + break; + case NEON_UMLSL2: + umlsl2(vf_l, rd, rn, rm); + break; + case NEON_SMULL: + smull(vf_l, rd, rn, rm); + break; + case NEON_SMULL2: + smull2(vf_l, rd, rn, rm); + break; + case NEON_UMULL: + umull(vf_l, rd, 
rn, rm); + break; + case NEON_UMULL2: + umull2(vf_l, rd, rn, rm); + break; + case NEON_SQDMLAL: + sqdmlal(vf_l, rd, rn, rm); + break; + case NEON_SQDMLAL2: + sqdmlal2(vf_l, rd, rn, rm); + break; + case NEON_SQDMLSL: + sqdmlsl(vf_l, rd, rn, rm); + break; + case NEON_SQDMLSL2: + sqdmlsl2(vf_l, rd, rn, rm); + break; + case NEON_SQDMULL: + sqdmull(vf_l, rd, rn, rm); + break; + case NEON_SQDMULL2: + sqdmull2(vf_l, rd, rn, rm); + break; + case NEON_UADDW: + uaddw(vf_l, rd, rn, rm); + break; + case NEON_UADDW2: + uaddw2(vf_l, rd, rn, rm); + break; + case NEON_SADDW: + saddw(vf_l, rd, rn, rm); + break; + case NEON_SADDW2: + saddw2(vf_l, rd, rn, rm); + break; + case NEON_USUBW: + usubw(vf_l, rd, rn, rm); + break; + case NEON_USUBW2: + usubw2(vf_l, rd, rn, rm); + break; + case NEON_SSUBW: + ssubw(vf_l, rd, rn, rm); + break; + case NEON_SSUBW2: + ssubw2(vf_l, rd, rn, rm); + break; + case NEON_ADDHN: + addhn(vf, rd, rn, rm); + break; + case NEON_ADDHN2: + addhn2(vf, rd, rn, rm); + break; + case NEON_RADDHN: + raddhn(vf, rd, rn, rm); + break; + case NEON_RADDHN2: + raddhn2(vf, rd, rn, rm); + break; + case NEON_SUBHN: + subhn(vf, rd, rn, rm); + break; + case NEON_SUBHN2: + subhn2(vf, rd, rn, rm); + break; + case NEON_RSUBHN: + rsubhn(vf, rd, rn, rm); + break; + case NEON_RSUBHN2: + rsubhn2(vf, rd, rn, rm); + break; + default: + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONAcrossLanes(Instruction* instr) { + NEONFormatDecoder nfd(instr); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + // The input operand's VectorFormat is passed for these instructions. + if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: + fmaxv(vf, rd, rn); + break; + case NEON_FMINV: + fminv(vf, rd, rn); + break; + case NEON_FMAXNMV: + fmaxnmv(vf, rd, rn); + break; + case NEON_FMINNMV: + fminnmv(vf, rd, rn); + break; + default: + UNIMPLEMENTED(); + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: + addv(vf, rd, rn); + break; + case NEON_SMAXV: + smaxv(vf, rd, rn); + break; + case NEON_SMINV: + sminv(vf, rd, rn); + break; + case NEON_UMAXV: + umaxv(vf, rd, rn); + break; + case NEON_UMINV: + uminv(vf, rd, rn); + break; + case NEON_SADDLV: + saddlv(vf, rd, rn); + break; + case NEON_UADDLV: + uaddlv(vf, rd, rn); + break; + default: + UNIMPLEMENTED(); + } + } +} + +void Simulator::VisitNEONByIndexedElement(Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf_r = nfd.GetVectorFormat(); + VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + ByElementOp Op = NULL; + + int rm_reg = instr->Rm(); + int index = (instr->NEONH() << 1) | instr->NEONL(); + if (instr->NEONSize() == 1) { + rm_reg &= 0xf; + index = (index << 1) | instr->NEONM(); + } + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_MUL_byelement: + Op = &Simulator::mul; + vf = vf_r; + break; + case NEON_MLA_byelement: + Op = &Simulator::mla; + vf = vf_r; + break; + case NEON_MLS_byelement: + Op = &Simulator::mls; + vf = vf_r; + break; + case NEON_SQDMULH_byelement: + Op = &Simulator::sqdmulh; + vf = vf_r; + break; + case NEON_SQRDMULH_byelement: + Op = &Simulator::sqrdmulh; + vf = vf_r; + break; + case NEON_SMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smull2; + } else { + Op 
= &Simulator::smull; + } + break; + case NEON_UMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umull2; + } else { + Op = &Simulator::umull; + } + break; + case NEON_SMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smlal2; + } else { + Op = &Simulator::smlal; + } + break; + case NEON_UMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlal2; + } else { + Op = &Simulator::umlal; + } + break; + case NEON_SMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smlsl2; + } else { + Op = &Simulator::smlsl; + } + break; + case NEON_UMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlsl2; + } else { + Op = &Simulator::umlsl; + } + break; + case NEON_SQDMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmull2; + } else { + Op = &Simulator::sqdmull; + } + break; + case NEON_SQDMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlal2; + } else { + Op = &Simulator::sqdmlal; + } + break; + case NEON_SQDMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlsl2; + } else { + Op = &Simulator::sqdmlsl; + } + break; + default: + index = instr->NEONH(); + if ((instr->FPType() & 1) == 0) { + index = (index << 1) | instr->NEONL(); + } + + vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_byelement: + Op = &Simulator::fmul; + break; + case NEON_FMLA_byelement: + Op = &Simulator::fmla; + break; + case NEON_FMLS_byelement: + Op = &Simulator::fmls; + break; + case NEON_FMULX_byelement: + Op = &Simulator::fmulx; + break; + default: + UNIMPLEMENTED(); + } + } + + (this->*Op)(vf, rd, rn, vreg(rm_reg), index); +} + +void Simulator::VisitNEONCopy(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + int imm5 = instr->ImmNEON5(); + int lsb = LowestSetBitPosition(imm5); + int reg_index = imm5 >> lsb; + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + int imm4 = instr->ImmNEON4(); + DCHECK_GE(lsb, 1); + int rn_index = imm4 >> (lsb - 1); + ins_element(vf, rd, reg_index, rn, rn_index); + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + ins_immediate(vf, rd, reg_index, xreg(instr->Rn())); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + uint64_t value = LogicVRegister(rn).Uint(vf, reg_index); + value &= MaxUintFromFormat(vf); + set_xreg(instr->Rd(), value); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) { + int64_t value = LogicVRegister(rn).Int(vf, reg_index); + if (instr->NEONQ()) { + set_xreg(instr->Rd(), value); + } else { + DCHECK(is_int32(value)); + set_wreg(instr->Rd(), static_cast(value)); + } + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + dup_element(vf, rd, rn, reg_index); + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + dup_immediate(vf, rd, xreg(instr->Rn())); + } else { + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONExtract(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + int index = instr->ImmNEONExt(); + ext(vf, rd, rn, rm, index); + } else { + UNIMPLEMENTED(); + } +} + +void 
Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
+ int reg_size = RegisterSizeInBytesFromFormat(vf);
+
+ int reg[4];
+ uint64_t addr[4];
+ for (int i = 0; i < 4; i++) {
+ reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
+ addr[i] = addr_base + (i * reg_size);
+ }
+ int count = 1;
+ bool log_read = true;
+
+ // Bit 23 determines whether this is an offset or post-index addressing mode.
+ // In offset mode, bits 20 to 16 should be zero; these bits encode the
+ // register or immediate in post-index mode.
+ if ((instr->Bit(23) == 0) && (instr->Bits(20, 16) != 0)) {
+ UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_4v:
+ case NEON_LD1_4v_post:
+ ld1(vf, vreg(reg[3]), addr[3]);
+ count++; // Fall through.
+ case NEON_LD1_3v:
+ case NEON_LD1_3v_post:
+ ld1(vf, vreg(reg[2]), addr[2]);
+ count++; // Fall through.
+ case NEON_LD1_2v:
+ case NEON_LD1_2v_post:
+ ld1(vf, vreg(reg[1]), addr[1]);
+ count++; // Fall through.
+ case NEON_LD1_1v:
+ case NEON_LD1_1v_post:
+ ld1(vf, vreg(reg[0]), addr[0]);
+ break;
+ case NEON_ST1_4v:
+ case NEON_ST1_4v_post:
+ st1(vf, vreg(reg[3]), addr[3]);
+ count++; // Fall through.
+ case NEON_ST1_3v:
+ case NEON_ST1_3v_post:
+ st1(vf, vreg(reg[2]), addr[2]);
+ count++; // Fall through.
+ case NEON_ST1_2v:
+ case NEON_ST1_2v_post:
+ st1(vf, vreg(reg[1]), addr[1]);
+ count++; // Fall through.
+ case NEON_ST1_1v:
+ case NEON_ST1_1v_post:
+ st1(vf, vreg(reg[0]), addr[0]);
+ log_read = false;
+ break;
+ case NEON_LD2_post:
+ case NEON_LD2:
+ ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_ST2:
+ case NEON_ST2_post:
+ st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ log_read = false;
+ break;
+ case NEON_LD3_post:
+ case NEON_LD3:
+ ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST3:
+ case NEON_ST3_post:
+ st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ log_read = false;
+ break;
+ case NEON_LD4_post:
+ case NEON_LD4:
+ ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
+ count = 4;
+ break;
+ case NEON_ST4:
+ case NEON_ST4_post:
+ st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]), addr[0]);
+ count = 4;
+ log_read = false;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (log_read) {
+ local_monitor_.NotifyLoad();
+ } else {
+ local_monitor_.NotifyStore();
+ global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_);
+ }
+ }
+
+ // Explicitly log the register update whilst we have type information.
+ for (int i = 0; i < count; i++) {
+ // For de-interleaving loads, only print the base address.
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
+ GetPrintRegisterFormatForSize(reg_size, lane_size));
+ if (log_read) {
+ LogVRead(addr_base, reg[i], format);
+ } else {
+ LogVWrite(addr_base, reg[i], format);
+ }
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+ // The immediate post index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
+ addr_base +=
+ (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count : xreg(rm);
+ set_xreg(instr->Rn(), addr_base);
+ } else {
+ DCHECK_EQ(addr_mode, Offset);
+ }
+}
+
+void Simulator::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, Offset);
+}
+
+void Simulator::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, PostIndex);
+}
+
+void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
+ int rt = instr->Rt();
+
+ // Bit 23 determines whether this is an offset or post-index addressing mode.
+ // In offset mode, bits 20 to 16 should be zero; these bits encode the
+ // register or immediate in post-index mode.
+ DCHECK_IMPLIES(instr->Bit(23) == 0, instr->Bits(20, 16) == 0);
+
+ bool do_load = false;
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf_t = nfd.GetVectorFormat();
+
+ VectorFormat vf = kFormat16B;
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b:
+ case NEON_LD1_b_post:
+ case NEON_LD2_b:
+ case NEON_LD2_b_post:
+ case NEON_LD3_b:
+ case NEON_LD3_b_post:
+ case NEON_LD4_b:
+ case NEON_LD4_b_post:
+ do_load = true; // Fall through.
+ case NEON_ST1_b:
+ case NEON_ST1_b_post:
+ case NEON_ST2_b:
+ case NEON_ST2_b_post:
+ case NEON_ST3_b:
+ case NEON_ST3_b_post:
+ case NEON_ST4_b:
+ case NEON_ST4_b_post:
+ break;
+
+ case NEON_LD1_h:
+ case NEON_LD1_h_post:
+ case NEON_LD2_h:
+ case NEON_LD2_h_post:
+ case NEON_LD3_h:
+ case NEON_LD3_h_post:
+ case NEON_LD4_h:
+ case NEON_LD4_h_post:
+ do_load = true; // Fall through.
+ case NEON_ST1_h:
+ case NEON_ST1_h_post:
+ case NEON_ST2_h:
+ case NEON_ST2_h_post:
+ case NEON_ST3_h:
+ case NEON_ST3_h_post:
+ case NEON_ST4_h:
+ case NEON_ST4_h_post:
+ vf = kFormat8H;
+ break;
+
+ case NEON_LD1_s:
+ case NEON_LD1_s_post:
+ case NEON_LD2_s:
+ case NEON_LD2_s_post:
+ case NEON_LD3_s:
+ case NEON_LD3_s_post:
+ case NEON_LD4_s:
+ case NEON_LD4_s_post:
+ do_load = true; // Fall through.
+ case NEON_ST1_s:
+ case NEON_ST1_s_post:
+ case NEON_ST2_s:
+ case NEON_ST2_s_post:
+ case NEON_ST3_s:
+ case NEON_ST3_s_post:
+ case NEON_ST4_s:
+ case NEON_ST4_s_post: {
+ static_assert((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d,
+ "LSB of size distinguishes S and D registers.");
+ static_assert(
+ (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post,
+ "LSB of size distinguishes S and D registers.");
+ static_assert((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d,
+ "LSB of size distinguishes S and D registers.");
+ static_assert(
+ (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post,
+ "LSB of size distinguishes S and D registers.");
+ vf = ((instr->NEONLSSize() & 1) == 0) ?
kFormat4S : kFormat2D; + break; + } + + case NEON_LD1R: + case NEON_LD1R_post: { + vf = vf_t; + ld1r(vf, vreg(rt), addr); + do_load = true; + break; + } + + case NEON_LD2R: + case NEON_LD2R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + ld2r(vf, vreg(rt), vreg(rt2), addr); + do_load = true; + break; + } + + case NEON_LD3R: + case NEON_LD3R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr); + do_load = true; + break; + } + + case NEON_LD4R: + case NEON_LD4R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + int rt4 = (rt3 + 1) % kNumberOfVRegisters; + ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr); + do_load = true; + break; + } + default: + UNIMPLEMENTED(); + } + + PrintRegisterFormat print_format = + GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf)); + // Make sure that the print_format only includes a single lane. + print_format = + static_cast(print_format & ~kPrintRegAsVectorMask); + + int esize = LaneSizeInBytesFromFormat(vf); + int index_shift = LaneSizeInBytesLog2FromFormat(vf); + int lane = instr->NEONLSIndex(index_shift); + int scale = 0; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + int rt4 = (rt3 + 1) % kNumberOfVRegisters; + switch (instr->Mask(NEONLoadStoreSingleLenMask)) { + case NEONLoadStoreSingle1: + scale = 1; + if (do_load) { + ld1(vf, vreg(rt), lane, addr); + LogVRead(addr, rt, print_format, lane); + } else { + st1(vf, vreg(rt), lane, addr); + LogVWrite(addr, rt, print_format, lane); + } + break; + case NEONLoadStoreSingle2: + scale = 2; + if (do_load) { + ld2(vf, vreg(rt), vreg(rt2), lane, addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + } else { + st2(vf, vreg(rt), vreg(rt2), lane, addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + } + break; + case NEONLoadStoreSingle3: + scale = 3; + if (do_load) { + ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + LogVRead(addr + (2 * esize), rt3, print_format, lane); + } else { + st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + LogVWrite(addr + (2 * esize), rt3, print_format, lane); + } + break; + case NEONLoadStoreSingle4: + scale = 4; + if (do_load) { + ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + LogVRead(addr + (2 * esize), rt3, print_format, lane); + LogVRead(addr + (3 * esize), rt4, print_format, lane); + } else { + st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + LogVWrite(addr + (2 * esize), rt3, print_format, lane); + LogVWrite(addr + (3 * esize), rt4, print_format, lane); + } + break; + default: + UNIMPLEMENTED(); + } + + { + base::LockGuard lock_guard(&global_monitor_.Pointer()->mutex); + if (do_load) { + local_monitor_.NotifyLoad(); + } else { + local_monitor_.NotifyStore(); + global_monitor_.Pointer()->NotifyStore_Locked(&global_monitor_processor_); + } + } + + if (addr_mode == PostIndex) { + int rm = instr->Rm(); + int lane_size = 
LaneSizeInBytesFromFormat(vf); + set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm))); + } +} + +void Simulator::VisitNEONLoadStoreSingleStruct(Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, Offset); +} + +void Simulator::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, PostIndex); +} + +void Simulator::VisitNEONModifiedImmediate(Instruction* instr) { + SimVRegister& rd = vreg(instr->Rd()); + int cmode = instr->NEONCmode(); + int cmode_3_1 = (cmode >> 1) & 7; + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int q = instr->NEONQ(); + int op_bit = instr->NEONModImmOp(); + uint64_t imm8 = instr->ImmNEONabcdefgh(); + + // Find the format and immediate value + uint64_t imm = 0; + VectorFormat vform = kFormatUndefined; + switch (cmode_3_1) { + case 0x0: + case 0x1: + case 0x2: + case 0x3: + vform = (q == 1) ? kFormat4S : kFormat2S; + imm = imm8 << (8 * cmode_3_1); + break; + case 0x4: + case 0x5: + vform = (q == 1) ? kFormat8H : kFormat4H; + imm = imm8 << (8 * cmode_1); + break; + case 0x6: + vform = (q == 1) ? kFormat4S : kFormat2S; + if (cmode_0 == 0) { + imm = imm8 << 8 | 0x000000ff; + } else { + imm = imm8 << 16 | 0x0000ffff; + } + break; + case 0x7: + if (cmode_0 == 0 && op_bit == 0) { + vform = q ? kFormat16B : kFormat8B; + imm = imm8; + } else if (cmode_0 == 0 && op_bit == 1) { + vform = q ? kFormat2D : kFormat1D; + imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + } else { // cmode_0 == 1, cmode == 0xf. + if (op_bit == 0) { + vform = q ? kFormat4S : kFormat2S; + imm = bit_cast(instr->ImmNEONFP32()); + } else if (q == 1) { + vform = kFormat2D; + imm = bit_cast(instr->ImmNEONFP64()); + } else { + DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xf)); + VisitUnallocated(instr); + } + } + break; + default: + UNREACHABLE(); + } + + // Find the operation. + NEONModifiedImmediateOp op; + if (cmode_3 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<3> == '1' + if (cmode_2 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<2> == '1' + if (cmode_1 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<1> == '1' + if (cmode_0 == 0) { + op = NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = NEONModifiedImmediate_MOVI; + } + } + } + } + + // Call the logic function. 
+ switch (op) { + case NEONModifiedImmediate_ORR: + orr(vform, rd, rd, imm); + break; + case NEONModifiedImmediate_BIC: + bic(vform, rd, rd, imm); + break; + case NEONModifiedImmediate_MOVI: + movi(vform, rd, imm); + break; + case NEONModifiedImmediate_MVNI: + mvni(vform, rd, imm); + break; + default: + VisitUnimplemented(instr); + } +} + +void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_CMEQ_zero_scalar: + cmp(vf, rd, rn, 0, eq); + break; + case NEON_CMGE_zero_scalar: + cmp(vf, rd, rn, 0, ge); + break; + case NEON_CMGT_zero_scalar: + cmp(vf, rd, rn, 0, gt); + break; + case NEON_CMLT_zero_scalar: + cmp(vf, rd, rn, 0, lt); + break; + case NEON_CMLE_zero_scalar: + cmp(vf, rd, rn, 0, le); + break; + case NEON_ABS_scalar: + abs(vf, rd, rn); + break; + case NEON_SQABS_scalar: + abs(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_NEG_scalar: + neg(vf, rd, rn); + break; + case NEON_SQNEG_scalar: + neg(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_SUQADD_scalar: + suqadd(vf, rd, rn); + break; + case NEON_USQADD_scalar: + usqadd(vf, rd, rn); + break; + default: + UNIMPLEMENTED(); + break; + } + } else { + VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + FPRounding fpcr_rounding = static_cast(fpcr().RMode()); + + // These instructions all use a one bit size field, except SQXTUN, SQXTN + // and UQXTN, which use a two bit size field. + switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRECPE_scalar: + frecpe(fpf, rd, rn, fpcr_rounding); + break; + case NEON_FRECPX_scalar: + frecpx(fpf, rd, rn); + break; + case NEON_FRSQRTE_scalar: + frsqrte(fpf, rd, rn); + break; + case NEON_FCMGT_zero_scalar: + fcmp_zero(fpf, rd, rn, gt); + break; + case NEON_FCMGE_zero_scalar: + fcmp_zero(fpf, rd, rn, ge); + break; + case NEON_FCMEQ_zero_scalar: + fcmp_zero(fpf, rd, rn, eq); + break; + case NEON_FCMLE_zero_scalar: + fcmp_zero(fpf, rd, rn, le); + break; + case NEON_FCMLT_zero_scalar: + fcmp_zero(fpf, rd, rn, lt); + break; + case NEON_SCVTF_scalar: + scvtf(fpf, rd, rn, 0, fpcr_rounding); + break; + case NEON_UCVTF_scalar: + ucvtf(fpf, rd, rn, 0, fpcr_rounding); + break; + case NEON_FCVTNS_scalar: + fcvts(fpf, rd, rn, FPTieEven); + break; + case NEON_FCVTNU_scalar: + fcvtu(fpf, rd, rn, FPTieEven); + break; + case NEON_FCVTPS_scalar: + fcvts(fpf, rd, rn, FPPositiveInfinity); + break; + case NEON_FCVTPU_scalar: + fcvtu(fpf, rd, rn, FPPositiveInfinity); + break; + case NEON_FCVTMS_scalar: + fcvts(fpf, rd, rn, FPNegativeInfinity); + break; + case NEON_FCVTMU_scalar: + fcvtu(fpf, rd, rn, FPNegativeInfinity); + break; + case NEON_FCVTZS_scalar: + fcvts(fpf, rd, rn, FPZero); + break; + case NEON_FCVTZU_scalar: + fcvtu(fpf, rd, rn, FPZero); + break; + case NEON_FCVTAS_scalar: + fcvts(fpf, rd, rn, FPTieAway); + break; + case NEON_FCVTAU_scalar: + fcvtu(fpf, rd, rn, FPTieAway); + break; + case NEON_FCVTXN_scalar: + // Unlike all of the other FP instructions above, fcvtxn encodes dest + // size S as size<0>=1. There's only one case, so we ignore the form. 
+ DCHECK_EQ(instr->Bit(22), 1); + fcvtxn(kFormatS, rd, rn); + break; + default: + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_SQXTN_scalar: + sqxtn(vf, rd, rn); + break; + case NEON_UQXTN_scalar: + uqxtn(vf, rd, rn); + break; + case NEON_SQXTUN_scalar: + sqxtun(vf, rd, rn); + break; + default: + UNIMPLEMENTED(); + } + } + } +} + +void Simulator::VisitNEONScalar3Diff(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar: + sqdmlal(vf, rd, rn, rm); + break; + case NEON_SQDMLSL_scalar: + sqdmlsl(vf, rd, rn, rm); + break; + case NEON_SQDMULL_scalar: + sqdmull(vf, rd, rn, rm); + break; + default: + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONScalar3Same(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FMULX_scalar: + fmulx(vf, rd, rn, rm); + break; + case NEON_FACGE_scalar: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGT_scalar: + fabscmp(vf, rd, rn, rm, gt); + break; + case NEON_FCMEQ_scalar: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE_scalar: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT_scalar: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FRECPS_scalar: + frecps(vf, rd, rn, rm); + break; + case NEON_FRSQRTS_scalar: + frsqrts(vf, rd, rn, rm); + break; + case NEON_FABD_scalar: + fabd(vf, rd, rn, rm); + break; + default: + UNIMPLEMENTED(); + } + } else { + switch (instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: + add(vf, rd, rn, rm); + break; + case NEON_SUB_scalar: + sub(vf, rd, rn, rm); + break; + case NEON_CMEQ_scalar: + cmp(vf, rd, rn, rm, eq); + break; + case NEON_CMGE_scalar: + cmp(vf, rd, rn, rm, ge); + break; + case NEON_CMGT_scalar: + cmp(vf, rd, rn, rm, gt); + break; + case NEON_CMHI_scalar: + cmp(vf, rd, rn, rm, hi); + break; + case NEON_CMHS_scalar: + cmp(vf, rd, rn, rm, hs); + break; + case NEON_CMTST_scalar: + cmptst(vf, rd, rn, rm); + break; + case NEON_USHL_scalar: + ushl(vf, rd, rn, rm); + break; + case NEON_SSHL_scalar: + sshl(vf, rd, rn, rm); + break; + case NEON_SQDMULH_scalar: + sqdmulh(vf, rd, rn, rm); + break; + case NEON_SQRDMULH_scalar: + sqrdmulh(vf, rd, rn, rm); + break; + case NEON_UQADD_scalar: + add(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQADD_scalar: + add(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSUB_scalar: + sub(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSUB_scalar: + sub(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSHL_scalar: + ushl(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSHL_scalar: + sshl(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_URSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf); + break; + case NEON_SRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf); + break; + case NEON_UQRSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + 
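The *QADD/*QSUB/*QSHL cases above share one pattern: compute the plain result, then let the recorded saturation state clamp it. A scalar sketch of the UQADD behaviour modelled by add(vf, rd, rn, rm).UnsignedSaturate(vf) (illustration only, not the simulator's code path):

#include <cassert>
#include <cstdint>

// Unsigned saturating add on one 8-bit lane: overflow clamps to the lane
// maximum instead of wrapping.
uint8_t UnsignedSaturatingAdd8(uint8_t a, uint8_t b) {
  unsigned sum = static_cast<unsigned>(a) + b;
  return sum > UINT8_MAX ? UINT8_MAX : static_cast<uint8_t>(sum);
}

int main() {
  assert(UnsignedSaturatingAdd8(200, 100) == 255);  // saturates
  assert(UnsignedSaturatingAdd8(20, 30) == 50);     // normal add
  return 0;
}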
break; + default: + UNIMPLEMENTED(); + } + } +} + +void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap()); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + ByElementOp Op = NULL; + + int rm_reg = instr->Rm(); + int index = (instr->NEONH() << 1) | instr->NEONL(); + if (instr->NEONSize() == 1) { + rm_reg &= 0xf; + index = (index << 1) | instr->NEONM(); + } + + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQDMULL_byelement_scalar: + Op = &Simulator::sqdmull; + break; + case NEON_SQDMLAL_byelement_scalar: + Op = &Simulator::sqdmlal; + break; + case NEON_SQDMLSL_byelement_scalar: + Op = &Simulator::sqdmlsl; + break; + case NEON_SQDMULH_byelement_scalar: + Op = &Simulator::sqdmulh; + vf = vf_r; + break; + case NEON_SQRDMULH_byelement_scalar: + Op = &Simulator::sqrdmulh; + vf = vf_r; + break; + default: + vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + index = instr->NEONH(); + if ((instr->FPType() & 1) == 0) { + index = (index << 1) | instr->NEONL(); + } + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMUL_byelement_scalar: + Op = &Simulator::fmul; + break; + case NEON_FMLA_byelement_scalar: + Op = &Simulator::fmla; + break; + case NEON_FMLS_byelement_scalar: + Op = &Simulator::fmls; + break; + case NEON_FMULX_byelement_scalar: + Op = &Simulator::fmulx; + break; + default: + UNIMPLEMENTED(); + } + } + + (this->*Op)(vf, rd, rn, vreg(rm_reg), index); +} + +void Simulator::VisitNEONScalarCopy(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + + if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { + int imm5 = instr->ImmNEON5(); + int lsb = LowestSetBitPosition(imm5); + int rn_index = imm5 >> lsb; + dup_element(vf, rd, rn, rn_index); + } else { + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONScalarPairwise(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_ADDP_scalar: + addp(vf, rd, rn); + break; + case NEON_FADDP_scalar: + faddp(vf, rd, rn); + break; + case NEON_FMAXP_scalar: + fmaxp(vf, rd, rn); + break; + case NEON_FMAXNMP_scalar: + fmaxnmp(vf, rd, rn); + break; + case NEON_FMINP_scalar: + fminp(vf, rd, rn); + break; + case NEON_FMINNMP_scalar: + fminnmp(vf, rd, rn); + break; + default: + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONScalarShiftImmediate(Instruction* instr) { + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + FPRounding fpcr_rounding = static_cast(fpcr().RMode()); + + static const NEONFormatMap map = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S, NF_D, NF_D, NF_D, + NF_D, NF_D, NF_D, NF_D, NF_D}}; + NEONFormatDecoder nfd(instr, &map); + VectorFormat vf = nfd.GetVectorFormat(); + + int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh()); + int immhimmb = instr->ImmNEONImmhImmb(); + int right_shift = (16 << highestSetBit) - immhimmb; + int left_shift = immhimmb - (8 << highestSetBit); + switch 
(instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_SHL_scalar: + shl(vf, rd, rn, left_shift); + break; + case NEON_SLI_scalar: + sli(vf, rd, rn, left_shift); + break; + case NEON_SQSHL_imm_scalar: + sqshl(vf, rd, rn, left_shift); + break; + case NEON_UQSHL_imm_scalar: + uqshl(vf, rd, rn, left_shift); + break; + case NEON_SQSHLU_scalar: + sqshlu(vf, rd, rn, left_shift); + break; + case NEON_SRI_scalar: + sri(vf, rd, rn, right_shift); + break; + case NEON_SSHR_scalar: + sshr(vf, rd, rn, right_shift); + break; + case NEON_USHR_scalar: + ushr(vf, rd, rn, right_shift); + break; + case NEON_SRSHR_scalar: + sshr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_URSHR_scalar: + ushr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_SSRA_scalar: + ssra(vf, rd, rn, right_shift); + break; + case NEON_USRA_scalar: + usra(vf, rd, rn, right_shift); + break; + case NEON_SRSRA_scalar: + srsra(vf, rd, rn, right_shift); + break; + case NEON_URSRA_scalar: + ursra(vf, rd, rn, right_shift); + break; + case NEON_UQSHRN_scalar: + uqshrn(vf, rd, rn, right_shift); + break; + case NEON_UQRSHRN_scalar: + uqrshrn(vf, rd, rn, right_shift); + break; + case NEON_SQSHRN_scalar: + sqshrn(vf, rd, rn, right_shift); + break; + case NEON_SQRSHRN_scalar: + sqrshrn(vf, rd, rn, right_shift); + break; + case NEON_SQSHRUN_scalar: + sqshrun(vf, rd, rn, right_shift); + break; + case NEON_SQRSHRUN_scalar: + sqrshrun(vf, rd, rn, right_shift); + break; + case NEON_FCVTZS_imm_scalar: + fcvts(vf, rd, rn, FPZero, right_shift); + break; + case NEON_FCVTZU_imm_scalar: + fcvtu(vf, rd, rn, FPZero, right_shift); + break; + case NEON_SCVTF_imm_scalar: + scvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + case NEON_UCVTF_imm_scalar: + ucvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + default: + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONShiftImmediate(Instruction* instr) { + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + FPRounding fpcr_rounding = static_cast(fpcr().RMode()); + + // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, + // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. + static const NEONFormatMap map = { + {22, 21, 20, 19, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H, + NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}}; + NEONFormatDecoder nfd(instr, &map); + VectorFormat vf = nfd.GetVectorFormat(); + + // 0001->8H, 001x->4S, 01xx->2D, all others undefined. 
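Both shift-immediate visitors derive the shift amount from immh:immb with the two formulas above. A worked check of those formulas for byte lanes, where immh is 0b0001 (standalone illustration; the encodings follow the AArch64 shift-immediate scheme):

#include <cassert>

int main() {
  const int highest_set_bit = 0;  // HighestSetBitPosition(0b0001) for B lanes
  int immhimmb = 13;              // immh:immb encoding of USHR #3 on bytes
  assert((16 << highest_set_bit) - immhimmb == 3);  // right_shift
  immhimmb = 11;                  // immh:immb encoding of SHL #3 on bytes
  assert(immhimmb - (8 << highest_set_bit) == 3);   // left_shift
  return 0;
}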
+ static const NEONFormatMap map_l = { + {22, 21, 20, 19}, + {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}}; + VectorFormat vf_l = nfd.GetVectorFormat(&map_l); + + int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh()); + int immhimmb = instr->ImmNEONImmhImmb(); + int right_shift = (16 << highestSetBit) - immhimmb; + int left_shift = immhimmb - (8 << highestSetBit); + + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SHL: + shl(vf, rd, rn, left_shift); + break; + case NEON_SLI: + sli(vf, rd, rn, left_shift); + break; + case NEON_SQSHLU: + sqshlu(vf, rd, rn, left_shift); + break; + case NEON_SRI: + sri(vf, rd, rn, right_shift); + break; + case NEON_SSHR: + sshr(vf, rd, rn, right_shift); + break; + case NEON_USHR: + ushr(vf, rd, rn, right_shift); + break; + case NEON_SRSHR: + sshr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_URSHR: + ushr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_SSRA: + ssra(vf, rd, rn, right_shift); + break; + case NEON_USRA: + usra(vf, rd, rn, right_shift); + break; + case NEON_SRSRA: + srsra(vf, rd, rn, right_shift); + break; + case NEON_URSRA: + ursra(vf, rd, rn, right_shift); + break; + case NEON_SQSHL_imm: + sqshl(vf, rd, rn, left_shift); + break; + case NEON_UQSHL_imm: + uqshl(vf, rd, rn, left_shift); + break; + case NEON_SCVTF_imm: + scvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + case NEON_UCVTF_imm: + ucvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + case NEON_FCVTZS_imm: + fcvts(vf, rd, rn, FPZero, right_shift); + break; + case NEON_FCVTZU_imm: + fcvtu(vf, rd, rn, FPZero, right_shift); + break; + case NEON_SSHLL: + vf = vf_l; + if (instr->Mask(NEON_Q)) { + sshll2(vf, rd, rn, left_shift); + } else { + sshll(vf, rd, rn, left_shift); + } + break; + case NEON_USHLL: + vf = vf_l; + if (instr->Mask(NEON_Q)) { + ushll2(vf, rd, rn, left_shift); + } else { + ushll(vf, rd, rn, left_shift); + } + break; + case NEON_SHRN: + if (instr->Mask(NEON_Q)) { + shrn2(vf, rd, rn, right_shift); + } else { + shrn(vf, rd, rn, right_shift); + } + break; + case NEON_RSHRN: + if (instr->Mask(NEON_Q)) { + rshrn2(vf, rd, rn, right_shift); + } else { + rshrn(vf, rd, rn, right_shift); + } + break; + case NEON_UQSHRN: + if (instr->Mask(NEON_Q)) { + uqshrn2(vf, rd, rn, right_shift); + } else { + uqshrn(vf, rd, rn, right_shift); + } + break; + case NEON_UQRSHRN: + if (instr->Mask(NEON_Q)) { + uqrshrn2(vf, rd, rn, right_shift); + } else { + uqrshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQSHRN: + if (instr->Mask(NEON_Q)) { + sqshrn2(vf, rd, rn, right_shift); + } else { + sqshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQRSHRN: + if (instr->Mask(NEON_Q)) { + sqrshrn2(vf, rd, rn, right_shift); + } else { + sqrshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQSHRUN: + if (instr->Mask(NEON_Q)) { + sqshrun2(vf, rd, rn, right_shift); + } else { + sqshrun(vf, rd, rn, right_shift); + } + break; + case NEON_SQRSHRUN: + if (instr->Mask(NEON_Q)) { + sqrshrun2(vf, rd, rn, right_shift); + } else { + sqrshrun(vf, rd, rn, right_shift); + } + break; + default: + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONTable(Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters); + SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters); + SimVRegister& rn4 = vreg((instr->Rn() 
+ 3) % kNumberOfVRegisters); + SimVRegister& rm = vreg(instr->Rm()); + + switch (instr->Mask(NEONTableMask)) { + case NEON_TBL_1v: + tbl(vf, rd, rn, rm); + break; + case NEON_TBL_2v: + tbl(vf, rd, rn, rn2, rm); + break; + case NEON_TBL_3v: + tbl(vf, rd, rn, rn2, rn3, rm); + break; + case NEON_TBL_4v: + tbl(vf, rd, rn, rn2, rn3, rn4, rm); + break; + case NEON_TBX_1v: + tbx(vf, rd, rn, rm); + break; + case NEON_TBX_2v: + tbx(vf, rd, rn, rn2, rm); + break; + case NEON_TBX_3v: + tbx(vf, rd, rn, rn2, rn3, rm); + break; + case NEON_TBX_4v: + tbx(vf, rd, rn, rn2, rn3, rn4, rm); + break; + default: + UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONPerm(Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = vreg(instr->Rd()); + SimVRegister& rn = vreg(instr->Rn()); + SimVRegister& rm = vreg(instr->Rm()); + + switch (instr->Mask(NEONPermMask)) { + case NEON_TRN1: + trn1(vf, rd, rn, rm); + break; + case NEON_TRN2: + trn2(vf, rd, rn, rm); + break; + case NEON_UZP1: + uzp1(vf, rd, rn, rm); + break; + case NEON_UZP2: + uzp2(vf, rd, rn, rm); + break; + case NEON_ZIP1: + zip1(vf, rd, rn, rm); + break; + case NEON_ZIP2: + zip2(vf, rd, rn, rm); + break; + default: + UNIMPLEMENTED(); + } +} + +void Simulator::DoPrintf(Instruction* instr) { + DCHECK((instr->Mask(ExceptionMask) == HLT) && + (instr->ImmException() == kImmExceptionIsPrintf)); + + // Read the arguments encoded inline in the instruction stream. + uint32_t arg_count; + uint32_t arg_pattern_list; + STATIC_ASSERT(sizeof(*instr) == 1); + memcpy(&arg_count, + instr + kPrintfArgCountOffset, + sizeof(arg_count)); + memcpy(&arg_pattern_list, + instr + kPrintfArgPatternListOffset, + sizeof(arg_pattern_list)); + + DCHECK(arg_count <= kPrintfMaxArgCount); + DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0); + + // We need to call the host printf function with a set of arguments defined by + // arg_pattern_list. Because we don't know the types and sizes of the + // arguments, this is very difficult to do in a robust and portable way. To + // work around the problem, we pick apart the format string, and print one + // format placeholder at a time. + + // Allocate space for the format string. We take a copy, so we can modify it. + // Leave enough space for one extra character per expected argument (plus the + // '\0' termination). + const char * format_base = reg(0); + DCHECK(format_base != NULL); + size_t length = strlen(format_base) + 1; + char * const format = new char[length + arg_count]; + + // A list of chunks, each with exactly one format placeholder. + const char * chunks[kPrintfMaxArgCount]; + + // Copy the format string and search for format placeholders. + uint32_t placeholder_count = 0; + char * format_scratch = format; + for (size_t i = 0; i < length; i++) { + if (format_base[i] != '%') { + *format_scratch++ = format_base[i]; + } else { + if (format_base[i + 1] == '%') { + // Ignore explicit "%%" sequences. + *format_scratch++ = format_base[i]; + + if (placeholder_count == 0) { + // The first chunk is passed to printf using "%s", so we need to + // unescape "%%" sequences in this chunk. (Just skip the next '%'.) + i++; + } else { + // Otherwise, pass through "%%" unchanged. + *format_scratch++ = format_base[++i]; + } + } else { + CHECK(placeholder_count < arg_count); + // Insert '\0' before placeholders, and store their locations. 
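The chunking scheme used by DoPrintf above can be seen in isolation: the copied format string gets a '\0' written before every placeholder, so each recorded chunk carries exactly one placeholder and is later printed with its matching argument. A minimal sketch (the "%%" unescaping and the register plumbing are omitted):

#include <cassert>
#include <cstring>

int main() {
  const char* format_base = "x=%d y=%d";
  char scratch[32];
  const char* chunks[4];
  int placeholder_count = 0;
  char* out = scratch;
  for (const char* in = format_base;; ++in) {
    if (*in == '%') {
      *out++ = '\0';                      // terminate the previous chunk
      chunks[placeholder_count++] = out;  // next chunk starts at this '%'
    }
    *out++ = *in;
    if (*in == '\0') break;
  }
  assert(placeholder_count == 2);
  assert(std::strcmp(scratch, "x=") == 0);       // leading, placeholder-free chunk
  assert(std::strcmp(chunks[0], "%d y=") == 0);  // one placeholder per chunk
  assert(std::strcmp(chunks[1], "%d") == 0);
  return 0;
}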
+ *format_scratch++ = '\0'; + chunks[placeholder_count++] = format_scratch; + *format_scratch++ = format_base[i]; + } + } + } + DCHECK(format_scratch <= (format + length + arg_count)); + CHECK(placeholder_count == arg_count); + + // Finally, call printf with each chunk, passing the appropriate register + // argument. Normally, printf returns the number of bytes transmitted, so we + // can emulate a single printf call by adding the result from each chunk. If + // any call returns a negative (error) value, though, just return that value. + + fprintf(stream_, "%s", clr_printf); // Because '\0' is inserted before each placeholder, the first string in // 'format' contains no format placeholders and should be printed literally. @@ -4035,7 +5875,7 @@ void Simulator::LocalMonitor::Clear() { size_ = TransactionSize::None; } -void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) { +void Simulator::LocalMonitor::NotifyLoad() { if (access_state_ == MonitorAccess::Exclusive) { // A non exclusive load could clear the local monitor. As a result, it's // most strict to unconditionally clear the local monitor on load. @@ -4050,7 +5890,7 @@ void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr, size_ = size; } -void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) { +void Simulator::LocalMonitor::NotifyStore() { if (access_state_ == MonitorAccess::Exclusive) { // A non exclusive store could clear the local monitor. As a result, it's // most strict to unconditionally clear the local monitor on store. @@ -4098,7 +5938,7 @@ void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked( } void Simulator::GlobalMonitor::Processor::NotifyStore_Locked( - uintptr_t addr, bool is_requesting_processor) { + bool is_requesting_processor) { if (access_state_ == MonitorAccess::Exclusive) { // A non exclusive store could clear the global monitor. As a result, it's // most strict to unconditionally clear global monitors on store. @@ -4144,12 +5984,11 @@ void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr, PrependProcessor_Locked(processor); } -void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr, - Processor* processor) { +void Simulator::GlobalMonitor::NotifyStore_Locked(Processor* processor) { // Notify each processor of the store operation. for (Processor* iter = head_; iter; iter = iter->next_) { bool is_requesting_processor = iter == processor; - iter->NotifyStore_Locked(addr, is_requesting_processor); + iter->NotifyStore_Locked(is_requesting_processor); } } diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h index 48fc1c7bc61990..c82bdd8c7a48cf 100644 --- a/deps/v8/src/arm64/simulator-arm64.h +++ b/deps/v8/src/arm64/simulator-arm64.h @@ -67,6 +67,239 @@ class SimulatorStack : public v8::internal::AllStatic { #else // !defined(USE_SIMULATOR) +// Assemble the specified IEEE-754 components into the target type and apply +// appropriate rounding. +// sign: 0 = positive, 1 = negative +// exponent: Unbiased IEEE-754 exponent. +// mantissa: The mantissa of the input. The top bit (which is not encoded for +// normal IEEE-754 values) must not be omitted. This bit has the +// value 'pow(2, exponent)'. +// +// The input value is assumed to be a normalized value. That is, the input may +// not be infinity or NaN. If the source value is subnormal, it must be +// normalized before calling this function such that the highest set bit in the +// mantissa has the value 'pow(2, exponent)'. 
+// +// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than +// calling a templated FPRound. +template +T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa, + FPRounding round_mode) { + static_assert((sizeof(T) * 8) >= (1 + ebits + mbits), + "destination type T not large enough"); + static_assert(sizeof(T) <= sizeof(uint64_t), + "maximum size of destination type T is 64 bits"); + static_assert(std::is_unsigned::value, + "destination type T must be unsigned"); + + DCHECK((sign == 0) || (sign == 1)); + + // Only FPTieEven and FPRoundOdd rounding modes are implemented. + DCHECK((round_mode == FPTieEven) || (round_mode == FPRoundOdd)); + + // Rounding can promote subnormals to normals, and normals to infinities. For + // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be + // encodable as a float, but rounding based on the low-order mantissa bits + // could make it overflow. With ties-to-even rounding, this value would become + // an infinity. + + // ---- Rounding Method ---- + // + // The exponent is irrelevant in the rounding operation, so we treat the + // lowest-order bit that will fit into the result ('onebit') as having + // the value '1'. Similarly, the highest-order bit that won't fit into + // the result ('halfbit') has the value '0.5'. The 'point' sits between + // 'onebit' and 'halfbit': + // + // These bits fit into the result. + // |---------------------| + // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + // || + // / | + // / halfbit + // onebit + // + // For subnormal outputs, the range of representable bits is smaller and + // the position of onebit and halfbit depends on the exponent of the + // input, but the method is otherwise similar. + // + // onebit(frac) + // | + // | halfbit(frac) halfbit(adjusted) + // | / / + // | | | + // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00 + // 0b00.0... -> 0b00.0... -> 0b00 + // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00 + // 0b00.1... -> 0b00.1... -> 0b01 + // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01 + // 0b01.0... -> 0b01.0... -> 0b01 + // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10 + // 0b01.1... -> 0b01.1... -> 0b10 + // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10 + // 0b10.0... -> 0b10.0... -> 0b10 + // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10 + // 0b10.1... -> 0b10.1... -> 0b11 + // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11 + // ... / | / | + // / | / | + // / | + // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / | + // + // mantissa = (mantissa >> shift) + halfbit(adjusted); + + const int mantissa_offset = 0; + const int exponent_offset = mantissa_offset + mbits; + const int sign_offset = exponent_offset + ebits; + DCHECK_EQ(sign_offset, static_cast(sizeof(T) * 8 - 1)); + + // Bail out early for zero inputs. + if (mantissa == 0) { + return static_cast(sign << sign_offset); + } + + // If all bits in the exponent are set, the value is infinite or NaN. + // This is true for all binary IEEE-754 formats. + const int infinite_exponent = (1 << ebits) - 1; + const int max_normal_exponent = infinite_exponent - 1; + + // Apply the exponent bias to encode it for the result. Doing this early makes + // it easy to detect values that will be infinite or subnormal. + exponent += max_normal_exponent >> 1; + + if (exponent > max_normal_exponent) { + // Overflow: the input is too large for the result type to represent. + if (round_mode == FPTieEven) { + // FPTieEven rounding mode handles overflows using infinities. 
+ exponent = infinite_exponent; + mantissa = 0; + } else { + DCHECK_EQ(round_mode, FPRoundOdd); + // FPRoundOdd rounding mode handles overflows using the largest magnitude + // normal number. + exponent = max_normal_exponent; + mantissa = (UINT64_C(1) << exponent_offset) - 1; + } + return static_cast((sign << sign_offset) | + (exponent << exponent_offset) | + (mantissa << mantissa_offset)); + } + + // Calculate the shift required to move the top mantissa bit to the proper + // place in the destination type. + const int highest_significant_bit = 63 - CountLeadingZeros(mantissa, 64); + int shift = highest_significant_bit - mbits; + + if (exponent <= 0) { + // The output will be subnormal (before rounding). + // For subnormal outputs, the shift must be adjusted by the exponent. The +1 + // is necessary because the exponent of a subnormal value (encoded as 0) is + // the same as the exponent of the smallest normal value (encoded as 1). + shift += -exponent + 1; + + // Handle inputs that would produce a zero output. + // + // Shifts higher than highest_significant_bit+1 will always produce a zero + // result. A shift of exactly highest_significant_bit+1 might produce a + // non-zero result after rounding. + if (shift > (highest_significant_bit + 1)) { + if (round_mode == FPTieEven) { + // The result will always be +/-0.0. + return static_cast(sign << sign_offset); + } else { + DCHECK_EQ(round_mode, FPRoundOdd); + DCHECK_NE(mantissa, 0U); + // For FPRoundOdd, if the mantissa is too small to represent and + // non-zero return the next "odd" value. + return static_cast((sign << sign_offset) | 1); + } + } + + // Properly encode the exponent for a subnormal output. + exponent = 0; + } else { + // Clear the topmost mantissa bit, since this is not encoded in IEEE-754 + // normal values. + mantissa &= ~(UINT64_C(1) << highest_significant_bit); + } + + if (shift > 0) { + if (round_mode == FPTieEven) { + // We have to shift the mantissa to the right. Some precision is lost, so + // we need to apply rounding. + uint64_t onebit_mantissa = (mantissa >> (shift)) & 1; + uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1; + uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa); + uint64_t adjusted = mantissa - adjustment; + T halfbit_adjusted = (adjusted >> (shift - 1)) & 1; + + T result = + static_cast((sign << sign_offset) | (exponent << exponent_offset) | + ((mantissa >> shift) << mantissa_offset)); + + // A very large mantissa can overflow during rounding. If this happens, + // the exponent should be incremented and the mantissa set to 1.0 + // (encoded as 0). Applying halfbit_adjusted after assembling the float + // has the nice side-effect that this case is handled for free. + // + // This also handles cases where a very large finite value overflows to + // infinity, or where a very large subnormal value overflows to become + // normal. + return result + halfbit_adjusted; + } else { + DCHECK_EQ(round_mode, FPRoundOdd); + // If any bits at position halfbit or below are set, onebit (ie. the + // bottom bit of the resulting mantissa) must be set. + uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1); + if (fractional_bits != 0) { + mantissa |= UINT64_C(1) << shift; + } + + return static_cast((sign << sign_offset) | + (exponent << exponent_offset) | + ((mantissa >> shift) << mantissa_offset)); + } + } else { + // We have to shift the mantissa to the left (or not at all). The input + // mantissa is exactly representable in the output mantissa, so apply no + // rounding correction. 
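The onebit/halfbit arithmetic above implements the ties-to-even table from the earlier comment. Pulling just that adjustment into a standalone helper shows the behaviour on the comment's examples (illustration only):

#include <cassert>
#include <cstdint>

// Shift 'mantissa' right by 'shift' bits with ties-to-even, using the same
// onebit/halfbit adjustment as the FPTieEven branch above.
uint64_t RoundTiesToEven(uint64_t mantissa, int shift) {
  uint64_t onebit = (mantissa >> shift) & 1;
  uint64_t halfbit = (mantissa >> (shift - 1)) & 1;
  uint64_t adjusted = mantissa - (halfbit & ~onebit);
  uint64_t halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
  return (mantissa >> shift) + halfbit_adjusted;
}

int main() {
  // Matches the table in the comment (two fractional bits, shift == 2):
  assert(RoundTiesToEven(0x6, 2) == 0x2);  // 0b01.10 (tie) -> 0b10
  assert(RoundTiesToEven(0x2, 2) == 0x0);  // 0b00.10 (tie) -> 0b00 (stays even)
  assert(RoundTiesToEven(0x3, 2) == 0x1);  // 0b00.11       -> 0b01
  return 0;
}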
+ return static_cast((sign << sign_offset) | + (exponent << exponent_offset) | + ((mantissa << -shift) << mantissa_offset)); + } +} + +// Representation of memory, with typed getters and setters for access. +class SimMemory { + public: + template + static T AddressUntag(T address) { + // Cast the address using a C-style cast. A reinterpret_cast would be + // appropriate, but it can't cast one integral type to another. + uint64_t bits = (uint64_t)address; + return (T)(bits & ~kAddressTagMask); + } + + template + static T Read(A address) { + T value; + address = AddressUntag(address); + DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); + memcpy(&value, reinterpret_cast(address), sizeof(value)); + return value; + } + + template + static void Write(A address, T value) { + address = AddressUntag(address); + DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); + memcpy(reinterpret_cast(address), &value, sizeof(value)); + } +}; // The proper way to initialize a simulated system register (such as NZCV) is as // follows: @@ -122,29 +355,330 @@ class SimSystemRegister { // Represent a register (r0-r31, v0-v31). +template class SimRegisterBase { public: template void Set(T new_value) { - value_ = 0; + static_assert(sizeof(new_value) <= kSizeInBytes, + "Size of new_value must be <= size of template type."); + if (sizeof(new_value) < kSizeInBytes) { + // All AArch64 registers are zero-extending. + memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value)); + } memcpy(&value_, &new_value, sizeof(T)); + NotifyRegisterWrite(); } - template - T Get() const { + // Insert a typed value into a register, leaving the rest of the register + // unchanged. The lane parameter indicates where in the register the value + // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where + // 0 represents the least significant bits. + template + void Insert(int lane, T new_value) { + DCHECK_GE(lane, 0); + DCHECK_LE(sizeof(new_value) + (lane * sizeof(new_value)), + static_cast(kSizeInBytes)); + memcpy(&value_[lane * sizeof(new_value)], &new_value, sizeof(new_value)); + NotifyRegisterWrite(); + } + + template + T Get(int lane = 0) const { T result; - memcpy(&result, &value_, sizeof(T)); + DCHECK_GE(lane, 0); + DCHECK_LE(sizeof(result) + (lane * sizeof(result)), + static_cast(kSizeInBytes)); + memcpy(&result, &value_[lane * sizeof(result)], sizeof(result)); return result; } + // TODO(all): Make this return a map of updated bytes, so that we can + // highlight updated lanes for load-and-insert. (That never happens for scalar + // code, but NEON has some instructions that can update individual lanes.) + bool WrittenSinceLastLog() const { return written_since_last_log_; } + + void NotifyRegisterLogged() { written_since_last_log_ = false; } + protected: - int64_t value_; + uint8_t value_[kSizeInBytes]; + + // Helpers to aid with register tracing. + bool written_since_last_log_; + + void NotifyRegisterWrite() { written_since_last_log_ = true; } }; +typedef SimRegisterBase SimRegister; // r0-r31 +typedef SimRegisterBase SimVRegister; // v0-v31 + +// Representation of a vector register, with typed getters and setters for lanes +// and additional information to represent lane state. 
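SimRegisterBase above stores every register as a raw byte array, zero-extends narrow writes and addresses lanes by index. A stripped-down sketch of those three behaviours (the names here are illustrative, not the real template):

#include <cassert>
#include <cstdint>
#include <cstring>

struct FakeReg {
  uint8_t value[16];
  template <typename T>
  void Set(T v) {
    std::memset(value, 0, sizeof(value));  // zero-extend, as AArch64 writes do
    std::memcpy(value, &v, sizeof(v));
  }
  template <typename T>
  void Insert(int lane, T v) {
    std::memcpy(value + lane * sizeof(T), &v, sizeof(v));
  }
  template <typename T>
  T Get(int lane = 0) const {
    T r;
    std::memcpy(&r, value + lane * sizeof(T), sizeof(r));
    return r;
  }
};

int main() {
  FakeReg reg;
  reg.Set(UINT64_C(0xffffffffffffffff));
  reg.Set(uint32_t{0x12345678});                // a W write zero-extends
  assert(reg.Get<uint64_t>() == 0x12345678);
  reg.Insert(2, uint32_t{0xdeadbeef});          // lane 2 of a 4S view: bytes 8..11
  assert(reg.Get<uint32_t>(2) == 0xdeadbeef);
  assert(reg.Get<uint32_t>(0) == 0x12345678);   // other lanes untouched
  return 0;
}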
+class LogicVRegister { + public: + inline LogicVRegister(SimVRegister& other) // NOLINT + : register_(other) { + for (unsigned i = 0; i < arraysize(saturated_); i++) { + saturated_[i] = kNotSaturated; + } + for (unsigned i = 0; i < arraysize(round_); i++) { + round_[i] = false; + } + } + + int64_t Int(VectorFormat vform, int index) const { + int64_t element; + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + element = register_.Get(index); + break; + case 16: + element = register_.Get(index); + break; + case 32: + element = register_.Get(index); + break; + case 64: + element = register_.Get(index); + break; + default: + UNREACHABLE(); + return 0; + } + return element; + } + + uint64_t Uint(VectorFormat vform, int index) const { + uint64_t element; + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + element = register_.Get(index); + break; + case 16: + element = register_.Get(index); + break; + case 32: + element = register_.Get(index); + break; + case 64: + element = register_.Get(index); + break; + default: + UNREACHABLE(); + return 0; + } + return element; + } + + uint64_t UintLeftJustified(VectorFormat vform, int index) const { + return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform)); + } + + int64_t IntLeftJustified(VectorFormat vform, int index) const { + uint64_t value = UintLeftJustified(vform, index); + int64_t result; + memcpy(&result, &value, sizeof(result)); + return result; + } + + void SetInt(VectorFormat vform, int index, int64_t value) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + register_.Insert(index, static_cast(value)); + break; + case 16: + register_.Insert(index, static_cast(value)); + break; + case 32: + register_.Insert(index, static_cast(value)); + break; + case 64: + register_.Insert(index, static_cast(value)); + break; + default: + UNREACHABLE(); + return; + } + } + + void SetIntArray(VectorFormat vform, const int64_t* src) const { + ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetInt(vform, i, src[i]); + } + } + + void SetUint(VectorFormat vform, int index, uint64_t value) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + register_.Insert(index, static_cast(value)); + break; + case 16: + register_.Insert(index, static_cast(value)); + break; + case 32: + register_.Insert(index, static_cast(value)); + break; + case 64: + register_.Insert(index, static_cast(value)); + break; + default: + UNREACHABLE(); + return; + } + } + + void SetUintArray(VectorFormat vform, const uint64_t* src) const { + ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetUint(vform, i, src[i]); + } + } + + void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const; + + void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const; + + template + T Float(int index) const { + return register_.Get(index); + } + + template + void SetFloat(int index, T value) const { + register_.Insert(index, value); + } + + // When setting a result in a register of size less than Q, the top bits of + // the Q register must be cleared. + void ClearForWrite(VectorFormat vform) const { + unsigned size = RegisterSizeInBytesFromFormat(vform); + for (unsigned i = size; i < kQRegSize; i++) { + SetUint(kFormat16B, i, 0); + } + } -typedef SimRegisterBase SimRegister; // r0-r31 -typedef SimRegisterBase SimFPRegister; // v0-v31 + // Saturation state for each lane of a vector. 
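The left-justified accessors above shift a lane value so that its top bit lines up with bit 63, which lets width-independent 64-bit code reason about overflow and saturation for any lane size. A one-assert illustration:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned lane_size_in_bits = 16;  // an H-sized lane
  const uint64_t lane = 0x8001;
  const uint64_t left_justified = lane << (64 - lane_size_in_bits);
  assert(left_justified == UINT64_C(0x8001000000000000));
  return 0;
}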
+ enum Saturation { + kNotSaturated = 0, + kSignedSatPositive = 1 << 0, + kSignedSatNegative = 1 << 1, + kSignedSatMask = kSignedSatPositive | kSignedSatNegative, + kSignedSatUndefined = kSignedSatMask, + kUnsignedSatPositive = 1 << 2, + kUnsignedSatNegative = 1 << 3, + kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative, + kUnsignedSatUndefined = kUnsignedSatMask + }; + + // Getters for saturation state. + Saturation GetSignedSaturation(int index) { + return static_cast(saturated_[index] & kSignedSatMask); + } + + Saturation GetUnsignedSaturation(int index) { + return static_cast(saturated_[index] & kUnsignedSatMask); + } + + // Setters for saturation state. + void ClearSat(int index) { saturated_[index] = kNotSaturated; } + + void SetSignedSat(int index, bool positive) { + SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative); + } + void SetUnsignedSat(int index, bool positive) { + SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative); + } + + void SetSatFlag(int index, Saturation sat) { + saturated_[index] = static_cast(saturated_[index] | sat); + DCHECK_NE(sat & kUnsignedSatMask, kUnsignedSatUndefined); + DCHECK_NE(sat & kSignedSatMask, kSignedSatUndefined); + } + + // Saturate lanes of a vector based on saturation state. + LogicVRegister& SignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetSignedSaturation(i); + if (sat == kSignedSatPositive) { + SetInt(vform, i, MaxIntFromFormat(vform)); + } else if (sat == kSignedSatNegative) { + SetInt(vform, i, MinIntFromFormat(vform)); + } + } + return *this; + } + + LogicVRegister& UnsignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetUnsignedSaturation(i); + if (sat == kUnsignedSatPositive) { + SetUint(vform, i, MaxUintFromFormat(vform)); + } else if (sat == kUnsignedSatNegative) { + SetUint(vform, i, 0); + } + } + return *this; + } + + // Getter for rounding state. + bool GetRounding(int index) { return round_[index]; } + + // Setter for rounding state. + void SetRounding(int index, bool round) { round_[index] = round; } + + // Round lanes of a vector based on rounding state. + LogicVRegister& Round(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetUint(vform, i, Uint(vform, i) + (GetRounding(i) ? 1 : 0)); + } + return *this; + } + + // Unsigned halve lanes of a vector, and use the saturation state to set the + // top bit. + LogicVRegister& Uhalve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t val = Uint(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetUnsignedSaturation(i) != kNotSaturated) { + // If the operation causes unsigned saturation, the bit shifted into the + // most significant bit must be set. + val |= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + // Signed halve lanes of a vector, and use the carry state to set the top bit. + LogicVRegister& Halve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t val = Int(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetSignedSaturation(i) != kNotSaturated) { + // If the operation causes signed saturation, the sign bit must be + // inverted. 
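Uhalve() above records the bit shifted out of each lane as rounding state, and Round() adds it back in; composed, the two compute a per-lane rounding halve. A scalar sketch of that composition (illustration only):

#include <cassert>
#include <cstdint>

uint8_t RoundingHalve(uint8_t x) {
  const bool round = (x & 1) != 0;   // rounding state captured by Uhalve()
  const uint8_t halved = x >> 1;
  return halved + (round ? 1 : 0);   // added back by Round()
}

int main() {
  assert(RoundingHalve(7) == 4);  // equivalent to (7 + 1) >> 1
  assert(RoundingHalve(6) == 3);
  return 0;
}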
+ val ^= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + private: + SimVRegister& register_; + + // Allocate one saturation state entry per lane; largest register is type Q, + // and lanes can be a minimum of one byte wide. + Saturation saturated_[kQRegSize]; + + // Allocate one rounding state entry per lane. + bool round_[kQRegSize]; +}; class Simulator : public DecoderVisitor { public: @@ -311,6 +845,7 @@ class Simulator : public DecoderVisitor { CheckBreakNext(); Decode(pc_); increment_pc(); + LogAllWrittenRegisters(); CheckBreakpoints(); } @@ -329,7 +864,7 @@ class Simulator : public DecoderVisitor { // template T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const { - DCHECK(code < kNumberOfRegisters); + DCHECK_LT(code, static_cast(kNumberOfRegisters)); if (IsZeroRegister(code, r31mode)) { return 0; } @@ -345,6 +880,8 @@ class Simulator : public DecoderVisitor { return reg(code, r31mode); } + enum RegLogMode { LogRegWrites, NoRegLog }; + // Write 'value' into an integer register. The value is zero-extended. This // behaviour matches AArch64 register writes. template @@ -369,7 +906,7 @@ class Simulator : public DecoderVisitor { template void set_reg_no_log(unsigned code, T value, Reg31Mode r31mode = Reg31IsZeroRegister) { - DCHECK(code < kNumberOfRegisters); + DCHECK_LT(code, static_cast(kNumberOfRegisters)); if (!IsZeroRegister(code, r31mode)) { registers_[code].Set(value); } @@ -388,16 +925,39 @@ class Simulator : public DecoderVisitor { // Commonly-used special cases. template void set_lr(T value) { - DCHECK(sizeof(T) == kPointerSize); + DCHECK_EQ(sizeof(T), static_cast(kPointerSize)); set_reg(kLinkRegCode, value); } template void set_sp(T value) { - DCHECK(sizeof(T) == kPointerSize); + DCHECK_EQ(sizeof(T), static_cast(kPointerSize)); set_reg(31, value, Reg31IsStackPointer); } + // Vector register accessors. + // These are equivalent to the integer register accessors, but for vector + // registers. + + // A structure for representing a 128-bit Q register. + struct qreg_t { + uint8_t val[kQRegSize]; + }; + + // Basic accessor: read the register as the specified type. + template + T vreg(unsigned code) const { + static_assert((sizeof(T) == kBRegSize) || (sizeof(T) == kHRegSize) || + (sizeof(T) == kSRegSize) || (sizeof(T) == kDRegSize) || + (sizeof(T) == kQRegSize), + "Template type must match size of register."); + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + + return vregisters_[code].Get(); + } + + inline SimVRegister& vreg(unsigned code) { return vregisters_[code]; } + int64_t sp() { return xreg(31, Reg31IsStackPointer); } int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); } int64_t fp() { @@ -407,87 +967,134 @@ class Simulator : public DecoderVisitor { Address get_sp() const { return reg
(31, Reg31IsStackPointer); } - template - T fpreg(unsigned code) const { - DCHECK(code < kNumberOfRegisters); - return fpregisters_[code].Get(); - } + // Common specialized accessors for the vreg() template. + uint8_t breg(unsigned code) const { return vreg(code); } - // Common specialized accessors for the fpreg() template. - float sreg(unsigned code) const { - return fpreg(code); - } + float hreg(unsigned code) const { return vreg(code); } - uint32_t sreg_bits(unsigned code) const { - return fpreg(code); - } + float sreg(unsigned code) const { return vreg(code); } - double dreg(unsigned code) const { - return fpreg(code); - } + uint32_t sreg_bits(unsigned code) const { return vreg(code); } - uint64_t dreg_bits(unsigned code) const { - return fpreg(code); - } + double dreg(unsigned code) const { return vreg(code); } + + uint64_t dreg_bits(unsigned code) const { return vreg(code); } + + qreg_t qreg(unsigned code) const { return vreg(code); } + + // As above, with parameterized size and return type. The value is + // either zero-extended or truncated to fit, as required. + template + T vreg(unsigned size, unsigned code) const { + uint64_t raw = 0; + T result; - double fpreg(unsigned size, unsigned code) const { switch (size) { - case kSRegSizeInBits: return sreg(code); - case kDRegSizeInBits: return dreg(code); + case kSRegSize: + raw = vreg(code); + break; + case kDRegSize: + raw = vreg(code); + break; default: UNREACHABLE(); - return 0.0; } + + static_assert(sizeof(result) <= sizeof(raw), + "Template type must be <= 64 bits."); + // Copy the result and truncate to fit. This assumes a little-endian host. + memcpy(&result, &raw, sizeof(result)); + return result; } // Write 'value' into a floating-point register. The value is zero-extended. // This behaviour matches AArch64 register writes. - template - void set_fpreg(unsigned code, T value) { - set_fpreg_no_log(code, value); - - if (sizeof(value) <= kSRegSize) { - LogFPRegister(code, kPrintSRegValue); - } else { - LogFPRegister(code, kPrintDRegValue); + template + void set_vreg(unsigned code, T value, RegLogMode log_mode = LogRegWrites) { + static_assert( + (sizeof(value) == kBRegSize) || (sizeof(value) == kHRegSize) || + (sizeof(value) == kSRegSize) || (sizeof(value) == kDRegSize) || + (sizeof(value) == kQRegSize), + "Template type must match size of register."); + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + vregisters_[code].Set(value); + + if (log_mode == LogRegWrites) { + LogVRegister(code, GetPrintRegisterFormat(value)); } } - // Common specialized accessors for the set_fpreg() template. - void set_sreg(unsigned code, float value) { - set_fpreg(code, value); + // Common specialized accessors for the set_vreg() template. 
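The size-parameterised vreg() accessor above truncates by memcpy-ing only the low-order bytes of the raw value, which, as its comment notes, assumes a little-endian host. A minimal demonstration of that truncation (the assert holds only on little-endian hosts):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t raw = 0x1122334455667788;
  uint32_t result;
  std::memcpy(&result, &raw, sizeof(result));  // copy the first four bytes
  assert(result == 0x55667788);                // low 32 bits on little-endian
  return 0;
}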
+ void set_breg(unsigned code, int8_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_hreg(unsigned code, int16_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_sreg(unsigned code, float value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); + } + + void set_sreg_bits(unsigned code, uint32_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); } - void set_sreg_bits(unsigned code, uint32_t value) { - set_fpreg(code, value); + void set_dreg(unsigned code, double value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); } - void set_dreg(unsigned code, double value) { - set_fpreg(code, value); + void set_dreg_bits(unsigned code, uint64_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); } - void set_dreg_bits(unsigned code, uint64_t value) { - set_fpreg(code, value); + void set_qreg(unsigned code, qreg_t value, + RegLogMode log_mode = LogRegWrites) { + set_vreg(code, value, log_mode); } // As above, but don't automatically log the register update. template - void set_fpreg_no_log(unsigned code, T value) { - DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize)); - DCHECK(code < kNumberOfFPRegisters); - fpregisters_[code].Set(value); + void set_vreg_no_log(unsigned code, T value) { + STATIC_ASSERT((sizeof(value) == kBRegSize) || + (sizeof(value) == kHRegSize) || + (sizeof(value) == kSRegSize) || + (sizeof(value) == kDRegSize) || (sizeof(value) == kQRegSize)); + DCHECK_LT(code, static_cast(kNumberOfVRegisters)); + vregisters_[code].Set(value); + } + + void set_breg_no_log(unsigned code, uint8_t value) { + set_vreg_no_log(code, value); + } + + void set_hreg_no_log(unsigned code, uint16_t value) { + set_vreg_no_log(code, value); } void set_sreg_no_log(unsigned code, float value) { - set_fpreg_no_log(code, value); + set_vreg_no_log(code, value); } void set_dreg_no_log(unsigned code, double value) { - set_fpreg_no_log(code, value); + set_vreg_no_log(code, value); + } + + void set_qreg_no_log(unsigned code, qreg_t value) { + set_vreg_no_log(code, value); } SimSystemRegister& nzcv() { return nzcv_; } SimSystemRegister& fpcr() { return fpcr_; } + FPRounding RMode() { return static_cast(fpcr_.RMode()); } + bool DN() { return fpcr_.DN() != 0; } // Debug helpers @@ -514,66 +1121,195 @@ class Simulator : public DecoderVisitor { // Print all registers of the specified types. void PrintRegisters(); - void PrintFPRegisters(); + void PrintVRegisters(); void PrintSystemRegisters(); - // Like Print* (above), but respect log_parameters(). - void LogSystemRegisters() { - if (log_parameters() & LOG_SYS_REGS) PrintSystemRegisters(); + // As above, but only print the registers that have been updated. + void PrintWrittenRegisters(); + void PrintWrittenVRegisters(); + + // As above, but respect LOG_REG and LOG_VREG. + void LogWrittenRegisters() { + if (log_parameters() & LOG_REGS) PrintWrittenRegisters(); + } + void LogWrittenVRegisters() { + if (log_parameters() & LOG_VREGS) PrintWrittenVRegisters(); + } + void LogAllWrittenRegisters() { + LogWrittenRegisters(); + LogWrittenVRegisters(); + } + + // Specify relevant register formats for Print(V)Register and related helpers. + enum PrintRegisterFormat { + // The lane size. 
+ kPrintRegLaneSizeB = 0 << 0, + kPrintRegLaneSizeH = 1 << 0, + kPrintRegLaneSizeS = 2 << 0, + kPrintRegLaneSizeW = kPrintRegLaneSizeS, + kPrintRegLaneSizeD = 3 << 0, + kPrintRegLaneSizeX = kPrintRegLaneSizeD, + kPrintRegLaneSizeQ = 4 << 0, + + kPrintRegLaneSizeOffset = 0, + kPrintRegLaneSizeMask = 7 << 0, + + // The lane count. + kPrintRegAsScalar = 0, + kPrintRegAsDVector = 1 << 3, + kPrintRegAsQVector = 2 << 3, + + kPrintRegAsVectorMask = 3 << 3, + + // Indicate floating-point format lanes. (This flag is only supported for S- + // and D-sized lanes.) + kPrintRegAsFP = 1 << 5, + + // Supported combinations. + + kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar, + kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar, + kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + + kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar, + kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector, + kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector, + kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar, + kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector, + kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector, + kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar, + kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector, + kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector, + kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP, + kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar, + kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector, + kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar + }; + + unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) { + return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset; } - void LogRegisters() { - if (log_parameters() & LOG_REGS) PrintRegisters(); + + unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) { + return 1 << GetPrintRegLaneSizeInBytesLog2(format); } - void LogFPRegisters() { - if (log_parameters() & LOG_FP_REGS) PrintFPRegisters(); + + unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) { + if (format & kPrintRegAsDVector) return kDRegSizeLog2; + if (format & kPrintRegAsQVector) return kQRegSizeLog2; + + // Scalar types. + return GetPrintRegLaneSizeInBytesLog2(format); } - // Specify relevant register sizes, for PrintFPRegister. - // - // These values are bit masks; they can be combined in case multiple views of - // a machine register are interesting. 
- enum PrintFPRegisterSizes { - kPrintDRegValue = 1 << kDRegSize, - kPrintSRegValue = 1 << kSRegSize, - kPrintAllFPRegValues = kPrintDRegValue | kPrintSRegValue - }; + unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) { + return 1 << GetPrintRegSizeInBytesLog2(format); + } + + unsigned GetPrintRegLaneCount(PrintRegisterFormat format) { + unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format); + unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format); + DCHECK_GE(reg_size_log2, lane_size_log2); + return 1 << (reg_size_log2 - lane_size_log2); + } + + template + PrintRegisterFormat GetPrintRegisterFormat(T value) { + return GetPrintRegisterFormatForSize(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(double value) { + static_assert(sizeof(value) == kDRegSize, + "D register must be size of double."); + return GetPrintRegisterFormatForSizeFP(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(float value) { + static_assert(sizeof(value) == kSRegSize, + "S register must be size of float."); + return GetPrintRegisterFormatForSizeFP(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform); + PrintRegisterFormat GetPrintRegisterFormatFP(VectorFormat vform); + + PrintRegisterFormat GetPrintRegisterFormatForSize(size_t reg_size, + size_t lane_size); + + PrintRegisterFormat GetPrintRegisterFormatForSize(size_t size) { + return GetPrintRegisterFormatForSize(size, size); + } + + PrintRegisterFormat GetPrintRegisterFormatForSizeFP(size_t size) { + switch (size) { + default: + UNREACHABLE(); + case kDRegSize: + return kPrintDReg; + case kSRegSize: + return kPrintSReg; + } + } + + PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) { + if ((GetPrintRegLaneSizeInBytes(format) == kSRegSize) || + (GetPrintRegLaneSizeInBytes(format) == kDRegSize)) { + return static_cast(format | kPrintRegAsFP); + } + return format; + } // Print individual register values (after update). void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer); - void PrintFPRegister(unsigned code, - PrintFPRegisterSizes sizes = kPrintAllFPRegValues); + void PrintVRegister(unsigned code, PrintRegisterFormat sizes); void PrintSystemRegister(SystemRegister id); // Like Print* (above), but respect log_parameters(). void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) { if (log_parameters() & LOG_REGS) PrintRegister(code, r31mode); } - void LogFPRegister(unsigned code, - PrintFPRegisterSizes sizes = kPrintAllFPRegValues) { - if (log_parameters() & LOG_FP_REGS) PrintFPRegister(code, sizes); + void LogVRegister(unsigned code, PrintRegisterFormat format) { + if (log_parameters() & LOG_VREGS) PrintVRegister(code, format); } void LogSystemRegister(SystemRegister id) { if (log_parameters() & LOG_SYS_REGS) PrintSystemRegister(id); } // Print memory accesses. 
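In the PrintRegisterFormat scheme above, the lane size is stored as a log2 in the low bits and the vector shape selects the register size, so the lane count falls out of a single shift. A hard-coded check for the 4S case (numbers only, not the real enum values):

#include <cassert>

int main() {
  const unsigned lane_size_log2 = 2;  // S lane: 4 bytes
  const unsigned reg_size_log2 = 4;   // Q register: 16 bytes
  assert((1u << (reg_size_log2 - lane_size_log2)) == 4);  // a 4S view has 4 lanes
  return 0;
}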
- void PrintRead(uintptr_t address, size_t size, unsigned reg_code); - void PrintReadFP(uintptr_t address, size_t size, unsigned reg_code); - void PrintWrite(uintptr_t address, size_t size, unsigned reg_code); - void PrintWriteFP(uintptr_t address, size_t size, unsigned reg_code); + void PrintRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format); + void PrintWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format); + void PrintVRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane); + void PrintVWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane); // Like Print* (above), but respect log_parameters(). - void LogRead(uintptr_t address, size_t size, unsigned reg_code) { - if (log_parameters() & LOG_REGS) PrintRead(address, size, reg_code); - } - void LogReadFP(uintptr_t address, size_t size, unsigned reg_code) { - if (log_parameters() & LOG_FP_REGS) PrintReadFP(address, size, reg_code); - } - void LogWrite(uintptr_t address, size_t size, unsigned reg_code) { - if (log_parameters() & LOG_WRITE) PrintWrite(address, size, reg_code); + void LogRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format) { + if (log_parameters() & LOG_REGS) PrintRead(address, reg_code, format); + } + void LogWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format) { + if (log_parameters() & LOG_WRITE) PrintWrite(address, reg_code, format); + } + void LogVRead(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane = 0) { + if (log_parameters() & LOG_VREGS) { + PrintVRead(address, reg_code, format, lane); + } } - void LogWriteFP(uintptr_t address, size_t size, unsigned reg_code) { - if (log_parameters() & LOG_WRITE) PrintWriteFP(address, size, reg_code); + void LogVWrite(uintptr_t address, unsigned reg_code, + PrintRegisterFormat format, unsigned lane = 0) { + if (log_parameters() & LOG_WRITE) { + PrintVWrite(address, reg_code, format, lane); + } } int log_parameters() { return log_parameters_; } @@ -592,6 +1328,14 @@ class Simulator : public DecoderVisitor { } } + // Helper functions for register tracing. + void PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode, + int size_in_bytes = kXRegSize); + void PrintVRegisterRawHelper(unsigned code, int bytes = kQRegSize, + int lsb = 0); + void PrintVRegisterFPHelper(unsigned code, unsigned lane_size_in_bytes, + int lane_count = 1, int rightmost_lane = 0); + static inline const char* WRegNameForCode(unsigned code, Reg31Mode mode = Reg31IsZeroRegister); static inline const char* XRegNameForCode(unsigned code, @@ -639,7 +1383,6 @@ class Simulator : public DecoderVisitor { return true; default: UNREACHABLE(); - return false; } } @@ -666,6 +1409,10 @@ class Simulator : public DecoderVisitor { void LoadStoreWriteBack(unsigned addr_reg, int64_t offset, AddrMode addrmode); + void NEONLoadStoreMultiStructHelper(const Instruction* instr, + AddrMode addr_mode); + void NEONLoadStoreSingleStructHelper(const Instruction* instr, + AddrMode addr_mode); void CheckMemoryAccess(uintptr_t address, uintptr_t stack); // Memory read helpers. 
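The ld2/ld3/ld4 helpers declared a little further below model the structured loads, which de-interleave consecutive memory elements across their destination registers. A plain-array sketch of the LD2 case (illustration only, without the LogicVRegister plumbing):

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t memory[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  uint8_t dst1[4], dst2[4];
  for (int i = 0; i < 4; i++) {
    dst1[i] = memory[2 * i];      // even elements go to the first register
    dst2[i] = memory[2 * i + 1];  // odd elements go to the second register
  }
  assert(dst1[0] == 1 && dst2[0] == 2);
  assert(dst1[3] == 7 && dst2[3] == 8);
  return 0;
}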
@@ -673,7 +1420,8 @@ class Simulator : public DecoderVisitor { T MemoryRead(A address) { T value; STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || - (sizeof(value) == 4) || (sizeof(value) == 8)); + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); memcpy(&value, reinterpret_cast(address), sizeof(value)); return value; } @@ -682,7 +1430,8 @@ class Simulator : public DecoderVisitor { template void MemoryWrite(A address, T value) { STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || - (sizeof(value) == 4) || (sizeof(value) == 8)); + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); memcpy(reinterpret_cast(address), &value, sizeof(value)); } @@ -700,14 +1449,652 @@ class Simulator : public DecoderVisitor { void DataProcessing2Source(Instruction* instr); template void BitfieldHelper(Instruction* instr); + uint16_t PolynomialMult(uint8_t op1, uint8_t op2); + + void ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr); + void ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr); + void ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr); + void ld2(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + uint64_t addr); + void ld2(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + int index, uint64_t addr); + void ld2r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + uint64_t addr); + void ld3(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + LogicVRegister dst3, uint64_t addr); + void ld3(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + LogicVRegister dst3, int index, uint64_t addr); + void ld3r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + LogicVRegister dst3, uint64_t addr); + void ld4(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + LogicVRegister dst3, LogicVRegister dst4, uint64_t addr); + void ld4(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + LogicVRegister dst3, LogicVRegister dst4, int index, uint64_t addr); + void ld4r(VectorFormat vform, LogicVRegister dst1, LogicVRegister dst2, + LogicVRegister dst3, LogicVRegister dst4, uint64_t addr); + void st1(VectorFormat vform, LogicVRegister src, uint64_t addr); + void st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr); + void st2(VectorFormat vform, LogicVRegister src, LogicVRegister src2, + uint64_t addr); + void st2(VectorFormat vform, LogicVRegister src, LogicVRegister src2, + int index, uint64_t addr); + void st3(VectorFormat vform, LogicVRegister src, LogicVRegister src2, + LogicVRegister src3, uint64_t addr); + void st3(VectorFormat vform, LogicVRegister src, LogicVRegister src2, + LogicVRegister src3, int index, uint64_t addr); + void st4(VectorFormat vform, LogicVRegister src, LogicVRegister src2, + LogicVRegister src3, LogicVRegister src4, uint64_t addr); + void st4(VectorFormat vform, LogicVRegister src, LogicVRegister src2, + LogicVRegister src3, LogicVRegister src4, int index, uint64_t addr); + LogicVRegister cmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + Condition cond); + LogicVRegister cmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, int imm, Condition cond); + LogicVRegister cmptst(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister add(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister 
addp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister mla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister mls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister mla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister mls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister pmul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + + typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister fmulx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister smull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister smull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister umull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister umull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister smlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister smlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister umlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister umlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister smlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister smlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister umlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister umlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister sqdmull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister sqdmull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + 
const LogicVRegister& src2, int index); + LogicVRegister sqdmlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister sqdmlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index); + LogicVRegister sqdmlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister sqdmlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index); + LogicVRegister sqdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister sqrdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index); + LogicVRegister sub(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister and_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister orr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister orn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister eor(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister bic(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister bic(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, uint64_t imm); + LogicVRegister bif(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister bit(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister bsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister cls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister clz(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister cnt(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister not_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rbit(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int revSize); + LogicVRegister rev16(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev32(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev64(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister addlp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, bool is_signed, + bool do_accumulate); + LogicVRegister saddlp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uaddlp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sadalp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uadalp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ext(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + int index); + LogicVRegister 
ins_element(VectorFormat vform, LogicVRegister dst, + int dst_index, const LogicVRegister& src, + int src_index); + LogicVRegister ins_immediate(VectorFormat vform, LogicVRegister dst, + int dst_index, uint64_t imm); + LogicVRegister dup_element(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int src_index); + LogicVRegister dup_immediate(VectorFormat vform, LogicVRegister dst, + uint64_t imm); + LogicVRegister movi(VectorFormat vform, LogicVRegister dst, uint64_t imm); + LogicVRegister mvni(VectorFormat vform, LogicVRegister dst, uint64_t imm); + LogicVRegister orr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, uint64_t imm); + LogicVRegister sshl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister ushl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister SMinMax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + bool max); + LogicVRegister smax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister smin(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister SMinMaxP(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool max); + LogicVRegister smaxp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister sminp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister addp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister addv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uaddlv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister saddlv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister SMinMaxV(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, bool max); + LogicVRegister smaxv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sminv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uxtl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uxtl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sxtl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sxtl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister Table(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& ind, bool zero_out_of_bounds, + const LogicVRegister* tab1, + const LogicVRegister* tab2 = NULL, + const LogicVRegister* tab3 = NULL, + const LogicVRegister* tab4 = NULL); + LogicVRegister tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& tab2, + const LogicVRegister& tab3, const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& tab2, + const 
LogicVRegister& tab3, const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& tab2, + const LogicVRegister& tab3, const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, const LogicVRegister& tab2, + const LogicVRegister& tab3, const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister uaddl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister uaddl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister uaddw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister uaddw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister saddl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister saddl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister saddw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister saddw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister usubl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister usubl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister usubw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister usubw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister ssubl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister ssubl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister ssubw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister ssubw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister UMinMax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + bool max); + LogicVRegister umax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister umin(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister UMinMaxP(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool max); + LogicVRegister umaxp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister uminp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister UMinMaxV(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, bool max); + 
LogicVRegister umaxv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uminv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister trn1(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister trn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister zip1(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister zip2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister uzp1(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister uzp2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister shl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister scvtf(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int fbits, + FPRounding rounding_mode); + LogicVRegister ucvtf(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int fbits, + FPRounding rounding_mode); + LogicVRegister sshll(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sshll2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister shll(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister shll2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ushll(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister ushll2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sli(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sri(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sshr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister ushr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister ssra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister usra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister srsra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister ursra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister suqadd(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister usqadd(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqshl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister uqshl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqshlu(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister abs(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister neg(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ExtractNarrow(VectorFormat vform, LogicVRegister dst, + bool dstIsSigned, const LogicVRegister& src, + bool srcIsSigned); + LogicVRegister 
xtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uqxtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtun(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister AbsDiff(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + bool issigned); + LogicVRegister saba(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister uaba(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister shrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister shrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister rshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister rshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister uqshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister uqshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister uqrshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister uqrshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqrshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqrshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqshrun(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqshrun2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqrshrun(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqrshrun2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift); + LogicVRegister sqrdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool round = true); + LogicVRegister sqdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); +#define NEON_3VREG_LOGIC_LIST(V) \ + V(addhn) \ + V(addhn2) \ + V(raddhn) \ + V(raddhn2) \ + V(subhn) \ + V(subhn2) \ + V(rsubhn) \ + V(rsubhn2) \ + V(pmull) \ + V(pmull2) \ + V(sabal) \ + V(sabal2) \ + V(uabal) \ + V(uabal2) \ + V(sabdl) \ + V(sabdl2) \ + V(uabdl) \ + V(uabdl2) \ + V(smull) \ + V(smull2) \ + V(umull) \ + V(umull2) \ + V(smlal) \ + V(smlal2) \ + V(umlal) \ + V(umlal2) \ + V(smlsl) \ + V(smlsl2) \ + V(umlsl) \ + V(umlsl2) \ + V(sqdmlal) \ + V(sqdmlal2) \ + V(sqdmlsl) \ + V(sqdmlsl2) \ + V(sqdmull) \ + V(sqdmull2) + +#define DEFINE_LOGIC_FUNC(FXN) \ + LogicVRegister FXN(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src1, const LogicVRegister& src2); + NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC) +#undef DEFINE_LOGIC_FUNC + +#define NEON_FP3SAME_LIST(V) \ + V(fadd, FPAdd, false) \ + V(fsub, FPSub, true) \ + 
V(fmul, FPMul, true) \ + V(fmulx, FPMulx, true) \ + V(fdiv, FPDiv, true) \ + V(fmax, FPMax, false) \ + V(fmin, FPMin, false) \ + V(fmaxnm, FPMaxNM, false) \ + V(fminnm, FPMinNM, false) + +#define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ + template <typename T> \ + LogicVRegister FN(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src1, const LogicVRegister& src2); \ + LogicVRegister FN(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src1, const LogicVRegister& src2); + NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP) +#undef DECLARE_NEON_FP_VECTOR_OP + +#define NEON_FPPAIRWISE_LIST(V) \ + V(faddp, fadd, FPAdd) \ + V(fmaxp, fmax, FPMax) \ + V(fmaxnmp, fmaxnm, FPMaxNM) \ + V(fminp, fmin, FPMin) \ + V(fminnmp, fminnm, FPMinNM) + +#define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \ + LogicVRegister FNP(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src1, const LogicVRegister& src2); \ + LogicVRegister FNP(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src); + NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP) +#undef DECLARE_NEON_FP_PAIR_OP + + template <typename T> + LogicVRegister frecps(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister frecps(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + template <typename T> + LogicVRegister frsqrts(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frsqrts(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template <typename T> + LogicVRegister fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + template <typename T> + LogicVRegister fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); + LogicVRegister fnmul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2); template <typename T> - T FPDefaultNaN() const; + LogicVRegister fcmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + Condition cond); + LogicVRegister fabscmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp_zero(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, Condition cond); + + template <typename T> + LogicVRegister fneg(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fneg(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + template <typename T> + LogicVRegister frecpx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + template <typename T> + LogicVRegister fabs_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabs_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabd(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, const LogicVRegister& src2);
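The frecps and frsqrts helpers declared above model the AArch64 reciprocal and reciprocal-square-root step instructions, whose per-lane arithmetic is a fused Newton-Raphson step. A minimal sketch of that arithmetic, assuming the special-case handling of NaNs, infinities and zeros (which the simulator's FPRecipStepFused/FPRSqrtStepFused cover) is omitted:

#include <cstdio>

// FRECPS step: 2 - op1*op2. Refines an estimate e of 1/x via e' = e * (2 - x*e).
template <typename T>
T RecipStep(T op1, T op2) { return T(2) - op1 * op2; }

// FRSQRTS step: (3 - op1*op2) / 2. Refines an estimate r of 1/sqrt(x)
// via r' = r * (3 - x*r*r) / 2.
template <typename T>
T RSqrtStep(T op1, T op2) { return (T(3) - op1 * op2) / T(2); }

int main() {
  double x = 3.0, e = 0.3;               // rough estimate of 1/3
  e *= RecipStep(x, e);                  // one refinement step
  std::printf("1/3 ~= %f\n", e);         // ~0.330000
  double r = 0.5;                        // rough estimate of 1/sqrt(3)
  r *= RSqrtStep(x * r, r);              // one refinement step
  std::printf("1/sqrt(3) ~= %f\n", r);
  return 0;
}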
+ LogicVRegister frint(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, FPRounding rounding_mode, + bool inexact_exception = false); + LogicVRegister fcvts(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtu(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fsqrt(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frsqrte(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpe(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, FPRounding rounding); + LogicVRegister ursqrte(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister urecpe(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + + typedef float (Simulator::*FPMinMaxOp)(float a, float b); + + LogicVRegister FMinMaxV(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, FPMinMaxOp Op); + + LogicVRegister fminv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fminnmv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxnmv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src); + + template <typename T> + T FPRecipSqrtEstimate(T op); + template <typename T> + T FPRecipEstimate(T op, FPRounding rounding); + template <typename T, typename R> + R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding); void FPCompare(double val0, double val1); double FPRoundInt(double value, FPRounding round_mode); double FPToDouble(float value); float FPToFloat(double value, FPRounding round_mode); + float FPToFloat(float16 value); + float16 FPToFloat16(float value, FPRounding round_mode); + float16 FPToFloat16(double value, FPRounding round_mode); + double recip_sqrt_estimate(double a); + double recip_estimate(double a); + double FPRecipSqrtEstimate(double a); + double FPRecipEstimate(double a); double FixedToDouble(int64_t src, int fbits, FPRounding round_mode); double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode); float FixedToFloat(int64_t src, int fbits, FPRounding round_mode); @@ -738,6 +2125,9 @@ class Simulator : public DecoderVisitor { template <typename T> T FPMul(T op1, T op2); + template <typename T> + T FPMulx(T op1, T op2); + template <typename T> T FPMulAdd(T a, T op1, T op2); @@ -747,17 +2137,18 @@ class Simulator : public DecoderVisitor { template <typename T> T FPSub(T op1, T op2); - // Standard NaN processing. template <typename T> - T FPProcessNaN(T op); - - bool FPProcessNaNs(Instruction* instr); + T FPRecipStepFused(T op1, T op2); template <typename T> - T FPProcessNaNs(T op1, T op2); + T FPRSqrtStepFused(T op1, T op2); - template <typename T> - T FPProcessNaNs3(T op1, T op2, T op3);
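The float16 conversions declared above operate on the IEEE-754 binary16 layout: 1 sign bit, 5 exponent bits with bias 15, and 10 mantissa bits. A small stand-alone decoder for a normal half-precision value, illustrative only (not the simulator's code):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  uint16_t h = 0x3C00;                      // 1.0 encoded as binary16
  unsigned sign = h >> 15;
  unsigned exponent = (h >> 10) & 0x1F;     // 5 exponent bits, bias 15
  unsigned mantissa = h & 0x3FF;            // 10 mantissa bits
  // Normal numbers only: implicit leading 1, exponent != 0 and != 0x1F.
  double value = (sign ? -1.0 : 1.0) * (1.0 + mantissa / 1024.0) *
                 std::ldexp(1.0, static_cast<int>(exponent) - 15);
  std::printf("0x%04x -> %g\n", static_cast<unsigned>(h), value);  // 0x3c00 -> 1
  return 0;
}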
+ // This doesn't do anything at the moment. We'll need it if we want support + // for cumulative exception bits or floating-point exceptions. + void FPProcessException() {} + + // Standard NaN processing. + bool FPProcessNaNs(Instruction* instr); void CheckStackAlignment(); @@ -769,7 +2160,7 @@ class Simulator : public DecoderVisitor { static const uint64_t kCallerSavedRegisterCorruptionValue = 0xca11edc0de000000UL; // This value is a NaN in both 32-bit and 64-bit FP. - static const uint64_t kCallerSavedFPRegisterCorruptionValue = + static const uint64_t kCallerSavedVRegisterCorruptionValue = 0x7ff000007f801000UL; // This value is a mix of 32/64-bits NaN and "verbose" immediate. static const uint64_t kDefaultCPURegisterCorruptionValue = @@ -797,7 +2188,7 @@ class Simulator : public DecoderVisitor { SimRegister registers_[kNumberOfRegisters]; // Floating point registers - SimFPRegister fpregisters_[kNumberOfFPRegisters]; + SimVRegister vregisters_[kNumberOfVRegisters]; // Processor state // bits[31, 27]: Condition flags N, Z, C, and V. @@ -889,9 +2280,9 @@ // not actually perform loads and stores. NotifyStoreExcl only returns // true if the exclusive store is allowed; the global monitor will still // have to be checked to see whether the memory should be updated. - void NotifyLoad(uintptr_t addr); + void NotifyLoad(); void NotifyLoadExcl(uintptr_t addr, TransactionSize size); - void NotifyStore(uintptr_t addr); + void NotifyStore(); bool NotifyStoreExcl(uintptr_t addr, TransactionSize size); private: @@ -916,7 +2307,7 @@ // not actually perform loads and stores. void Clear_Locked(); void NotifyLoadExcl_Locked(uintptr_t addr); - void NotifyStore_Locked(uintptr_t addr, bool is_requesting_processor); + void NotifyStore_Locked(bool is_requesting_processor); bool NotifyStoreExcl_Locked(uintptr_t addr, bool is_requesting_processor); MonitorAccess access_state_; @@ -935,7 +2326,7 @@ base::Mutex mutex; void NotifyLoadExcl_Locked(uintptr_t addr, Processor* processor); - void NotifyStore_Locked(uintptr_t addr, Processor* processor); + void NotifyStore_Locked(Processor* processor); bool NotifyStoreExcl_Locked(uintptr_t addr, Processor* processor); // Called when the simulator is destroyed. @@ -955,10 +2346,67 @@ private: void Init(FILE* stream); + template <typename T> + static T FPDefaultNaN(); + + template <typename T> + T FPProcessNaN(T op) { + DCHECK(std::isnan(op)); + return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op); + }
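FPProcessNaN above makes the AArch64 choice between default-NaN behaviour (FPCR.DN set) and NaN propagation, where the input NaN is quietened and returned. A minimal sketch of what quietening means at the bit level for a double; the constants are illustrative and this is not the simulator's ToQuietNaN:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Set the top mantissa bit of a 64-bit NaN, turning a signalling NaN into a
// quiet NaN while keeping its sign and payload.
double ToQuietNaN(double nan) {
  uint64_t bits;
  std::memcpy(&bits, &nan, sizeof(bits));
  bits |= UINT64_C(1) << 51;  // quiet bit of an IEEE-754 double
  std::memcpy(&nan, &bits, sizeof(bits));
  return nan;
}

int main() {
  uint64_t snan_bits = UINT64_C(0x7FF0000000000001);  // a signalling NaN
  double snan;
  std::memcpy(&snan, &snan_bits, sizeof(snan));
  double qnan = ToQuietNaN(snan);
  uint64_t qnan_bits;
  std::memcpy(&qnan_bits, &qnan, sizeof(qnan_bits));
  std::printf("0x%016llx -> 0x%016llx\n",
              static_cast<unsigned long long>(snan_bits),
              static_cast<unsigned long long>(qnan_bits));
  // prints 0x7ff0000000000001 -> 0x7ff8000000000001
  return 0;
}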
+ + template <typename T> + T FPProcessNaNs(T op1, T op2) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (std::isnan(op1)) { + DCHECK(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (std::isnan(op2)) { + DCHECK(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else { + return 0.0; + } + } + + template <typename T> + T FPProcessNaNs3(T op1, T op2, T op3) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (IsSignallingNaN(op3)) { + return FPProcessNaN(op3); + } else if (std::isnan(op1)) { + DCHECK(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (std::isnan(op2)) { + DCHECK(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else if (std::isnan(op3)) { + DCHECK(IsQuietNaN(op3)); + return FPProcessNaN(op3); + } else { + return 0.0; + } + } + int log_parameters_; Isolate* isolate_; }; +template <> +inline double Simulator::FPDefaultNaN<double>() { + return kFP64DefaultNaN; +} + +template <> +inline float Simulator::FPDefaultNaN<float>() { + return kFP32DefaultNaN; +} // When running with the simulator transition into simulated execution at this // point. diff --git a/deps/v8/src/arm64/simulator-logic-arm64.cc b/deps/v8/src/arm64/simulator-logic-arm64.cc new file mode 100644 index 00000000000000..44a31c4097631b --- /dev/null +++ b/deps/v8/src/arm64/simulator-logic-arm64.cc @@ -0,0 +1,4191 @@ +// Copyright 2016 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#if V8_TARGET_ARCH_ARM64 + +#include <cmath> +#include "src/arm64/simulator-arm64.h" + +namespace v8 { +namespace internal { + +#if defined(USE_SIMULATOR) + +namespace { + +// See FPRound for a description of this function. +inline double FPRoundToDouble(int64_t sign, int64_t exponent, uint64_t mantissa, + FPRounding round_mode) { + uint64_t bits = FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>( + sign, exponent, mantissa, round_mode); + return bit_cast<double>(bits); +} + +// See FPRound for a description of this function. +inline float FPRoundToFloat(int64_t sign, int64_t exponent, uint64_t mantissa, + FPRounding round_mode) { + uint32_t bits = FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>( + sign, exponent, mantissa, round_mode); + return bit_cast<float>(bits); +} + +// See FPRound for a description of this function. +inline float16 FPRoundToFloat16(int64_t sign, int64_t exponent, + uint64_t mantissa, FPRounding round_mode) { + return FPRound<float16, kFloat16ExponentBits, kFloat16MantissaBits>( + sign, exponent, mantissa, round_mode); +} + +} // namespace + +double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToDouble(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToDouble(src, fbits, round); + } else { + return -UFixedToDouble(-src, fbits, round); + } +} + +double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src, 64); + const int64_t exponent = highest_significant_bit - fbits; + + return FPRoundToDouble(0, exponent, src, round); +} + +float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToFloat(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToFloat(src, fbits, round); + } else { + return -UFixedToFloat(-src, fbits, round); + } +} + +float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0f; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. + const int highest_significant_bit = 63 - CountLeadingZeros(src, 64); + const int32_t exponent = highest_significant_bit - fbits; + + return FPRoundToFloat(0, exponent, src, round); +} + +double Simulator::FPToDouble(float value) { + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP64DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The mantissa is transferred entirely, except that the top bit is + // forced to '1', making the result a quiet NaN. The unused (low-order) + // mantissa bits are set to 0. + uint32_t raw = bit_cast(value); + + uint64_t sign = raw >> 31; + uint64_t exponent = (1 << kDoubleExponentBits) - 1; + uint64_t mantissa = unsigned_bitextract_64(21, 0, raw); + + // Unused low-order bits remain zero. + mantissa <<= (kDoubleMantissaBits - kFloatMantissaBits); + + // Force a quiet NaN. + mantissa |= (UINT64_C(1) << (kDoubleMantissaBits - 1)); + + return double_pack(sign, exponent, mantissa); + } + + case FP_ZERO: + case FP_NORMAL: + case FP_SUBNORMAL: + case FP_INFINITE: { + // All other inputs are preserved in a standard cast, because every value + // representable using an IEEE-754 float is also representable using an + // IEEE-754 double. + return static_cast(value); + } + } + + UNREACHABLE(); +} + +float Simulator::FPToFloat(float16 value) { + uint32_t sign = value >> 15; + uint32_t exponent = + unsigned_bitextract_32(kFloat16MantissaBits + kFloat16ExponentBits - 1, + kFloat16MantissaBits, value); + uint32_t mantissa = + unsigned_bitextract_32(kFloat16MantissaBits - 1, 0, value); + + switch (float16classify(value)) { + case FP_ZERO: + return (sign == 0) ? 0.0f : -0.0f; + + case FP_INFINITE: + return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity; + + case FP_SUBNORMAL: { + // Calculate shift required to put mantissa into the most-significant bits + // of the destination mantissa. + int shift = CountLeadingZeros(mantissa << (32 - 10), 32); + + // Shift mantissa and discard implicit '1'. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1; + mantissa &= (1 << kFloatMantissaBits) - 1; + + // Adjust the exponent for the shift applied, and rebias. + exponent = exponent - shift + (kFloatExponentBias - kFloat16ExponentBias); + break; + } + + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP32DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The mantissa is transferred entirely, except that the top bit is + // forced to '1', making the result a quiet NaN. The unused (low-order) + // mantissa bits are set to 0. 
+ exponent = (1 << kFloatExponentBits) - 1; + + // Increase bits in mantissa, making low-order bits 0. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits); + mantissa |= 1 << (kFloatMantissaBits - 1); // Force a quiet NaN. + break; + } + + case FP_NORMAL: { + // Increase bits in mantissa, making low-order bits 0. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits); + + // Change exponent bias. + exponent += (kFloatExponentBias - kFloat16ExponentBias); + break; + } + + default: + UNREACHABLE(); + } + return float_pack(sign, exponent, mantissa); +} + +float16 Simulator::FPToFloat16(float value, FPRounding round_mode) { + // Only the FPTieEven rounding mode is implemented. + DCHECK_EQ(round_mode, FPTieEven); + USE(round_mode); + + int64_t sign = float_sign(value); + int64_t exponent = + static_cast(float_exp(value)) - kFloatExponentBias; + uint32_t mantissa = float_mantissa(value); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP16DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The mantissa is transferred as much as possible, except that the top + // bit is forced to '1', making the result a quiet NaN. + float16 result = + (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits); + result |= (1 << (kFloat16MantissaBits - 1)); // Force a quiet NaN; + return result; + } + + case FP_ZERO: + return (sign == 0) ? 0 : 0x8000; + + case FP_INFINITE: + return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert float-to-half as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + + // Add the implicit '1' bit to the mantissa. + mantissa += (1 << kFloatMantissaBits); + return FPRoundToFloat16(sign, exponent, mantissa, round_mode); + } + } + + UNREACHABLE(); +} + +float16 Simulator::FPToFloat16(double value, FPRounding round_mode) { + // Only the FPTieEven rounding mode is implemented. + DCHECK_EQ(round_mode, FPTieEven); + USE(round_mode); + + int64_t sign = double_sign(value); + int64_t exponent = + static_cast(double_exp(value)) - kDoubleExponentBias; + uint64_t mantissa = double_mantissa(value); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP16DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The mantissa is transferred as much as possible, except that the top + // bit is forced to '1', making the result a quiet NaN. + float16 result = + (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits); + result |= (1 << (kFloat16MantissaBits - 1)); // Force a quiet NaN; + return result; + } + + case FP_ZERO: + return (sign == 0) ? 0 : 0x8000; + + case FP_INFINITE: + return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert double-to-half as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + + // Add the implicit '1' bit to the mantissa. + mantissa += (UINT64_C(1) << kDoubleMantissaBits); + return FPRoundToFloat16(sign, exponent, mantissa, round_mode); + } + } + + UNREACHABLE(); +} + +float Simulator::FPToFloat(double value, FPRounding round_mode) { + // Only the FPTieEven rounding mode is implemented. 
+ DCHECK((round_mode == FPTieEven) || (round_mode == FPRoundOdd)); + USE(round_mode); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + FPProcessException(); + } + if (DN()) return kFP32DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The mantissa is transferred as much as possible, except that the + // top bit is forced to '1', making the result a quiet NaN. + + uint64_t raw = bit_cast(value); + + uint32_t sign = raw >> 63; + uint32_t exponent = (1 << 8) - 1; + uint32_t mantissa = static_cast(unsigned_bitextract_64( + 50, kDoubleMantissaBits - kFloatMantissaBits, raw)); + mantissa |= (1 << (kFloatMantissaBits - 1)); // Force a quiet NaN. + + return float_pack(sign, exponent, mantissa); + } + + case FP_ZERO: + case FP_INFINITE: { + // In a C++ cast, any value representable in the target type will be + // unchanged. This is always the case for +/-0.0 and infinities. + return static_cast(value); + } + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert double-to-float as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + uint32_t sign = double_sign(value); + int64_t exponent = + static_cast(double_exp(value)) - kDoubleExponentBias; + uint64_t mantissa = double_mantissa(value); + if (std::fpclassify(value) == FP_NORMAL) { + // For normal FP values, add the hidden bit. + mantissa |= (UINT64_C(1) << kDoubleMantissaBits); + } + return FPRoundToFloat(sign, exponent, mantissa, round_mode); + } + } + + UNREACHABLE(); +} + +void Simulator::ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + addr += LaneSizeInBytesFromFormat(vform); + } +} + +void Simulator::ld1(VectorFormat vform, LogicVRegister dst, int index, + uint64_t addr) { + dst.ReadUintFromMem(vform, index, addr); +} + +void Simulator::ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + } +} + +void Simulator::ld2(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + addr1 += 2 * esize; + addr2 += 2 * esize; + } +} + +void Simulator::ld2(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, int index, uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); +} + +void Simulator::ld2r(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + } +} + +void Simulator::ld3(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, LogicVRegister dst3, uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + 
uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + addr1 += 3 * esize; + addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + +void Simulator::ld3(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, LogicVRegister dst3, int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); +} + +void Simulator::ld3r(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, LogicVRegister dst3, uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + } +} + +void Simulator::ld4(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, LogicVRegister dst3, + LogicVRegister dst4, uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + addr1 += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + +void Simulator::ld4(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, LogicVRegister dst3, + LogicVRegister dst4, int index, uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); + dst4.ReadUintFromMem(vform, index, addr4); +} + +void Simulator::ld4r(VectorFormat vform, LogicVRegister dst1, + LogicVRegister dst2, LogicVRegister dst3, + LogicVRegister dst4, uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + } +} + +void Simulator::st1(VectorFormat vform, LogicVRegister src, uint64_t addr) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + src.WriteUintToMem(vform, i, addr); + addr 
+= LaneSizeInBytesFromFormat(vform); + } +} + +void Simulator::st1(VectorFormat vform, LogicVRegister src, int index, + uint64_t addr) { + src.WriteUintToMem(vform, index, addr); +} + +void Simulator::st2(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + addr += 2 * esize; + addr2 += 2 * esize; + } +} + +void Simulator::st2(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2, + int index, uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); +} + +void Simulator::st3(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2, + LogicVRegister dst3, uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + addr += 3 * esize; + addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + +void Simulator::st3(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2, + LogicVRegister dst3, int index, uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); +} + +void Simulator::st4(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2, + LogicVRegister dst3, LogicVRegister dst4, uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + dst4.WriteUintToMem(vform, i, addr4); + addr += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + +void Simulator::st4(VectorFormat vform, LogicVRegister dst, LogicVRegister dst2, + LogicVRegister dst3, LogicVRegister dst4, int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); + dst4.WriteUintToMem(vform, index, addr + 3 * esize); +} + +LogicVRegister Simulator::cmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = src1.Int(vform, i); + int64_t sb = src2.Int(vform, i); + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + bool result = false; + switch (cond) { + case eq: + result = (ua == ub); + break; + case ge: + result = (sa >= sb); + break; + case gt: + result = (sa > sb); + break; + case hi: + result = (ua > ub); + break; + case hs: + result = (ua >= ub); + break; + case lt: + result = (sa < sb); + break; + case le: + result = (sa <= sb); + break; + default: + UNREACHABLE(); + } + dst.SetUint(vform, i, result ? 
MaxUintFromFormat(vform) : 0); + } + return dst; +} + +LogicVRegister Simulator::cmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, int imm, + Condition cond) { + SimVRegister temp; + LogicVRegister imm_reg = dup_immediate(vform, temp, imm); + return cmp(vform, dst, src1, imm_reg, cond); +} + +LogicVRegister Simulator::cmptst(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + +LogicVRegister Simulator::add(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + int lane_size = LaneSizeInBitsFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + uint64_t ua = src1.UintLeftJustified(vform, i); + uint64_t ub = src2.UintLeftJustified(vform, i); + uint64_t ur = ua + ub; + if (ur < ua) { + dst.SetUnsignedSat(i, true); + } + + // Test for signed saturation. + bool pos_a = (ua >> 63) == 0; + bool pos_b = (ub >> 63) == 0; + bool pos_r = (ur >> 63) == 0; + // If the signs of the operands are the same, but different from the result, + // there was an overflow. + if ((pos_a == pos_b) && (pos_a != pos_r)) { + dst.SetSignedSat(i, pos_a); + } + + dst.SetInt(vform, i, ur >> (64 - lane_size)); + } + return dst; +} + +LogicVRegister Simulator::addp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uzp1(vform, temp1, src1, src2); + uzp2(vform, temp2, src1, src2); + add(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::mla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + add(vform, dst, dst, temp); + return dst; +} + +LogicVRegister Simulator::mls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + sub(vform, dst, dst, temp); + return dst; +} + +LogicVRegister Simulator::mul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i)); + } + return dst; +} + +LogicVRegister Simulator::mul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mul(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::mla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mla(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::mls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mls(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister 
Simulator::smull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::smull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::umull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::umull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::smlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::smlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::umlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::umlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::smlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::smlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::umlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + 
VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::umlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqdmull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + +uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) { + uint16_t result = 0; + uint16_t extended_op2 = op2; + for (int i = 0; i < 8; ++i) { + if ((op1 >> i) & 1) { + result = result ^ (extended_op2 << i); + } + } + return result; +} + +LogicVRegister Simulator::pmul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& 
src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, + PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i))); + } + return dst; +} + +LogicVRegister Simulator::pmull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidth(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint( + vform, i, + PolynomialMult(src1.Uint(vform_src, i), src2.Uint(vform_src, i))); + } + return dst; +} + +LogicVRegister Simulator::pmull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform); + dst.ClearForWrite(vform); + int lane_count = LaneCountFromFormat(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, i, + PolynomialMult(src1.Uint(vform_src, lane_count + i), + src2.Uint(vform_src, lane_count + i))); + } + return dst; +} + +LogicVRegister Simulator::sub(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + int lane_size = LaneSizeInBitsFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + uint64_t ua = src1.UintLeftJustified(vform, i); + uint64_t ub = src2.UintLeftJustified(vform, i); + uint64_t ur = ua - ub; + if (ub > ua) { + dst.SetUnsignedSat(i, false); + } + + // Test for signed saturation. + bool pos_a = (ua >> 63) == 0; + bool pos_b = (ub >> 63) == 0; + bool pos_r = (ur >> 63) == 0; + // If the signs of the operands are different, and the sign of the first + // operand doesn't match the result, there was an overflow. 
+ if ((pos_a != pos_b) && (pos_a != pos_r)) { + dst.SetSignedSat(i, pos_a); + } + + dst.SetInt(vform, i, ur >> (64 - lane_size)); + } + return dst; +} + +LogicVRegister Simulator::and_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i)); + } + return dst; +} + +LogicVRegister Simulator::orr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i)); + } + return dst; +} + +LogicVRegister Simulator::orn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i)); + } + return dst; +} + +LogicVRegister Simulator::eor(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i)); + } + return dst; +} + +LogicVRegister Simulator::bic(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i)); + } + return dst; +} + +LogicVRegister Simulator::bic(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, uint64_t imm) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) & ~imm; + } + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::bif(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = ~src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + +LogicVRegister Simulator::bit(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + +LogicVRegister Simulator::bsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = src2.Uint(vform, i); + uint64_t operand2 = dst.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + +LogicVRegister Simulator::SMinMax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2, bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t src1_val = src1.Int(vform, i); + int64_t src2_val = src2.Int(vform, i); + int64_t dst_val; + if (max) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? src1_val : src2_val; + } + dst.SetInt(vform, i, dst_val); + } + return dst; +} + +LogicVRegister Simulator::smax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return SMinMax(vform, dst, src1, src2, true); +} + +LogicVRegister Simulator::smin(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return SMinMax(vform, dst, src1, src2, false); +} + +LogicVRegister Simulator::SMinMaxP(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool max) { + int lanes = LaneCountFromFormat(vform); + int64_t result[kMaxLanesPerVector]; + const LogicVRegister* src = &src1; + for (int j = 0; j < 2; j++) { + for (int i = 0; i < lanes; i += 2) { + int64_t first_val = src->Int(vform, i); + int64_t second_val = src->Int(vform, i + 1); + int64_t dst_val; + if (max) { + dst_val = (first_val > second_val) ? first_val : second_val; + } else { + dst_val = (first_val < second_val) ? first_val : second_val; + } + DCHECK_LT((i >> 1) + (j * lanes / 2), kMaxLanesPerVector); + result[(i >> 1) + (j * lanes / 2)] = dst_val; + } + src = &src2; + } + dst.SetIntArray(vform, result); + return dst; +} + +LogicVRegister Simulator::smaxp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return SMinMaxP(vform, dst, src1, src2, true); +} + +LogicVRegister Simulator::sminp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return SMinMaxP(vform, dst, src1, src2, false); +} + +LogicVRegister Simulator::addp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + DCHECK_EQ(vform, kFormatD); + + uint64_t dst_val = src.Uint(kFormat2D, 0) + src.Uint(kFormat2D, 1); + dst.ClearForWrite(vform); + dst.SetUint(vform, 0, dst_val); + return dst; +} + +LogicVRegister Simulator::addv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform)); + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + +LogicVRegister Simulator::saddlv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + +LogicVRegister Simulator::uaddlv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + uint64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Uint(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetUint(vform_dst, 0, dst_val); + return dst; +} + +LogicVRegister Simulator::SMinMaxV(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& 
src, bool max) { + int64_t dst_val = max ? INT64_MIN : INT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t src_val = src.Int(vform, i); + if (max) { + dst_val = (src_val > dst_val) ? src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? src_val : dst_val; + } + } + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetInt(vform, 0, dst_val); + return dst; +} + +LogicVRegister Simulator::smaxv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + SMinMaxV(vform, dst, src, true); + return dst; +} + +LogicVRegister Simulator::sminv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + SMinMaxV(vform, dst, src, false); + return dst; +} + +LogicVRegister Simulator::UMinMax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t src1_val = src1.Uint(vform, i); + uint64_t src2_val = src2.Uint(vform, i); + uint64_t dst_val; + if (max) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? src1_val : src2_val; + } + dst.SetUint(vform, i, dst_val); + } + return dst; +} + +LogicVRegister Simulator::umax(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return UMinMax(vform, dst, src1, src2, true); +} + +LogicVRegister Simulator::umin(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return UMinMax(vform, dst, src1, src2, false); +} + +LogicVRegister Simulator::UMinMaxP(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool max) { + int lanes = LaneCountFromFormat(vform); + uint64_t result[kMaxLanesPerVector]; + const LogicVRegister* src = &src1; + for (int j = 0; j < 2; j++) { + for (int i = 0; i < LaneCountFromFormat(vform); i += 2) { + uint64_t first_val = src->Uint(vform, i); + uint64_t second_val = src->Uint(vform, i + 1); + uint64_t dst_val; + if (max) { + dst_val = (first_val > second_val) ? first_val : second_val; + } else { + dst_val = (first_val < second_val) ? first_val : second_val; + } + DCHECK_LT((i >> 1) + (j * lanes / 2), kMaxLanesPerVector); + result[(i >> 1) + (j * lanes / 2)] = dst_val; + } + src = &src2; + } + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::umaxp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return UMinMaxP(vform, dst, src1, src2, true); +} + +LogicVRegister Simulator::uminp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return UMinMaxP(vform, dst, src1, src2, false); +} + +LogicVRegister Simulator::UMinMaxV(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, bool max) { + uint64_t dst_val = max ? 0 : UINT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t src_val = src.Uint(vform, i); + if (max) { + dst_val = (src_val > dst_val) ? src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? 
src_val : dst_val; + } + } + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetUint(vform, 0, dst_val); + return dst; +} + +LogicVRegister Simulator::umaxv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + UMinMaxV(vform, dst, src, true); + return dst; +} + +LogicVRegister Simulator::uminv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + UMinMaxV(vform, dst, src, false); + return dst; +} + +LogicVRegister Simulator::shl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, shiftreg); +} + +LogicVRegister Simulator::sshll(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + +LogicVRegister Simulator::sshll2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl2(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + +LogicVRegister Simulator::shll(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll(vform, dst, src, shift); +} + +LogicVRegister Simulator::shll2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll2(vform, dst, src, shift); +} + +LogicVRegister Simulator::ushll(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + +LogicVRegister Simulator::ushll2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl2(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + +LogicVRegister Simulator::sli(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted = src_lane << shift; + uint64_t mask = MaxUintFromFormat(vform) << shift; + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + +LogicVRegister Simulator::sqshl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, shiftreg).SignedSaturate(vform); +} + +LogicVRegister Simulator::uqshl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, shiftreg).UnsignedSaturate(vform); +} + 
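The immediate-shift helpers above (shl, sqshl, uqshl, and sqshlu just below) all follow the same pattern: broadcast the shift amount with dup_immediate, then delegate to the element-wise sshl/ushl, which also record per-lane saturation and rounding state. A minimal standalone sketch of the per-lane behaviour of an unsigned saturating left shift, using a hypothetical uqshl_lane helper rather than the simulator's LogicVRegister API, and not part of this change:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Sketch of one lane of UQSHL by immediate: value and the result live in a
// lane of lane_bits bits; shifting any set bit out of the lane saturates the
// result to the lane maximum.
uint64_t uqshl_lane(uint64_t value, int shift, int lane_bits) {
  assert(shift >= 0 && lane_bits > 0 && lane_bits < 64);
  const uint64_t lane_max = (UINT64_C(1) << lane_bits) - 1;
  assert(value <= lane_max);
  if (value == 0 || shift == 0) return value;
  if (shift >= lane_bits || (value >> (lane_bits - shift)) != 0) {
    return lane_max;  // A set bit would leave the lane: saturate.
  }
  return value << shift;
}

int main() {
  // 0x40 << 2 == 0x100 does not fit in an 8-bit lane, so it saturates to 0xff.
  std::printf("%#llx\n", (unsigned long long)uqshl_lane(0x40, 2, 8));  // 0xff
  std::printf("%#llx\n", (unsigned long long)uqshl_lane(0x20, 2, 8));  // 0x80
  return 0;
}

In the patch itself, uqshl reuses ushl, which additionally tracks rounding state for negative (right-shift) amounts, and the per-lane saturation flags it sets are later consumed by UnsignedSaturate(vform).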
+LogicVRegister Simulator::sqshlu(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, shiftreg).UnsignedSaturate(vform); +} + +LogicVRegister Simulator::sri(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + DCHECK((shift > 0) && + (shift <= static_cast(LaneSizeInBitsFromFormat(vform)))); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted; + uint64_t mask; + if (shift == 64) { + shifted = 0; + mask = 0; + } else { + shifted = src_lane >> shift; + mask = MaxUintFromFormat(vform) >> shift; + } + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + +LogicVRegister Simulator::ushr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return ushl(vform, dst, src, shiftreg); +} + +LogicVRegister Simulator::sshr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + DCHECK_GE(shift, 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return sshl(vform, dst, src, shiftreg); +} + +LogicVRegister Simulator::ssra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + +LogicVRegister Simulator::usra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + +LogicVRegister Simulator::srsra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform); + return add(vform, dst, dst, shifted_reg); +} + +LogicVRegister Simulator::ursra(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform); + return add(vform, dst, dst, shifted_reg); +} + +LogicVRegister Simulator::cls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits); + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::clz(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits); + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::cnt(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + 
uint64_t value = src.Uint(vform, i); + result[i] = 0; + for (int j = 0; j < laneSizeInBits; j++) { + result[i] += (value & 1); + value >>= 1; + } + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::sshl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + int64_t lj_src_val = src1.IntLeftJustified(vform, i); + + // Set signed saturation state. + if ((shift_val > CountLeadingSignBits(lj_src_val, 64)) && + (lj_src_val != 0)) { + dst.SetSignedSat(i, lj_src_val >= 0); + } + + // Set unsigned saturation state. + if (lj_src_val < 0) { + dst.SetUnsignedSat(i, false); + } else if ((shift_val > CountLeadingZeros(lj_src_val, 64)) && + (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + int64_t src_val = src1.Int(vform, i); + bool src_is_negative = src_val < 0; + if (shift_val > 63) { + dst.SetInt(vform, i, 0); + } else if (shift_val < -63) { + dst.SetRounding(i, src_is_negative); + dst.SetInt(vform, i, src_is_negative ? -1 : 0); + } else { + // Use unsigned types for shifts, as behaviour is undefined for signed + // lhs. + uint64_t usrc_val = static_cast<uint64_t>(src_val); + + if (shift_val < 0) { + // Convert to right shift. + shift_val = -shift_val; + + // Set rounding state by testing most-significant bit shifted out. + // Rounding only needed on right shifts. + if (((usrc_val >> (shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + + usrc_val >>= shift_val; + + if (src_is_negative) { + // Simulate sign-extension. + usrc_val |= (~UINT64_C(0) << (64 - shift_val)); + } + } else { + usrc_val <<= shift_val; + } + dst.SetUint(vform, i, usrc_val); + } + } + return dst; +} + +LogicVRegister Simulator::ushl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + uint64_t lj_src_val = src1.UintLeftJustified(vform, i); + + // Set saturation state. + if ((shift_val > CountLeadingZeros(lj_src_val, 64)) && (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + uint64_t src_val = src1.Uint(vform, i); + if ((shift_val > 63) || (shift_val < -64)) { + dst.SetUint(vform, i, 0); + } else { + if (shift_val < 0) { + // Set rounding state. Rounding only needed on right shifts. + if (((src_val >> (-shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + + if (shift_val == -64) { + src_val = 0; + } else { + src_val >>= -shift_val; + } + } else { + src_val <<= shift_val; + } + dst.SetUint(vform, i, src_val); + } + } + return dst; +} + +LogicVRegister Simulator::neg(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + dst.SetInt(vform, i, (sa == INT64_MIN) ? sa : -sa); + } + return dst; +} + +LogicVRegister Simulator::suqadd(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = dst.IntLeftJustified(vform, i); + uint64_t ub = src.UintLeftJustified(vform, i); + uint64_t ur = sa + ub; + + int64_t sr = bit_cast<int64_t>(ur); + if (sr < sa) { // Test for signed positive saturation. 
+ dst.SetInt(vform, i, MaxIntFromFormat(vform)); + } else { + dst.SetUint(vform, i, dst.Int(vform, i) + src.Uint(vform, i)); + } + } + return dst; +} + +LogicVRegister Simulator::usqadd(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = dst.UintLeftJustified(vform, i); + int64_t sb = src.IntLeftJustified(vform, i); + uint64_t ur = ua + sb; + + if ((sb > 0) && (ur <= ua)) { + dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation. + } else if ((sb < 0) && (ur >= ua)) { + dst.SetUint(vform, i, 0); // Negative saturation. + } else { + dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i)); + } + } + return dst; +} + +LogicVRegister Simulator::abs(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + if (sa < 0) { + dst.SetInt(vform, i, (sa == INT64_MIN) ? sa : -sa); + } else { + dst.SetInt(vform, i, sa); + } + } + return dst; +} + +LogicVRegister Simulator::ExtractNarrow(VectorFormat dstform, + LogicVRegister dst, bool dstIsSigned, + const LogicVRegister& src, + bool srcIsSigned) { + bool upperhalf = false; + VectorFormat srcform = kFormatUndefined; + int64_t ssrc[8]; + uint64_t usrc[8]; + + switch (dstform) { + case kFormat8B: + upperhalf = false; + srcform = kFormat8H; + break; + case kFormat16B: + upperhalf = true; + srcform = kFormat8H; + break; + case kFormat4H: + upperhalf = false; + srcform = kFormat4S; + break; + case kFormat8H: + upperhalf = true; + srcform = kFormat4S; + break; + case kFormat2S: + upperhalf = false; + srcform = kFormat2D; + break; + case kFormat4S: + upperhalf = true; + srcform = kFormat2D; + break; + case kFormatB: + upperhalf = false; + srcform = kFormatH; + break; + case kFormatH: + upperhalf = false; + srcform = kFormatS; + break; + case kFormatS: + upperhalf = false; + srcform = kFormatD; + break; + default: + UNIMPLEMENTED(); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + ssrc[i] = src.Int(srcform, i); + usrc[i] = src.Uint(srcform, i); + } + + int offset; + if (upperhalf) { + offset = LaneCountFromFormat(dstform) / 2; + } else { + offset = 0; + dst.ClearForWrite(dstform); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + // Test for signed saturation + if (ssrc[i] > MaxIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, true); + } else if (ssrc[i] < MinIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, false); + } + + // Test for unsigned saturation + if (srcIsSigned) { + if (ssrc[i] > static_cast(MaxUintFromFormat(dstform))) { + dst.SetUnsignedSat(offset + i, true); + } else if (ssrc[i] < 0) { + dst.SetUnsignedSat(offset + i, false); + } + } else { + if (usrc[i] > MaxUintFromFormat(dstform)) { + dst.SetUnsignedSat(offset + i, true); + } + } + + int64_t result; + if (srcIsSigned) { + result = ssrc[i] & MaxUintFromFormat(dstform); + } else { + result = usrc[i] & MaxUintFromFormat(dstform); + } + + if (dstIsSigned) { + dst.SetInt(dstform, offset + i, result); + } else { + dst.SetUint(dstform, offset + i, result); + } + } + return dst; +} + +LogicVRegister Simulator::xtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return ExtractNarrow(vform, dst, true, src, true); +} + +LogicVRegister 
Simulator::sqxtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return ExtractNarrow(vform, dst, true, src, true).SignedSaturate(vform); +} + +LogicVRegister Simulator::sqxtun(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return ExtractNarrow(vform, dst, false, src, true).UnsignedSaturate(vform); +} + +LogicVRegister Simulator::uqxtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return ExtractNarrow(vform, dst, false, src, false).UnsignedSaturate(vform); +} + +LogicVRegister Simulator::AbsDiff(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool issigned) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (issigned) { + int64_t sr = src1.Int(vform, i) - src2.Int(vform, i); + sr = sr > 0 ? sr : -sr; + dst.SetInt(vform, i, sr); + } else { + int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i); + sr = sr > 0 ? sr : -sr; + dst.SetUint(vform, i, sr); + } + } + return dst; +} + +LogicVRegister Simulator::saba(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + AbsDiff(vform, temp, src1, src2, true); + add(vform, dst, dst, temp); + return dst; +} + +LogicVRegister Simulator::uaba(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + AbsDiff(vform, temp, src1, src2, false); + add(vform, dst, dst, temp); + return dst; +} + +LogicVRegister Simulator::not_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, ~src.Uint(vform, i)); + } + return dst; +} + +LogicVRegister Simulator::rbit(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + uint64_t reversed_value; + uint64_t value; + for (int i = 0; i < laneCount; i++) { + value = src.Uint(vform, i); + reversed_value = 0; + for (int j = 0; j < laneSizeInBits; j++) { + reversed_value = (reversed_value << 1) | (value & 1); + value >>= 1; + } + result[i] = reversed_value; + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::rev(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int revSize) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSize = LaneSizeInBytesFromFormat(vform); + int lanesPerLoop = revSize / laneSize; + for (int i = 0; i < laneCount; i += lanesPerLoop) { + for (int j = 0; j < lanesPerLoop; j++) { + result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j); + } + } + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::rev16(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 2); +} + +LogicVRegister Simulator::rev32(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 4); +} + +LogicVRegister Simulator::rev64(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 8); +} + +LogicVRegister Simulator::addlp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, bool is_signed, + bool do_accumulate) { + VectorFormat vformsrc = 
VectorFormatHalfWidthDoubleLanes(vform); + DCHECK_LE(LaneSizeInBitsFromFormat(vformsrc), 32U); + DCHECK_LE(LaneCountFromFormat(vform), 8); + + uint64_t result[8]; + int lane_count = LaneCountFromFormat(vform); + for (int i = 0; i < lane_count; i++) { + if (is_signed) { + result[i] = static_cast(src.Int(vformsrc, 2 * i) + + src.Int(vformsrc, 2 * i + 1)); + } else { + result[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1); + } + } + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; ++i) { + if (do_accumulate) { + result[i] += dst.Uint(vform, i); + } + dst.SetUint(vform, i, result[i]); + } + + return dst; +} + +LogicVRegister Simulator::saddlp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, false); +} + +LogicVRegister Simulator::uaddlp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, false, false); +} + +LogicVRegister Simulator::sadalp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, true); +} + +LogicVRegister Simulator::uadalp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, false, true); +} + +LogicVRegister Simulator::ext(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + uint8_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount - index; ++i) { + result[i] = src1.Uint(vform, i + index); + } + for (int i = 0; i < index; ++i) { + result[laneCount - index + i] = src2.Uint(vform, i); + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + +LogicVRegister Simulator::dup_element(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, + int src_index) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = src.Uint(vform, src_index); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + +LogicVRegister Simulator::dup_immediate(VectorFormat vform, LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = imm & MaxUintFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + +LogicVRegister Simulator::ins_element(VectorFormat vform, LogicVRegister dst, + int dst_index, const LogicVRegister& src, + int src_index) { + dst.SetUint(vform, dst_index, src.Uint(vform, src_index)); + return dst; +} + +LogicVRegister Simulator::ins_immediate(VectorFormat vform, LogicVRegister dst, + int dst_index, uint64_t imm) { + uint64_t value = imm & MaxUintFromFormat(vform); + dst.SetUint(vform, dst_index, value); + return dst; +} + +LogicVRegister Simulator::movi(VectorFormat vform, LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, imm); + } + return dst; +} + +LogicVRegister Simulator::mvni(VectorFormat vform, LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, ~imm); + } + return dst; +} + +LogicVRegister Simulator::orr(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, uint64_t imm) { + uint64_t 
result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) | imm; + } + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::uxtl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src.Uint(vform_half, i)); + } + return dst; +} + +LogicVRegister Simulator::sxtl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetInt(vform, i, src.Int(vform_half, i)); + } + return dst; +} + +LogicVRegister Simulator::uxtl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i)); + } + return dst; +} + +LogicVRegister Simulator::sxtl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetInt(vform, i, src.Int(vform_half, lane_count + i)); + } + return dst; +} + +LogicVRegister Simulator::shrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vform_src = VectorFormatDoubleWidth(vform); + VectorFormat vform_dst = vform; + LogicVRegister shifted_src = ushr(vform_src, temp, src, shift); + return ExtractNarrow(vform_dst, dst, false, shifted_src, false); +} + +LogicVRegister Simulator::shrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift); + return ExtractNarrow(vformdst, dst, false, shifted_src, false); +} + +LogicVRegister Simulator::rshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return ExtractNarrow(vformdst, dst, false, shifted_src, false); +} + +LogicVRegister Simulator::rshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return ExtractNarrow(vformdst, dst, false, shifted_src, false); +} + +LogicVRegister Simulator::Table(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& ind, + bool zero_out_of_bounds, + const LogicVRegister* tab1, + const LogicVRegister* tab2, + const LogicVRegister* tab3, + const LogicVRegister* tab4) { + DCHECK_NOT_NULL(tab1); + const LogicVRegister* tab[4] = {tab1, tab2, tab3, tab4}; + uint64_t result[kMaxLanesPerVector]; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + result[i] = 
zero_out_of_bounds ? 0 : dst.Uint(kFormat16B, i); + } + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t j = ind.Uint(vform, i); + int tab_idx = static_cast(j >> 4); + int j_idx = static_cast(j & 15); + if ((tab_idx < 4) && (tab[tab_idx] != NULL)) { + result[i] = tab[tab_idx]->Uint(kFormat16B, j_idx); + } + } + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab); +} + +LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2); +} + +LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2, &tab3); +} + +LogicVRegister Simulator::tbl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2, &tab3, &tab4); +} + +LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab); +} + +LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2); +} + +LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2, &tab3); +} + +LogicVRegister Simulator::tbx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2, &tab3, &tab4); +} + +LogicVRegister Simulator::uqshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + return shrn(vform, dst, src, shift).UnsignedSaturate(vform); +} + +LogicVRegister Simulator::uqshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + return shrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + +LogicVRegister Simulator::uqrshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + return rshrn(vform, dst, src, shift).UnsignedSaturate(vform); +} + +LogicVRegister Simulator::uqrshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + +LogicVRegister Simulator::sqshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::sqshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = 
VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::sqrshrn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::sqrshrn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::sqshrun(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::sqshrun2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::sqrshrun(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::sqrshrun2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, shifted_src); +} + +LogicVRegister Simulator::uaddl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::uaddl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::uaddw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::uaddw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::saddl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + 
SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::saddl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::saddw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::saddw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::usubl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::usubl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::usubw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::usubw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::ssubl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::ssubl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::ssubw(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::ssubw2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + +LogicVRegister Simulator::uabal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + uaba(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::uabal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + uaba(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::sabal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& 
src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::sabal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::uabdl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + AbsDiff(vform, dst, temp1, temp2, false); + return dst; +} + +LogicVRegister Simulator::uabdl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + AbsDiff(vform, dst, temp1, temp2, false); + return dst; +} + +LogicVRegister Simulator::sabdl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + AbsDiff(vform, dst, temp1, temp2, true); + return dst; +} + +LogicVRegister Simulator::sabdl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + AbsDiff(vform, dst, temp1, temp2, true); + return dst; +} + +LogicVRegister Simulator::umull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::umull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::smull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::smull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::umlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::umlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::smlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::smlsl2(VectorFormat vform, 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::umlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::umlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::smlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::smlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return add(vform, dst, dst, product).SignedSaturate(vform); +} + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return add(vform, dst, dst, product).SignedSaturate(vform); +} + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + +LogicVRegister Simulator::sqdmull(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull2(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, bool round) { + // 2 * INT_32_MIN * INT_32_MIN causes int64_t to overflow. + // To avoid this, we use (src1 * src2 + 1 << (esize - 2)) >> (esize - 1) + // which is same as (2 * src1 * src2 + 1 << (esize - 1)) >> esize. + + int esize = LaneSizeInBitsFromFormat(vform); + int round_const = round ? 
(1 << (esize - 2)) : 0; + int64_t product; + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + product = src1.Int(vform, i) * src2.Int(vform, i); + product += round_const; + product = product >> (esize - 1); + + if (product > MaxIntFromFormat(vform)) { + product = MaxIntFromFormat(vform); + } else if (product < MinIntFromFormat(vform)) { + product = MinIntFromFormat(vform); + } + dst.SetInt(vform, i, product); + } + return dst; +} + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sqrdmulh(vform, dst, src1, src2, false); +} + +LogicVRegister Simulator::addhn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::addhn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::raddhn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::raddhn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::subhn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::subhn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::rsubhn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::rsubhn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + +LogicVRegister Simulator::trn1(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, 2 * i); + result[(2 * i) + 1] = src2.Uint(vform, 2 * i); + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister 
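// Minimal illustrative sketch (not part of the upstream change): the
// rounding-doubling-high multiply used by sqrdmulh, written out for 32-bit
// lanes. (a * b + (1 << 30)) >> 31 equals (2 * a * b + (1 << 31)) >> 32 but
// never overflows int64_t, even for a == b == INT32_MIN; the final clamp
// mirrors the MaxIntFromFormat/MinIntFromFormat saturation above.
#include <algorithm>
#include <cstdint>

int32_t SqrdmulhLane32(int32_t a, int32_t b) {
  const int esize = 32;
  int64_t product = static_cast<int64_t>(a) * b;  // at most 2^62 in magnitude
  product += int64_t{1} << (esize - 2);           // rounding constant
  product >>= (esize - 1);                        // doubling folded into the shift
  product = std::min<int64_t>(product, INT32_MAX);
  product = std::max<int64_t>(product, INT32_MIN);
  return static_cast<int32_t>(product);
}
// SqrdmulhLane32(INT32_MIN, INT32_MIN) saturates to INT32_MAX, the same case
// that makes sqdmull/sqdmlal above need SignedSaturate after doubling.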
Simulator::trn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, (2 * i) + 1); + result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1); + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::zip1(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, i); + result[(2 * i) + 1] = src2.Uint(vform, i); + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::zip2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, pairs + i); + result[(2 * i) + 1] = src2.Uint(vform, pairs + i); + } + + dst.SetUintArray(vform, result); + return dst; +} + +LogicVRegister Simulator::uzp1(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[2 * i]); + } + return dst; +} + +LogicVRegister Simulator::uzp2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[(2 * i) + 1]); + } + return dst; +} + +template +T Simulator::FPAdd(T op1, T op2) { + T result = FPProcessNaNs(op1, op2); + if (std::isnan(result)) return result; + + if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) { + // inf + -inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 + op2; + } +} + +template +T Simulator::FPSub(T op1, T op2) { + // NaNs should be handled elsewhere. + DCHECK(!std::isnan(op1) && !std::isnan(op2)); + + if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) { + // inf - inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 - op2; + } +} + +template +T Simulator::FPMul(T op1, T op2) { + // NaNs should be handled elsewhere. + DCHECK(!std::isnan(op1) && !std::isnan(op2)); + + if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns the default NaN. + FPProcessException(); + return FPDefaultNaN(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 * op2; + } +} + +template +T Simulator::FPMulx(T op1, T op2) { + if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns +/-2.0. 
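// Minimal illustrative sketch (not part of the upstream change): the special
// cases FPAdd/FPMul/FPMulx single out. Plain C++ arithmetic already produces
// a NaN for inf + -inf and inf * 0.0; the helpers above additionally raise an
// FP exception and substitute the default NaN, while fmulx instead returns
// +/-2.0 with the sign of the product. MulxScalar is a hypothetical name.
#include <cmath>

double MulxScalar(double op1, double op2) {
  if ((std::isinf(op1) && op2 == 0.0) || (std::isinf(op2) && op1 == 0.0)) {
    return std::copysign(1.0, op1) * std::copysign(1.0, op2) * 2.0;
  }
  return op1 * op2;  // NaN propagation omitted in this sketch
}
// MulxScalar(INFINITY, 0.0) == 2.0 and MulxScalar(-INFINITY, 0.0) == -2.0.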
+ T two = 2.0; + return copysign(1.0, op1) * copysign(1.0, op2) * two; + } + return FPMul(op1, op2); +} + +template +T Simulator::FPMulAdd(T a, T op1, T op2) { + T result = FPProcessNaNs3(a, op1, op2); + + T sign_a = copysign(1.0, a); + T sign_prod = copysign(1.0, op1) * copysign(1.0, op2); + bool isinf_prod = std::isinf(op1) || std::isinf(op2); + bool operation_generates_nan = + (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0 + (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf + (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf + + if (std::isnan(result)) { + // Generated NaNs override quiet NaNs propagated from a. + if (operation_generates_nan && IsQuietNaN(a)) { + FPProcessException(); + return FPDefaultNaN(); + } else { + return result; + } + } + + // If the operation would produce a NaN, return the default NaN. + if (operation_generates_nan) { + FPProcessException(); + return FPDefaultNaN(); + } + + // Work around broken fma implementations for exact zero results: The sign of + // exact 0.0 results is positive unless both a and op1 * op2 are negative. + if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { + return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0; + } + + result = FusedMultiplyAdd(op1, op2, a); + DCHECK(!std::isnan(result)); + + // Work around broken fma implementations for rounded zero results: If a is + // 0.0, the sign of the result is the sign of op1 * op2 before rounding. + if ((a == 0.0) && (result == 0.0)) { + return copysign(0.0, sign_prod); + } + + return result; +} + +template +T Simulator::FPDiv(T op1, T op2) { + // NaNs should be handled elsewhere. + DCHECK(!std::isnan(op1) && !std::isnan(op2)); + + if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { + // inf / inf and 0.0 / 0.0 return the default NaN. + FPProcessException(); + return FPDefaultNaN(); + } else { + if (op2 == 0.0) { + FPProcessException(); + if (!std::isnan(op1)) { + double op1_sign = copysign(1.0, op1); + double op2_sign = copysign(1.0, op2); + return static_cast(op1_sign * op2_sign * kFP64PositiveInfinity); + } + } + + // Other cases should be handled by standard arithmetic. + return op1 / op2; + } +} + +template +T Simulator::FPSqrt(T op) { + if (std::isnan(op)) { + return FPProcessNaN(op); + } else if (op < 0.0) { + FPProcessException(); + return FPDefaultNaN(); + } else { + return sqrt(op); + } +} + +template +T Simulator::FPMax(T a, T b) { + T result = FPProcessNaNs(a, b); + if (std::isnan(result)) return result; + + if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return +0.0. + return 0.0; + } else { + return (a > b) ? a : b; + } +} + +template +T Simulator::FPMaxNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64NegativeInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64NegativeInfinity; + } + + T result = FPProcessNaNs(a, b); + return std::isnan(result) ? result : FPMax(a, b); +} + +template +T Simulator::FPMin(T a, T b) { + T result = FPProcessNaNs(a, b); + if (std::isnan(result)) return result; + + if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return -0.0. + return -0.0; + } else { + return (a < b) ? 
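// Minimal illustrative sketch (not part of the upstream change): the zero-sign
// rule that the FPMulAdd workarounds above enforce when the host fma() gets it
// wrong. An exactly zero a + op1 * op2 must be +0.0 unless both a and
// op1 * op2 are negative. FusedZeroSign is a hypothetical helper name.
#include <cmath>

double FusedZeroSign(double a, double op1, double op2) {
  double sign_a = std::copysign(1.0, a);
  double sign_prod = std::copysign(1.0, op1) * std::copysign(1.0, op2);
  if ((op1 == 0.0 || op2 == 0.0) && a == 0.0) {
    return (sign_a < 0 && sign_prod < 0) ? -0.0 : 0.0;
  }
  return std::fma(op1, op2, a);
}
// FusedZeroSign(-0.0, -1.0, 0.0) yields -0.0; FusedZeroSign(0.0, -1.0, 0.0)
// yields +0.0, matching the explicit checks in FPMulAdd.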
a : b; + } +} + +template +T Simulator::FPMinNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64PositiveInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64PositiveInfinity; + } + + T result = FPProcessNaNs(a, b); + return std::isnan(result) ? result : FPMin(a, b); +} + +template +T Simulator::FPRecipStepFused(T op1, T op2) { + const T two = 2.0; + if ((std::isinf(op1) && (op2 == 0.0)) || + ((op1 == 0.0) && (std::isinf(op2)))) { + return two; + } else if (std::isinf(op1) || std::isinf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + return FusedMultiplyAdd(op1, op2, two); + } +} + +template +T Simulator::FPRSqrtStepFused(T op1, T op2) { + const T one_point_five = 1.5; + const T two = 2.0; + + if ((std::isinf(op1) && (op2 == 0.0)) || + ((op1 == 0.0) && (std::isinf(op2)))) { + return one_point_five; + } else if (std::isinf(op1) || std::isinf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + // The multiply-add-halve operation must be fully fused, so avoid interim + // rounding by checking which operand can be losslessly divided by two + // before doing the multiply-add. + if (std::isnormal(op1 / two)) { + return FusedMultiplyAdd(op1 / two, op2, one_point_five); + } else if (std::isnormal(op2 / two)) { + return FusedMultiplyAdd(op1, op2 / two, one_point_five); + } else { + // Neither operand is normal after halving: the result is dominated by + // the addition term, so just return that. + return one_point_five; + } + } +} + +double Simulator::FPRoundInt(double value, FPRounding round_mode) { + if ((value == 0.0) || (value == kFP64PositiveInfinity) || + (value == kFP64NegativeInfinity)) { + return value; + } else if (std::isnan(value)) { + return FPProcessNaN(value); + } + + double int_result = std::floor(value); + double error = value - int_result; + switch (round_mode) { + case FPTieAway: { + // Take care of correctly handling the range ]-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 < value) && (value < 0.0)) { + int_result = -0.0; + + } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is positive, round up. + int_result++; + } + break; + } + case FPTieEven: { + // Take care of correctly handling the range [-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 <= value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is odd, round up. + } else if ((error > 0.5) || + ((error == 0.5) && (std::fmod(int_result, 2) != 0))) { + int_result++; + } + break; + } + case FPZero: { + // If value>0 then we take floor(value) + // otherwise, ceil(value). + if (value < 0) { + int_result = ceil(value); + } + break; + } + case FPNegativeInfinity: { + // We always use floor(value). + break; + } + case FPPositiveInfinity: { + // Take care of correctly handling the range ]-1.0, -0.0], which must + // yield -0.0. + if ((-1.0 < value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is non-zero, round up. 
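// Minimal illustrative sketch (not part of the upstream change): with the
// operand negation done by the frecps/frsqrts wrappers further below, the
// fused step functions compute FRECPS(a, b) = 2 - a * b (one Newton-Raphson
// step for 1/x) and FRSQRTS(a, b) = (3 - a * b) / 2 (one step for 1/sqrt(x)).
// Halving an operand before the fma, rather than halving the result, is what
// keeps the whole step a single correctly rounded fused operation.
#include <cmath>

double RecipStep(double a, double b) { return std::fma(-a, b, 2.0); }
double RsqrtStep(double a, double b) { return std::fma(-a / 2.0, b, 1.5); }

// One refinement of an estimate e:
//   e' = e * RecipStep(x, e)        for e ~ 1/x
//   e' = e * RsqrtStep(x, e * e)    for e ~ 1/sqrt(x)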
+ } else if (error > 0.0) { + int_result++; + } + break; + } + default: + UNIMPLEMENTED(); + } + return int_result; +} + +int32_t Simulator::FPToInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxInt) { + return kWMaxInt; + } else if (value < kWMinInt) { + return kWMinInt; + } + return std::isnan(value) ? 0 : static_cast(value); +} + +int64_t Simulator::FPToInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxInt) { + return kXMaxInt; + } else if (value < kXMinInt) { + return kXMinInt; + } + return std::isnan(value) ? 0 : static_cast(value); +} + +uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxUInt) { + return kWMaxUInt; + } else if (value < 0.0) { + return 0; + } + return std::isnan(value) ? 0 : static_cast(value); +} + +uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxUInt) { + return kXMaxUInt; + } else if (value < 0.0) { + return 0; + } + return std::isnan(value) ? 0 : static_cast(value); +} + +#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ + template \ + LogicVRegister Simulator::FN(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + dst.ClearForWrite(vform); \ + for (int i = 0; i < LaneCountFromFormat(vform); i++) { \ + T op1 = src1.Float(i); \ + T op2 = src2.Float(i); \ + T result; \ + if (PROCNAN) { \ + result = FPProcessNaNs(op1, op2); \ + if (!std::isnan(result)) { \ + result = OP(op1, op2); \ + } \ + } else { \ + result = OP(op1, op2); \ + } \ + dst.SetFloat(i, result); \ + } \ + return dst; \ + } \ + \ + LogicVRegister Simulator::FN(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { \ + FN(vform, dst, src1, src2); \ + } else { \ + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); \ + FN(vform, dst, src1, src2); \ + } \ + return dst; \ + } +NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP) +#undef DEFINE_NEON_FP_VECTOR_OP + +LogicVRegister Simulator::fnmul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = fmul(vform, temp, src1, src2); + return fneg(vform, dst, product); +} + +template +LogicVRegister Simulator::frecps(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, std::isnan(result) ? 
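// Minimal illustrative sketch (not part of the upstream change): the
// floor-plus-error scheme above applied to ties, combined with the saturating
// integer conversion of FPToInt32. For value = -2.5, floor gives -3 with
// error = 0.5, so FPTieAway keeps -3 (away from zero) while FPTieEven bumps
// the odd intermediate to -2 (nearest even). NaN converts to 0.
#include <cmath>
#include <cstdint>
#include <limits>

int32_t ToInt32TieEven(double value) {
  if (std::isnan(value)) return 0;
  double int_result = std::floor(value);
  double error = value - int_result;
  if (error > 0.5 || (error == 0.5 && std::fmod(int_result, 2) != 0)) {
    int_result++;
  }
  if (int_result >= std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();
  if (int_result < std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(int_result);
}
// ToInt32TieEven(2.5) == 2, ToInt32TieEven(-2.5) == -2, ToInt32TieEven(3.5) == 4.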
result : FPRecipStepFused(op1, op2)); + } + return dst; +} + +LogicVRegister Simulator::frecps(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + frecps(vform, dst, src1, src2); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + frecps(vform, dst, src1, src2); + } + return dst; +} + +template +LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, std::isnan(result) ? result : FPRSqrtStepFused(op1, op2)); + } + return dst; +} + +LogicVRegister Simulator::frsqrts(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + frsqrts(vform, dst, src1, src2); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + frsqrts(vform, dst, src1, src2); + } + return dst; +} + +template +LogicVRegister Simulator::fcmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + bool result = false; + T op1 = src1.Float(i); + T op2 = src2.Float(i); + T nan_result = FPProcessNaNs(op1, op2); + if (!std::isnan(nan_result)) { + switch (cond) { + case eq: + result = (op1 == op2); + break; + case ge: + result = (op1 >= op2); + break; + case gt: + result = (op1 > op2); + break; + case le: + result = (op1 <= op2); + break; + case lt: + result = (op1 < op2); + break; + default: + UNREACHABLE(); + } + } + dst.SetUint(vform, i, result ? 
MaxUintFromFormat(vform) : 0); + } + return dst; +} + +LogicVRegister Simulator::fcmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, Condition cond) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + fcmp(vform, dst, src1, src2, cond); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + fcmp(vform, dst, src1, src2, cond); + } + return dst; +} + +LogicVRegister Simulator::fcmp_zero(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, Condition cond) { + SimVRegister temp; + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + LogicVRegister zero_reg = + dup_immediate(vform, temp, bit_cast(0.0f)); + fcmp(vform, dst, src, zero_reg, cond); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + LogicVRegister zero_reg = + dup_immediate(vform, temp, bit_cast(0.0)); + fcmp(vform, dst, src, zero_reg, cond); + } + return dst; +} + +LogicVRegister Simulator::fabscmp(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, Condition cond) { + SimVRegister temp1, temp2; + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } + return dst; +} + +template +LogicVRegister Simulator::fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = src1.Float(i); + T op2 = src2.Float(i); + T acc = dst.Float(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + +LogicVRegister Simulator::fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + fmla(vform, dst, src1, src2); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + fmla(vform, dst, src1, src2); + } + return dst; +} + +template +LogicVRegister Simulator::fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T acc = dst.Float(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + +LogicVRegister Simulator::fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + fmls(vform, dst, src1, src2); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + fmls(vform, dst, src1, src2); + } + return dst; +} + +template +LogicVRegister Simulator::fneg(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + op = -op; + dst.SetFloat(i, op); + } + return dst; +} + +LogicVRegister Simulator::fneg(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + fneg(vform, dst, src); + } else { + 
DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + fneg(vform, dst, src); + } + return dst; +} + +template +LogicVRegister Simulator::fabs_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + if (copysign(1.0, op) < 0.0) { + op = -op; + } + dst.SetFloat(i, op); + } + return dst; +} + +LogicVRegister Simulator::fabs_(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + fabs_(vform, dst, src); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + fabs_(vform, dst, src); + } + return dst; +} + +LogicVRegister Simulator::fabd(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + fsub(vform, temp, src1, src2); + fabs_(vform, dst, temp); + return dst; +} + +LogicVRegister Simulator::fsqrt(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } + return dst; +} + +#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \ + LogicVRegister Simulator::FNP(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + SimVRegister temp1, temp2; \ + uzp1(vform, temp1, src1, src2); \ + uzp2(vform, temp2, src1, src2); \ + FN(vform, dst, temp1, temp2); \ + return dst; \ + } \ + \ + LogicVRegister Simulator::FNP(VectorFormat vform, LogicVRegister dst, \ + const LogicVRegister& src) { \ + if (vform == kFormatS) { \ + float result = OP(src.Float(0), src.Float(1)); \ + dst.SetFloat(0, result); \ + } else { \ + DCHECK_EQ(vform, kFormatD); \ + double result = OP(src.Float(0), src.Float(1)); \ + dst.SetFloat(0, result); \ + } \ + dst.ClearForWrite(vform); \ + return dst; \ + } +NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP) +#undef DEFINE_NEON_FP_PAIR_OP + +LogicVRegister Simulator::FMinMaxV(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, FPMinMaxOp Op) { + DCHECK_EQ(vform, kFormat4S); + USE(vform); + float result1 = (this->*Op)(src.Float(0), src.Float(1)); + float result2 = (this->*Op)(src.Float(2), src.Float(3)); + float result = (this->*Op)(result1, result2); + dst.ClearForWrite(kFormatS); + dst.SetFloat(0, result); + return dst; +} + +LogicVRegister Simulator::fmaxv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return FMinMaxV(vform, dst, src, &Simulator::FPMax); +} + +LogicVRegister Simulator::fminv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return FMinMaxV(vform, dst, src, &Simulator::FPMin); +} + +LogicVRegister Simulator::fmaxnmv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return FMinMaxV(vform, dst, src, &Simulator::FPMaxNM); +} + +LogicVRegister Simulator::fminnmv(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + return FMinMaxV(vform, dst, src, &Simulator::FPMinNM); +} + +LogicVRegister Simulator::fmul(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + 
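// Minimal illustrative sketch (not part of the upstream change): why the
// pairwise macro above de-interleaves with uzp1/uzp2 before reusing the plain
// element-wise op. Treating src1:src2 as one long vector, uzp1 collects the
// even-indexed lanes and uzp2 the odd-indexed lanes, so adding them lane by
// lane is exactly the pairwise result. Faddp4S is a hypothetical helper.
//   src1 = {a0, a1, a2, a3}, src2 = {b0, b1, b2, b3}
//   uzp1 = {a0, a2, b0, b2}, uzp2 = {a1, a3, b1, b3}
//   faddp = {a0+a1, a2+a3, b0+b1, b2+b3}
#include <array>

std::array<float, 4> Faddp4S(const std::array<float, 4>& a,
                             const std::array<float, 4>& b) {
  return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
}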
dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } + return dst; +} + +LogicVRegister Simulator::fmla(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } + return dst; +} + +LogicVRegister Simulator::fmls(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } + return dst; +} + +LogicVRegister Simulator::fmulx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + } + return dst; +} + +LogicVRegister Simulator::frint(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + bool inexact_exception) { + dst.ClearForWrite(vform); + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + float rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !std::isnan(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + double rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !std::isnan(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } + return dst; +} + +LogicVRegister Simulator::fcvts(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, int fbits) { + dst.ClearForWrite(vform); + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op = src.Float(i) * std::pow(2.0f, fbits); + dst.SetInt(vform, i, FPToInt32(op, rounding_mode)); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double op = src.Float(i) * std::pow(2.0, fbits); + 
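// Minimal illustrative sketch (not part of the upstream change): the
// fixed-point conversions scale by 2^fbits before rounding, so fbits selects
// how many fractional bits survive in the integer result. FcvtzsFixed is a
// hypothetical helper using round-toward-zero and no saturation.
#include <cmath>
#include <cstdint>

int32_t FcvtzsFixed(float value, int fbits) {
  double scaled = value * std::pow(2.0, fbits);
  return static_cast<int32_t>(std::trunc(scaled));
}
// FcvtzsFixed(1.25f, 8) == 320 (i.e. 320 / 2^8 == 1.25);
// FcvtzsFixed(-0.5f, 4) == -8.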
dst.SetInt(vform, i, FPToInt64(op, rounding_mode)); + } + } + return dst; +} + +LogicVRegister Simulator::fcvtu(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, int fbits) { + dst.ClearForWrite(vform); + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op = src.Float(i) * std::pow(2.0f, fbits); + dst.SetUint(vform, i, FPToUInt32(op, rounding_mode)); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double op = src.Float(i) * std::pow(2.0, fbits); + dst.SetUint(vform, i, FPToUInt64(op, rounding_mode)); + } + } + return dst; +} + +LogicVRegister Simulator::fcvtl(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) { + dst.SetFloat(i, FPToFloat(src.Float(i))); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) { + dst.SetFloat(i, FPToDouble(src.Float(i))); + } + } + return dst; +} + +LogicVRegister Simulator::fcvtl2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + int lane_count = LaneCountFromFormat(vform); + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = 0; i < lane_count; i++) { + dst.SetFloat(i, FPToFloat(src.Float(i + lane_count))); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = 0; i < lane_count; i++) { + dst.SetFloat(i, FPToDouble(src.Float(i + lane_count))); + } + } + return dst; +} + +LogicVRegister Simulator::fcvtn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBytesFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat16(src.Float(i), FPTieEven)); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat(src.Float(i), FPTieEven)); + } + } + return dst; +} + +LogicVRegister Simulator::fcvtn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + int lane_count = LaneCountFromFormat(vform) / 2; + if (LaneSizeInBytesFromFormat(vform) == kHRegSize) { + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, FPToFloat16(src.Float(i), FPTieEven)); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize); + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, FPToFloat(src.Float(i), FPTieEven)); + } + } + return dst; +} + +LogicVRegister Simulator::fcvtxn(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat(src.Float(i), FPRoundOdd)); + } + return dst; +} + +LogicVRegister Simulator::fcvtxn2(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kSRegSize); + int lane_count = LaneCountFromFormat(vform) / 2; + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, FPToFloat(src.Float(i), FPRoundOdd)); + } + return dst; +} + +// Based on reference C function recip_sqrt_estimate from ARM ARM. 
+double Simulator::recip_sqrt_estimate(double a) { + int q0, q1, s; + double r; + if (a < 0.5) { + q0 = static_cast(a * 512.0); + r = 1.0 / sqrt((static_cast(q0) + 0.5) / 512.0); + } else { + q1 = static_cast(a * 256.0); + r = 1.0 / sqrt((static_cast(q1) + 0.5) / 256.0); + } + s = static_cast(256.0 * r + 0.5); + return static_cast(s) / 256.0; +} + +namespace { + +inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) { + return unsigned_bitextract_64(start_bit, end_bit, val); +} + +} // anonymous namespace + +template +T Simulator::FPRecipSqrtEstimate(T op) { + static_assert(std::is_same::value || std::is_same::value, + "T must be a float or double"); + + if (std::isnan(op)) { + return FPProcessNaN(op); + } else if (op == 0.0) { + if (copysign(1.0, op) < 0.0) { + return kFP64NegativeInfinity; + } else { + return kFP64PositiveInfinity; + } + } else if (copysign(1.0, op) < 0.0) { + FPProcessException(); + return FPDefaultNaN(); + } else if (std::isinf(op)) { + return 0.0; + } else { + uint64_t fraction; + int32_t exp, result_exp; + + if (sizeof(T) == sizeof(float)) { + exp = static_cast(float_exp(op)); + fraction = float_mantissa(op); + fraction <<= 29; + } else { + exp = static_cast(double_exp(op)); + fraction = double_mantissa(op); + } + + if (exp == 0) { + while (Bits(fraction, 51, 51) == 0) { + fraction = Bits(fraction, 50, 0) << 1; + exp -= 1; + } + fraction = Bits(fraction, 50, 0) << 1; + } + + double scaled; + if (Bits(exp, 0, 0) == 0) { + scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44); + } else { + scaled = double_pack(0, 1021, Bits(fraction, 51, 44) << 44); + } + + if (sizeof(T) == sizeof(float)) { + result_exp = (380 - exp) / 2; + } else { + result_exp = (3068 - exp) / 2; + } + + uint64_t estimate = bit_cast(recip_sqrt_estimate(scaled)); + + if (sizeof(T) == sizeof(float)) { + uint32_t exp_bits = static_cast(Bits(result_exp, 7, 0)); + uint32_t est_bits = static_cast(Bits(estimate, 51, 29)); + return float_pack(0, exp_bits, est_bits); + } else { + return double_pack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0)); + } + } +} + +LogicVRegister Simulator::frsqrte(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + dst.SetFloat(i, FPRecipSqrtEstimate(input)); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + dst.SetFloat(i, FPRecipSqrtEstimate(input)); + } + } + return dst; +} + +template +T Simulator::FPRecipEstimate(T op, FPRounding rounding) { + static_assert(std::is_same::value || std::is_same::value, + "T must be a float or double"); + uint32_t sign; + + if (sizeof(T) == sizeof(float)) { + sign = float_sign(op); + } else { + sign = double_sign(op); + } + + if (std::isnan(op)) { + return FPProcessNaN(op); + } else if (std::isinf(op)) { + return (sign == 1) ? -0.0 : 0.0; + } else if (op == 0.0) { + FPProcessException(); // FPExc_DivideByZero exception. + return (sign == 1) ? 
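// Minimal illustrative sketch (not part of the upstream change): the ARM ARM
// reference estimate buckets its input (512 buckets below 0.5, 256 above),
// takes 1/sqrt of the bucket midpoint and rounds the result to a multiple of
// 1/256. That is why frsqrte delivers only about 8 bits of precision and is
// normally refined with frsqrts steps.
#include <cmath>

double RecipSqrtEstimateRef(double a) {  // assumes 0.25 <= a < 1.0
  double r;
  if (a < 0.5) {
    int q0 = static_cast<int>(a * 512.0);
    r = 1.0 / std::sqrt((q0 + 0.5) / 512.0);
  } else {
    int q1 = static_cast<int>(a * 256.0);
    r = 1.0 / std::sqrt((q1 + 0.5) / 256.0);
  }
  int s = static_cast<int>(256.0 * r + 0.5);
  return s / 256.0;
}
// RecipSqrtEstimateRef(0.5) == 361.0 / 256 (~1.4102 vs. the exact ~1.4142).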
kFP64NegativeInfinity : kFP64PositiveInfinity; + } else if (((sizeof(T) == sizeof(float)) && + (std::fabs(op) < std::pow(2.0, -128.0))) || + ((sizeof(T) == sizeof(double)) && + (std::fabs(op) < std::pow(2.0, -1024.0)))) { + bool overflow_to_inf = false; + switch (rounding) { + case FPTieEven: + overflow_to_inf = true; + break; + case FPPositiveInfinity: + overflow_to_inf = (sign == 0); + break; + case FPNegativeInfinity: + overflow_to_inf = (sign == 1); + break; + case FPZero: + overflow_to_inf = false; + break; + default: + break; + } + FPProcessException(); // FPExc_Overflow and FPExc_Inexact. + if (overflow_to_inf) { + return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity; + } else { + // Return FPMaxNormal(sign). + if (sizeof(T) == sizeof(float)) { + return float_pack(sign, 0xfe, 0x07fffff); + } else { + return double_pack(sign, 0x7fe, 0x0fffffffffffffl); + } + } + } else { + uint64_t fraction; + int32_t exp, result_exp; + uint32_t sign; + + if (sizeof(T) == sizeof(float)) { + sign = float_sign(op); + exp = static_cast(float_exp(op)); + fraction = float_mantissa(op); + fraction <<= 29; + } else { + sign = double_sign(op); + exp = static_cast(double_exp(op)); + fraction = double_mantissa(op); + } + + if (exp == 0) { + if (Bits(fraction, 51, 51) == 0) { + exp -= 1; + fraction = Bits(fraction, 49, 0) << 2; + } else { + fraction = Bits(fraction, 50, 0) << 1; + } + } + + double scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44); + + if (sizeof(T) == sizeof(float)) { + result_exp = 253 - exp; + } else { + result_exp = 2045 - exp; + } + + double estimate = recip_estimate(scaled); + + fraction = double_mantissa(estimate); + if (result_exp == 0) { + fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1); + } else if (result_exp == -1) { + fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2); + result_exp = 0; + } + if (sizeof(T) == sizeof(float)) { + uint32_t exp_bits = static_cast(Bits(result_exp, 7, 0)); + uint32_t frac_bits = static_cast(Bits(fraction, 51, 29)); + return float_pack(sign, exp_bits, frac_bits); + } else { + return double_pack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0)); + } + } +} + +LogicVRegister Simulator::frecpe(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, FPRounding round) { + dst.ClearForWrite(vform); + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } + return dst; +} + +LogicVRegister Simulator::ursqrte(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x3FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, -32); + dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast(dp_result); + } + dst.SetUint(vform, i, result); + } + return dst; +} + +// Based on reference C function recip_estimate from ARM ARM. 
+double Simulator::recip_estimate(double a) { + int q, s; + double r; + q = static_cast(a * 512.0); + r = 1.0 / ((static_cast(q) + 0.5) / 512.0); + s = static_cast(256.0 * r + 0.5); + return static_cast(s) / 256.0; +} + +LogicVRegister Simulator::urecpe(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x7FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, -32); + dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast(dp_result); + } + dst.SetUint(vform, i, result); + } + return dst; +} + +template +LogicVRegister Simulator::frecpx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + T result; + if (std::isnan(op)) { + result = FPProcessNaN(op); + } else { + int exp; + uint32_t sign; + if (sizeof(T) == sizeof(float)) { + sign = float_sign(op); + exp = static_cast(float_exp(op)); + exp = (exp == 0) ? (0xFF - 1) : static_cast(Bits(~exp, 7, 0)); + result = float_pack(sign, exp, 0); + } else { + sign = double_sign(op); + exp = static_cast(double_exp(op)); + exp = (exp == 0) ? (0x7FF - 1) : static_cast(Bits(~exp, 10, 0)); + result = double_pack(sign, exp, 0); + } + } + dst.SetFloat(i, result); + } + return dst; +} + +LogicVRegister Simulator::frecpx(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + frecpx(vform, dst, src); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + frecpx(vform, dst, src); + } + return dst; +} + +LogicVRegister Simulator::scvtf(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + float result = FixedToFloat(src.Int(kFormatS, i), fbits, round); + dst.SetFloat(i, result); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + double result = FixedToDouble(src.Int(kFormatD, i), fbits, round); + dst.SetFloat(i, result); + } + } + return dst; +} + +LogicVRegister Simulator::ucvtf(VectorFormat vform, LogicVRegister dst, + const LogicVRegister& src, int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBytesFromFormat(vform) == kSRegSize) { + float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round); + dst.SetFloat(i, result); + } else { + DCHECK_EQ(LaneSizeInBytesFromFormat(vform), kDRegSize); + double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round); + dst.SetFloat(i, result); + } + } + return dst; +} + +#endif // USE_SIMULATOR + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_ARM64 diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/arm64/utils-arm64.cc index 1cd97854178851..38ec8478fc8f67 100644 --- a/deps/v8/src/arm64/utils-arm64.cc +++ b/deps/v8/src/arm64/utils-arm64.cc @@ -12,23 +12,78 @@ namespace internal { #define __ assm-> +uint32_t float_sign(float val) { + uint32_t bits = bit_cast(val); + return unsigned_bitextract_32(31, 31, bits); +} + +uint32_t float_exp(float val) { + uint32_t bits = bit_cast(val); + return unsigned_bitextract_32(30, 23, bits); +} + +uint32_t float_mantissa(float 
val) { + uint32_t bits = bit_cast(val); + return unsigned_bitextract_32(22, 0, bits); +} + +uint32_t double_sign(double val) { + uint64_t bits = bit_cast(val); + return static_cast(unsigned_bitextract_64(63, 63, bits)); +} + +uint32_t double_exp(double val) { + uint64_t bits = bit_cast(val); + return static_cast(unsigned_bitextract_64(62, 52, bits)); +} + +uint64_t double_mantissa(double val) { + uint64_t bits = bit_cast(val); + return unsigned_bitextract_64(51, 0, bits); +} + +float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) { + uint32_t bits = sign << kFloatExponentBits | exp; + return bit_cast((bits << kFloatMantissaBits) | mantissa); +} + +double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) { + uint64_t bits = sign << kDoubleExponentBits | exp; + return bit_cast((bits << kDoubleMantissaBits) | mantissa); +} + +int float16classify(float16 value) { + const uint16_t exponent_max = (1 << kFloat16ExponentBits) - 1; + const uint16_t exponent_mask = exponent_max << kFloat16MantissaBits; + const uint16_t mantissa_mask = (1 << kFloat16MantissaBits) - 1; + + const uint16_t exponent = (value & exponent_mask) >> kFloat16MantissaBits; + const uint16_t mantissa = value & mantissa_mask; + if (exponent == 0) { + if (mantissa == 0) { + return FP_ZERO; + } + return FP_SUBNORMAL; + } else if (exponent == exponent_max) { + if (mantissa == 0) { + return FP_INFINITE; + } + return FP_NAN; + } + return FP_NORMAL; +} int CountLeadingZeros(uint64_t value, int width) { - // TODO(jbramley): Optimize this for ARM64 hosts. - DCHECK((width == 32) || (width == 64)); - int count = 0; - uint64_t bit_test = 1UL << (width - 1); - while ((count < width) && ((bit_test & value) == 0)) { - count++; - bit_test >>= 1; + DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64)); + if (value == 0) { + return width; } - return count; + return base::bits::CountLeadingZeros64(value << (64 - width)); } int CountLeadingSignBits(int64_t value, int width) { - // TODO(jbramley): Optimize this for ARM64 hosts. - DCHECK((width == 32) || (width == 64)); + DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64)); if (value >= 0) { return CountLeadingZeros(value, width) - 1; } else { @@ -38,43 +93,32 @@ int CountLeadingSignBits(int64_t value, int width) { int CountTrailingZeros(uint64_t value, int width) { - // TODO(jbramley): Optimize this for ARM64 hosts. DCHECK((width == 32) || (width == 64)); - int count = 0; - while ((count < width) && (((value >> count) & 1) == 0)) { - count++; + if (width == 64) { + return static_cast(base::bits::CountTrailingZeros64(value)); } - return count; + return static_cast(base::bits::CountTrailingZeros32( + static_cast(value & 0xfffffffff))); } int CountSetBits(uint64_t value, int width) { - // TODO(jbramley): Would it be useful to allow other widths? The - // implementation already supports them. DCHECK((width == 32) || (width == 64)); + if (width == 64) { + return static_cast(base::bits::CountPopulation64(value)); + } + return static_cast(base::bits::CountPopulation32( + static_cast(value & 0xfffffffff))); +} - // Mask out unused bits to ensure that they are not counted. - value &= (0xffffffffffffffffUL >> (64-width)); - - // Add up the set bits. - // The algorithm works by adding pairs of bit fields together iteratively, - // where the size of each bit field doubles each time. 
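// Minimal illustrative sketch (not part of the upstream change): the
// pack/unpack helpers in utils-arm64 just split an IEEE-754 value into its
// bit fields. For float, bit 31 is the sign, bits 30..23 the biased exponent
// and bits 22..0 the mantissa. FloatFields/UnpackFloat/PackFloat are
// hypothetical stand-ins for float_sign/float_exp/float_mantissa/float_pack.
#include <cstdint>
#include <cstring>

struct FloatFields { uint32_t sign, exp, mantissa; };

FloatFields UnpackFloat(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return {bits >> 31, (bits >> 23) & 0xff, bits & 0x7fffff};
}

float PackFloat(uint32_t sign, uint32_t exp, uint32_t mantissa) {
  uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
  float value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}
// PackFloat(0, 127, 0) == 1.0f; UnpackFloat(-2.0f) is {1, 128, 0}.
// float16classify applies the same idea to the 5-bit exponent / 10-bit
// mantissa layout of a half-precision value.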
- // An example for an 8-bit value: - // Bits: h g f e d c b a - // \ | \ | \ | \ | - // value = h+g f+e d+c b+a - // \ | \ | - // value = h+g+f+e d+c+b+a - // \ | - // value = h+g+f+e+d+c+b+a - value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555); - value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333); - value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f); - value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff); - value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff); - value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff); +int LowestSetBitPosition(uint64_t value) { + DCHECK_NE(value, 0U); + return CountTrailingZeros(value, 64) + 1; +} - return static_cast(value); +int HighestSetBitPosition(uint64_t value) { + DCHECK_NE(value, 0U); + return 63 - CountLeadingZeros(value, 64); } @@ -84,7 +128,7 @@ uint64_t LargestPowerOf2Divisor(uint64_t value) { int MaskToBit(uint64_t mask) { - DCHECK(CountSetBits(mask, 64) == 1); + DCHECK_EQ(CountSetBits(mask, 64), 1); return CountTrailingZeros(mask, 64); } diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h index 35d982483750d6..920a84dbdfd24a 100644 --- a/deps/v8/src/arm64/utils-arm64.h +++ b/deps/v8/src/arm64/utils-arm64.h @@ -8,6 +8,7 @@ #include #include "src/arm64/constants-arm64.h" +#include "src/utils.h" namespace v8 { namespace internal { @@ -16,40 +17,26 @@ namespace internal { STATIC_ASSERT((static_cast(-1) >> 1) == -1); STATIC_ASSERT((static_cast(-1) >> 1) == 0x7FFFFFFF); -// Floating point representation. -static inline uint32_t float_to_rawbits(float value) { - uint32_t bits = 0; - memcpy(&bits, &value, 4); - return bits; -} - - -static inline uint64_t double_to_rawbits(double value) { - uint64_t bits = 0; - memcpy(&bits, &value, 8); - return bits; -} - - -static inline float rawbits_to_float(uint32_t bits) { - float value = 0.0; - memcpy(&value, &bits, 4); - return value; -} +uint32_t float_sign(float val); +uint32_t float_exp(float val); +uint32_t float_mantissa(float val); +uint32_t double_sign(double val); +uint32_t double_exp(double val); +uint64_t double_mantissa(double val); +float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa); +double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa); -static inline double rawbits_to_double(uint64_t bits) { - double value = 0.0; - memcpy(&value, &bits, 8); - return value; -} - +// An fpclassify() function for 16-bit half-precision floats. +int float16classify(float16 value); // Bit counting. int CountLeadingZeros(uint64_t value, int width); int CountLeadingSignBits(int64_t value, int width); int CountTrailingZeros(uint64_t value, int width); int CountSetBits(uint64_t value, int width); +int LowestSetBitPosition(uint64_t value); +int HighestSetBitPosition(uint64_t value); uint64_t LargestPowerOf2Divisor(uint64_t value); int MaskToBit(uint64_t mask); @@ -86,7 +73,7 @@ T ReverseBytes(T value, int block_bytes_log2) { // NaN tests. 
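// Minimal illustrative sketch (not part of the upstream change): the rewritten
// CountLeadingZeros handles any power-of-two width with one 64-bit count.
// Shifting the value up by (64 - width) aligns its most significant bit with
// bit 63, so the 64-bit count equals the width-bit count; zero is
// special-cased because the shift does not help there. __builtin_clzll is
// used here as a stand-in for base::bits::CountLeadingZeros64.
#include <cstdint>

int Clz(uint64_t value, int width) {  // width: power of two, <= 64
  if (value == 0) return width;
  return __builtin_clzll(value << (64 - width));
}
// Clz(1, 32) == 31, Clz(1, 64) == 63, Clz(0x80000000u, 32) == 0, and
// HighestSetBitPosition(v) above is then just 63 - Clz(v, 64).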
inline bool IsSignallingNaN(double num) { - uint64_t raw = double_to_rawbits(num); + uint64_t raw = bit_cast(num); if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) { return true; } @@ -95,13 +82,17 @@ inline bool IsSignallingNaN(double num) { inline bool IsSignallingNaN(float num) { - uint32_t raw = float_to_rawbits(num); + uint32_t raw = bit_cast(num); if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) { return true; } return false; } +inline bool IsSignallingNaN(float16 num) { + const uint16_t kFP16QuietNaNMask = 0x0200; + return (float16classify(num) == FP_NAN) && ((num & kFP16QuietNaNMask) == 0); +} template inline bool IsQuietNaN(T num) { @@ -112,13 +103,14 @@ inline bool IsQuietNaN(T num) { // Convert the NaN in 'num' to a quiet NaN. inline double ToQuietNaN(double num) { DCHECK(std::isnan(num)); - return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask); + return bit_cast(bit_cast(num) | kDQuietNanMask); } inline float ToQuietNaN(float num) { DCHECK(std::isnan(num)); - return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask); + return bit_cast(bit_cast(num) | + static_cast(kSQuietNanMask)); } diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS index 4f54661aeb02b1..e40f5b57f35f8b 100644 --- a/deps/v8/src/asmjs/OWNERS +++ b/deps/v8/src/asmjs/OWNERS @@ -6,3 +6,5 @@ clemensh@chromium.org mtrofin@chromium.org rossberg@chromium.org titzer@chromium.org + +# COMPONENT: Blink>JavaScript>WebAssembly diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index 516bce25433175..fb257e316ea5b3 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -4,8 +4,6 @@ #include "src/asmjs/asm-js.h" -#include "src/api-natives.h" -#include "src/api.h" #include "src/asmjs/asm-names.h" #include "src/asmjs/asm-parser.h" #include "src/assert-scope.h" @@ -17,7 +15,8 @@ #include "src/handles.h" #include "src/isolate.h" #include "src/objects-inl.h" -#include "src/objects.h" +#include "src/parsing/scanner-character-streams.h" +#include "src/parsing/scanner.h" #include "src/wasm/module-decoder.h" #include "src/wasm/wasm-js.h" @@ -54,12 +53,12 @@ bool IsStdlibMemberValid(Isolate* isolate, Handle stdlib, bool* is_typed_array) { switch (member) { case wasm::AsmJsParser::StandardMember::kInfinity: { - Handle name = isolate->factory()->infinity_string(); + Handle name = isolate->factory()->Infinity_string(); Handle value = JSReceiver::GetDataProperty(stdlib, name); return value->IsNumber() && std::isinf(value->Number()); } case wasm::AsmJsParser::StandardMember::kNaN: { - Handle name = isolate->factory()->nan_string(); + Handle name = isolate->factory()->NaN_string(); Handle value = JSReceiver::GetDataProperty(stdlib, name); return value->IsNaN(); } @@ -105,7 +104,6 @@ bool IsStdlibMemberValid(Isolate* isolate, Handle stdlib, #undef STDLIB_ARRAY_TYPE } UNREACHABLE(); - return false; } void Report(Handle
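// Minimal illustrative sketch (not part of the upstream change): the quiet/
// signalling distinction used by IsSignallingNaN and ToQuietNaN is a single
// bit, the most significant mantissa bit (1 << 51 for double, 1 << 22 for
// float, 0x0200 for float16). A NaN with that bit clear is signalling;
// ToQuietNaN simply ORs the bit in. Hypothetical float helpers shown below.
#include <cmath>
#include <cstdint>
#include <cstring>

bool IsSignallingNaNF32(float num) {
  uint32_t raw;
  std::memcpy(&raw, &num, sizeof(raw));
  return std::isnan(num) && (raw & (uint32_t{1} << 22)) == 0;
}

float ToQuietNaNF32(float num) {
  uint32_t raw;
  std::memcpy(&raw, &num, sizeof(raw));
  raw |= uint32_t{1} << 22;
  float result;
  std::memcpy(&result, &raw, sizeof(result));
  return result;
}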