diff --git a/.gitmodules b/.gitmodules
index 5fb940856d..ebb566b165 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,8 @@
 [submodule "bridge/third_party/quickjs/vendor/mimalloc"]
   path = bridge/third_party/quickjs/vendor/mimalloc
   url = https://github.com/microsoft/mimalloc
-  tag = v1.7.9
\ No newline at end of file
+  tag = v1.7.9
+[submodule "bridge/third_party/v8"]
+  path = bridge/third_party/v8
+  url = https://github.com/openwebf/v8-release.git
+  branch = feat/macos_x86_64
diff --git a/bridge/CMakeLists.txt b/bridge/CMakeLists.txt
index 85d9171f2f..43cd1af013 100644
--- a/bridge/CMakeLists.txt
+++ b/bridge/CMakeLists.txt
@@ -1,13 +1,13 @@
 cmake_minimum_required(VERSION 3.10.0)
-set(CMAKE_OSX_DEPLOYMENT_TARGET 10.11)
+set(CMAKE_OSX_DEPLOYMENT_TARGET 13.3)
 project(WebF)
-set(CMAKE_OSX_DEPLOYMENT_TARGET 10.11)
+set(CMAKE_OSX_DEPLOYMENT_TARGET 13.3)
 # MSBuild is slower than Clang/GCC to adopt new C++ standards.
 if(MSVC)
   set(CMAKE_CXX_STANDARD 20)
 else()
-  set(CMAKE_CXX_STANDARD 17)
+  set(CMAKE_CXX_STANDARD 20)
 endif()
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
@@ -18,7 +18,13 @@ if(MSVC)
 endif()

 if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
-  set(CMAKE_OSX_ARCHITECTURES "x86_64;arm64")
+  if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+    set(CMAKE_OSX_ARCHITECTURES "x86_64")
+  elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+    set(CMAKE_OSX_ARCHITECTURES "arm64")
+  else()
+    message(FATAL_ERROR "Unsupported architecture: ${CMAKE_SYSTEM_PROCESSOR}")
+  endif()
 endif()

 if(WIN32)
@@ -59,18 +65,6 @@ list(APPEND WEBF_PUBLIC_HEADERS
   ${CMAKE_CURRENT_SOURCE_DIR}/include/webf_bridge.h
 )

-set(QUICKJS_PUBLIC_HEADERS
-  third_party/quickjs/cutils.h
-  third_party/quickjs/libregexp.h
-  third_party/quickjs/libregexp-opcode.h
-  third_party/quickjs/libunicode.h
-  third_party/quickjs/libunicode-table.h
-  third_party/quickjs/list.h
-  third_party/quickjs/quickjs.h
-  third_party/quickjs/quickjs-atom.h
-  third_party/quickjs/quickjs-opcode.h
-)
-
 if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
   add_compile_options(-fPIC)
 endif()
@@ -86,7 +80,7 @@ if (${CMAKE_BUILD_TYPE} STREQUAL "Debug")
     set(CMAKE_C_FLAGS_RELEASE "/O1")
   else()
     # Avoid quickjs stack overflow.
-    add_compile_options(-O1)
+#    add_compile_options(-O1)
   endif()
 endif()

@@ -99,19 +93,19 @@ endif()
 list(APPEND BRIDGE_SOURCE
   foundation/logging.cc
   foundation/native_string.cc
-  foundation/ui_task_queue.cc
+#  foundation/ui_task_queue.cc
   foundation/shared_ui_command.cc
-  foundation/inspector_task_queue.cc
-  foundation/task_queue.cc
+#  foundation/inspector_task_queue.cc
+#  foundation/task_queue.cc
   foundation/string_view.cc
-  foundation/native_value.cc
-  foundation/native_type.cc
-  foundation/stop_watch.cc
-  foundation/profiler.cc
+#  foundation/native_value.cc
+#  foundation/native_type.cc
+#  foundation/stop_watch.cc
+#  foundation/profiler.cc
   foundation/dart_readable.cc
   foundation/ui_command_buffer.cc
   foundation/ui_command_strategy.cc
-  polyfill/dist/polyfill.cc
+#  polyfill/dist/polyfill.cc
   multiple_threading/dispatcher.cc
   multiple_threading/looper.cc
   ${CMAKE_CURRENT_LIST_DIR}/third_party/dart/include/dart_api_dl.c
@@ -143,6 +137,18 @@ list(APPEND BRIDGE_INCLUDE
 )

 if ($ENV{WEBF_JS_ENGINE} MATCHES "quickjs")
+  set(QUICKJS_PUBLIC_HEADERS
+    third_party/quickjs/cutils.h
+    third_party/quickjs/libregexp.h
+    third_party/quickjs/libregexp-opcode.h
+    third_party/quickjs/libunicode.h
+    third_party/quickjs/libunicode-table.h
+    third_party/quickjs/list.h
+    third_party/quickjs/quickjs.h
+    third_party/quickjs/quickjs-atom.h
+    third_party/quickjs/quickjs-opcode.h
+  )
+
   add_compile_options(-DWEBF_QUICK_JS_ENGINE=1)

   execute_process(
@@ -238,7 +244,6 @@ if ($ENV{WEBF_JS_ENGINE} MATCHES "quickjs")
   list(APPEND BRIDGE_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
   list(APPEND BRIDGE_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/modp_b64/include)
   list(APPEND BRIDGE_LINK_LIBS quickjs)
-  list(APPEND BRIDGE_LINK_LIBS modb)

   list(APPEND BRIDGE_SOURCE
     # Binding files
@@ -554,7 +559,7 @@ if ($ENV{WEBF_JS_ENGINE} MATCHES "quickjs")
     out/qjs_svg_ellipse_element.cc
     out/qjs_svg_style_element.cc
     out/qjs_svg_line_element.cc
-    )
+  )

   if (NOT MSVC)
@@ -564,7 +569,265 @@ if ($ENV{WEBF_JS_ENGINE} MATCHES "quickjs")
   endif()

   target_compile_options(quickjs PUBLIC -DCONFIG_VERSION=${\"QUICKJS_VERSION\"})
-endif ()
+elseif ($ENV{WEBF_JS_ENGINE} MATCHES "v8")
+
+  add_compile_options(-DWEBF_V8_JS_ENGINE=1)
+
+  list(APPEND BRIDGE_SOURCE
+    # Binding files
+    bindings/v8/atomic_string.cc
+    bindings/v8/native_string_utils.cc
+    bindings/v8/script_value.cc
+    bindings/v8/wrapper_type_info.cc
+    bindings/v8/v8_interface_bridge_base.cc
+    bindings/v8/v8_initializer.cc
+    bindings/v8/v8_member_installer.cc
+    bindings/v8/v8_binding_initializer.cc
+    bindings/v8/generated_code_helper.h
+    bindings/v8/exception_state.cc
+    bindings/v8/v8_throw_exception.cc
+    bindings/v8/platform/scoped_persistent.h
+    bindings/v8/platform/v8_per_context_data.cc
+    bindings/v8/platform/v8_per_isolate_data.cc
+    bindings/v8/platform/heap/garbage_collected.h
+    bindings/v8/platform/heap/member.h
+    bindings/v8/platform/heap/self_keep_alive.h
+    bindings/v8/platform/heap/persistent.h
+    bindings/v8/platform/heap/thread_state_storage.cc
+    bindings/v8/platform/heap/write_barrier.h
+    bindings/v8/platform/heap/custom_spaces.cc
+    bindings/v8/platform/util/gc_plugin.h
+    bindings/v8/platform/util/main_thread_util.cc
+    bindings/v8/union_base.cc
+    bindings/v8/name_client.cc
+    bindings/v8/dictionary_base.cc
+    bindings/v8/trace_wrapper_v8_reference.cc
+    bindings/v8/for_build/build_config.h
+    bindings/v8/for_build/buildflag.h
+    bindings/v8/platform/platform_export.h
+    bindings/v8/platform/wtf/dtoa.cc
+    bindings/v8/platform/wtf/stack_util.cc
+    bindings/v8/platform/wtf/atomic_operations.cc
+    bindings/v8/platform/wtf/type_traits.cc
+    bindings/v8/platform/wtf/vector_traits.cc
+    bindings/v8/platform/wtf/hash_traits.h
+    bindings/v8/platform/wtf/hash_functions.h
+    bindings/v8/platform/wtf/hash_table_deleted_value_type.h
+    bindings/v8/platform/wtf/webf_size_t.h
+    bindings/v8/platform/wtf/conditional_destructor.h
+    bindings/v8/platform/wtf/construct_traits.h
+    bindings/v8/platform/wtf/sanitizers.h
+    bindings/v8/base/apple/scoped_cftyperef.h
+    bindings/v8/base/apple/scoped_typeref.h
+    bindings/v8/base/memory/scoped_policy.h
+    bindings/v8/base/threading/platform_thread.cc
+    bindings/v8/base/threading/thread_local_storage.cc
+    bindings/v8/base/containers/checked_iterators.h
+    bindings/v8/base/containers/dynamic_extent.h
+    bindings/v8/base/containers/flat_map.h
+    bindings/v8/base/containers/flat_set.h
+    bindings/v8/base/containers/flat_tree.h
+    bindings/v8/base/containers/util.h
+    bindings/v8/base/numerics/clamped_math.h
+    bindings/v8/base/numerics/clamped_math_impl.h
+    bindings/v8/base/ranges/algorithm.h
+    bindings/v8/base/ranges/functional.h
+    bindings/v8/base/ranges/ranges.h
+    bindings/v8/base/time/time.cc
+    bindings/v8/base/types/to_address.h
+    bindings/v8/base/types/supports_ostream_operator.h
+    bindings/v8/base/base_export.h
+    bindings/v8/base/notreached.h
+    bindings/v8/base/logging_buildflags.h
+    bindings/v8/base/is_empty.h
+    bindings/v8/base/check.h
+    bindings/v8/base/check_op.cc
+    bindings/v8/base/dcheck_is_on.h
+    bindings/v8/base/template_util.h
+    bindings/v8/base/thread_annotations.h
+    bindings/v8/base/location.cc
+    bindings/v8/base/not_fatal_until.h
+    bindings/v8/base/numerics/checked_math.h
+    bindings/v8/base/numerics/safe_conversions.h
+    bindings/v8/base/numerics/safe_conversions_impl.h
+    bindings/v8/base/numerics/safe_conversions_arm_impl.h
+    bindings/v8/base/numerics/safe_math_shared_impl.h
+    bindings/v8/base/numerics/safe_math_clang_gcc_impl.h
+    bindings/v8/base/memory/raw_ptr.h
+    bindings/v8/base/memory/scoped_refptr.h
+    bindings/v8/base/memory/stack_allocated.h
+    bindings/v8/base/compiler_specific.h
+    bindings/v8/base/bit_cast.h
+    bindings/v8/base/bits.h
+    bindings/v8/gin/public/context_holder.h
+    bindings/v8/gin/public/isolate_holder.h
+    bindings/v8/gin/public/gin_embedders.h
+    bindings/v8/gin/public/v8_idle_task_runner.h
+    bindings/v8/gin/public/wrapper_info.h
+    bindings/v8/gin/gin_export.h
+    bindings/v8/gin/per_context_data.cc
+    bindings/v8/gin/context_holder.cc
+  )
+
+  # Gen sources.
+  list(APPEND BRIDGE_SOURCE
+    out/built_in_string.cc
+    out/v8_window_or_worker_global_scope.cc
+  )
+  if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+    add_library(v8 SHARED IMPORTED)
+    set_target_properties(v8 PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/third_party/v8/lib/macos/${CMAKE_OSX_ARCHITECTURES}/libv8.dylib")
+    add_library(v8_platform SHARED IMPORTED)
+    set_target_properties(v8_platform PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/third_party/v8/lib/macos/${CMAKE_OSX_ARCHITECTURES}/libv8_libplatform.dylib")
+    list(APPEND BRIDGE_LINK_LIBS v8 v8_platform)
+  endif()
+  list(APPEND BRIDGE_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/v8/include/macos/${CMAKE_OSX_ARCHITECTURES})
+  list(APPEND BRIDGE_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/v8/include/macos/${CMAKE_OSX_ARCHITECTURES}/v8)
+  list(APPEND BRIDGE_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/v8/include/macos/${CMAKE_OSX_ARCHITECTURES}/v8/cppgc)
+  list(APPEND BRIDGE_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/third_party/v8/include/macos/${CMAKE_OSX_ARCHITECTURES}/v8/libplatform)
+endif()
+
+#list(APPEND BRIDGE_LINK_LIBS modb)
+
+list(APPEND BRIDGE_SOURCE
+  # Core sources
+  webf_bridge.cc
+  core/api/api.cc
+  core/executing_context.cc
+#  core/script_forbidden_scope.cc
+  core/script_state.cc
+  core/page.cc
+  core/dart_methods.cc
+  core/dart_isolate_context.cc
+#  core/dart_context_data.cc
+#  core/executing_context_data.cc
+#  core/fileapi/blob.cc
+#  core/fileapi/blob_part.cc
+#  core/fileapi/blob_property_bag.cc
+#  core/frame/console.cc
+  core/frame/dom_timer.cc
+  core/frame/dom_timer_coordinator.cc
+  core/frame/window_or_worker_global_scope.cc
+#  core/frame/module_listener.cc
+#  core/frame/module_listener_container.cc
+#  core/frame/module_manager.cc
+#  core/frame/module_callback.cc
+#  core/frame/module_context_coordinator.cc
+#  core/frame/window.cc
+#  core/frame/screen.cc
+#  core/frame/legacy/location.cc
+#  core/timing/performance.cc
+#  core/timing/performance_mark.cc
+#  core/timing/performance_entry.cc
+#  core/timing/performance_measure.cc
+#  core/css/css_style_declaration.cc
+#  core/css/inline_css_style_declaration.cc
+#  core/css/computed_css_style_declaration.cc
+#  core/dom/frame_request_callback_collection.cc
+#  core/dom/events/registered_eventListener.cc
+#  core/dom/events/event_listener_map.cc
+#  core/dom/events/event.cc
+#  core/dom/events/custom_event.cc
+#  core/dom/events/event_target.cc
+#  core/dom/events/event_listener_map.cc
+#  core/dom/events/event_target_impl.cc
+#  core/binding_object.cc
+#  core/dom/node.cc
+#  core/dom/node_list.cc
+#  core/dom/static_node_list.cc
+#  core/dom/node_traversal.cc
+#  core/dom/live_node_list_base.cc
+#  core/dom/character_data.cc
+#  core/dom/comment.cc
+#  core/dom/text.cc
+#  core/dom/tree_scope.cc
+#  core/dom/element.cc
+#  core/dom/parent_node.cc
+#  core/dom/element_data.cc
+#  core/dom/document.cc
+#  core/dom/dom_token_list.cc
+#  core/dom/dom_string_map.cc
+#  core/dom/space_split_string.cc
+#  core/dom/scripted_animation_controller.cc
+#  core/dom/node_data.cc
+#  core/dom/document_fragment.cc
+#  core/dom/child_node_list.cc
+#  core/dom/empty_node_list.cc
+#  core/dom/mutation_observer.cc
+#  core/dom/mutation_observer_registration.cc
+#  core/dom/mutation_observer_interest_group.cc
+#  core/dom/mutation_record.cc
+#  core/dom/child_list_mutation_scope.cc
+#  core/dom/container_node.cc
+#  core/html/custom/widget_element.cc
+#  core/events/error_event.cc
+#  core/events/message_event.cc
+#  core/events/animation_event.cc
+#  core/events/close_event.cc
+#  core/events/ui_event.cc
+#  core/events/focus_event.cc
+#  core/events/gesture_event.cc
+#  core/events/input_event.cc
+#  core/events/touch_event.cc
+#  core/events/mouse_event.cc
+#  core/events/pop_state_event.cc
+#  core/events/pointer_event.cc
+#  core/events/transition_event.cc
+#  core/events/intersection_change_event.cc
+#  core/events/keyboard_event.cc
+#  core/events/promise_rejection_event.cc
+#  core/html/parser/html_parser.cc
+#  core/html/html_element.cc
+#  core/html/html_div_element.cc
+#  core/html/html_head_element.cc
+#  core/html/html_body_element.cc
+#  core/html/html_html_element.cc
+#  core/html/html_template_element.cc
+#  core/html/html_all_collection.cc
+#  core/html/html_anchor_element.cc
+#  core/html/html_image_element.cc
+#  core/html/html_script_element.cc
+#  core/html/html_iframe_element.cc
+#  core/html/html_link_element.cc
+#  core/html/html_unknown_element.cc
+#  core/html/image.cc
+#  core/html/html_collection.cc
+#  core/html/canvas/html_canvas_element.cc
+#  core/html/canvas/canvas_rendering_context.cc
+#  core/html/canvas/canvas_rendering_context_2d.cc
+#  core/html/canvas/canvas_gradient.cc
+#  core/html/canvas/canvas_pattern.cc
+#  core/geometry/dom_matrix.cc
+#  core/geometry/dom_matrix_readonly.cc
+#  core/html/forms/html_button_element.cc
+#  core/html/forms/html_input_element.cc
+#  core/html/forms/html_form_element.cc
+#  core/html/forms/html_textarea_element.cc
+
+  # SVG files
+#  core/svg/svg_element.cc
+#  core/svg/svg_graphics_element.cc
+#  core/svg/svg_geometry_element.cc
+#  core/svg/svg_text_content_element.cc
+#  core/svg/svg_text_positioning_element.cc
+#
+#  core/svg/svg_svg_element.cc
+#  core/svg/svg_path_element.cc
+#  core/svg/svg_rect_element.cc
+#  core/svg/svg_text_element.cc
+#  core/svg/svg_g_element.cc
+#  core/svg/svg_circle_element.cc
+#  core/svg/svg_ellipse_element.cc
+#  core/svg/svg_style_element.cc
+#  core/svg/svg_line_element.cc
+
+  # Legacy implementations; should remove them in the future.
+#  core/dom/legacy/element_attributes.cc
+#  core/dom/legacy/bounding_client_rect.cc
+#  core/input/touch.cc
+#  core/input/touch_list.cc
+)

 list(APPEND PUBLIC_HEADER
   include/webf_bridge.h
@@ -593,6 +856,10 @@ if (${IS_ANDROID})
   list(APPEND BRIDGE_LINK_LIBS ${log-lib})
 elseif(${IS_IOS})
   add_definitions(-DIS_IOS=1)
+elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+  if(CMAKE_OSX_ARCHITECTURES MATCHES "x86_64")
+    target_compile_definitions(webf PRIVATE V8_COMPRESS_POINTERS=1 V8_ENABLE_SANDBOX=1)
+  endif()
 endif()
diff --git a/bridge/bindings/qjs/native_string_utils.h b/bridge/bindings/qjs/native_string_utils.h
index b9e83a1ff7..b657909532 100644
--- a/bridge/bindings/qjs/native_string_utils.h
+++ b/bridge/bindings/qjs/native_string_utils.h
@@ -6,7 +6,9 @@
 #ifndef BRIDGE_NATIVE_STRING_UTILS_H
 #define BRIDGE_NATIVE_STRING_UTILS_H

+#if WEBF_QUICK_JS_ENGINE
 #include <quickjs/quickjs.h>
+#endif
 #include <memory>
 #include <string>
 #include <vector>
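The two CMake branches above define WEBF_QUICK_JS_ENGINE=1 or WEBF_V8_JS_ENGINE=1 depending on $ENV{WEBF_JS_ENGINE}, and the guard added to native_string_utils.h keys off the same macro. A minimal sketch of how engine-specific code can be selected on top of these definitions (illustrative only, not part of the patch):

    #if WEBF_QUICK_JS_ENGINE
    #include <quickjs/quickjs.h>  // QuickJS build: JSValue-based bindings.
    #elif WEBF_V8_JS_ENGINE
    #include <v8.h>               // V8 build: v8::Local-based bindings.
    #endif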
diff --git a/bridge/bindings/v8/atomic_string.cc b/bridge/bindings/v8/atomic_string.cc
new file mode 100644
index 0000000000..61a1f248de
--- /dev/null
+++ b/bridge/bindings/v8/atomic_string.cc
@@ -0,0 +1,317 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#include "atomic_string.h"
+#include <algorithm>
+#include <cctype>
+#include <cstring>
+#include "built_in_string.h"
+#include "foundation/native_string.h"
+#include "native_string_utils.h"
+
+namespace webf {
+
+AtomicString AtomicString::Empty() {
+  return built_in_string::kempty_string;
+}
+
+AtomicString AtomicString::Null() {
+  return built_in_string::kNULL;
+}
+
+namespace {
+
+AtomicString::StringKind GetStringKind(const std::string& string, size_t length) {
+  char first_char = string[0];
+
+  if (first_char < 0 || first_char > 255) {
+    return AtomicString::StringKind::kUnknown;
+  }
+
+  AtomicString::StringKind predictKind =
+      std::islower(string[0]) ? AtomicString::StringKind::kIsLowerCase : AtomicString::StringKind::kIsUpperCase;
+  for (size_t i = 0; i < length; i++) {
+    char c = string[i];
+
+    if (c < 0 || c > 255) {
+      return AtomicString::StringKind::kUnknown;
+    }
+
+    if (predictKind == AtomicString::StringKind::kIsUpperCase && !std::isupper(c)) {
+      return AtomicString::StringKind::kIsMixed;
+    } else if (predictKind == AtomicString::StringKind::kIsLowerCase && !std::islower(c)) {
+      return AtomicString::StringKind::kIsMixed;
+    }
+  }
+  return predictKind;
+}
+
+AtomicString::StringKind GetStringKind(const SharedNativeString* native_string) {
+  if (!native_string->length()) {
+    return AtomicString::StringKind::kIsMixed;
+  }
+
+  AtomicString::StringKind predictKind = std::islower(native_string->string()[0])
+                                             ? AtomicString::StringKind::kIsLowerCase
+                                             : AtomicString::StringKind::kIsUpperCase;
+  for (size_t i = 0; i < native_string->length(); i++) {
+    uint16_t c = native_string->string()[i];
+    if (predictKind == AtomicString::StringKind::kIsUpperCase && !std::isupper(c)) {
+      return AtomicString::StringKind::kIsMixed;
+    } else if (predictKind == AtomicString::StringKind::kIsLowerCase && !std::islower(c)) {
+      return AtomicString::StringKind::kIsMixed;
+    }
+  }
+
+  return predictKind;
+}
+
+}  // namespace
+
+class AtomicStringOneByteResource : public v8::String::ExternalOneByteStringResource {
+ public:
+  AtomicStringOneByteResource(const std::string& string) : string_(string) {}
+
+  const char* data() const override { return string_.data(); }
+  size_t length() const override { return string_.length(); }
+
+ private:
+  std::string string_;
+};
+
+class AtomicStringTwoByteResource : public v8::String::ExternalStringResource {
+ public:
+  AtomicStringTwoByteResource(std::unique_ptr<AutoFreeNativeString>&& native_string)
+      : string_(std::move(native_string)) {}
+
+  const uint16_t* data() const override { return string_->string(); }
+  size_t length() const override { return string_->length(); }
+
+ private:
+  std::unique_ptr<AutoFreeNativeString> string_;
+};
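+// A minimal usage sketch (assuming the standard v8::String external-string
+// API): the resource is handed to V8, which keeps it alive for the lifetime
+// of the string and deletes it when the string is collected, so the bytes
+// are shared rather than copied:
+//
+//   auto* res = new AtomicStringOneByteResource(std::string("div"));
+//   v8::Local<v8::String> s =
+//       v8::String::NewExternalOneByte(isolate, res).ToLocalChecked();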
+
+AtomicString::AtomicString(v8::Isolate* isolate, const std::string& string)
+    : kind_(GetStringKind(string, string.size())), isolate_(isolate) {
+  auto* external_string_resource = new AtomicStringOneByteResource(string);
+  string_ = v8::String::NewExternalOneByte(isolate, external_string_resource).ToLocalChecked();
+}
+
+AtomicString::AtomicString(v8::Isolate* isolate, const char* str, size_t length)
+    : kind_(GetStringKind(str, length)), isolate_(isolate) {
+  auto* external_string_resource = new AtomicStringOneByteResource(std::string(str, length));
+  string_ = v8::String::NewExternalOneByte(isolate, external_string_resource).ToLocalChecked();
+}
+
+AtomicString::AtomicString(v8::Isolate* isolate, std::unique_ptr<AutoFreeNativeString>&& native_string)
+    : isolate_(isolate) {
+  kind_ = GetStringKind(native_string.get());
+  auto* external_resource = new AtomicStringTwoByteResource(std::move(native_string));
+  string_ = v8::String::NewExternalTwoByte(isolate, external_resource).ToLocalChecked();
+}
+
+AtomicString::AtomicString(v8::Isolate* isolate, const uint16_t* str, size_t length) : isolate_(isolate) {
+  auto native_string = std::unique_ptr<AutoFreeNativeString>(
+      reinterpret_cast<AutoFreeNativeString*>(new SharedNativeString(str, length)));
+  kind_ = GetStringKind(native_string.get());
+  auto* external_resource = new AtomicStringTwoByteResource(std::move(native_string));
+  string_ = v8::String::NewExternalTwoByte(isolate, external_resource).ToLocalChecked();
+}
+
+AtomicString::AtomicString(v8::Local<v8::Context> context, v8::Local<v8::Value> v8_value) {
+  auto&& raw_native_string = jsValueToNativeString(context, v8_value);
+  auto native_string =
+      std::unique_ptr<AutoFreeNativeString>(reinterpret_cast<AutoFreeNativeString*>(raw_native_string.release()));
+  kind_ = GetStringKind(native_string.get());
+  auto* external_resource = new AtomicStringTwoByteResource(std::move(native_string));
+  string_ = v8::String::NewExternalTwoByte(context->GetIsolate(), external_resource).ToLocalChecked();
+}
+
+bool AtomicString::IsEmpty() const {
+  return *this == built_in_string::kempty_string || IsNull();
+}
+
+bool AtomicString::IsNull() const {
+  // A default-constructed AtomicString holds an empty handle; dereferencing
+  // it would crash, so check the handle itself.
+  return string_.IsEmpty();
+}
+
+bool AtomicString::Is8Bit() const {
+  return string_->IsExternalOneByte();
+}
+
+const uint8_t* AtomicString::Character8() const {
+  assert(string_->IsExternal());
+  return reinterpret_cast<const uint8_t*>(string_->GetExternalOneByteStringResource()->data());
+}
+
+const uint16_t* AtomicString::Character16() const {
+  assert(string_->IsExternal());
+  return string_->GetExternalStringResource()->data();
+}
+
+int AtomicString::Find(bool (*CharacterMatchFunction)(char)) const {
+  // return JS_FindCharacterInAtom(runtime_, atom_, CharacterMatchFunction);
+  return -1;  // Not implemented for the V8 backend yet.
+}
+
+int AtomicString::Find(bool (*CharacterMatchFunction)(uint16_t)) const {
+  // return JS_FindWCharacterInAtom(runtime_, atom_, CharacterMatchFunction);
+  return -1;  // Not implemented for the V8 backend yet.
+}
+
+std::string AtomicString::ToStdString(v8::Isolate* isolate) const {
+  if (IsEmpty())
+    return "";
+
+  if (string_->IsExternalOneByte()) {
+    return {string_->GetExternalOneByteStringResource()->data()};
+  }
+
+  std::string result;
+  size_t length = string_->Utf8Length(isolate);
+  // resize() (not reserve()) so WriteUtf8 writes into owned storage.
+  result.resize(length);
+  string_->WriteUtf8(isolate, result.data(), static_cast<int>(length));
+  return result;
+}
+
+std::unique_ptr<SharedNativeString> AtomicString::ToNativeString(v8::Isolate* isolate) const {
+  if (IsNull()) {
+    // The null string behaves like the empty string here.
+    return built_in_string::kempty_string.ToNativeString(isolate);
+  }
+
+  if (string_->IsExternalTwoByte()) {
+    auto* resource = string_->GetExternalStringResource();
+    return SharedNativeString::FromTemporaryString(resource->data(), resource->length());
+  }
+
+  size_t length = string_->Length();
+  std::vector<uint16_t> buffer;
+  buffer.resize(length);
+  // Pass an explicit length so Write() does not append a null terminator
+  // beyond the buffer.
+  string_->Write(isolate, buffer.data(), 0, static_cast<int>(length));
+  return SharedNativeString::FromTemporaryString(buffer.data(), length);
+}
+
+StringView AtomicString::ToStringView() const {
+  if (IsNull()) {
+    return built_in_string::kempty_string.ToStringView();
+  }
+
+  if (string_->IsExternalOneByte()) {
+    auto* resource = string_->GetExternalOneByteStringResource();
+    return StringView((void*)(resource->data()), resource->length(), false);
+  }
+
+  auto* resource = string_->GetExternalStringResource();
+
+  return StringView((void*)(resource->data()), resource->length(), true);
+}
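+// Conversion sketch (illustrative): the same AtomicString can be read back
+// as UTF-8 or as a UTF-16 native string, depending on the caller:
+//
+//   AtomicString s(isolate, "hello");
+//   std::string utf8 = s.ToStdString(isolate);  // "hello"
+//   auto utf16 = s.ToNativeString(isolate);     // utf16->length() == 5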
+
+AtomicString AtomicString::ToUpperIfNecessary(v8::Isolate* isolate) const {
+  if (kind_ == StringKind::kIsUpperCase) {
+    return *this;
+  }
+  if (!string_upper_.IsEmpty() || IsNull())
+    return *this;
+  AtomicString upperString = ToUpperSlow(isolate);
+  string_upper_ = v8::Local<v8::String>(upperString.string_);
+  return upperString;
+}
+
+AtomicString AtomicString::ToUpperSlow(v8::Isolate* isolate) const {
+  std::string str = ToStdString(isolate);
+  std::transform(str.begin(), str.end(), str.begin(), toupper);
+  return {isolate, str};
+}
+
+AtomicString AtomicString::ToLowerIfNecessary(v8::Isolate* isolate) const {
+  if (kind_ == StringKind::kIsLowerCase) {
+    return *this;
+  }
+  if (!string_lower_.IsEmpty() || IsNull())
+    return *this;
+  AtomicString lowerString = ToLowerSlow(isolate);
+  string_lower_ = lowerString.string_;
+  return lowerString;
+}
+
+AtomicString AtomicString::ToLowerSlow(v8::Isolate* isolate) const {
+  std::string str = ToStdString(isolate);
+  std::transform(str.begin(), str.end(), str.begin(), tolower);
+  return {isolate, str};
+}
+
+template <typename CharType>
+inline AtomicString RemoveCharactersInternal(v8::Isolate* isolate,
+                                             const AtomicString& self,
+                                             const CharType* characters,
+                                             size_t len,
+                                             CharacterMatchFunctionPtr find_match) {
+  const CharType* from = characters;
+  const CharType* fromend = from + len;
+
+  // Assume the common case will not remove any characters
+  while (from != fromend && !find_match(*from))
+    ++from;
+  if (from == fromend)
+    return self;
+
+  auto* to = (CharType*)malloc(len * sizeof(CharType));
+  size_t outc = static_cast<size_t>(from - characters);
+
+  if (outc)
+    memcpy(to, characters, outc * sizeof(CharType));
+
+  while (true) {
+    while (from != fromend && find_match(*from))
+      ++from;
+    while (from != fromend && !find_match(*from))
+      to[outc++] = *from++;
+    if (from == fromend)
+      break;
+  }
+
+  AtomicString str;
+
+  if (outc == 0) {
+    free(to);
+    return AtomicString::Empty();
+  }
+
+  auto data = (CharType*)malloc(outc * sizeof(CharType));
+  memcpy(data, to, outc * sizeof(CharType));
+  free(to);
+  if (self.Is8Bit()) {
+    str = AtomicString(isolate, reinterpret_cast<const char*>(data), outc);
+  } else {
+    str = AtomicString(isolate, reinterpret_cast<const uint16_t*>(data), outc);
+  }
+
+  free(data);
+  return str;
+}
+
+AtomicString AtomicString::RemoveCharacters(v8::Isolate* isolate, CharacterMatchFunctionPtr find_match) {
+  if (IsEmpty())
+    return AtomicString::Empty();
+  if (Is8Bit())
+    return RemoveCharactersInternal(isolate, *this, Character8(), string_->Utf8Length(isolate), find_match);
+  return RemoveCharactersInternal(isolate, *this, Character16(), string_->Length(), find_match);
+}
+
+AtomicString::AtomicString(const webf::AtomicString& value) {
+  string_ = v8::Local<v8::String>(value.string_);
+}
+AtomicString& AtomicString::operator=(const webf::AtomicString& other) noexcept {
+  string_ = other.string_;
+  return *this;
+}
+
+AtomicString::AtomicString(webf::AtomicString&& value) noexcept {
+  string_ = v8::Local<v8::String>(value.string_);
+}
+AtomicString& AtomicString::operator=(webf::AtomicString&& value) noexcept {
+  string_ = v8::Local<v8::String>(value.string_);
+  return *this;
+}
+
+}  // namespace webf
diff --git a/bridge/bindings/v8/atomic_string.h b/bridge/bindings/v8/atomic_string.h
new file mode 100644
index 0000000000..82b849571b
--- /dev/null
+++ b/bridge/bindings/v8/atomic_string.h
@@ -0,0 +1,120 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BRIDGE_BINDINGS_V8_ATOMIC_STRING_H_
+#define BRIDGE_BINDINGS_V8_ATOMIC_STRING_H_
+
+#include <v8.h>
+#include <cassert>
+#include <cstring>
+#include <memory>
+#include <string>
+#include "foundation/macros.h"
+#include "foundation/native_string.h"
+#include "foundation/string_view.h"
+
+namespace webf {
+
+typedef bool (*CharacterMatchFunctionPtr)(char);
+
+// An AtomicString instance represents a string, and multiple AtomicString
+// instances can share their string storage if the strings are
+// identical. Comparing two AtomicString instances is much faster than comparing
+// two String instances because we just check string storage identity.
+class AtomicString {
+  WEBF_DISALLOW_NEW();
+
+ public:
+  enum class StringKind { kIsLowerCase, kIsUpperCase, kIsMixed, kUnknown };
+
+  struct KeyHasher {
+    std::size_t operator()(const AtomicString& k) const { return k.string_->GetIdentityHash(); }
+  };
+
+  static AtomicString Empty();
+  static AtomicString Null();
+
+  AtomicString() = default;
+  AtomicString(v8::Isolate* isolate, const std::string& string);
+  AtomicString(v8::Isolate* isolate, const char* str, size_t length);
+  AtomicString(v8::Isolate* isolate, std::unique_ptr<AutoFreeNativeString>&& native_string);
+  AtomicString(v8::Isolate* isolate, const uint16_t* str, size_t length);
+  AtomicString(v8::Local<v8::Context> context, v8::Local<v8::Value> v8_value);
+
+  // Return the underlying string as a v8::Value handle.
+  v8::Local<v8::Value> ToV8(v8::Isolate* isolate) const { return string_.As<v8::Value>(); }
+
+  bool IsEmpty() const;
+  bool IsNull() const;
+
+  int64_t length() const { return string_->Length(); }
+
+  bool Is8Bit() const;
+  const uint8_t* Character8() const;
+  const uint16_t* Character16() const;
+
+  int Find(bool (*CharacterMatchFunction)(char)) const;
+  int Find(bool (*CharacterMatchFunction)(uint16_t)) const;
+
+  [[nodiscard]] std::string ToStdString(v8::Isolate* isolate) const;
+  [[nodiscard]] std::unique_ptr<SharedNativeString> ToNativeString(v8::Isolate* isolate) const;
+
+  StringView ToStringView() const;
+
+  AtomicString ToUpperIfNecessary(v8::Isolate* isolate) const;
+  AtomicString ToUpperSlow(v8::Isolate* isolate) const;
+
+  AtomicString ToLowerIfNecessary(v8::Isolate* isolate) const;
+  AtomicString ToLowerSlow(v8::Isolate* isolate) const;
+
+  inline bool ContainsOnlyLatin1OrEmpty() const;
+  AtomicString RemoveCharacters(v8::Isolate* isolate, CharacterMatchFunctionPtr find_match);
+
+  // Copy assignment
+  AtomicString(AtomicString const& value);
+  AtomicString& operator=(const AtomicString& other) noexcept;
+
+  // Move assignment
+  AtomicString(AtomicString&& value) noexcept;
+  AtomicString& operator=(AtomicString&& value) noexcept;
+
+  bool operator==(const AtomicString& other) const { return other.string_->StringEquals(string_); }
+  bool operator!=(const AtomicString& other) const { return !other.string_->StringEquals(string_); }
+
+ protected:
+  StringKind kind_;
+  v8::Isolate* isolate_;
+  v8::Local<v8::String> string_;
+  mutable v8::Local<v8::String> string_upper_;
+  mutable v8::Local<v8::String> string_lower_;
+};
+
+bool AtomicString::ContainsOnlyLatin1OrEmpty() const {
+  if (IsEmpty())
+    return true;
+
+  if (Is8Bit())
+    return true;
+
+  const uint16_t* characters = Character16();
+  uint16_t ored = 0;
+  for (size_t i = 0; i < string_->Length(); ++i)
+    ored |= characters[i];
+  return !(ored & 0xFF00);
+}
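+// Usage sketch (illustrative): KeyHasher lets AtomicString serve as a hash
+// key, using V8's identity hash for the underlying string:
+//
+//   std::unordered_map<AtomicString, int, AtomicString::KeyHasher> counts;
+//   counts[AtomicString(isolate, "click")] = 1;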
+inline v8::Local<v8::String> V8AtomicString(v8::Isolate* isolate, const char* string) {
+  assert(isolate);
+  if (!string || string[0] == '\0')
+    return v8::String::Empty(isolate);
+  return v8::String::NewFromOneByte(isolate, reinterpret_cast<const uint8_t*>(string),
+                                    v8::NewStringType::kInternalized, static_cast<int>(strlen(string)))
+      .ToLocalChecked();
+}
+
+}  // namespace webf
+
+#endif  // BRIDGE_BINDINGS_V8_ATOMIC_STRING_H_
diff --git a/bridge/bindings/v8/atomic_string_test.cc b/bridge/bindings/v8/atomic_string_test.cc
new file mode 100644
index 0000000000..6d40fe49e3
--- /dev/null
+++ b/bridge/bindings/v8/atomic_string_test.cc
@@ -0,0 +1,148 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#include "atomic_string.h"
+#include <v8.h>
+#include <memory>
+#include "bindings/v8/native_string_utils.h"
+#include "built_in_string.h"
+#include "event_type_names.h"
+#include "gtest/gtest.h"
+#include "libplatform.h"
+
+using namespace webf;
+
+using TestCallback = void (*)(v8::Local<v8::Context> ctx);
+
+bool v8_platform_inited = false;
+std::unique_ptr<v8::Platform> platform;
+
+void TestAtomicString(TestCallback callback) {
+  if (!v8_platform_inited) {
+    // Initialize V8.
+    v8::V8::InitializeICUDefaultLocation(nullptr);
+    v8::V8::InitializeExternalStartupData(nullptr);
+    platform = v8::platform::NewDefaultPlatform();
+    v8::V8::InitializePlatform(platform.get());
+    v8::V8::Initialize();
+    v8_platform_inited = true;
+  }
+
+  // Create a new Isolate and make it the current one.
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    // Create a stack-allocated handle scope.
+    v8::HandleScope handle_scope(isolate);
+
+    // Create a new context.
+    v8::Local<v8::Context> context = v8::Context::New(isolate);
+
+    // Enter the context for compiling and running scripts.
+    v8::Context::Scope context_scope(context);
+
+    built_in_string::Init(isolate);
+
+    callback(context);
+  }
+
+  built_in_string::Dispose();
+
+  // Dispose the isolate and tear down V8.
+  isolate->Dispose();
+}
+
+TEST(AtomicString, Empty) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    AtomicString atomic_string = AtomicString::Empty();
+    EXPECT_STREQ(atomic_string.ToStdString(ctx->GetIsolate()).c_str(), "");
+  });
+}
+
+TEST(AtomicString, FromNativeString) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    auto nativeString = stringToNativeString("helloworld");
+    AtomicString value =
+        AtomicString(ctx->GetIsolate(),
+                     std::unique_ptr<AutoFreeNativeString>(static_cast<AutoFreeNativeString*>(nativeString.release())));
+
+    EXPECT_STREQ(value.ToStdString(ctx->GetIsolate()).c_str(), "helloworld");
+  });
+}
+
+TEST(AtomicString, CreateFromStdString) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    AtomicString&& value = AtomicString(ctx->GetIsolate(), "helloworld");
+    EXPECT_STREQ(value.ToStdString(ctx->GetIsolate()).c_str(), "helloworld");
+  });
+}
+
+TEST(AtomicString, CreateFromJSValue) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    v8::Local<v8::String> string = v8::String::NewFromUtf8(ctx->GetIsolate(), "helloworld").ToLocalChecked();
+    AtomicString&& value = AtomicString(ctx, string);
+    EXPECT_STREQ(value.ToStdString(ctx->GetIsolate()).c_str(), "helloworld");
+  });
+}
+
+TEST(AtomicString, ToV8) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    AtomicString&& value = AtomicString(ctx->GetIsolate(), "helloworld");
+    v8::Local<v8::String> v8_string_value = value.ToV8(ctx->GetIsolate())->ToString(ctx).ToLocalChecked();
+    size_t utf_len = v8_string_value->Utf8Length(ctx->GetIsolate());
+    // One extra byte for the null terminator that WriteUtf8 appends.
+    char* str_buffer = new char[utf_len + 1];
+    v8_string_value->WriteUtf8(ctx->GetIsolate(), str_buffer);
+
+    EXPECT_STREQ(str_buffer, "helloworld");
+    delete[] str_buffer;
+  });
+}
+
+TEST(AtomicString, ToNativeString) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    AtomicString&& value = AtomicString(ctx->GetIsolate(), "helloworld");
+    auto native_string = value.ToNativeString(ctx->GetIsolate());
+    const uint16_t* p = native_string->string();
+    EXPECT_EQ(native_string->length(), 10);
+
+    uint16_t result[10] = {'h', 'e', 'l', 'l', 'o', 'w', 'o', 'r', 'l', 'd'};
+    for (int i = 0; i < native_string->length(); i++) {
+      EXPECT_EQ(result[i], p[i]);
+    }
+  });
+}
+
+TEST(AtomicString, CopyAssignment) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    AtomicString str = AtomicString(ctx->GetIsolate(), "helloworld");
+    struct P {
+      AtomicString str;
+    };
+    P p{AtomicString::Empty()};
+    v8::Local<v8::Value> v = str.ToV8(ctx->GetIsolate());
+    p.str = str;
+    EXPECT_EQ(p.str == str, true);
+  });
+}
+
+TEST(AtomicString, MoveAssignment) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    auto&& str = AtomicString(ctx->GetIsolate(), "helloworld");
+    auto&& str2 = AtomicString(std::move(str));
+    EXPECT_STREQ(str2.ToStdString(ctx->GetIsolate()).c_str(), "helloworld");
+  });
+}
+
+TEST(AtomicString, CopyToRightReference) {
+  TestAtomicString([](v8::Local<v8::Context> ctx) {
+    AtomicString str = AtomicString::Empty();
+    if (1 + 1 == 2) {
+      str = AtomicString(ctx->GetIsolate(), "helloworld");
+    }
+    EXPECT_STREQ(str.ToStdString(ctx->GetIsolate()).c_str(), "helloworld");
+  });
+}
diff --git a/bridge/bindings/v8/base/apple/scoped_cftyperef.h b/bridge/bindings/v8/base/apple/scoped_cftyperef.h
new file mode 100644
index 0000000000..c39acd8a66
--- /dev/null
+++ b/bridge/bindings/v8/base/apple/scoped_cftyperef.h
@@ -0,0 +1,50 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_APPLE_SCOPED_CFTYPEREF_H_
+#define BASE_APPLE_SCOPED_CFTYPEREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "bindings/v8/base/apple/scoped_typeref.h"
+
+namespace base::apple {
+
+// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
+// ownership of a CoreFoundation object: any object that can be represented
+// as a CFTypeRef. Style deviations here are solely for compatibility with
+// std::unique_ptr<>'s interface, with which everyone is already familiar.
+//
+// By default, ScopedCFTypeRef<> takes ownership of an object (in the
+// constructor or in reset()) by taking over the caller's existing ownership
+// claim. The caller must own the object it gives to ScopedCFTypeRef<>, and
+// relinquishes an ownership claim to that object. ScopedCFTypeRef<> does not
+// call CFRetain(). This behavior is parameterized by the |OwnershipPolicy|
+// enum. If the value |RETAIN| is passed (in the constructor or in reset()),
+// then ScopedCFTypeRef<> will call CFRetain() on the object, and the initial
+// ownership is not changed.
+
+namespace internal {
+
+template <typename CFT>
+struct ScopedCFTypeRefTraits {
+  static CFT InvalidValue() { return nullptr; }
+  static CFT Retain(CFT object) {
+    CFRetain(object);
+    return object;
+  }
+  static void Release(CFT object) { CFRelease(object); }
+};
+
+}  // namespace internal
+
+template <typename CFT>
+using ScopedCFTypeRef =
+    ScopedTypeRef<CFT, internal::ScopedCFTypeRefTraits<CFT>>;
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_CFTYPEREF_H_
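For reference, a typical use of ScopedCFTypeRef looks like the following (a sketch, not part of the patch; CFStringCreateWithCString is a CoreFoundation call that returns a +1 reference, which the scoper assumes by default):

    base::apple::ScopedCFTypeRef<CFStringRef> name(
        CFStringCreateWithCString(kCFAllocatorDefault, "webf",
                                  kCFStringEncodingUTF8));
    // CFRelease() runs automatically when `name` goes out of scope.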
diff --git a/bridge/bindings/v8/base/apple/scoped_typeref.h b/bridge/bindings/v8/base/apple/scoped_typeref.h
new file mode 100644
index 0000000000..3a9c4a410d
--- /dev/null
+++ b/bridge/bindings/v8/base/apple/scoped_typeref.h
@@ -0,0 +1,194 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_APPLE_SCOPED_TYPEREF_H_
+#define BASE_APPLE_SCOPED_TYPEREF_H_
+
+#include "bindings/v8/base/check_op.h"
+#include "bindings/v8/base/memory/scoped_policy.h"
+
+namespace base::apple {
+
+// ScopedTypeRef<> is patterned after std::shared_ptr<>, but maintains ownership
+// of a reference to any type that is maintained by Retain and Release methods.
+//
+// The Traits structure must provide the Retain and Release methods for type T.
+// A default ScopedTypeRefTraits is used but not defined, and should be defined
+// for each type to use this interface. For example, an appropriate definition
+// of ScopedTypeRefTraits for CGLContextObj would be:
+//
+//   template<>
+//   struct ScopedTypeRefTraits<CGLContextObj> {
+//     static CGLContextObj InvalidValue() { return nullptr; }
+//     static CGLContextObj Retain(CGLContextObj object) {
+//       CGLContextRetain(object);
+//       return object;
+//     }
+//     static void Release(CGLContextObj object) { CGLContextRelease(object); }
+//   };
+//
+// For the many types that have pass-by-pointer create functions, the function
+// InitializeInto() is provided to allow direct initialization and assumption
+// of ownership of the object. For example, continuing to use the above
+// CGLContextObj specialization:
+//
+//   base::apple::ScopedTypeRef<CGLContextObj> context;
+//   CGLCreateContext(pixel_format, share_group, context.InitializeInto());
+//
+// For initialization with an existing object, the caller may specify whether
+// the ScopedTypeRef<> being initialized is assuming the caller's existing
+// ownership of the object (and should not call Retain in initialization) or if
+// it should not assume this ownership and must create its own (by calling
+// Retain in initialization). This behavior is based on the `policy` parameter,
+// with `ASSUME` for the former and `RETAIN` for the latter. The default policy
+// is to `ASSUME`.
+
+template <typename T>
+struct ScopedTypeRefTraits;
+
+template <typename T, typename Traits = ScopedTypeRefTraits<T>>
+class ScopedTypeRef {
+ public:
+  using element_type = T;
+
+  // Construction from underlying type
+
+  explicit constexpr ScopedTypeRef(
+      element_type object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : object_(object) {
+    if (object_ != Traits::InvalidValue() &&
+        policy == base::scoped_policy::RETAIN) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  // The pattern in the four [copy|move] [constructors|assignment operators]
+  // below is that for each of them there is the standard version for use by
+  // scopers wrapping objects of this type, and a templated version to handle
+  // scopers wrapping objects of subtypes. One might think that one could get
+  // away with only the templated versions, as their templates should match the
+  // usage, but that doesn't work. Having a templated function that matches the
+  // types of, say, a copy constructor, doesn't count as a copy constructor, and
+  // the compiler's generated copy constructor is incorrect.
+
+  // Copy construction
+
+  ScopedTypeRef(const ScopedTypeRef& that) : object_(that.get()) {
+    if (object_ != Traits::InvalidValue()) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that) : object_(that.get()) {
+    if (object_ != Traits::InvalidValue()) {
+      object_ = Traits::Retain(object_);
+    }
+  }
+
+  // Copy assignment
+
+  ScopedTypeRef& operator=(const ScopedTypeRef& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+    return *this;
+  }
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef& operator=(const ScopedTypeRef<R, RTraits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+    return *this;
+  }
+
+  // Move construction
+
+  ScopedTypeRef(ScopedTypeRef&& that) : object_(that.release()) {}
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef(ScopedTypeRef<R, RTraits>&& that) : object_(that.release()) {}
+
+  // Move assignment
+
+  ScopedTypeRef& operator=(ScopedTypeRef&& that) {
+    reset(that.release(), base::scoped_policy::ASSUME);
+    return *this;
+  }
+
+  template <typename R, typename RTraits>
+  ScopedTypeRef& operator=(ScopedTypeRef<R, RTraits>&& that) {
+    reset(that.release(), base::scoped_policy::ASSUME);
+    return *this;
+  }
+
+  // Resetting
+
+  template <typename R, typename RTraits>
+  void reset(const ScopedTypeRef<R, RTraits>& that) {
+    reset(that.get(), base::scoped_policy::RETAIN);
+  }
+
+  void reset(element_type object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    if (object != Traits::InvalidValue() &&
+        policy == base::scoped_policy::RETAIN) {
+      object = Traits::Retain(object);
+    }
+    if (object_ != Traits::InvalidValue()) {
+      Traits::Release(object_);
+    }
+    object_ = object;
+  }
+
+  // Destruction
+
+  ~ScopedTypeRef() {
+    if (object_ != Traits::InvalidValue()) {
+      Traits::Release(object_);
+    }
+  }
+
+  // This is to be used only to take ownership of objects that are created by
+  // pass-by-pointer create functions. To enforce this, require that this object
+  // be empty before use.
+  [[nodiscard]] element_type* InitializeInto() {
+    CHECK_EQ(object_, Traits::InvalidValue());
+    return &object_;
+  }
+
+  bool operator==(const ScopedTypeRef& that) const {
+    return object_ == that.object_;
+  }
+
+  bool operator!=(const ScopedTypeRef& that) const {
+    return object_ != that.object_;
+  }
+
+  explicit operator bool() const { return object_ != Traits::InvalidValue(); }
+
+  element_type get() const { return object_; }
+
+  void swap(ScopedTypeRef& that) {
+    element_type temp = that.object_;
+    that.object_ = object_;
+    object_ = temp;
+  }
+
+  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release. It is NOT
+  // a wrapper for Release(). To force a ScopedTypeRef<> object to call
+  // Release(), use ScopedTypeRef<>::reset().
+  [[nodiscard]] element_type release() {
+    element_type temp = object_;
+    object_ = Traits::InvalidValue();
+    return temp;
+  }
+
+ private:
+  element_type object_;
+};
+
+}  // namespace base::apple
+
+#endif  // BASE_APPLE_SCOPED_TYPEREF_H_
diff --git a/bridge/bindings/v8/base/base_export.h b/bridge/bindings/v8/base/base_export.h
new file mode 100644
index 0000000000..023bd5e76e
--- /dev/null
+++ b/bridge/bindings/v8/base/base_export.h
@@ -0,0 +1,31 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_BASE_EXPORT_H_
+#define BASE_BASE_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __declspec(dllexport)
+#else
+#define BASE_EXPORT __declspec(dllimport)
+#endif  // defined(BASE_IMPLEMENTATION)
+
+#else  // defined(WIN32)
+#if defined(BASE_IMPLEMENTATION)
+#define BASE_EXPORT __attribute__((visibility("default")))
+#else
+#define BASE_EXPORT
+#endif  // defined(BASE_IMPLEMENTATION)
+#endif
+
+#else  // defined(COMPONENT_BUILD)
+#define BASE_EXPORT
+#endif
+
+#endif  // BASE_BASE_EXPORT_H_
diff --git a/bridge/bindings/v8/base/bit_cast.h b/bridge/bindings/v8/base/bit_cast.h
new file mode 100644
index 0000000000..fac33f1131
--- /dev/null
+++ b/bridge/bindings/v8/base/bit_cast.h
@@ -0,0 +1,46 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef WEBF_BIT_CAST_H
+#define WEBF_BIT_CAST_H
+
+#include <type_traits>
+
+namespace base {
+
+// This is an equivalent to C++20's std::bit_cast<>(), but with additional
+// warnings. It morally does what `*reinterpret_cast<Dest*>(&source)` does, but
+// the cast/deref pair is undefined behavior, while bit_cast<>() isn't.
+//
+// This is not a magic "get out of UB free" card. This must only be used on
+// values, not on references or pointers. For pointers, use
+// reinterpret_cast<>(), and then look at https://eel.is/c++draft/basic.lval#11
+// as that's probably UB also.
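+//
+// Worked example (illustrative):
+//
+//   float f = 1.0f;
+//   uint32_t bits = base::bit_cast<uint32_t>(f);  // bits == 0x3F800000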
+
+template <typename Dest, typename Source>
+constexpr Dest bit_cast(const Source& source) {
+  static_assert(!std::is_pointer_v<Dest>,
+                "bit_cast must not be used on pointer types");
+  static_assert(!std::is_pointer_v<Source>,
+                "bit_cast must not be used on pointer types");
+  static_assert(!std::is_reference_v<Dest>,
+                "bit_cast must not be used on reference types");
+  static_assert(!std::is_reference_v<Source>,
+                "bit_cast must not be used on reference types");
+  static_assert(
+      sizeof(Dest) == sizeof(Source),
+      "bit_cast requires source and destination types to be the same size");
+  static_assert(std::is_trivially_copyable_v<Source>,
+                "bit_cast requires the source type to be trivially copyable");
+  static_assert(
+      std::is_trivially_copyable_v<Dest>,
+      "bit_cast requires the destination type to be trivially copyable");
+
+  return __builtin_bit_cast(Dest, source);
+}
+
+}  // namespace base
+
+#endif  // WEBF_BIT_CAST_H
diff --git a/bridge/bindings/v8/base/bits.h b/bridge/bindings/v8/base/bits.h
new file mode 100644
index 0000000000..192112c9c6
--- /dev/null
+++ b/bridge/bindings/v8/base/bits.h
@@ -0,0 +1,150 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_BITS_H_
+#define BASE_BITS_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <bit>
+#include <concepts>
+
+#include "bindings/v8/base/check.h"
+
+namespace base::bits {
+
+// Bit functions in <bit> are restricted to a specific set of types of unsigned
+// integer; restrict functions in this file that are related to those in that
+// header to match for consistency.
+template <typename T>
+concept UnsignedInteger =
+    std::unsigned_integral<T> && !std::same_as<T, bool> &&
+    !std::same_as<T, char> && !std::same_as<T, char8_t> &&
+    !std::same_as<T, char16_t> && !std::same_as<T, char32_t> &&
+    !std::same_as<T, wchar_t>;
+
+// We want to migrate all users of these functions to use the unsigned type
+// versions of the functions, but until they are all moved over, create a
+// concept that captures all the types that must be supported for compatibility
+// but that we want to remove.
+//
+// TODO(crbug.com/40256225): Switch uses to supported functions and
+// remove.
+template <typename T>
+concept SignedIntegerDeprecatedDoNotUse =
+    std::integral<T> && !UnsignedInteger<T>;
+
+// Returns true iff |value| is a power of 2. DEPRECATED; use
+// std::has_single_bit() instead.
+//
+// TODO(crbug.com/40256225): Switch uses and remove.
+template <typename T>
+  requires SignedIntegerDeprecatedDoNotUse<T>
+constexpr bool IsPowerOfTwoDeprecatedDoNotUse(T value) {
+  // From "Hacker's Delight": Section 2.1 Manipulating Rightmost Bits.
+  //
+  // Only positive integers with a single bit set are powers of two. If only one
+  // bit is set in x (e.g. 0b00000100000000) then |x-1| will have that bit set
+  // to zero and all bits to its right set to 1 (e.g. 0b00000011111111). Hence
+  // |x & (x-1)| is 0 iff x is a power of two.
+  return value > 0 && (value & (value - 1)) == 0;
+}
+
+// Round down |size| to a multiple of alignment, which must be a power of two.
+template <typename T>
+  requires UnsignedInteger<T>
+inline constexpr T AlignDown(T size, T alignment) {
+  DCHECK(std::has_single_bit(alignment));
+  return size & ~(alignment - 1);
+}
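+// Worked example (illustrative), with a power-of-two alignment:
+//
+//   AlignDown(uint32_t{13}, uint32_t{8}) == 8
+//   AlignUp(uint32_t{13}, uint32_t{8})   == 16   (AlignUp is defined below)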
+
+// Round down |size| to a multiple of alignment, which must be a power of two.
+// DEPRECATED; use the UnsignedInteger version.
+//
+// TODO(crbug.com/40256225): Switch uses and remove.
+template <typename T>
+  requires SignedIntegerDeprecatedDoNotUse<T>
+inline constexpr T AlignDownDeprecatedDoNotUse(T size, T alignment) {
+  DCHECK(IsPowerOfTwoDeprecatedDoNotUse(alignment));
+  return size & ~(alignment - 1);
+}
+
+// Move |ptr| back to the previous multiple of alignment, which must be a power
+// of two. Defined for types where sizeof(T) is one byte.
+template <typename T>
+  requires(sizeof(T) == 1)
+inline T* AlignDown(T* ptr, uintptr_t alignment) {
+  return reinterpret_cast<T*>(
+      AlignDown(reinterpret_cast<uintptr_t>(ptr), alignment));
+}
+
+// Round up |size| to a multiple of alignment, which must be a power of two.
+template <typename T>
+  requires UnsignedInteger<T>
+inline constexpr T AlignUp(T size, T alignment) {
+  DCHECK(std::has_single_bit(alignment));
+  return (size + alignment - 1) & ~(alignment - 1);
+}
+
+// Round up |size| to a multiple of alignment, which must be a power of two.
+// DEPRECATED; use the UnsignedInteger version.
+//
+// TODO(crbug.com/40256225): Switch uses and remove.
+template <typename T>
+  requires SignedIntegerDeprecatedDoNotUse<T>
+inline constexpr T AlignUpDeprecatedDoNotUse(T size, T alignment) {
+  DCHECK(IsPowerOfTwoDeprecatedDoNotUse(alignment));
+  return (size + alignment - 1) & ~(alignment - 1);
+}
+
+// Advance |ptr| to the next multiple of alignment, which must be a power of
+// two. Defined for types where sizeof(T) is one byte.
+template <typename T>
+  requires(sizeof(T) == 1)
+inline T* AlignUp(T* ptr, uintptr_t alignment) {
+  return reinterpret_cast<T*>(
+      AlignUp(reinterpret_cast<uintptr_t>(ptr), alignment));
+}
+
+// Returns the integer i such that 2^i <= n < 2^(i+1).
+//
+// A common use for this function is to measure the number of bits required to
+// contain a value; for that case use std::bit_width().
+//
+// A common use for this function is to take its result and use it to
+// left-shift a bit; instead of doing so, use std::bit_floor().
+constexpr int Log2Floor(uint32_t n) {
+  return 31 - std::countl_zero(n);
+}
+
+// Returns the integer i such that 2^(i-1) < n <= 2^i.
+//
+// A common use for this function is to measure the number of bits required to
+// contain a value; for that case use std::bit_width().
+//
+// A common use for this function is to take its result and use it to
+// left-shift a bit; instead of doing so, use std::bit_ceil().
+constexpr int Log2Ceiling(uint32_t n) {
+  // When n == 0, we want the function to return -1.
+  // When n == 0, (n - 1) will underflow to 0xFFFFFFFF, which is
+  // why the statement below starts with (n ? 32 : -1).
+  return (n ? 32 : -1) - std::countl_zero(n - 1);
+}
+
+// Returns a value of type T with a single bit set in the left-most position.
+// Can be used instead of manually shifting a 1 to the left. Unlike the other
+// functions in this file, usable for any integral type.
+template <typename T>
+  requires std::integral<T>
+constexpr T LeftmostBit() {
+  T one(1u);
+  return one << (8 * sizeof(T) - 1);
+}
+
+}  // namespace base::bits
+
+#endif  // BASE_BITS_H_
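A quick sanity check of the bits.h helpers above; these are constexpr, so the values can be verified at compile time (illustrative, not part of the patch):

    static_assert(base::bits::Log2Floor(9) == 3);    // 2^3 <= 9 < 2^4
    static_assert(base::bits::Log2Ceiling(9) == 4);  // 2^3 < 9 <= 2^4
    static_assert(base::bits::LeftmostBit<uint8_t>() == 0x80);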
diff --git a/bridge/bindings/v8/base/check.h b/bridge/bindings/v8/base/check.h
new file mode 100644
index 0000000000..e5914c701d
--- /dev/null
+++ b/bridge/bindings/v8/base/check.h
@@ -0,0 +1,304 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_CHECK_H_
+#define BASE_CHECK_H_
+
+#include <iosfwd>
+#include <memory>
+
+#include "bindings/v8/base/compiler_specific.h"
+#include "bindings/v8/base/check_op.h"
+#include "bindings/v8/base/not_fatal_until.h"
+#include "bindings/v8/base/location.h"
+
+// This header defines the CHECK, DCHECK, and DPCHECK macros.
+//
+// CHECK dies with a fatal error if its condition is not true. It is not
+// controlled by NDEBUG, so the check will be executed regardless of compilation
+// mode.
+//
+// DCHECK, the "debug mode" check, is enabled depending on NDEBUG and
+// DCHECK_ALWAYS_ON, and its severity depends on DCHECK_IS_CONFIGURABLE.
+//
+// (D)PCHECK is like (D)CHECK, but includes the system error code (c.f.
+// perror(3)).
+//
+// Additional information can be streamed to these macros and will be included
+// in the log output if the condition doesn't hold (you may need to include
+// <ostream>):
+//
+//   CHECK(condition) << "Additional info.";
+//
+// The condition is evaluated exactly once. Even in build modes where e.g.
+// DCHECK is disabled, the condition and any stream arguments are still
+// referenced to avoid warnings about unused variables and functions.
+//
+// An optional base::NotFatalUntil argument can be provided to make the
+// instance non-fatal (dumps without crashing) before a provided milestone. That
+// is: CHECK(false, base::NotFatalUntil::M120); starts crashing in M120. CHECKs
+// with a milestone argument preserve logging even in official builds, and
+// will upload the CHECK's log message in crash reports for remote diagnostics.
+// This is recommended for use in situations that are not flag guarded, or where
+// we have low pre-stable coverage. Using this lets us probe for would-be CHECK
+// failures for a milestone or two before rolling out a CHECK.
+//
+// For the (D)CHECK_EQ, etc. macros, see base/check_op.h. However, that header
+// is *significantly* larger than check.h, so try to avoid including it in
+// header files.
+
+namespace logging {
+
+// Class used to explicitly ignore an ostream, and optionally a boolean value.
+class VoidifyStream {
+ public:
+  VoidifyStream() = default;
+  explicit VoidifyStream(bool) {}
+
+  // Binary & has lower precedence than << but higher than ?:
+  void operator&(std::ostream&) {}
+};
+
+// Macro which uses but does not evaluate expr and any stream parameters.
+#define EAT_CHECK_STREAM_PARAMS(expr) \
+  true ? (void)0                      \
+       : ::logging::VoidifyStream(expr) & (*::logging::g_swallow_stream)
+extern std::ostream* g_swallow_stream;
+
+class LogMessage;
+
+// Class used for raising a check error upon destruction.
+class CheckError {
+ public:
+  static CheckError Check(
+      const char* condition,
+      base::NotFatalUntil fatal_milestone =
+          base::NotFatalUntil::NoSpecifiedMilestoneInternal,
+      const base::Location& location = base::Location::Current());
+  // Takes ownership over (free()s after using) `log_message_str`, for use with
+  // CHECK_op macros.
+  static CheckError CheckOp(
+      char* log_message_str,
+      base::NotFatalUntil fatal_milestone =
+          base::NotFatalUntil::NoSpecifiedMilestoneInternal,
+      const base::Location& location = base::Location::Current());
+
+  static CheckError DCheck(
+      const char* condition,
+      const base::Location& location = base::Location::Current());
+  // Takes ownership over (free()s after using) `log_message_str`, for use with
+  // DCHECK_op macros.
+  static CheckError DCheckOp(
+      char* log_message_str,
+      const base::Location& location = base::Location::Current());
+
+  static CheckError DumpWillBeCheck(
+      const char* condition,
+      const base::Location& location = base::Location::Current());
+  // Takes ownership over (free()s after using) `log_message_str`, for use with
+  // DUMP_WILL_BE_CHECK_op macros.
+  static CheckError DumpWillBeCheckOp(
+      char* log_message_str,
+      const base::Location& location = base::Location::Current());
+
+  static CheckError PCheck(
+      const char* condition,
+      const base::Location& location = base::Location::Current());
+  static CheckError PCheck(
+      const base::Location& location = base::Location::Current());
+
+  static CheckError DPCheck(
+      const char* condition,
+      const base::Location& location = base::Location::Current());
+
+  static CheckError DumpWillBeNotReachedNoreturn(
+      const base::Location& location = base::Location::Current());
+
+  static CheckError NotImplemented(
+      const char* function,
+      const base::Location& location = base::Location::Current());
+
+  // Stream for adding optional details to the error message.
+  std::ostream& stream();
+
+  // Try really hard to get the call site and callee as separate stack frames in
+  // crash reports.
+  NOMERGE NOINLINE NOT_TAIL_CALLED ~CheckError();
+
+  CheckError(const CheckError&) = delete;
+  CheckError& operator=(const CheckError&) = delete;
+
+  template <typename T>
+  std::ostream& operator<<(T&& streamed_type) {
+    return stream() << streamed_type;
+  }
+
+ protected:
+  // Takes ownership of `log_message`.
+  explicit CheckError(LogMessage* log_message);
+
+  std::unique_ptr<LogMessage> log_message_;
+};
+
+class NotReachedError : public CheckError {
+ public:
+  static NotReachedError NotReached(
+      base::NotFatalUntil fatal_milestone =
+          base::NotFatalUntil::NoSpecifiedMilestoneInternal,
+      const base::Location& location = base::Location::Current());
+
+  // Used to trigger a NOTREACHED_IN_MIGRATION() without providing file or line
+  // while also discarding log-stream arguments. See base/notreached.h.
+  NOMERGE NOINLINE NOT_TAIL_CALLED static void TriggerNotReached();
+
+  // TODO(crbug.com/40580068): Mark [[noreturn]] once this is CHECK-fatal on all
+  // builds.
+  NOMERGE NOINLINE NOT_TAIL_CALLED ~NotReachedError();
+
+ private:
+  using CheckError::CheckError;
+};
+
+// TODO(crbug.com/40580068): This should take the name of the above class once
+// all callers of NOTREACHED_IN_MIGRATION() have migrated to the CHECK-fatal
+// version.
+class NotReachedNoreturnError : public CheckError {
+ public:
+  explicit NotReachedNoreturnError(
+      const base::Location& location = base::Location::Current());
+
+  [[noreturn]] NOMERGE NOINLINE NOT_TAIL_CALLED ~NotReachedNoreturnError();
+};
+
+// A helper macro for checks that log to streams that makes it easier for the
+// compiler to identify and warn about dead code, e.g.:
+//
+//   return 2;
+//   NOTREACHED_IN_MIGRATION();
+//
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+//   CHECK(Foo());
+//
+// TODO(crbug.com/40244950): Remove the const bool when the blink-gc plugin has
+// been updated to accept `if (LIKELY(!field_))` as well as `if (!field_)`.
+#define LOGGING_CHECK_FUNCTION_IMPL(check_stream, condition)              \
+  switch (0)                                                              \
+  case 0:                                                                 \
+  default:                                                                \
+    /* Hint to the optimizer that `condition` is unlikely to be false. */ \
+    /* The optimizer can use this as a hint to place the failure path */  \
+    /* out-of-line, e.g. at the tail of the function. */                  \
+    if (const bool probably_true = static_cast<bool>(condition);          \
+        LIKELY(ANALYZER_ASSUME_TRUE(probably_true)))                      \
+      ;                                                                   \
+    else                                                                  \
+      (check_stream)
+    if (const bool probably_true = static_cast<bool>(condition);           \
+        LIKELY(ANALYZER_ASSUME_TRUE(probably_true)))                       \
+      ;                                                                    \
+    else                                                                   \
+      (check_stream)
+
+#if defined(OFFICIAL_BUILD) && !defined(NDEBUG)
+#error "Debug builds are not expected to be optimized as official builds."
+#endif  // defined(OFFICIAL_BUILD) && !defined(NDEBUG)
+
+#if defined(OFFICIAL_BUILD) && !DCHECK_IS_ON()
+// Note that this uses IMMEDIATE_CRASH_ALWAYS_INLINE to force-inline in debug
+// mode as well. See LoggingTest.CheckCausesDistinctBreakpoints.
+[[noreturn]] IMMEDIATE_CRASH_ALWAYS_INLINE void CheckFailure() {
+  base::ImmediateCrash();
+}
+
+// Discard log strings to reduce code bloat when there is no NotFatalUntil
+// argument (which temporarily preserves logging both locally and in crash
+// reports).
+//
+// This is not calling BreakDebugger since this is called frequently, and
+// calling an out-of-line function instead of a noreturn inline macro prevents
+// compiler optimizations. Unlike the other check macros, this one does not
+// use LOGGING_CHECK_FUNCTION_IMPL(), since it is incompatible with
+// EAT_CHECK_STREAM_PARAMS().
+#define CHECK(condition, ...)                                               \
+  BASE_IF(BASE_IS_EMPTY(__VA_ARGS__),                                       \
+          UNLIKELY(!(condition)) ? logging::CheckFailure()                  \
+                                 : EAT_CHECK_STREAM_PARAMS(),               \
+          LOGGING_CHECK_FUNCTION_IMPL(                                      \
+              logging::CheckError::Check(#condition, __VA_ARGS__), condition))
+
+#define CHECK_WILL_STREAM() false
+
+// Strip the conditional string from official builds.
+#define PCHECK(condition) \
+  LOGGING_CHECK_FUNCTION_IMPL(::logging::CheckError::PCheck(), condition)
+
+#else
+
+#define CHECK_WILL_STREAM() true
+
+#define CHECK(condition, ...)                                               \
+  LOGGING_CHECK_FUNCTION_IMPL(                                              \
+      ::logging::CheckError::Check(#condition __VA_OPT__(, ) __VA_ARGS__),  \
+      condition)
+
+#define PCHECK(condition)                                                   \
+  LOGGING_CHECK_FUNCTION_IMPL(::logging::CheckError::PCheck(#condition),    \
+                              condition)
+
+#endif
+
+#if DCHECK_IS_ON()
+
+#define DCHECK(condition)                                                   \
+  LOGGING_CHECK_FUNCTION_IMPL(::logging::CheckError::DCheck(#condition),    \
+                              condition)
+#define DPCHECK(condition)                                                  \
+  LOGGING_CHECK_FUNCTION_IMPL(::logging::CheckError::DPCheck(#condition),   \
+                              condition)
+
+#else
+
+#define DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
+#define DPCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
+
+#endif  // DCHECK_IS_ON()
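+
+// Illustrative sketch (not part of the upstream header): with DCHECK
+// disabled, the condition and stream arguments below are referenced but not
+// evaluated, so `ok` triggers no unused-variable warning and `Describe()` (a
+// hypothetical function) is never called:
+//
+//   const bool ok = Setup();      // `Setup` is hypothetical
+//   DCHECK(ok) << Describe(ok);   // swallowed by EAT_CHECK_STREAM_PARAMS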
+
+// The DUMP_WILL_BE_CHECK() macro provides a convenient way to non-fatally
+// dump in official builds if a condition is false. This is used to more
+// cautiously roll out a new CHECK() (or upgrade a DCHECK) where the caller
+// isn't entirely sure that something holds true in practice (but asserts that
+// it should). This is especially useful for platforms that have a low
+// pre-stable population and code areas that are rarely exercised.
+//
+// On DCHECK builds this macro matches DCHECK behavior.
+//
+// This macro isn't optimized (preserves filename, line number and log
+// messages in official builds), as they are expected to be in product
+// temporarily. When using this macro, leave a TODO(crbug.com/nnnn) entry
+// referring to a bug related to its rollout. Then put a NextAction on the bug
+// to come back and clean this up (replace with a CHECK). A
+// DUMP_WILL_BE_CHECK() that's been left untouched for a long time without bug
+// updates suggests that issues that would've prevented enabling this CHECK
+// have either not been discovered or have been resolved.
+//
+// Using this macro is preferred over direct base::debug::DumpWithoutCrashing()
+// invocations as it communicates intent to eventually end up as a CHECK. It
+// also preserves the log message so setting crash keys to get additional
+// debug info isn't required as often.
+#define DUMP_WILL_BE_CHECK(condition, ...)                                  \
+  LOGGING_CHECK_FUNCTION_IMPL(::logging::CheckError::DumpWillBeCheck(       \
+                                  #condition __VA_OPT__(, ) __VA_ARGS__),   \
+                              condition)
+
+// Async signal safe checking mechanism.
+[[noreturn]] void RawCheckFailure(const char* message);
+#define RAW_CHECK(condition)                                            \
+  do {                                                                  \
+    if (UNLIKELY(!(condition))) {                                       \
+      ::logging::RawCheckFailure("Check failed: " #condition "\n");     \
+    }                                                                   \
+  } while (0)
+
+}  // namespace logging
+
+#endif  // BASE_CHECK_H_
+
diff --git a/bridge/bindings/v8/base/check_op.cc b/bridge/bindings/v8/base/check_op.cc
new file mode 100644
index 0000000000..5997f4939d
--- /dev/null
+++ b/bridge/bindings/v8/base/check_op.cc
@@ -0,0 +1,108 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifdef UNSAFE_BUFFERS_BUILD
+// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
+#pragma allow_unsafe_buffers
+#endif
+
+#include "bindings/v8/base/check_op.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <cstdio>
+#include <sstream>
+
+//#include "base/logging.h"
+
+namespace logging {
+
+char* CheckOpValueStr(int v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%d", v);
+  return strdup(buf);
+}
+
+char* CheckOpValueStr(unsigned v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%u", v);
+  return strdup(buf);
+}
+
+char* CheckOpValueStr(long v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%ld", v);
+  return strdup(buf);
+}
+
+char* CheckOpValueStr(unsigned long v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%lu", v);
+  return strdup(buf);
+}
+
+char* CheckOpValueStr(long long v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%lld", v);
+  return strdup(buf);
+}
+
+char* CheckOpValueStr(unsigned long long v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%llu", v);
+  return strdup(buf);
+}
+
+char* CheckOpValueStr(const void* v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%p", v);
+  return strdup(buf);
+}
+
+char* CheckOpValueStr(std::nullptr_t v) {
+  return strdup("nullptr");
+}
+
+char* CheckOpValueStr(const std::string& v) {
+  return strdup(v.c_str());
+}
+
+char* CheckOpValueStr(std::string_view v) {
+  // Ideally this would be `strndup`, but `strndup` is not portable.
+  char* ret = static_cast<char*>(malloc(v.size() + 1));
+  if (ret) {
+    std::copy(v.begin(), v.end(), ret);
+    ret[v.size()] = 0;
+  }
+  return ret;
+}
+
+char* CheckOpValueStr(double v) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%.6lf", v);
+  return strdup(buf);
+}
+
+char* StreamValToStr(const void* v,
+                     void (*stream_func)(std::ostream&, const void*)) {
+  std::stringstream ss;
+  stream_func(ss, v);
+  return strdup(ss.str().c_str());
+}
+
+char* CreateCheckOpLogMessageString(const char* expr_str,
+                                    char* v1_str,
+                                    char* v2_str) {
+  std::stringstream ss;
+  ss << "Check failed: " << expr_str << " (" << v1_str << " vs. " << v2_str
+     << ")";
+  free(v1_str);
+  free(v2_str);
+  return strdup(ss.str().c_str());
+}
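+
+// Illustrative sketch (not part of the upstream file) of the ownership
+// contract implemented above: CheckOpValueStr() results are heap strings the
+// caller must free(), and CreateCheckOpLogMessageString() consumes its two
+// value arguments:
+//
+//   char* lhs = CheckOpValueStr(42);                // -> "42"
+//   char* rhs = CheckOpValueStr(std::string("x"));  // -> "x"
+//   char* msg = CreateCheckOpLogMessageString("a == b", lhs, rhs);
+//   // msg is "Check failed: a == b (42 vs. x)"; lhs and rhs are freed.
+//   free(msg);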
+
+}  // namespace logging
+
diff --git a/bridge/bindings/v8/base/check_op.h b/bridge/bindings/v8/base/check_op.h
new file mode 100644
index 0000000000..1f175aa199
--- /dev/null
+++ b/bridge/bindings/v8/base/check_op.h
@@ -0,0 +1,255 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_CHECK_OP_H_
+#define BASE_CHECK_OP_H_
+
+#include <cstddef>
+#include <string>
+#include <string_view>
+#include <type_traits>
+
+#include "bindings/v8/base/dcheck_is_on.h"
+//#include "bindings/v8/base/memory/raw_ptr_exclusion.h"
+#include "bindings/v8/base/strings/to_string.h"
+#include "bindings/v8/base/types/supports_ostream_operator.h"
+#include "bindings/v8/base/check.h"
+
+// This header defines the (DP)CHECK_EQ etc. macros.
+//
+// (DP)CHECK_EQ(x, y) is similar to (DP)CHECK(x == y) but will also log the
+// values of x and y if the condition doesn't hold. This works for basic types
+// and types with an operator<< or .ToString() method.
+//
+// The operands are evaluated exactly once, and even in build modes where e.g.
+// DCHECK is disabled, the operands and their stringification methods are
+// still referenced to avoid warnings about unused variables or functions.
+//
+// Like (D)CHECK, (D)CHECK_EQ also supports an optional base::NotFatalUntil
+// parameter. See base/check.h.
+//
+// To support the stringification of the check operands, this header is
+// *significantly* larger than base/check.h, so it should be avoided in common
+// headers.
+//
+// This header also provides the (DP)CHECK macros (by including check.h), so
+// if you use e.g. both CHECK_EQ and CHECK, including this header is enough.
+// If you only use CHECK however, please include the smaller check.h instead.
+
+namespace logging {
+
+// Functions for turning check operand values into NUL-terminated C strings.
+// Caller takes ownership of the result and must release it with `free`.
+// This would normally be defined by <ostream>, but this header tries to avoid
+// including <ostream> to reduce compile-time. See https://crrev.com/c/2128112.
+char* CheckOpValueStr(int v);
+char* CheckOpValueStr(unsigned v);
+char* CheckOpValueStr(long v);
+char* CheckOpValueStr(unsigned long v);
+char* CheckOpValueStr(long long v);
+char* CheckOpValueStr(unsigned long long v);
+char* CheckOpValueStr(const void* v);
+char* CheckOpValueStr(std::nullptr_t v);
+char* CheckOpValueStr(double v);
+// Although the standard defines operator<< for std::string and
+// std::string_view in their respective headers, libc++ requires <ostream>
+// for them. See https://github.com/llvm/llvm-project/issues/61070. So we
+// define non-<ostream> versions here too.
+char* CheckOpValueStr(const std::string& v);
+char* CheckOpValueStr(std::string_view v);
+
+// Convert a streamable value to string out-of-line to avoid <sstream>.
+char* StreamValToStr(const void* v,
+                     void (*stream_func)(std::ostream&, const void*));
+
+#ifdef __has_builtin
+#define SUPPORTS_BUILTIN_ADDRESSOF (__has_builtin(__builtin_addressof))
+#else
+#define SUPPORTS_BUILTIN_ADDRESSOF 0
+#endif
+
+template <typename T>
+  requires(base::internal::SupportsOstreamOperator<const T&> &&
+           !std::is_function_v<std::remove_pointer_t<T>>)
+inline char* CheckOpValueStr(const T& v) {
+  auto f = [](std::ostream& s, const void* p) {
+    s << *reinterpret_cast<const T*>(p);
+  };
+
+  // operator& might be overloaded, so do the std::addressof dance.
+  // __builtin_addressof is preferred since it also handles Obj-C ARC pointers.
+  // Some casting is still needed, because T might be volatile.
+#if SUPPORTS_BUILTIN_ADDRESSOF
+  const void* vp = const_cast<const void*>(
+      reinterpret_cast<const volatile void*>(__builtin_addressof(v)));
+#else
+  const void* vp = reinterpret_cast<const void*>(
+      const_cast<const char*>(&reinterpret_cast<const volatile char&>(v)));
+#endif
+  return StreamValToStr(vp, f);
+}
+
+#undef SUPPORTS_BUILTIN_ADDRESSOF
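+
+// Illustrative sketch (not part of the upstream header): any type with a
+// suitable operator<< is handled by the template overload above. For a
+// hypothetical
+//
+//   struct Point { int x, y; };
+//   std::ostream& operator<<(std::ostream& os, const Point& p) {
+//     return os << '(' << p.x << ", " << p.y << ')';
+//   }
+//
+// a failing CHECK_EQ(a, b) on two Points logs "(1, 2) vs. (3, 4)".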
+
+// Overload for types that have no operator<< but do have .ToString() defined.
+template <typename T>
+  requires(!base::internal::SupportsOstreamOperator<const T&> &&
+           base::internal::SupportsToString<const T&>)
+inline char* CheckOpValueStr(const T& v) {
+  // .ToString() may not return a std::string, e.g. blink::WTF::String.
+  return CheckOpValueStr(v.ToString());
+}
+
+// Provide an overload for functions and function pointers. Function pointers
+// don't implicitly convert to void* but do implicitly convert to bool, so
+// without this function pointers are always printed as 1 or 0. (MSVC isn't
+// standards-conforming here and converts function pointers to regular
+// pointers, so this is a no-op for MSVC.)
+template <typename T>
+  requires(std::is_function_v<std::remove_pointer_t<T>>)
+inline char* CheckOpValueStr(const T& v) {
+  return CheckOpValueStr(reinterpret_cast<const void*>(v));
+}
+
+// We need overloads for enums that don't support operator<<.
+// (i.e. scoped enums where no operator<< overload was declared).
+template <typename T>
+  requires(!base::internal::SupportsOstreamOperator<const T&> &&
+           std::is_enum_v<T>)
+inline char* CheckOpValueStr(const T& v) {
+  return CheckOpValueStr(static_cast<std::underlying_type_t<T>>(v));
+}
+
+// Takes ownership of `v1_str` and `v2_str`, destroying them with free(). For
+// use with CheckOpValueStr() which allocates these strings using strdup().
+// Returns allocated string (with strdup) for passing into
+// ::logging::CheckError::(D)CheckOp methods.
+// TODO(pbos): Annotate this ABSL_ATTRIBUTE_RETURNS_NONNULL after solving
+// compile failure.
+char* CreateCheckOpLogMessageString(const char* expr_str,
+                                    char* v1_str,
+                                    char* v2_str);
+
+// Helper macro for binary operators.
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+//   CHECK_EQ(2, a);
+#define CHECK_OP_FUNCTION_IMPL(check_failure_function, name, op, val1, val2, \
+                               ...)                                          \
+  switch (0)                                                                 \
+  case 0:                                                                    \
+  default:                                                                   \
+    if (char* const message_on_fail = ::logging::Check##name##Impl(          \
+            (val1), (val2), #val1 " " #op " " #val2);                        \
+        !message_on_fail)                                                    \
+      ;                                                                      \
+    else                                                                     \
+      check_failure_function(message_on_fail __VA_OPT__(, ) __VA_ARGS__)
+
+//#if !CHECK_WILL_STREAM()
+//
+//// Discard log strings to reduce code bloat.
+//#define CHECK_OP(name, op, val1, val2, ...)                               \
+//  BASE_IF(BASE_IS_EMPTY(__VA_ARGS__), CHECK((val1)op(val2)),              \
+//          CHECK_OP_FUNCTION_IMPL(::logging::CheckError::CheckOp, name, op, \
+//                                 val1, val2, __VA_ARGS__))
+//
+//#else
+
+#define CHECK_OP(name, op, val1, val2, ...)                              \
+  CHECK_OP_FUNCTION_IMPL(::logging::CheckError::CheckOp, name, op, val1, \
+                         val2 __VA_OPT__(, ) __VA_ARGS__)
+
+//#endif
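+
+// Illustrative sketch (not part of the upstream header): thanks to the enum
+// overload of CheckOpValueStr() above, a scoped enum without operator<< still
+// stringifies via its underlying type, e.g. a hypothetical
+//
+//   enum class Color : int { kRed = 0, kBlue = 1 };
+//   CHECK_EQ(Color::kRed, color);  // on failure logs e.g. "(0 vs. 1)"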
+
+// The second overload avoids address-taking of static members for
+// fundamental types.
+#define DEFINE_CHECK_OP_IMPL(name, op)                                     \
+  template <typename T, typename U>                                        \
+    requires(!std::is_fundamental_v<T> || !std::is_fundamental_v<U>)       \
+  constexpr char* Check##name##Impl(const T& v1, const U& v2,              \
+                                    const char* expr_str) {                \
+    if (LIKELY(ANALYZER_ASSUME_TRUE(v1 op v2)))                            \
+      return nullptr;                                                      \
+    return CreateCheckOpLogMessageString(expr_str, CheckOpValueStr(v1),    \
+                                         CheckOpValueStr(v2));             \
+  }                                                                        \
+  template <typename T, typename U>                                        \
+    requires(std::is_fundamental_v<T> && std::is_fundamental_v<U>)         \
+  constexpr char* Check##name##Impl(T v1, U v2, const char* expr_str) {    \
+    if (LIKELY(ANALYZER_ASSUME_TRUE(v1 op v2)))                            \
+      return nullptr;                                                      \
+    return CreateCheckOpLogMessageString(expr_str, CheckOpValueStr(v1),    \
+                                         CheckOpValueStr(v2));             \
+  }
+
+// clang-format off
+DEFINE_CHECK_OP_IMPL(EQ, ==)
+DEFINE_CHECK_OP_IMPL(NE, !=)
+DEFINE_CHECK_OP_IMPL(LE, <=)
+DEFINE_CHECK_OP_IMPL(LT, < )
+DEFINE_CHECK_OP_IMPL(GE, >=)
+DEFINE_CHECK_OP_IMPL(GT, > )
+#undef DEFINE_CHECK_OP_IMPL
+#define CHECK_EQ(val1, val2, ...) \
+  CHECK_OP(EQ, ==, val1, val2 __VA_OPT__(, ) __VA_ARGS__)
+#define CHECK_NE(val1, val2, ...) \
+  CHECK_OP(NE, !=, val1, val2 __VA_OPT__(, ) __VA_ARGS__)
+#define CHECK_LE(val1, val2, ...) \
+  CHECK_OP(LE, <=, val1, val2 __VA_OPT__(, ) __VA_ARGS__)
+#define CHECK_LT(val1, val2, ...) \
+  CHECK_OP(LT, < , val1, val2 __VA_OPT__(, ) __VA_ARGS__)
+#define CHECK_GE(val1, val2, ...) \
+  CHECK_OP(GE, >=, val1, val2 __VA_OPT__(, ) __VA_ARGS__)
+#define CHECK_GT(val1, val2, ...) \
+  CHECK_OP(GT, > , val1, val2 __VA_OPT__(, ) __VA_ARGS__)
+// clang-format on
+
+#if DCHECK_IS_ON()
+
+#define DCHECK_OP(name, op, val1, val2) \
+  CHECK_OP_FUNCTION_IMPL(::logging::CheckError::DCheckOp, name, op, val1, val2)
+
+#else
+
+// Don't do any evaluation but still reference the same stuff as when enabled.
+#define DCHECK_OP(name, op, val1, val2)                        \
+  EAT_CHECK_STREAM_PARAMS((::logging::CheckOpValueStr(val1),   \
+                           ::logging::CheckOpValueStr(val2),   \
+                           (val1)op(val2)))
+
+#endif
+
+// clang-format off
+#define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2)
+#define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2)
+#define DCHECK_LE(val1, val2) DCHECK_OP(LE, <=, val1, val2)
+#define DCHECK_LT(val1, val2) DCHECK_OP(LT, < , val1, val2)
+#define DCHECK_GE(val1, val2) DCHECK_OP(GE, >=, val1, val2)
+#define DCHECK_GT(val1, val2) DCHECK_OP(GT, > , val1, val2)
+// clang-format on
+
+#define DUMP_WILL_BE_CHECK_OP(name, op, val1, val2)                          \
+  CHECK_OP_FUNCTION_IMPL(::logging::CheckError::DumpWillBeCheckOp, name, op, \
+                         val1, val2)
+
+#define DUMP_WILL_BE_CHECK_EQ(val1, val2) \
+  DUMP_WILL_BE_CHECK_OP(EQ, ==, val1, val2)
+#define DUMP_WILL_BE_CHECK_NE(val1, val2) \
+  DUMP_WILL_BE_CHECK_OP(NE, !=, val1, val2)
+#define DUMP_WILL_BE_CHECK_LE(val1, val2) \
+  DUMP_WILL_BE_CHECK_OP(LE, <=, val1, val2)
+#define DUMP_WILL_BE_CHECK_LT(val1, val2) \
+  DUMP_WILL_BE_CHECK_OP(LT, <, val1, val2)
+#define DUMP_WILL_BE_CHECK_GE(val1, val2) \
+  DUMP_WILL_BE_CHECK_OP(GE, >=, val1, val2)
+#define DUMP_WILL_BE_CHECK_GT(val1, val2) \
+  DUMP_WILL_BE_CHECK_OP(GT, >, val1, val2)
+
+}  // namespace logging
+
+#endif  // BASE_CHECK_OP_H_
+
diff --git a/bridge/bindings/v8/base/compiler_specific.h b/bridge/bindings/v8/base/compiler_specific.h
new file mode 100644
index 0000000000..6844adcbec
--- /dev/null
+++ b/bridge/bindings/v8/base/compiler_specific.h
@@ -0,0 +1,630 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef WEBF_COMPILER_SPECIFIC_H
+#define WEBF_COMPILER_SPECIFIC_H
+
+#include "../for_build/build_config.h"
+
+#if defined(COMPILER_MSVC) && !defined(__clang__)
+#error "Only clang-cl is supported on Windows, see https://crbug.com/988071"
+#endif
+
+// This is a wrapper around `__has_cpp_attribute`, which can be used to test
+// for the presence of an attribute. In case the compiler does not support
+// this macro, it will simply evaluate to 0.
+//
+// References:
+// https://wg21.link/sd6#testing-for-the-presence-of-an-attribute-__has_cpp_attribute
+// https://wg21.link/cpp.cond#:__has_cpp_attribute
+#if defined(__has_cpp_attribute)
+#define HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define HAS_CPP_ATTRIBUTE(x) 0
+#endif
+
+// A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE.
+#if defined(__has_attribute)
+#define HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define HAS_ATTRIBUTE(x) 0
+#endif
+
+// A wrapper around `__has_builtin`, similar to HAS_CPP_ATTRIBUTE.
+#if defined(__has_builtin)
+#define HAS_BUILTIN(x) __has_builtin(x)
+#else
+#define HAS_BUILTIN(x) 0
+#endif
+
+// Annotate a function indicating it should not be inlined.
+// Use like:
+//   NOINLINE void DoStuff() { ... }
+#if defined(__clang__) && HAS_ATTRIBUTE(noinline)
+#define NOINLINE [[clang::noinline]]
+#elif defined(COMPILER_GCC) && HAS_ATTRIBUTE(noinline)
+#define NOINLINE __attribute__((noinline))
+#elif defined(COMPILER_MSVC)
+#define NOINLINE __declspec(noinline)
+#else
+#define NOINLINE
+#endif
+
+// Annotate a function indicating it should not be optimized.
+#if defined(__clang__) && HAS_ATTRIBUTE(optnone)
+#define NOOPT [[clang::optnone]]
+#elif defined(COMPILER_GCC) && HAS_ATTRIBUTE(optimize)
+#define NOOPT __attribute__((optimize(0)))
+#else
+#define NOOPT
+#endif
+
+#if defined(__clang__) && defined(NDEBUG) && HAS_ATTRIBUTE(always_inline)
+#define ALWAYS_INLINE [[clang::always_inline]] inline
+#elif defined(COMPILER_GCC) && defined(NDEBUG) && HAS_ATTRIBUTE(always_inline)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif defined(COMPILER_MSVC) && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
+// Annotate a function indicating it should never be tail called. Useful to
+// make sure callers of the annotated function are never omitted from
+// call-stacks. To provide the complementary behavior (prevent the annotated
+// function from being omitted) look at NOINLINE. Also note that this doesn't
+// prevent code folding of multiple identical caller functions into a single
+// signature. To prevent code folding, see NO_CODE_FOLDING() in
+// base/debug/alias.h.
+// Use like:
+//   NOT_TAIL_CALLED void FooBar();
+#if defined(__clang__) && HAS_ATTRIBUTE(not_tail_called)
+#define NOT_TAIL_CALLED [[clang::not_tail_called]]
+#else
+#define NOT_TAIL_CALLED
+#endif
+
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+//   class ALIGNAS(16) MyClass { ... }
+//   ALIGNAS(16) int array[4];
+//
+// In most places you can use the C++11 keyword "alignas", which is preferred.
+//
+// Historically, compilers had trouble mixing __attribute__((...)) syntax with
+// alignas(...) syntax. However, at least Clang is very accepting nowadays. It
+// may be that this macro can be removed entirely.
+#if defined(__clang__)
+#define ALIGNAS(byte_alignment) alignas(byte_alignment)
+#elif defined(COMPILER_MSVC)
+#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC) && HAS_ATTRIBUTE(aligned)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
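+
+// Illustrative sketch (not part of the upstream header) of how the
+// annotations above combine for a crash-reporting helper:
+//
+//   NOINLINE NOT_TAIL_CALLED void ReportFailure() {  // hypothetical
+//     // NOINLINE keeps this function as its own stack frame;
+//     // NOT_TAIL_CALLED keeps the caller's frame from being elided by a
+//     // tail call, so both appear in crash-report stacks.
+//   }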
+
+// In case the compiler supports it, NO_UNIQUE_ADDRESS evaluates to the C++20
+// attribute [[no_unique_address]]. This allows annotating data members so
+// that they need not have an address distinct from all other non-static data
+// members of their class.
+//
+// References:
+// * https://en.cppreference.com/w/cpp/language/attributes/no_unique_address
+// * https://wg21.link/dcl.attr.nouniqueaddr
+#if defined(COMPILER_MSVC) && HAS_CPP_ATTRIBUTE(msvc::no_unique_address)
+// Unfortunately MSVC ignores [[no_unique_address]] (see
+// https://devblogs.microsoft.com/cppblog/msvc-cpp20-and-the-std-cpp20-switch/#msvc-extensions-and-abi),
+// and clang-cl matches it for ABI compatibility reasons. We need to prefer
+// [[msvc::no_unique_address]] when available if we actually want any effect.
+#define NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]
+#elif HAS_CPP_ATTRIBUTE(no_unique_address)
+#define NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define NO_UNIQUE_ADDRESS
+#endif
+
+// Tells the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+// For member functions, the implicit this parameter counts as index 1.
+#if (defined(COMPILER_GCC) || defined(__clang__)) && HAS_ATTRIBUTE(format)
+#define PRINTF_FORMAT(format_param, dots_param) \
+  __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
+
+// WPRINTF_FORMAT is the same, but for wide format strings.
+// This doesn't appear to yet be implemented in any compiler.
+// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
+#define WPRINTF_FORMAT(format_param, dots_param)
+// If available, it would look like:
+//   __attribute__((format(wprintf, format_param, dots_param)))
+
+// Sanitizers annotations.
+#if HAS_ATTRIBUTE(no_sanitize)
+#define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#if !defined(NO_SANITIZE)
+#define NO_SANITIZE(what)
+#endif
+
+// MemorySanitizer annotations.
+#if defined(MEMORY_SANITIZER) && !BUILDFLAG(IS_NACL)
+#include <sanitizer/msan_interface.h>
+
+// Mark a memory region fully initialized.
+// Use this to annotate code that deliberately reads uninitialized data, for
+// example a GC scavenging root set pointers from the stack.
+#define MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
+
+// Check a memory region for initializedness, as if it was being used here.
+// If any bits are uninitialized, crash with an MSan report.
+// Use this to sanitize data which MSan won't be able to track, e.g. before
+// passing data to another process via shared memory.
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
+  __msan_check_mem_is_initialized(p, size)
+#else  // MEMORY_SANITIZER
+#define MSAN_UNPOISON(p, size)
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
+#endif  // MEMORY_SANITIZER
+
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity for perf reasons.
+#if !defined(DISABLE_CFI_PERF)
+#if defined(__clang__) && defined(OFFICIAL_BUILD)
+#define DISABLE_CFI_PERF NO_SANITIZE("cfi")
+#else
+#define DISABLE_CFI_PERF
+#endif
+#endif
+
+// DISABLE_CFI_ICALL -- Disable Control Flow Integrity indirect call checks.
+// Security Note: if you just need to allow calling of dlsym functions use
+// DISABLE_CFI_DLSYM.
+#if !defined(DISABLE_CFI_ICALL)
+#if BUILDFLAG(IS_WIN)
+// Windows also needs __declspec(guard(nocf)).
+#define DISABLE_CFI_ICALL NO_SANITIZE("cfi-icall") __declspec(guard(nocf))
+#else
+#define DISABLE_CFI_ICALL NO_SANITIZE("cfi-icall")
+#endif
+#endif
+#if !defined(DISABLE_CFI_ICALL)
+#define DISABLE_CFI_ICALL
+#endif
+
+// DISABLE_CFI_DLSYM -- applies DISABLE_CFI_ICALL on platforms where dlsym
+// functions must be called. Retains CFI checks on platforms where loaded
+// modules participate in CFI (e.g. Windows).
+#if !defined(DISABLE_CFI_DLSYM)
+#if BUILDFLAG(IS_WIN)
+// Windows modules register functions when loaded so can be checked by CFG.
+#define DISABLE_CFI_DLSYM
+#else
+#define DISABLE_CFI_DLSYM DISABLE_CFI_ICALL
+#endif
+#endif
+#if !defined(DISABLE_CFI_DLSYM)
+#define DISABLE_CFI_DLSYM
+#endif
+
+// Macro useful for writing cross-platform function pointers.
+#if !defined(CDECL)
+#if BUILDFLAG(IS_WIN)
+#define CDECL __cdecl
+#else  // BUILDFLAG(IS_WIN)
+#define CDECL
+#endif  // BUILDFLAG(IS_WIN)
+#endif  // !defined(CDECL)
+
+// Macro for hinting that an expression is likely to be false.
+#if !defined(UNLIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(UNLIKELY)
+
+#if !defined(LIKELY)
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define LIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(LIKELY)
+
+// Compiler feature-detection.
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
+#if defined(__has_feature)
+#define HAS_FEATURE(FEATURE) __has_feature(FEATURE)
+#else
+#define HAS_FEATURE(FEATURE) 0
+#endif
+
+#if defined(COMPILER_GCC)
+#define PRETTY_FUNCTION __PRETTY_FUNCTION__
+#elif defined(COMPILER_MSVC)
+#define PRETTY_FUNCTION __FUNCSIG__
+#else
+// See https://en.cppreference.com/w/c/language/function_definition#func
+#define PRETTY_FUNCTION __func__
+#endif
+
+#if !defined(CPU_ARM_NEON)
+#if defined(__arm__)
+#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
+    !defined(__VFP_FP__) && !defined(_WIN32_WCE) && !defined(ANDROID)
+#error Chromium does not support middle endian architecture
+#endif
+#if defined(__ARM_NEON__)
+#define CPU_ARM_NEON 1
+#endif
+#endif  // defined(__arm__)
+#endif  // !defined(CPU_ARM_NEON)
+
+#if !defined(HAVE_MIPS_MSA_INTRINSICS)
+#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
+#define HAVE_MIPS_MSA_INTRINSICS 1
+#endif
+#endif
+
+#if defined(__clang__) && HAS_ATTRIBUTE(uninitialized)
+// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
+// the specified variable.
+// Library-wide alternative is
+// 'configs -= [ "//build/config/compiler:default_init_stack_vars" ]' in .gn
+// file.
+//
+// See "init_stack_vars" in build/config/compiler/BUILD.gn and
+// http://crbug.com/977230
+// "init_stack_vars" is enabled for non-official builds and we hope to enable
+// it in official builds in 2020 as well. The flag writes a fixed pattern into
+// the uninitialized parts of all local variables.
+// In rare cases such initialization is undesirable and the attribute can be
+// used:
+// 1. Degraded performance
+//    In most cases the compiler is able to remove the additional stores, e.g.
+//    if the memory is never accessed or is properly initialized later.
+//    Preserved stores mostly will not affect program performance. However, if
+//    the compiler fails to do so on some performance-critical code we can get
+//    a visible regression in a benchmark.
+// 2. memset, memcpy calls
+//    The compiler may replace some memory writes with memset or memcpy calls.
+//    This is not -ftrivial-auto-var-init specific, but it can happen more
+//    likely with the flag. It can be a problem if code is not linked with the
+//    C run-time library.
+//
+// Note: The flag is a security risk mitigation feature. So in the future the
+// attribute's uses should be avoided when possible. However, to enable this
+// mitigation on most of the code we need to be less strict now and minimize
+// the number of exceptions later. So if in doubt feel free to use the
+// attribute, but please document the problem for someone who is going to
+// clean it up later. E.g. platform, bot, benchmark or test name in the patch
+// description or next to the attribute.
+#define STACK_UNINITIALIZED [[clang::uninitialized]]
+#else
+#define STACK_UNINITIALIZED
+#endif
+
+// Attribute "no_stack_protector" disables -fstack-protector for the
+// specified function.
+//
+// "stack_protector" is enabled on most POSIX builds. The flag adds a canary
+// to each stack frame, which on function return is checked against a
+// reference canary. If the canaries do not match, it's likely that a stack
+// buffer overflow has occurred, so immediately crashing will prevent
+// exploitation in many cases.
+//
+// In some cases it's desirable to remove this, e.g. on hot functions, or if
+// we have purposely changed the reference canary.
+#if defined(COMPILER_GCC) || defined(__clang__)
+#if HAS_ATTRIBUTE(__no_stack_protector__)
+#define NO_STACK_PROTECTOR __attribute__((__no_stack_protector__))
+#else
+#define NO_STACK_PROTECTOR \
+  __attribute__((__optimize__("-fno-stack-protector")))
+#endif
+#else
+#define NO_STACK_PROTECTOR
+#endif
+
+// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
+// to Clang which control what code paths are statically analyzed,
+// and is meant to be used in conjunction with assert & assert-like functions.
+// The expression is passed straight through if analysis isn't enabled.
+//
+// ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current
+// codepath and any other branching codepaths that might follow.
+#if defined(__clang_analyzer__)
+
+inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
+  return false;
+}
+
+inline constexpr bool AnalyzerAssumeTrue(bool arg) {
+  // AnalyzerNoReturn() is invoked and analysis is terminated if |arg| is
+  // false.
+  return arg || AnalyzerNoReturn();
+}
+
+#define ANALYZER_ASSUME_TRUE(arg) ::AnalyzerAssumeTrue(!!(arg))
+#define ANALYZER_SKIP_THIS_PATH() static_cast<void>(::AnalyzerNoReturn())
+
+#else  // !defined(__clang_analyzer__)
+
+#define ANALYZER_ASSUME_TRUE(arg) (arg)
+#define ANALYZER_SKIP_THIS_PATH()
+
+#endif  // defined(__clang_analyzer__)
+
+// Use the nomerge attribute to disable optimization of merging multiple
+// identical calls.
+#if defined(__clang__) && HAS_ATTRIBUTE(nomerge)
+#define NOMERGE [[clang::nomerge]]
+#else
+#define NOMERGE
+#endif
+
+// Marks a type as being eligible for the "trivial" ABI despite having a
+// non-trivial destructor or copy/move constructor.
+// Such types can be relocated after construction by simply copying their
+// memory, which makes them eligible to be passed in registers. The canonical
+// example is std::unique_ptr.
+//
+// Use with caution; this has some subtle effects on constructor/destructor
+// ordering and will be very incorrect if the type relies on its address
+// remaining constant. When used as a function argument (by value), the value
+// may be constructed in the caller's stack frame, passed in a register, and
+// then used and destructed in the callee's stack frame. A similar thing can
+// occur when values are returned.
+//
+// TRIVIAL_ABI is not needed for types which have a trivial destructor and
+// copy/move constructors, such as base::TimeTicks and other POD.
+//
+// It is also not likely to be effective on types too large to be passed in
+// one or two registers on typical target ABIs.
+//
+// See also:
+//   https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
+//   https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html
+#if defined(__clang__) && HAS_ATTRIBUTE(trivial_abi)
+#define TRIVIAL_ABI [[clang::trivial_abi]]
+#else
+#define TRIVIAL_ABI
+#endif
+
+// Detect whether a type is trivially relocatable, i.e. a move-and-destroy
+// sequence can be replaced with memmove(). This can be used to optimize the
+// implementation of containers. This is automatically true for types that
+// were defined with TRIVIAL_ABI such as scoped_refptr.
+//
+// See also:
+//   https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2023/p1144r8.html
+//   https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable
+#if defined(__clang__) && HAS_BUILTIN(__is_trivially_relocatable)
+#define IS_TRIVIALLY_RELOCATABLE(t) __is_trivially_relocatable(t)
+#else
+#define IS_TRIVIALLY_RELOCATABLE(t) false
+#endif
+
+// Marks a member function as reinitializing a moved-from variable.
+// See also
+// https://clang.llvm.org/extra/clang-tidy/checks/bugprone/use-after-move.html#reinitialization
+#if defined(__clang__) && HAS_ATTRIBUTE(reinitializes)
+#define REINITIALIZES_AFTER_MOVE [[clang::reinitializes]]
+#else
+#define REINITIALIZES_AFTER_MOVE
+#endif
+
+#if defined(__clang__)
+#define GSL_OWNER [[gsl::Owner]]
+#define GSL_POINTER [[gsl::Pointer]]
+#else
+#define GSL_OWNER
+#define GSL_POINTER
+#endif
+
+// Adds the "logically_const" tag to a symbol's mangled name. The "Mutable
+// Constants" check [1] detects instances of constants that aren't in .rodata,
+// e.g. due to a missing `const`. Using this tag suppresses the check for this
+// symbol, allowing it to live outside .rodata without a warning.
+//
+// [1]:
+// https://crsrc.org/c/docs/speed/binary_size/android_binary_size_trybot.md#Mutable-Constants
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define LOGICALLY_CONST [[gnu::abi_tag("logically_const")]]
+#else
+#define LOGICALLY_CONST
+#endif
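+
+// Illustrative sketch (not part of the upstream header): a hypothetical
+// handle type marked TRIVIAL_ABI. Despite the non-trivial move constructor
+// and destructor, clang may pass it in registers, and
+// IS_TRIVIALLY_RELOCATABLE(ScopedId) evaluates to true there:
+//
+//   struct TRIVIAL_ABI ScopedId {
+//     explicit ScopedId(int id) : id_(id) {}
+//     ScopedId(ScopedId&& o) : id_(o.id_) { o.id_ = -1; }
+//     ~ScopedId() { /* release id_ */ }
+//     int id_;
+//   };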
+
+// preserve_most is clang's calling convention that reduces register pressure
+// for the caller and as such can be used for cold calls. Support for the
+// "preserve_most" attribute is limited:
+// - 32-bit platforms do not implement it,
+// - component builds fail because _dl_runtime_resolve() clobbers registers,
+// - there are crashes on arm64 on Windows (https://crbug.com/v8/14065), which
+//   can hopefully be fixed in the future.
+// Additionally, the initial implementation in clang <= 16 overwrote the
+// return register(s) in the epilogue of a preserve_most function, so we only
+// use preserve_most in clang >= 17 (see https://reviews.llvm.org/D143425).
+// Clang only supports preserve_most on X86-64 and AArch64 for now.
+// See https://clang.llvm.org/docs/AttributeReference.html#preserve-most for
+// more details.
+#if (defined(ARCH_CPU_ARM64) || defined(ARCH_CPU_X86_64)) && \
+    !(BUILDFLAG(IS_WIN) && defined(ARCH_CPU_ARM64)) &&       \
+    !defined(COMPONENT_BUILD) && defined(__clang__) &&       \
+    __clang_major__ >= 17 && HAS_ATTRIBUTE(preserve_most)
+#define PRESERVE_MOST __attribute__((preserve_most))
+#else
+#define PRESERVE_MOST
+#endif
+
+// Mark parameters or return types as having a lifetime attached to the class.
+//
+// When used to mark a method's pointer/reference parameter, the compiler is
+// made aware that it will be stored internally in the class and the pointee
+// must outlive the class. Typically used on constructor arguments. It should
+// appear to the right of the parameter's variable name.
+//
+// Example:
+// ```
+// struct S {
+//   S(int* p LIFETIME_BOUND) : ptr_(p) {}
+//
+//   int* ptr_;
+// };
+// ```
+//
+// When used on a method with a return value, the compiler is made aware that
+// the returned type is/has a pointer to the internals of the class, and must
+// not outlive the class object. It should appear after any method qualifiers.
+//
+// Example:
+// ```
+// struct S {
+//   int* GetPtr() const LIFETIME_BOUND { return i_; };
+//
+//   int i_;
+// };
+// ```
+//
+// This allows the compiler to warn in (a limited set of) cases where the
+// pointer would otherwise be left dangling, especially in cases where the
+// pointee would be a destroyed temporary.
+//
+// Docs: https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+#if defined(__clang__)
+#define LIFETIME_BOUND [[clang::lifetimebound]]
+#else
+#define LIFETIME_BOUND
+#endif
+
+// Mark a function as pure, meaning that it has no side effects: it does not
+// write anything external to the function's local variables and return value.
+//
+// WARNING: If this attribute is mis-used it will result in UB and
+// miscompilation, as the optimizer may fold multiple calls into one and
+// reorder them inappropriately. This shouldn't appear outside of key
+// vocabulary types. It allows callers to work with the vocab type directly,
+// and call its methods without having to worry about caching things into
+// local variables in hot code.
+//
+// This attribute must not appear on functions that make use of function
+// pointers, virtual methods, or methods of templates (including operators
+// like comparison), as the "pure" function can not know what those functions
+// do and can not guarantee there will never be side effects.
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define PURE_FUNCTION [[gnu::pure]]
+#else
+#define PURE_FUNCTION
+#endif
+
+// Functions should be marked with UNSAFE_BUFFER_USAGE when they lead to
+// out-of-bounds bugs when called with incorrect inputs.
+//
+// Ideally such functions should be paired with a safer version that works
+// with safe primitives like `base::span`. Otherwise, another safer coding
+// pattern should be documented along side the use of `UNSAFE_BUFFER_USAGE`.
+//
+// All functions marked with UNSAFE_BUFFER_USAGE should come with a safety
+// comment that explains the requirements of the function to prevent an
+// out-of-bounds bug. For example:
+// ```
+// // Function to do things between `input` and `end`.
+// //
+// // # Safety
+// // The `input` must point to an array with size at least 5. The `end` must
+// // point within the same allocation of `input` and not come before `input`.
+// ```
+//
+// The requirements described in the safety comment must be sufficient to
+// guarantee that the function never goes out of bounds. Annotating a function
+// in this way means that all callers will be required to wrap the call in an
+// `UNSAFE_BUFFERS()` macro (see below), with a comment justifying how it
+// meets the requirements.
+#if defined(__clang__) && HAS_ATTRIBUTE(unsafe_buffer_usage)
+#define UNSAFE_BUFFER_USAGE [[clang::unsafe_buffer_usage]]
+#else
+#define UNSAFE_BUFFER_USAGE
+#endif
+
+// UNSAFE_BUFFERS() wraps code that violates the -Wunsafe-buffer-usage
+// warning, such as:
+// - pointer arithmetic,
+// - pointer subscripting, and
+// - calls to functions annotated with UNSAFE_BUFFER_USAGE.
+//
+// This indicates code whose bounds correctness cannot be ensured
+// systematically, and thus requires manual review.
+//
+// **USE OF THIS MACRO SHOULD BE VERY RARE.** This should only be used when
+// strictly necessary. Prefer to use `base::span` instead of pointers, or
+// other safer coding patterns (like std containers) that avoid the
+// opportunity for out-of-bounds bugs to creep into the code. Any use of
+// UNSAFE_BUFFERS() can lead to a critical security bug if any assumptions
+// are wrong, or ever become wrong in the future.
+//
+// The macro should be used to wrap the minimum necessary code, to make it
+// clear what is unsafe, and prevent accidentally opting extra things out of
+// the warning.
+//
+// All usage of UNSAFE_BUFFERS() should come with a `// SAFETY: ...` comment
+// that explains how we have guaranteed that the pointer usage can never go
+// out-of-bounds, or that the requirements of the UNSAFE_BUFFER_USAGE function
+// are met. The safety comment should allow a reader to check that all
+// requirements have been met, using only local invariants. Examples of local
+// invariants include:
+// - Runtime conditions or CHECKs near the UNSAFE_BUFFERS macros
+// - Invariants guaranteed by types in the surrounding code
+// - Invariants guaranteed by function calls in the surrounding code
+// - Caller requirements, if the containing function is itself marked with
+//   UNSAFE_BUFFER_USAGE
+//
+// The last case should be an option of last resort. It is less safe and will
+// require the caller also use the UNSAFE_BUFFERS() macro. Prefer directly
+// capturing such invariants in types like `base::span`.
+//
+// Safety explanations may not rely on invariants that are not fully
+// encapsulated close to the UNSAFE_BUFFERS() usage. Instead, use safer coding
+// patterns or stronger invariants.
+#if defined(__clang__)
+// clang-format off
+// Formatting is off so that we can put each _Pragma on its own line, as
+// recommended by the gcc docs.
+#define UNSAFE_BUFFERS(...)                  \
+  _Pragma("clang unsafe_buffer_usage begin") \
+  __VA_ARGS__                                \
+  _Pragma("clang unsafe_buffer_usage end")
+// clang-format on
+#else
+#define UNSAFE_BUFFERS(...) __VA_ARGS__
+#endif
+
+// Defines a condition for a function to be checked at compile time if the
+// parameter's value is known at compile time. If the condition is failed, the
+// function is omitted from the overload set resolution, much like `requires`.
+//
+// If the parameter is a runtime value, then the condition is unable to be
+// checked and the function will be omitted from the overload set resolution.
+// This ensures the function can only be called with values known at compile
+// time. This is a clang extension.
+//
+// Example:
+// ```
+// void f(int a) ENABLE_IF_ATTR(a > 0, "a must be positive") {}
+// f(1);  // Ok.
+// f(0);  // Error: no valid f() found.
+// ```
+//
+// The `ENABLE_IF_ATTR` annotation is preferred over `consteval` with a check
+// that breaks compile because metaprogramming does not observe such checks.
+// So with `consteval`, the function looks callable to concepts/type_traits
+// but is not and will fail to compile even though it reports it's usable.
+// Whereas `ENABLE_IF_ATTR` interacts correctly with metaprogramming. This is
+// especially painful for constructors. See also
+// https://github.com/chromium/subspace/issues/266.
+#if defined(__clang__)
+#define ENABLE_IF_ATTR(cond, msg) __attribute__((enable_if(cond, msg)))
+#else
+#define ENABLE_IF_ATTR(cond, msg)
+#endif
+
+#endif  // WEBF_COMPILER_SPECIFIC_H
diff --git a/bridge/bindings/v8/base/containers/checked_iterators.h b/bridge/bindings/v8/base/containers/checked_iterators.h
new file mode 100644
index 0000000000..c87dcb5576
--- /dev/null
+++ b/bridge/bindings/v8/base/containers/checked_iterators.h
@@ -0,0 +1,240 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifdef UNSAFE_BUFFERS_BUILD
+// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
+#pragma allow_unsafe_buffers
+#endif
+
+#ifndef BASE_CONTAINERS_CHECKED_ITERATORS_H_
+#define BASE_CONTAINERS_CHECKED_ITERATORS_H_
+
+#include <compare>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+
+#include "bindings/v8/base/check_op.h"
+#include "bindings/v8/base/compiler_specific.h"
+#include "bindings/v8/base/containers/util.h"
+#include "bindings/v8/base/memory/raw_ptr_exclusion.h"
+#include "bindings/v8/for_build/build_config.h"
+
+namespace base {
+
+template <typename T>
+class CheckedContiguousIterator {
+ public:
+  using difference_type = std::ptrdiff_t;
+  using value_type = std::remove_cv_t<T>;
+  using pointer = T*;
+  using reference = T&;
+  using iterator_category = std::contiguous_iterator_tag;
+  using iterator_concept = std::contiguous_iterator_tag;
+
+  // Required for converting constructor below.
+  template <typename U>
+  friend class CheckedContiguousIterator;
+
+  // Required to be able to get to the underlying pointer without triggering
+  // CHECK failures.
+  template <typename Ptr>
+  friend struct std::pointer_traits;
+
+  constexpr CheckedContiguousIterator() = default;
+
+  UNSAFE_BUFFER_USAGE constexpr CheckedContiguousIterator(T* start,
+                                                          const T* end)
+      : CheckedContiguousIterator(start, start, end) {}
+
+  UNSAFE_BUFFER_USAGE constexpr CheckedContiguousIterator(const T* start,
+                                                          T* current,
+                                                          const T* end)
+      : start_(start), current_(current), end_(end) {
+    CHECK_LE(start, current);
+    CHECK_LE(current, end);
+  }
+
+  constexpr CheckedContiguousIterator(const CheckedContiguousIterator& other) =
+      default;
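+
+  // Illustrative usage (not part of the upstream header), assuming a plain
+  // array; constructing from raw pointers is the manually-reviewed step:
+  //
+  //   int arr[3] = {1, 2, 3};
+  //   // SAFETY: `arr + 3` is the one-past-the-end pointer of `arr`.
+  //   auto it = UNSAFE_BUFFERS(
+  //       base::CheckedContiguousIterator<int>(arr, arr + 3));
+  //   ++it;     // OK
+  //   it += 2;  // OK, now at end; dereferencing here would CHECK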
+
+  // Converting constructor allowing conversions like CCI<T> to CCI<const T>,
+  // but disallowing CCI<const T> to CCI<T> or CCI<int> to CCI<float>, which
+  // are unsafe. Furthermore, this is the same condition as used by the
+  // converting constructors of std::span and std::unique_ptr.
+  // See https://wg21.link/n4042 for details.
+  template <typename U>
+  constexpr CheckedContiguousIterator(const CheckedContiguousIterator<U>& other)
+    requires(std::convertible_to<U (*)[], T (*)[]>)
+      : start_(other.start_), current_(other.current_), end_(other.end_) {
+    // We explicitly don't delegate to the 3-argument constructor here. Its
+    // CHECKs would be redundant, since we expect |other| to maintain its own
+    // invariant. However, DCHECKs never hurt anybody. Presumably.
+    DCHECK_LE(other.start_, other.current_);
+    DCHECK_LE(other.current_, other.end_);
+  }
+
+  ~CheckedContiguousIterator() = default;
+
+  constexpr CheckedContiguousIterator& operator=(
+      const CheckedContiguousIterator& other) = default;
+
+  friend constexpr bool operator==(const CheckedContiguousIterator& lhs,
+                                   const CheckedContiguousIterator& rhs) {
+    lhs.CheckComparable(rhs);
+    return lhs.current_ == rhs.current_;
+  }
+
+  friend constexpr auto operator<=>(const CheckedContiguousIterator& lhs,
+                                    const CheckedContiguousIterator& rhs) {
+    lhs.CheckComparable(rhs);
+    return lhs.current_ <=> rhs.current_;
+  }
+
+  constexpr CheckedContiguousIterator& operator++() {
+    CHECK_NE(current_, end_);
+    ++current_;
+    return *this;
+  }
+
+  constexpr CheckedContiguousIterator operator++(int) {
+    CheckedContiguousIterator old = *this;
+    ++*this;
+    return old;
+  }
+
+  constexpr CheckedContiguousIterator& operator--() {
+    CHECK_NE(current_, start_);
+    --current_;
+    return *this;
+  }
+
+  constexpr CheckedContiguousIterator operator--(int) {
+    CheckedContiguousIterator old = *this;
+    --*this;
+    return old;
+  }
+
+  constexpr CheckedContiguousIterator& operator+=(difference_type rhs) {
+    if (rhs > 0) {
+      CHECK_LE(rhs, end_ - current_);
+    } else {
+      CHECK_LE(-rhs, current_ - start_);
+    }
+    current_ += rhs;
+    return *this;
+  }
+
+  constexpr CheckedContiguousIterator operator+(difference_type rhs) const {
+    CheckedContiguousIterator it = *this;
+    it += rhs;
+    return it;
+  }
+
+  constexpr friend CheckedContiguousIterator operator+(
+      difference_type lhs,
+      const CheckedContiguousIterator& rhs) {
+    return rhs + lhs;
+  }
+
+  constexpr CheckedContiguousIterator& operator-=(difference_type rhs) {
+    if (rhs < 0) {
+      CHECK_LE(-rhs, end_ - current_);
+    } else {
+      CHECK_LE(rhs, current_ - start_);
+    }
+    current_ -= rhs;
+    return *this;
+  }
+
+  constexpr CheckedContiguousIterator operator-(difference_type rhs) const {
+    CheckedContiguousIterator it = *this;
+    it -= rhs;
+    return it;
+  }
+
+  constexpr friend difference_type operator-(
+      const CheckedContiguousIterator& lhs,
+      const CheckedContiguousIterator& rhs) {
+    lhs.CheckComparable(rhs);
+    return lhs.current_ - rhs.current_;
+  }
+
+  constexpr reference operator*() const {
+    CHECK_NE(current_, end_);
+    return *current_;
+  }
+
+  constexpr pointer operator->() const {
+    CHECK_NE(current_, end_);
+    return current_;
+  }
+
+  constexpr reference operator[](difference_type rhs) const {
+    CHECK_GE(rhs, 0);
+    CHECK_LT(rhs, end_ - current_);
+    return current_[rhs];
+  }
+
+  [[nodiscard]] static bool IsRangeMoveSafe(
+      const CheckedContiguousIterator& from_begin,
+      const CheckedContiguousIterator& from_end,
+      const CheckedContiguousIterator& to) {
+    if (from_end < from_begin)
+      return false;
+    const auto from_begin_uintptr = get_uintptr(from_begin.current_);
+    const auto from_end_uintptr = get_uintptr(from_end.current_);
+    const auto to_begin_uintptr = get_uintptr(to.current_);
+    const auto to_end_uintptr =
+        get_uintptr((to + std::distance(from_begin, from_end)).current_);
+
+    return to_begin_uintptr >= from_end_uintptr ||
+           to_end_uintptr <= from_begin_uintptr;
+  }
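+
+  // Illustrative note (not part of the upstream header): IsRangeMoveSafe()
+  // is intended for callers doing bulk moves, e.g. a hypothetical container
+  // might guard a potentially overlapping move like so:
+  //
+  //   if (Iterator::IsRangeMoveSafe(first, last, dest))
+  //     std::move(first, last, dest);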
+
+ private:
+  constexpr void CheckComparable(const CheckedContiguousIterator& other) const {
+    CHECK_EQ(start_, other.start_);
+    CHECK_EQ(end_, other.end_);
+  }
+
+  // RAW_PTR_EXCLUSION: The embedding class is stack-scoped.
+  RAW_PTR_EXCLUSION const T* start_ = nullptr;
+  RAW_PTR_EXCLUSION T* current_ = nullptr;
+  RAW_PTR_EXCLUSION const T* end_ = nullptr;
+};
+
+template <typename T>
+using CheckedContiguousConstIterator = CheckedContiguousIterator<const T>;
+
+}  // namespace base
+
+// Specialize std::pointer_traits so that we can obtain the underlying raw
+// pointer without resulting in CHECK failures. The important bit is the
+// `to_address(pointer)` overload, which is the standard blessed way to
+// customize `std::to_address(pointer)` in C++20 [1].
+//
+// [1] https://wg21.link/pointer.traits.optmem
+
+template <typename T>
+struct std::pointer_traits<::base::CheckedContiguousIterator<T>> {
+  using pointer = ::base::CheckedContiguousIterator<T>;
+  using element_type = T;
+  using difference_type = ptrdiff_t;
+
+  template <typename U>
+  using rebind = ::base::CheckedContiguousIterator<U>;
+
+  static constexpr pointer pointer_to(element_type& r) noexcept {
+    return pointer(&r, &r);
+  }
+
+  static constexpr element_type* to_address(pointer p) noexcept {
+    return p.current_;
+  }
+};
+
+#endif  // BASE_CONTAINERS_CHECKED_ITERATORS_H_
+
diff --git a/bridge/bindings/v8/base/containers/dynamic_extent.h b/bridge/bindings/v8/base/containers/dynamic_extent.h
new file mode 100644
index 0000000000..2ce323ba01
--- /dev/null
+++ b/bridge/bindings/v8/base/containers/dynamic_extent.h
@@ -0,0 +1,20 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_CONTAINERS_DYNAMIC_EXTENT_H_
+#define BASE_CONTAINERS_DYNAMIC_EXTENT_H_
+
+#include <cstddef>
+#include <limits>
+
+namespace base {
+
+// [views.constants]
+inline constexpr size_t dynamic_extent = std::numeric_limits<size_t>::max();
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_DYNAMIC_EXTENT_H_
+
diff --git a/bridge/bindings/v8/base/containers/flat_map.h b/bridge/bindings/v8/base/containers/flat_map.h
new file mode 100644
index 0000000000..23db17aa51
--- /dev/null
+++ b/bridge/bindings/v8/base/containers/flat_map.h
@@ -0,0 +1,401 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_CONTAINERS_FLAT_MAP_H_
+#define BASE_CONTAINERS_FLAT_MAP_H_
+
+#include <functional>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "bindings/v8/base/check.h"
+#include "bindings/v8/base/containers/flat_tree.h"
+
+namespace base {
+
+namespace internal {
+
+// An implementation of the flat_tree GetKeyFromValue template parameter that
+// extracts the key as the first element of a pair.
+struct GetFirst {
+  template <typename Key, typename Mapped>
+  constexpr const Key& operator()(const std::pair<Key, Mapped>& p) const {
+    return p.first;
+  }
+};
+
+}  // namespace internal
+
+// flat_map is a container with a std::map-like interface that stores its
+// contents in a sorted container, by default a vector.
+//
+// Its implementation mostly tracks the corresponding standardization proposal
+// https://wg21.link/P0429, except that the storage of keys and values is not
+// split.
+//
+// Please see //base/containers/README.md for an overview of which container
+// to select.
+//
+// PROS
+//
+//  - Good memory locality.
+//  - Low overhead, especially for smaller maps.
+//  - Performance is good for more workloads than you might expect (see
+//    overview link above).
+//  - Supports C++14 map interface.
+//
+// CONS
+//
+//  - Inserts and removals are O(n).
+//
+// IMPORTANT NOTES
+//
+//  - Iterators are invalidated across mutations. This means that the
+//    following line of code has undefined behavior since adding a new element
+//    could resize the container, invalidating all iterators:
+//      container["new element"] = it.second;
+//  - If possible, construct a flat_map in one operation by inserting into
+//    a container and moving that container into the flat_map constructor.
+//
+// QUICK REFERENCE
+//
+// Most of the core functionality is inherited from flat_tree. Please see
+// flat_tree.h for more details for most of these functions. As a quick
+// reference, the functions available are:
+//
+// Constructors (inputs need not be sorted):
+//   flat_map(const flat_map&);
+//   flat_map(flat_map&&);
+//   flat_map(InputIterator first, InputIterator last,
+//            const Compare& compare = Compare());
+//   flat_map(const container_type& items,
+//            const Compare& compare = Compare());
+//   flat_map(container_type&& items,
+//            const Compare& compare = Compare());  // Re-use storage.
+//   flat_map(std::initializer_list<value_type> ilist,
+//            const Compare& comp = Compare());
+//
+// Constructors (inputs need to be sorted):
+//   flat_map(sorted_unique_t,
+//            InputIterator first, InputIterator last,
+//            const Compare& compare = Compare());
+//   flat_map(sorted_unique_t,
+//            const container_type& items,
+//            const Compare& compare = Compare());
+//   flat_map(sorted_unique_t,
+//            container_type&& items,
+//            const Compare& compare = Compare());  // Re-use storage.
+//   flat_map(sorted_unique_t,
+//            std::initializer_list<value_type> ilist,
+//            const Compare& comp = Compare());
+//
+// Assignment functions:
+//   flat_map& operator=(const flat_map&);
+//   flat_map& operator=(flat_map&&);
+//   flat_map& operator=(initializer_list<value_type>);
+//
+// Memory management functions:
+//   void reserve(size_t);
+//   size_t capacity() const;
+//   void shrink_to_fit();
+//
+// Size management functions:
+//   void clear();
+//   size_t size() const;
+//   size_t max_size() const;
+//   bool empty() const;
+//
+// Iterator functions:
+//   iterator begin();
+//   const_iterator begin() const;
+//   const_iterator cbegin() const;
+//   iterator end();
+//   const_iterator end() const;
+//   const_iterator cend() const;
+//   reverse_iterator rbegin();
+//   const_reverse_iterator rbegin() const;
+//   const_reverse_iterator crbegin() const;
+//   reverse_iterator rend();
+//   const_reverse_iterator rend() const;
+//   const_reverse_iterator crend() const;
+//
+// Insert and accessor functions:
+//   mapped_type& operator[](const key_type&);
+//   mapped_type& operator[](key_type&&);
+//   mapped_type& at(const K&);
+//   const mapped_type& at(const K&) const;
+//   std::pair<iterator, bool> insert(const value_type&);
+//   std::pair<iterator, bool> insert(value_type&&);
+//   iterator insert(const_iterator hint, const value_type&);
+//   iterator insert(const_iterator hint, value_type&&);
+//   void insert(InputIterator first, InputIterator last);
+//   std::pair<iterator, bool> insert_or_assign(K&&, M&&);
+//   iterator insert_or_assign(const_iterator hint, K&&, M&&);
+//   std::pair<iterator, bool> emplace(Args&&...);
+//   iterator emplace_hint(const_iterator, Args&&...);
+//   std::pair<iterator, bool> try_emplace(K&&, Args&&...);
+//   iterator try_emplace(const_iterator hint, K&&, Args&&...);
+//
+// Underlying type functions:
+//   container_type extract() &&;
+//   void replace(container_type&&);
+//
+// Erase functions:
+//   iterator erase(iterator);
+//   iterator erase(const_iterator);
+//   iterator erase(const_iterator first, const_iterator last);
+//   template <typename K> size_t erase(const K& key);
+
+//
+// Comparators (see std::map documentation):
+//   key_compare key_comp() const;
+//   value_compare value_comp() const;
+//
+// Search functions:
+//   template <typename K> size_t count(const K&) const;
+//   template <typename K> iterator find(const K&);
+//   template <typename K> const_iterator find(const K&) const;
+//   template <typename K> bool contains(const K&) const;
+//   template <typename K> std::pair<iterator, iterator>
+//       equal_range(const K&);
+//   template <typename K> iterator lower_bound(const K&);
+//   template <typename K> const_iterator lower_bound(const K&) const;
+//   template <typename K> iterator upper_bound(const K&);
+//   template <typename K> const_iterator upper_bound(const K&) const;
+//
+// General functions:
+//   void swap(flat_map&);
+//
+// Non-member operators:
+//   bool operator==(const flat_map&, const flat_map&);
+//   bool operator!=(const flat_map&, const flat_map&);
+//   bool operator<(const flat_map&, const flat_map&);
+//   bool operator>(const flat_map&, const flat_map&);
+//   bool operator>=(const flat_map&, const flat_map&);
+//   bool operator<=(const flat_map&, const flat_map&);
+//
+template <class Key,
+          class Mapped,
+          class Compare = std::less<>,
+          class Container = std::vector<std::pair<Key, Mapped>>>
+class flat_map : public ::base::internal::
+                     flat_tree<Key, internal::GetFirst, Compare, Container> {
+ private:
+  using tree = typename ::base::internal::
+      flat_tree<Key, internal::GetFirst, Compare, Container>;
+
+ public:
+  using key_type = typename tree::key_type;
+  using mapped_type = Mapped;
+  using value_type = typename tree::value_type;
+  using reference = typename Container::reference;
+  using const_reference = typename Container::const_reference;
+  using size_type = typename Container::size_type;
+  using difference_type = typename Container::difference_type;
+  using iterator = typename tree::iterator;
+  using const_iterator = typename tree::const_iterator;
+  using reverse_iterator = typename tree::reverse_iterator;
+  using const_reverse_iterator = typename tree::const_reverse_iterator;
+  using container_type = typename tree::container_type;
+
+  // --------------------------------------------------------------------------
+  // Lifetime and assignments.
+  //
+  // Note: we explicitly bring operator= in because otherwise
+  //   flat_map<...> x;
+  //   x = {...};
+  // would first create a flat_map and then move assign it. This most likely
+  // would be optimized away but still affects our debug builds.
+
+  using tree::tree;
+  using tree::operator=;
+
+  // Out-of-bound calls to at() will CHECK.
+  template <class K>
+  mapped_type& at(const K& key);
+  template <class K>
+  const mapped_type& at(const K& key) const;
+
+  // --------------------------------------------------------------------------
+  // Map-specific insert operations.
+  //
+  // Normal insert() functions are inherited from flat_tree.
+  //
+  // Assume that every operation invalidates iterators and references.
+  // Insertion of one element can take O(size).
+
+  mapped_type& operator[](const key_type& key);
+  mapped_type& operator[](key_type&& key);
+
+  template <class K, class M>
+  std::pair<iterator, bool> insert_or_assign(K&& key, M&& obj);
+  template <class K, class M>
+  iterator insert_or_assign(const_iterator hint, K&& key, M&& obj);
+
+  template <class K, class... Args>
+  std::enable_if_t<std::is_constructible_v<key_type, K&&>,
+                   std::pair<iterator, bool>>
+  try_emplace(K&& key, Args&&... args);
+
+  template <class K, class... Args>
+  std::enable_if_t<std::is_constructible_v<key_type, K&&>, iterator>
+  try_emplace(const_iterator hint, K&& key, Args&&... args);
+
+  // --------------------------------------------------------------------------
+  // General operations.
+  //
+  // Assume that swap invalidates iterators and references.
+
+  void swap(flat_map& other) noexcept;
+
+  friend void swap(flat_map& lhs, flat_map& rhs) noexcept { lhs.swap(rhs); }
+};
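+
+// Illustrative usage (not part of the upstream header), following the
+// construct-in-one-operation advice above:
+//
+//   std::vector<std::pair<std::string, int>> items = {{"b", 2}, {"a", 1}};
+//   base::flat_map<std::string, int> map(std::move(items));  // sorts once
+//   map["c"] = 3;           // O(n) insert into the sorted vector
+//   int a = map.at("a");    // would CHECK if the key were missing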
+
+template <class Key, class Mapped, class Compare, class Container>
+template <class K>
+auto flat_map<Key, Mapped, Compare, Container>::at(const K& key)
+    -> mapped_type& {
+  iterator found = tree::find(key);
+  CHECK(found != tree::end());
+  return found->second;
+}
+
+template <class Key, class Mapped, class Compare, class Container>
+template <class K>
+auto flat_map<Key, Mapped, Compare, Container>::at(const K& key) const
+    -> const mapped_type& {
+  const_iterator found = tree::find(key);
+  CHECK(found != tree::cend());
+  return found->second;
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+
+template <class Key, class Mapped, class Compare, class Container>
+auto flat_map<Key, Mapped, Compare, Container>::operator[](const key_type& key)
+    -> mapped_type& {
+  iterator found = tree::lower_bound(key);
+  if (found == tree::end() || tree::key_comp()(key, found->first))
+    found = tree::unsafe_emplace(found, key, mapped_type());
+  return found->second;
+}
+
+template <class Key, class Mapped, class Compare, class Container>
+auto flat_map<Key, Mapped, Compare, Container>::operator[](key_type&& key)
+    -> mapped_type& {
+  iterator found = tree::lower_bound(key);
+  if (found == tree::end() || tree::key_comp()(key, found->first))
+    found = tree::unsafe_emplace(found, std::move(key), mapped_type());
+  return found->second;
+}
+
+template <class Key, class Mapped, class Compare, class Container>
+template <class K, class M>
+auto flat_map<Key, Mapped, Compare, Container>::insert_or_assign(K&& key,
+                                                                 M&& obj)
+    -> std::pair<iterator, bool> {
+  auto result =
+      tree::emplace_key_args(key, std::forward<K>(key), std::forward<M>(obj));
+  if (!result.second)
+    result.first->second = std::forward<M>(obj);
+  return result;
+}
+
+template <class Key, class Mapped, class Compare, class Container>
+template <class K, class M>
+auto flat_map<Key, Mapped, Compare, Container>::insert_or_assign(
+    const_iterator hint,
+    K&& key,
+    M&& obj) -> iterator {
+  auto result = tree::emplace_hint_key_args(hint, key, std::forward<K>(key),
+                                            std::forward<M>(obj));
+  if (!result.second)
+    result.first->second = std::forward<M>(obj);
+  return result.first;
+}
+
+template <class Key, class Mapped, class Compare, class Container>
+template <class K, class... Args>
+auto flat_map<Key, Mapped, Compare, Container>::try_emplace(K&& key,
+                                                            Args&&... args)
+    -> std::enable_if_t<std::is_constructible_v<key_type, K&&>,
+                        std::pair<iterator, bool>> {
+  return tree::emplace_key_args(
+      key, std::piecewise_construct,
+      std::forward_as_tuple(std::forward<K>(key)),
+      std::forward_as_tuple(std::forward<Args>(args)...));
+}
+
+template <class Key, class Mapped, class Compare, class Container>
+template <class K, class... Args>
+auto flat_map<Key, Mapped, Compare, Container>::try_emplace(
+    const_iterator hint,
+    K&& key,
+    Args&&... args)
+    -> std::enable_if_t<std::is_constructible_v<key_type, K&&>, iterator> {
+  return tree::emplace_hint_key_args(
+             hint, key, std::piecewise_construct,
+             std::forward_as_tuple(std::forward<K>(key)),
+             std::forward_as_tuple(std::forward<Args>(args)...))
+      .first;
+}
+
+// ----------------------------------------------------------------------------
+// General operations.
+
+template <class Key, class Mapped, class Compare, class Container>
+void flat_map<Key, Mapped, Compare, Container>::swap(flat_map& other) noexcept {
+  tree::swap(other);
+}
+
+// ----------------------------------------------------------------------------
+// Utility functions.
+
+// Utility function to simplify constructing a flat_map from a fixed list of
+// keys and values. The key/value pairs are obtained by applying |proj| to the
+// |unprojected_elements|. The map's keys are sorted by |comp|.
+//
+// Example usage (creates a map {{16, "4"}, {9, "3"}, {4, "2"}, {1, "1"}}):
+//   auto map = base::MakeFlatMap<int, std::string>(
+//       std::vector<int>{1, 2, 3, 4},
+//       [](int i, int j) { return i > j; },
+//       [](int i) { return std::make_pair(i * i, base::NumberToString(i)); });
+template <class Key,
+          class Mapped,
+          class KeyCompare = std::less<>,
+          class Container = std::vector<std::pair<Key, Mapped>>,
+          class InputContainer,
+          class Projection = std::identity>
+constexpr flat_map<Key, Mapped, KeyCompare, Container> MakeFlatMap(
+    const InputContainer& unprojected_elements,
+    const KeyCompare& comp = KeyCompare(),
+    const Projection& proj = Projection()) {
+  Container elements;
+  internal::ReserveIfSupported(elements, unprojected_elements);
+  base::ranges::transform(unprojected_elements, std::back_inserter(elements),
+                          proj);
+  return flat_map<Key, Mapped, KeyCompare, Container>(std::move(elements),
+                                                      comp);
+}
+
+// Deduction guide to construct a flat_map from a Container of std::pair elements.
The container does not have to be sorted or contain only +// unique keys; construction will automatically discard duplicate keys, keeping +// only the first. +template < + class Container, + class Compare = std::less<>, + class Key = typename std::decay_t::value_type::first_type, + class Mapped = typename std::decay_t::value_type::second_type> +flat_map(Container&&, Compare comp = {}) + -> flat_map>; + +} // namespace base + +#endif // BASE_CONTAINERS_FLAT_MAP_H_ + diff --git a/bridge/bindings/v8/base/containers/flat_set.h b/bridge/bindings/v8/base/containers/flat_set.h new file mode 100644 index 0000000000..ab730a6b92 --- /dev/null +++ b/bridge/bindings/v8/base/containers/flat_set.h @@ -0,0 +1,186 @@ +/* +* Copyright (C) 2019-2022 The Kraken authors. All rights reserved. +* Copyright (C) 2022-present The WebF authors. All rights reserved. +*/ + +#ifndef BASE_CONTAINERS_FLAT_SET_H_ +#define BASE_CONTAINERS_FLAT_SET_H_ + +#include +#include + +#include "bindings/v8/base/containers/flat_tree.h" +#include "bindings/v8/base/ranges/algorithm.h" + +namespace base { + +// flat_set is a container with a std::set-like interface that stores its +// contents in a sorted container, by default a vector. +// +// Its implementation mostly tracks the corresponding standardization proposal +// https://wg21.link/P1222. +// +// Please see //base/containers/README.md for an overview of which container +// to select. +// +// PROS +// +// - Good memory locality. +// - Low overhead, especially for smaller sets. +// - Performance is good for more workloads than you might expect (see +// overview link above). +// - Supports C++14 set interface. +// +// CONS +// +// - Inserts and removals are O(n). +// +// IMPORTANT NOTES +// +// - Iterators are invalidated across mutations. +// - If possible, construct a flat_set in one operation by inserting into +// a container and moving that container into the flat_set constructor. +// - For multiple removals use base::EraseIf() which is O(n) rather than +// O(n * removed_items). +// +// QUICK REFERENCE +// +// Most of the core functionality is inherited from flat_tree. Please see +// flat_tree.h for more details for most of these functions. As a quick +// reference, the functions available are: +// +// Constructors (inputs need not be sorted): +// flat_set(const flat_set&); +// flat_set(flat_set&&); +// flat_set(InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_set(const container_type& items, +// const Compare& compare = Compare()); +// flat_set(container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. +// flat_set(std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Constructors (inputs need to be sorted): +// flat_set(sorted_unique_t, +// InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_set(sorted_unique_t, +// const container_type& items, +// const Compare& compare = Compare()); +// flat_set(sorted_unique_t, +// container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. 
+// flat_set(sorted_unique_t, +// std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Assignment functions: +// flat_set& operator=(const flat_set&); +// flat_set& operator=(flat_set&&); +// flat_set& operator=(initializer_list); +// +// Memory management functions: +// void reserve(size_t); +// size_t capacity() const; +// void shrink_to_fit(); +// +// Size management functions: +// void clear(); +// size_t size() const; +// size_t max_size() const; +// bool empty() const; +// +// Iterator functions: +// iterator begin(); +// const_iterator begin() const; +// const_iterator cbegin() const; +// iterator end(); +// const_iterator end() const; +// const_iterator cend() const; +// reverse_iterator rbegin(); +// const reverse_iterator rbegin() const; +// const_reverse_iterator crbegin() const; +// reverse_iterator rend(); +// const_reverse_iterator rend() const; +// const_reverse_iterator crend() const; +// +// Insert and accessor functions: +// pair insert(const key_type&); +// pair insert(key_type&&); +// void insert(InputIterator first, InputIterator last); +// iterator insert(const_iterator hint, const key_type&); +// iterator insert(const_iterator hint, key_type&&); +// pair emplace(Args&&...); +// iterator emplace_hint(const_iterator, Args&&...); +// +// Underlying type functions: +// container_type extract() &&; +// void replace(container_type&&); +// +// Erase functions: +// iterator erase(iterator); +// iterator erase(const_iterator); +// iterator erase(const_iterator first, const_iterator& last); +// template size_t erase(const K& key); +// +// Comparators (see std::set documentation). +// key_compare key_comp() const; +// value_compare value_comp() const; +// +// Search functions: +// template size_t count(const K&) const; +// template iterator find(const K&); +// template const_iterator find(const K&) const; +// template bool contains(const K&) const; +// template pair equal_range(K&); +// template iterator lower_bound(const K&); +// template const_iterator lower_bound(const K&) const; +// template iterator upper_bound(const K&); +// template const_iterator upper_bound(const K&) const; +// +// General functions: +// void swap(flat_set&); +// +// Non-member operators: +// bool operator==(const flat_set&, const flat_set); +// bool operator!=(const flat_set&, const flat_set); +// bool operator<(const flat_set&, const flat_set); +// bool operator>(const flat_set&, const flat_set); +// bool operator>=(const flat_set&, const flat_set); +// bool operator<=(const flat_set&, const flat_set); +// +template , + class Container = std::vector> +using flat_set = typename ::base::internal:: + flat_tree; + +// Utility function to simplify constructing a flat_set from a fixed list +// of keys. The keys are obtained by applying the projection |proj| to the +// |unprojected_elements|. The set's keys are sorted by |comp|. 
+// +// Example usage (creates a set {16, 9, 4, 1}): +// auto set = base::MakeFlatSet( +// std::vector{1, 2, 3, 4}, [](int i, int j) { return i > j; }, +// [](int i) { return i * i; }); +template , + class Container = std::vector, + class InputContainer, + class Projection = std::identity> +constexpr flat_set MakeFlatSet( + const InputContainer& unprojected_elements, + const Compare& comp = Compare(), + const Projection& proj = Projection()) { + Container elements; + internal::ReserveIfSupported(elements, unprojected_elements); + base::ranges::transform(unprojected_elements, std::back_inserter(elements), + proj); + return flat_set(std::move(elements), comp); +} + +} // namespace base + +#endif // BASE_CONTAINERS_FLAT_SET_H_ + diff --git a/bridge/bindings/v8/base/containers/flat_tree.h b/bridge/bindings/v8/base/containers/flat_tree.h new file mode 100644 index 0000000000..2ad3ddc243 --- /dev/null +++ b/bridge/bindings/v8/base/containers/flat_tree.h @@ -0,0 +1,1127 @@ +/* +* Copyright (C) 2019-2022 The Kraken authors. All rights reserved. +* Copyright (C) 2022-present The WebF authors. All rights reserved. +*/ + +#ifdef UNSAFE_BUFFERS_BUILD +// TODO(crbug.com/40284755): Remove this and spanify to fix the errors. +#pragma allow_unsafe_buffers +#endif + +#ifndef BASE_CONTAINERS_FLAT_TREE_H_ +#define BASE_CONTAINERS_FLAT_TREE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bindings/v8/base/check.h" +#include "bindings/v8/base/compiler_specific.h" +#include "bindings/v8/base/memory/raw_ptr_exclusion.h" +#include "bindings/v8/base/ranges/algorithm.h" + +namespace base { + +// Tag type that allows skipping the sort_and_unique step when constructing a +// flat_tree in case the underlying container is already sorted and has no +// duplicate elements. +struct sorted_unique_t { + constexpr explicit sorted_unique_t() = default; +}; +inline constexpr sorted_unique_t sorted_unique; + +namespace internal { + +// Helper functions used in DCHECKs below to make sure that inputs tagged with +// sorted_unique are indeed sorted and unique. +template +constexpr bool is_sorted_and_unique(const Range& range, Comp comp) { + // Being unique implies that there are no adjacent elements that + // compare equal. So this checks that each element is strictly less + // than the element after it. + return ranges::adjacent_find(range, std::not_fn(comp)) == ranges::end(range); +} + +// Helper inspired by C++20's std::to_array to convert a C-style array to a +// std::array. As opposed to the C++20 version this implementation does not +// provide an overload for rvalues and does not strip cv qualifers from the +// returned std::array::value_type. The returned value_type needs to be +// specified explicitly, allowing the construction of std::arrays with const +// elements. +// +// Reference: https://en.cppreference.com/w/cpp/container/array/to_array +template +constexpr std::array ToArrayImpl(const T (&data)[N], + std::index_sequence) { + return {{data[I]...}}; +} + +template +constexpr std::array ToArray(const T (&data)[N]) { + return ToArrayImpl(data, std::make_index_sequence()); +} + +// Helper that calls `container.reserve(std::size(source))`. 
+template +void ReserveIfSupported(T& container, const U& source) { + if constexpr (requires { container.reserve(std::size(source)); }) { + container.reserve(std::size(source)); + } +} + +// Implementation ------------------------------------------------------------- + +// Implementation for the sorted associative flat_set and flat_map using a +// sorted vector as the backing store. Do not use directly. +// +// The use of "value" in this is like std::map uses, meaning it's the thing +// contained (in the case of map it's a pair). The Key is how +// things are looked up. In the case of a set, Key == Value. In the case of +// a map, the Key is a component of a Value. +// +// The helper class GetKeyFromValue provides the means to extract a key from a +// value for comparison purposes. It should implement: +// const Key& operator()(const Value&). +template +class flat_tree { + public: + // -------------------------------------------------------------------------- + // Types. + // + using key_type = Key; + using key_compare = KeyCompare; + using value_type = typename Container::value_type; + + // Wraps the templated key comparison to compare values. + struct value_compare { + constexpr bool operator()(const value_type& left, + const value_type& right) const { + GetKeyFromValue extractor; + return comp(extractor(left), extractor(right)); + } + + NO_UNIQUE_ADDRESS key_compare comp; + }; + + using pointer = typename Container::pointer; + using const_pointer = typename Container::const_pointer; + using reference = typename Container::reference; + using const_reference = typename Container::const_reference; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using iterator = typename Container::iterator; + using const_iterator = typename Container::const_iterator; + using reverse_iterator = typename Container::reverse_iterator; + using const_reverse_iterator = typename Container::const_reverse_iterator; + using container_type = Container; + + // -------------------------------------------------------------------------- + // Lifetime. + // + // Constructors that take range guarantee O(N * log^2(N)) + O(N) complexity + // and take O(N * log(N)) + O(N) if extra memory is available (N is a range + // length). + // + // Assume that move constructors invalidate iterators and references. + // + // The constructors that take ranges, lists, and vectors do not require that + // the input be sorted. + // + // When passing the base::sorted_unique tag as the first argument no sort and + // unique step takes places. This is useful if the underlying container + // already has the required properties. 
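+  //
+  // Illustrative (assumed) use of the sorted_unique tag described above:
+  //
+  //   std::vector<int> keys = {1, 3, 5, 7};  // already sorted and unique
+  //   base::flat_set<int> set(base::sorted_unique, std::move(keys));
+  //
+  // An unsorted or duplicated input passed with the tag violates the
+  // contract; debug builds catch it via DCHECK(is_sorted_and_unique(...))
+  // in the constructors below.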
+ + flat_tree() = default; + flat_tree(const flat_tree&) = default; + flat_tree(flat_tree&&) = default; + + explicit flat_tree(const key_compare& comp); + + template + flat_tree(InputIterator first, + InputIterator last, + const key_compare& comp = key_compare()); + + flat_tree(const container_type& items, + const key_compare& comp = key_compare()); + + flat_tree(container_type&& items, const key_compare& comp = key_compare()); + + flat_tree(std::initializer_list ilist, + const key_compare& comp = key_compare()); + + template + flat_tree(sorted_unique_t, + InputIterator first, + InputIterator last, + const key_compare& comp = key_compare()); + + flat_tree(sorted_unique_t, + const container_type& items, + const key_compare& comp = key_compare()); + + constexpr flat_tree(sorted_unique_t, + container_type&& items, + const key_compare& comp = key_compare()); + + flat_tree(sorted_unique_t, + std::initializer_list ilist, + const key_compare& comp = key_compare()); + + ~flat_tree() = default; + + // -------------------------------------------------------------------------- + // Assignments. + // + // Assume that move assignment invalidates iterators and references. + + flat_tree& operator=(const flat_tree&) = default; + flat_tree& operator=(flat_tree&&) = default; + // Takes the first if there are duplicates in the initializer list. + flat_tree& operator=(std::initializer_list ilist); + + // -------------------------------------------------------------------------- + // Memory management. + // + // Beware that shrink_to_fit() simply forwards the request to the + // container_type and its implementation is free to optimize otherwise and + // leave capacity() to be greater that its size. + // + // reserve() and shrink_to_fit() invalidate iterators and references. + + void reserve(size_type new_capacity); + size_type capacity() const; + void shrink_to_fit(); + + // -------------------------------------------------------------------------- + // Size management. + // + // clear() leaves the capacity() of the flat_tree unchanged. + + void clear(); + + constexpr size_type size() const; + constexpr size_type max_size() const; + constexpr bool empty() const; + + // -------------------------------------------------------------------------- + // Iterators. + // + // Iterators follow the ordering defined by the key comparator used in + // construction of the flat_tree. + + iterator begin(); + constexpr const_iterator begin() const; + const_iterator cbegin() const; + + iterator end(); + constexpr const_iterator end() const; + const_iterator cend() const; + + reverse_iterator rbegin(); + const_reverse_iterator rbegin() const; + const_reverse_iterator crbegin() const; + + reverse_iterator rend(); + const_reverse_iterator rend() const; + const_reverse_iterator crend() const; + + // -------------------------------------------------------------------------- + // Insert operations. + // + // Assume that every operation invalidates iterators and references. + // Insertion of one element can take O(size). Capacity of flat_tree grows in + // an implementation-defined manner. + // + // NOTE: Prefer to build a new flat_tree from a std::vector (or similar) + // instead of calling insert() repeatedly. + + std::pair insert(const value_type& val); + std::pair insert(value_type&& val); + + iterator insert(const_iterator position_hint, const value_type& x); + iterator insert(const_iterator position_hint, value_type&& x); + + // This method inserts the values from the range [first, last) into the + // current tree. 
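+  //
+  // Illustrative (assumed) use: batch insertion sorts the new values once
+  // and merges them with the existing elements in a single pass, which is
+  // much cheaper than a loop of single-element insert() calls:
+  //
+  //   std::vector<std::pair<int, int>> extra = {{4, 40}, {2, 20}};
+  //   map.insert(extra.begin(), extra.end());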
+ template + void insert(InputIterator first, InputIterator last); + + template + std::pair emplace(Args&&... args); + + template + iterator emplace_hint(const_iterator position_hint, Args&&... args); + + // -------------------------------------------------------------------------- + // Underlying type operations. + // + // Assume that either operation invalidates iterators and references. + + // Extracts the container_type and returns it to the caller. Ensures that + // `this` is `empty()` afterwards. + container_type extract() &&; + + // Replaces the container_type with `body`. Expects that `body` is sorted + // and has no repeated elements with regard to value_comp(). + void replace(container_type&& body); + + // -------------------------------------------------------------------------- + // Erase operations. + // + // Assume that every operation invalidates iterators and references. + // + // erase(position), erase(first, last) can take O(size). + // erase(key) may take O(size) + O(log(size)). + // + // Prefer base::EraseIf() or some other variation on erase(remove(), end()) + // idiom when deleting multiple non-consecutive elements. + + iterator erase(iterator position); + // Artificially templatized to break ambiguity if `iterator` and + // `const_iterator` are the same type. + template + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + size_type erase(const Key& key); + template + size_type erase(const K& key); + + // -------------------------------------------------------------------------- + // Comparators. + + constexpr key_compare key_comp() const; + constexpr value_compare value_comp() const; + + // -------------------------------------------------------------------------- + // Search operations. + // + // Search operations have O(log(size)) complexity. + + size_type count(const Key& key) const; + template + size_type count(const K& key) const; + + iterator find(const Key& key); + const_iterator find(const Key& key) const; + template + iterator find(const K& key); + template + const_iterator find(const K& key) const; + + bool contains(const Key& key) const; + template + bool contains(const K& key) const; + + std::pair equal_range(const Key& key); + std::pair equal_range(const Key& key) const; + template + std::pair equal_range(const K& key); + template + std::pair equal_range(const K& key) const; + + iterator lower_bound(const Key& key); + const_iterator lower_bound(const Key& key) const; + template + iterator lower_bound(const K& key); + template + const_iterator lower_bound(const K& key) const; + + iterator upper_bound(const Key& key); + const_iterator upper_bound(const Key& key) const; + template + iterator upper_bound(const K& key); + template + const_iterator upper_bound(const K& key) const; + + // -------------------------------------------------------------------------- + // General operations. + // + // Assume that swap invalidates iterators and references. + // + // Implementation note: currently we use operator==() and operator<() on + // std::vector, because they have the same contract we need, so we use them + // directly for brevity and in case it is more optimal than calling equal() + // and lexicograhpical_compare(). If the underlying container type is changed, + // this code may need to be modified. 
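+  //
+  // Illustrative (assumed) example for the erase guidance above: prefer the
+  // free function base::EraseIf() (defined at the bottom of this header) for
+  // multiple removals, since it is a single O(n) pass:
+  //
+  //   base::flat_set<int> set = {1, 2, 3, 4};
+  //   base::EraseIf(set, [](int v) { return v % 2 == 0; });  // set == {1, 3}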
+ + void swap(flat_tree& other) noexcept; + + friend bool operator==(const flat_tree& lhs, const flat_tree& rhs) { + return lhs.body_ == rhs.body_; + } + + friend auto operator<=>(const flat_tree& lhs, const flat_tree& rhs) { + return lhs.body_ <=> rhs.body_; + } + + friend void swap(flat_tree& lhs, flat_tree& rhs) noexcept { lhs.swap(rhs); } + + protected: + // Emplaces a new item into the tree that is known not to be in it. This + // is for implementing map operator[]. + template + iterator unsafe_emplace(const_iterator position, Args&&... args); + + // Attempts to emplace a new element with key |key|. Only if |key| is not yet + // present, construct value_type from |args| and insert it. Returns an + // iterator to the element with key |key| and a bool indicating whether an + // insertion happened. + template + std::pair emplace_key_args(const K& key, Args&&... args); + + // Similar to |emplace_key_args|, but checks |hint| first as a possible + // insertion position. + template + std::pair emplace_hint_key_args(const_iterator hint, + const K& key, + Args&&... args); + + private: + // Helper class for e.g. lower_bound that can compare a value on the left + // to a key on the right. + struct KeyValueCompare { + // The key comparison object must outlive this class. + explicit KeyValueCompare(const key_compare& comp) : comp_(comp) {} + + template + bool operator()(const T& lhs, const U& rhs) const { + return comp_(extract_if_value_type(lhs), extract_if_value_type(rhs)); + } + + private: + const key_type& extract_if_value_type(const value_type& v) const { + GetKeyFromValue extractor; + return extractor(v); + } + + template + const K& extract_if_value_type(const K& k) const { + return k; + } + // RAW_PTR_EXCLUSION: Binary size increase. There's also little value to + // rewriting this member as it points to `flat_tree::comp_` and flat_tree + // itself should be holding raw_ptr/raw_ref if necessary. + RAW_PTR_EXCLUSION const key_compare& comp_; + }; + + iterator const_cast_it(const_iterator c_it) { + auto distance = std::distance(cbegin(), c_it); + return std::next(begin(), distance); + } + + // This method is inspired by both std::map::insert(P&&) and + // std::map::insert_or_assign(const K&, V&&). It inserts val if an equivalent + // element is not present yet, otherwise it overwrites. It returns an iterator + // to the modified element and a flag indicating whether insertion or + // assignment happened. + template + std::pair insert_or_assign(V&& val) { + auto position = lower_bound(GetKeyFromValue()(val)); + + if (position == end() || value_comp()(val, *position)) + return {body_.emplace(position, std::forward(val)), true}; + + *position = std::forward(val); + return {position, false}; + } + + // This method is similar to insert_or_assign, with the following differences: + // - Instead of searching [begin(), end()) it only searches [first, last). + // - In case no equivalent element is found, val is appended to the end of the + // underlying body and an iterator to the next bigger element in [first, + // last) is returned. + template + std::pair append_or_assign(iterator first, + iterator last, + V&& val) { + auto position = std::lower_bound(first, last, val, value_comp()); + + if (position == last || value_comp()(val, *position)) { + // emplace_back might invalidate position, which is why distance needs to + // be cached. 
+ const difference_type distance = std::distance(begin(), position); + body_.emplace_back(std::forward(val)); + return {std::next(begin(), distance), true}; + } + + *position = std::forward(val); + return {position, false}; + } + + // This method is similar to insert, with the following differences: + // - Instead of searching [begin(), end()) it only searches [first, last). + // - In case no equivalent element is found, val is appended to the end of the + // underlying body and an iterator to the next bigger element in [first, + // last) is returned. + template + std::pair append_unique(iterator first, + iterator last, + V&& val) { + auto position = std::lower_bound(first, last, val, value_comp()); + + if (position == last || value_comp()(val, *position)) { + // emplace_back might invalidate position, which is why distance needs to + // be cached. + const difference_type distance = std::distance(begin(), position); + body_.emplace_back(std::forward(val)); + return {std::next(begin(), distance), true}; + } + + return {position, false}; + } + + void sort_and_unique(iterator first, iterator last) { + // Preserve stability for the unique code below. + std::stable_sort(first, last, value_comp()); + + // lhs is already <= rhs due to sort, therefore !(lhs < rhs) <=> lhs == rhs. + auto equal_comp = std::not_fn(value_comp()); + erase(std::unique(first, last, equal_comp), last); + } + + void sort_and_unique() { sort_and_unique(begin(), end()); } + + // To support comparators that may not be possible to default-construct, we + // have to store an instance of Compare. Since Compare commonly is stateless, + // we use the NO_UNIQUE_ADDRESS attribute to save space. + NO_UNIQUE_ADDRESS key_compare comp_; + // Declare after |key_compare_comp_| to workaround GCC ICE. For details + // see https://crbug.com/1156268 + container_type body_; + + // If the compare is not transparent we want to construct key_type once. + template + using KeyTypeOrK = std::conditional_t; +}; + +// ---------------------------------------------------------------------------- +// Lifetime. 
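The KeyTypeOrK machinery above exists to support transparent comparators: when the comparator defines `is_transparent`, heterogeneous lookup keys are used directly instead of first constructing a `key_type`. As a brief, assumed illustration (using the `flat_set` alias from flat_set.h, whose default comparator `std::less<>` is transparent):

```cpp
#include <string>
#include <string_view>

#include "base/containers/flat_set.h"

void HeterogeneousLookup() {
  base::flat_set<std::string> names = {"alice", "bob"};
  // std::less<> is transparent, so the lookup accepts a string_view directly
  // instead of materializing a temporary std::string key first.
  std::string_view key = "bob";
  bool found = names.contains(key);
  (void)found;
}
```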
+ +template +flat_tree::flat_tree( + const KeyCompare& comp) + : comp_(comp) {} + +template +template +flat_tree::flat_tree( + InputIterator first, + InputIterator last, + const KeyCompare& comp) + : comp_(comp), body_(first, last) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + const container_type& items, + const KeyCompare& comp) + : comp_(comp), body_(items) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + container_type&& items, + const KeyCompare& comp) + : comp_(comp), body_(std::move(items)) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + std::initializer_list ilist, + const KeyCompare& comp) + : flat_tree(std::begin(ilist), std::end(ilist), comp) {} + +template +template +flat_tree::flat_tree( + sorted_unique_t, + InputIterator first, + InputIterator last, + const KeyCompare& comp) + : comp_(comp), body_(first, last) { + DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +flat_tree::flat_tree( + sorted_unique_t, + const container_type& items, + const KeyCompare& comp) + : comp_(comp), body_(items) { + DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +constexpr flat_tree::flat_tree( + sorted_unique_t, + container_type&& items, + const KeyCompare& comp) + : comp_(comp), body_(std::move(items)) { + DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +flat_tree::flat_tree( + sorted_unique_t, + std::initializer_list ilist, + const KeyCompare& comp) + : flat_tree(sorted_unique, std::begin(ilist), std::end(ilist), comp) {} + +// ---------------------------------------------------------------------------- +// Assignments. + +template +auto flat_tree::operator=( + std::initializer_list ilist) -> flat_tree& { + body_ = ilist; + sort_and_unique(); + return *this; +} + +// ---------------------------------------------------------------------------- +// Memory management. + +template +void flat_tree::reserve( + size_type new_capacity) { + body_.reserve(new_capacity); +} + +template +auto flat_tree::capacity() const + -> size_type { + return body_.capacity(); +} + +template +void flat_tree::shrink_to_fit() { + body_.shrink_to_fit(); +} + +// ---------------------------------------------------------------------------- +// Size management. + +template +void flat_tree::clear() { + body_.clear(); +} + +template +constexpr auto flat_tree::size() + const -> size_type { + return body_.size(); +} + +template +constexpr auto +flat_tree::max_size() const + -> size_type { + return body_.max_size(); +} + +template +constexpr bool flat_tree::empty() + const { + return body_.empty(); +} + +// ---------------------------------------------------------------------------- +// Iterators. 
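A short sketch of the memory-management contract implemented above; this is illustrative only, relying just on the documented behavior that clear() preserves capacity while reserve() and shrink_to_fit() invalidate iterators and references:

```cpp
#include "base/containers/flat_set.h"

void CapacityContract() {
  base::flat_set<int> set = {1, 2, 3};
  set.reserve(100);  // forwarded to the backing vector; invalidates iterators
  size_t cap = set.capacity();
  set.clear();       // size() == 0, but capacity() is left unchanged
  // cap == set.capacity() still holds here.
  (void)cap;
}
```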
+ +template +auto flat_tree::begin() + -> iterator { + return body_.begin(); +} + +template +constexpr auto flat_tree::begin() + const -> const_iterator { + return ranges::begin(body_); +} + +template +auto flat_tree::cbegin() const + -> const_iterator { + return body_.cbegin(); +} + +template +auto flat_tree::end() -> iterator { + return body_.end(); +} + +template +constexpr auto flat_tree::end() + const -> const_iterator { + return ranges::end(body_); +} + +template +auto flat_tree::cend() const + -> const_iterator { + return body_.cend(); +} + +template +auto flat_tree::rbegin() + -> reverse_iterator { + return body_.rbegin(); +} + +template +auto flat_tree::rbegin() const + -> const_reverse_iterator { + return body_.rbegin(); +} + +template +auto flat_tree::crbegin() const + -> const_reverse_iterator { + return body_.crbegin(); +} + +template +auto flat_tree::rend() + -> reverse_iterator { + return body_.rend(); +} + +template +auto flat_tree::rend() const + -> const_reverse_iterator { + return body_.rend(); +} + +template +auto flat_tree::crend() const + -> const_reverse_iterator { + return body_.crend(); +} + +// ---------------------------------------------------------------------------- +// Insert operations. +// +// Currently we use position_hint the same way as eastl or boost: +// https://github.com/electronicarts/EASTL/blob/master/include/EASTL/vector_set.h#L493 + +template +auto flat_tree::insert( + const value_type& val) -> std::pair { + return emplace_key_args(GetKeyFromValue()(val), val); +} + +template +auto flat_tree::insert( + value_type&& val) -> std::pair { + return emplace_key_args(GetKeyFromValue()(val), std::move(val)); +} + +template +auto flat_tree::insert( + const_iterator position_hint, + const value_type& val) -> iterator { + return emplace_hint_key_args(position_hint, GetKeyFromValue()(val), val) + .first; +} + +template +auto flat_tree::insert( + const_iterator position_hint, + value_type&& val) -> iterator { + return emplace_hint_key_args(position_hint, GetKeyFromValue()(val), + std::move(val)) + .first; +} + +template +template +void flat_tree::insert( + InputIterator first, + InputIterator last) { + if (first == last) + return; + + // Dispatch to single element insert if the input range contains a single + // element. + if (std::forward_iterator && std::next(first) == last) { + insert(end(), *first); + return; + } + + // Provide a convenience lambda to obtain an iterator pointing past the last + // old element. This needs to be dymanic due to possible re-allocations. + auto middle = [this, size = size()] { + return std::next(begin(), static_cast(size)); + }; + + // For batch updates initialize the first insertion point. + auto pos_first_new = static_cast(size()); + + // Loop over the input range while appending new values and overwriting + // existing ones, if applicable. Keep track of the first insertion point. + for (; first != last; ++first) { + std::pair result = append_unique(begin(), middle(), *first); + if (result.second) { + pos_first_new = + std::min(pos_first_new, std::distance(begin(), result.first)); + } + } + + // The new elements might be unordered and contain duplicates, so post-process + // the just inserted elements and merge them with the rest, inserting them at + // the previously found spot. + sort_and_unique(middle(), end()); + std::inplace_merge(std::next(begin(), pos_first_new), middle(), end(), + value_comp()); +} + +template +template +auto flat_tree::emplace( + Args&&... 
args) -> std::pair { + return insert(value_type(std::forward(args)...)); +} + +template +template +auto flat_tree::emplace_hint( + const_iterator position_hint, + Args&&... args) -> iterator { + return insert(position_hint, value_type(std::forward(args)...)); +} + +// ---------------------------------------------------------------------------- +// Underlying type operations. + +template +auto flat_tree:: + extract() && -> container_type { + return std::exchange(body_, container_type()); +} + +template +void flat_tree::replace( + container_type&& body) { + // Ensure that `body` is sorted and has no repeated elements according to + // `value_comp()`. + DCHECK(is_sorted_and_unique(body, value_comp())); + body_ = std::move(body); +} + +// ---------------------------------------------------------------------------- +// Erase operations. + +template +auto flat_tree::erase( + iterator position) -> iterator { + CHECK(position != body_.end()); + return body_.erase(position); +} + +template +template +auto flat_tree::erase( + const_iterator position) -> iterator { + CHECK(position != body_.end()); + return body_.erase(position); +} + +template +auto flat_tree::erase( + const Key& val) -> size_type { + auto eq_range = equal_range(val); + auto res = + static_cast(std::distance(eq_range.first, eq_range.second)); + erase(eq_range.first, eq_range.second); + return res; +} + +template +template +auto flat_tree::erase(const K& val) + -> size_type { + auto eq_range = equal_range(val); + auto res = + static_cast(std::distance(eq_range.first, eq_range.second)); + erase(eq_range.first, eq_range.second); + return res; +} + +template +auto flat_tree::erase( + const_iterator first, + const_iterator last) -> iterator { + return body_.erase(first, last); +} + +// ---------------------------------------------------------------------------- +// Comparators. + +template +constexpr auto +flat_tree::key_comp() const + -> key_compare { + return comp_; +} + +template +constexpr auto +flat_tree::value_comp() const + -> value_compare { + return value_compare{comp_}; +} + +// ---------------------------------------------------------------------------- +// Search operations. + +template +template +auto flat_tree::count( + const K& key) const -> size_type { + auto eq_range = equal_range(key); + return static_cast(std::distance(eq_range.first, eq_range.second)); +} + +template +auto flat_tree::count( + const Key& key) const -> size_type { + auto eq_range = equal_range(key); + return static_cast(std::distance(eq_range.first, eq_range.second)); +} + +template +auto flat_tree::find( + const Key& key) -> iterator { + return const_cast_it(std::as_const(*this).find(key)); +} + +template +auto flat_tree::find( + const Key& key) const -> const_iterator { + auto eq_range = equal_range(key); + return (eq_range.first == eq_range.second) ? end() : eq_range.first; +} + +template +template +auto flat_tree::find(const K& key) + -> iterator { + return const_cast_it(std::as_const(*this).find(key)); +} + +template +template +auto flat_tree::find( + const K& key) const -> const_iterator { + auto eq_range = equal_range(key); + return (eq_range.first == eq_range.second) ? 
end() : eq_range.first; +} + +template +bool flat_tree::contains( + const Key& key) const { + auto lower = lower_bound(key); + return lower != end() && !comp_(key, GetKeyFromValue()(*lower)); +} + +template +template +bool flat_tree::contains( + const K& key) const { + auto lower = lower_bound(key); + return lower != end() && !comp_(key, GetKeyFromValue()(*lower)); +} + +template +auto flat_tree::equal_range( + const Key& key) -> std::pair { + auto res = std::as_const(*this).equal_range(key); + return {const_cast_it(res.first), const_cast_it(res.second)}; +} + +template +auto flat_tree::equal_range( + const Key& key) const -> std::pair { + auto lower = lower_bound(key); + + KeyValueCompare comp(comp_); + if (lower == end() || comp(key, *lower)) + return {lower, lower}; + + return {lower, std::next(lower)}; +} + +template +template +auto flat_tree::equal_range( + const K& key) -> std::pair { + auto res = std::as_const(*this).equal_range(key); + return {const_cast_it(res.first), const_cast_it(res.second)}; +} + +template +template +auto flat_tree::equal_range( + const K& key) const -> std::pair { + auto lower = lower_bound(key); + + KeyValueCompare comp(comp_); + if (lower == end() || comp(key, *lower)) + return {lower, lower}; + + return {lower, std::next(lower)}; +} + +template +auto flat_tree::lower_bound( + const Key& key) -> iterator { + return const_cast_it(std::as_const(*this).lower_bound(key)); +} + +template +auto flat_tree::lower_bound( + const Key& key) const -> const_iterator { + KeyValueCompare comp(comp_); + return ranges::lower_bound(*this, key, comp); +} + +template +template +auto flat_tree::lower_bound( + const K& key) -> iterator { + return const_cast_it(std::as_const(*this).lower_bound(key)); +} + +template +template +auto flat_tree::lower_bound( + const K& key) const -> const_iterator { + static_assert(std::is_convertible_v&, const K&>, + "Requested type cannot be bound to the container's key_type " + "which is required for a non-transparent compare."); + + const KeyTypeOrK& key_ref = key; + + KeyValueCompare comp(comp_); + return ranges::lower_bound(*this, key_ref, comp); +} + +template +auto flat_tree::upper_bound( + const Key& key) -> iterator { + return const_cast_it(std::as_const(*this).upper_bound(key)); +} + +template +auto flat_tree::upper_bound( + const Key& key) const -> const_iterator { + KeyValueCompare comp(comp_); + return ranges::upper_bound(*this, key, comp); +} + +template +template +auto flat_tree::upper_bound( + const K& key) -> iterator { + return const_cast_it(std::as_const(*this).upper_bound(key)); +} + +template +template +auto flat_tree::upper_bound( + const K& key) const -> const_iterator { + static_assert(std::is_convertible_v&, const K&>, + "Requested type cannot be bound to the container's key_type " + "which is required for a non-transparent compare."); + + const KeyTypeOrK& key_ref = key; + + KeyValueCompare comp(comp_); + return ranges::upper_bound(*this, key_ref, comp); +} + +// ---------------------------------------------------------------------------- +// General operations. + +template +void flat_tree::swap( + flat_tree& other) noexcept { + std::swap(*this, other); +} + +template +template +auto flat_tree::unsafe_emplace( + const_iterator position, + Args&&... args) -> iterator { + return body_.emplace(position, std::forward(args)...); +} + +template +template +auto flat_tree::emplace_key_args( + const K& key, + Args&&... 
args) -> std::pair { + auto lower = lower_bound(key); + if (lower == end() || comp_(key, GetKeyFromValue()(*lower))) + return {unsafe_emplace(lower, std::forward(args)...), true}; + return {lower, false}; +} + +template +template +auto flat_tree:: + emplace_hint_key_args(const_iterator hint, const K& key, Args&&... args) + -> std::pair { + KeyValueCompare comp(comp_); + if ((hint == begin() || comp(*std::prev(hint), key))) { + if (hint == end() || comp(key, *hint)) { + // *(hint - 1) < key < *hint => key did not exist and hint is correct. + return {unsafe_emplace(hint, std::forward(args)...), true}; + } + if (!comp(*hint, key)) { + // key == *hint => no-op, return correct hint. + return {const_cast_it(hint), false}; + } + } + // hint was not helpful, dispatch to hintless version. + return emplace_key_args(key, std::forward(args)...); +} + +} // namespace internal + +// ---------------------------------------------------------------------------- +// Free functions. + +// Erases all elements that match predicate. It has O(size) complexity. +template +size_t EraseIf( + base::internal::flat_tree& + container, + Predicate pred) { + auto it = ranges::remove_if(container, pred); + size_t removed = std::distance(it, container.end()); + container.erase(it, container.end()); + return removed; +} + +} // namespace base + +#endif // BASE_CONTAINERS_FLAT_TREE_H_ + diff --git a/bridge/bindings/v8/base/containers/span.h b/bridge/bindings/v8/base/containers/span.h new file mode 100644 index 0000000000..c3ba9d4251 --- /dev/null +++ b/bridge/bindings/v8/base/containers/span.h @@ -0,0 +1,1621 @@ +/* +* Copyright (C) 2019-2022 The Kraken authors. All rights reserved. +* Copyright (C) 2022-present The WebF authors. All rights reserved. +*/ + +#ifndef BASE_CONTAINERS_SPAN_H_ +#define BASE_CONTAINERS_SPAN_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bindings/v8/base/check.h" +#include "bindings/v8/base/compiler_specific.h" +#include "bindings/v8/base/containers/checked_iterators.h" +#include "bindings/v8/base/containers/dynamic_extent.h" +#include "bindings/v8/base/numerics/safe_conversions.h" +#include "bindings/v8/base/types/to_address.h" +//#include "third_party/abseil-cpp/absl/base/attributes.h" + +namespace base { + +template +class span; + +namespace internal { + +template +concept LegalDataConversion = + std::convertible_to (*)[], + std::remove_reference_t (*)[]>; + +template +concept CompatibleIter = std::contiguous_iterator && + LegalDataConversion, T>; + +template +concept CompatibleRange = + std::ranges::contiguous_range && std::ranges::sized_range && + LegalDataConversion, T> && + (std::ranges::borrowed_range || std::is_const_v); + +template +concept LegacyRangeDataIsPointer = std::is_pointer_v; + +template +concept LegacyRange = requires(R& r) { + { std::ranges::data(r) } -> LegacyRangeDataIsPointer; + { std::ranges::size(r) } -> std::convertible_to; +}; + +// NOTE: Ideally we'd just use `CompatibleRange`, however this currently breaks +// code that was written prior to C++20 being standardized and assumes providing +// .data() and .size() is sufficient. +// TODO: https://crbug.com/1504998 - Remove in favor of CompatibleRange and fix +// callsites. 
+template +concept LegacyCompatibleRange = LegacyRange && requires(R& r) { + { *std::ranges::data(r) } -> LegalDataConversion; +}; + +template +using size_constant = std::integral_constant; + +template +struct ExtentImpl : size_constant {}; + +template +struct ExtentImpl : size_constant {}; + +template +struct ExtentImpl> : size_constant {}; + +template +struct ExtentImpl> : size_constant {}; + +template +using Extent = ExtentImpl>; + +template +inline constexpr size_t ExtentV = Extent::value; + +// must_not_be_dynamic_extent prevents |dynamic_extent| from being returned in a +// constexpr context. +template +constexpr size_t must_not_be_dynamic_extent() { + static_assert( + kExtent != dynamic_extent, + "EXTENT should only be used for containers with a static extent."); + return kExtent; +} + +template +requires((N == M || N == dynamic_extent || M == dynamic_extent) && + std::equality_comparable_with) + constexpr bool span_eq(span l, span r); +template +requires((N == M || N == dynamic_extent || M == dynamic_extent) && + std::three_way_comparable_with) + constexpr auto span_cmp(span l, span r) + -> decltype(l[0u] <=> r[0u]); +template +constexpr std::ostream& span_stream(std::ostream& l, span r); + +} // namespace internal + +// A span is a value type that represents an array of elements of type T. Since +// it only consists of a pointer to memory with an associated size, it is very +// light-weight. It is cheap to construct, copy, move and use spans, so that +// users are encouraged to use it as a pass-by-value parameter. A span does not +// own the underlying memory, so care must be taken to ensure that a span does +// not outlive the backing store. +// +// span is somewhat analogous to std::string_view, but with arbitrary element +// types, allowing mutation if T is non-const. +// +// span is implicitly convertible from C++ arrays, as well as most [1] +// container-like types that provide a data() and size() method (such as +// std::vector). A mutable span can also be implicitly converted to an +// immutable span. +// +// Consider using a span for functions that take a data pointer and size +// parameter: it allows the function to still act on an array-like type, while +// allowing the caller code to be a bit more concise. +// +// For read-only data access pass a span: the caller can supply either +// a span or a span, while the callee will have a read-only view. +// For read-write access a mutable span is required. +// +// Without span: +// Read-Only: +// // std::string HexEncode(const uint8_t* data, size_t size); +// std::vector data_buffer = GenerateData(); +// std::string r = HexEncode(data_buffer.data(), data_buffer.size()); +// +// Mutable: +// // ssize_t SafeSNPrintf(char* buf, size_t N, const char* fmt, Args...); +// char str_buffer[100]; +// SafeSNPrintf(str_buffer, sizeof(str_buffer), "Pi ~= %lf", 3.14); +// +// With span: +// Read-Only: +// // std::string HexEncode(base::span data); +// std::vector data_buffer = GenerateData(); +// std::string r = HexEncode(data_buffer); +// +// Mutable: +// // ssize_t SafeSNPrintf(base::span, const char* fmt, Args...); +// char str_buffer[100]; +// SafeSNPrintf(str_buffer, "Pi ~= %lf", 3.14); +// +// Dynamic vs Fixed size spans +// --------------------------- +// +// Normally spans have a dynamic size, which is represented as a type as +// `span`. However it is possible to encode the size of the span into the +// type as a second parameter such as `span`. 
When working with fixed-size +// spans, the compiler will check the size of operations and prevent compilation +// when an invalid size is used for an operation such as assignment or +// `copy_from()`. However operations that produce a new span will make a +// dynamic-sized span by default. See below for how to prevent that. +// +// Fixed-size spans implicitly convert to a dynamic-size span, throwing away the +// compile-time size information from the type signature. So most code should +// work with dynamic-sized `span` types and not worry about the existence of +// fixed-size spans. +// +// It is possible to convert from a dynamic-size to a fixed-size span (or to +// move from a fixed-size span to another fixed-size span) but it requires +// writing an the size explicitly in the code. Methods like `first` can be +// passed a size as a template argument, such as `first()` to generate a +// fixed-size span. And the `make_span` function can be given a compile-time +// size in a similar way with `make_span()`. +// +// Spans with "const" and pointers +// ------------------------------- +// +// Const and pointers can get confusing. Here are vectors of pointers and their +// corresponding spans: +// +// const std::vector => base::span +// std::vector => base::span +// const std::vector => base::span +// +// Differences from the C++ standard +// --------------------------------- +// +// http://eel.is/c++draft/views.span contains the latest C++ draft of std::span. +// Chromium tries to follow the draft as close as possible. Differences between +// the draft and the implementation are documented in subsections below. +// +// Differences from [span.overview]: +// - Dynamic spans are implemented as a partial specialization of the regular +// class template. This leads to significantly simpler checks involving the +// extent, at the expense of some duplicated code. The same strategy is used +// by libc++. +// +// Differences from [span.objectrep]: +// - as_bytes() and as_writable_bytes() return spans of uint8_t instead of +// std::byte. +// +// Differences from [span.cons]: +// - The constructors from a contiguous range apart from a C array are folded +// into a single one, using a construct similarly to the one proposed +// (but not standardized) in https://wg21.link/P1419. +// The C array constructor is kept so that a span can be constructed from +// an init list like {{1, 2, 3}}. +// TODO: https://crbug.com/828324 - Consider adding C++26's constructor from +// a std::initializer_list instead. +// - The conversion constructors from a contiguous range into a dynamic span +// don't check for the range concept, but rather whether std::ranges::data +// and std::ranges::size are well formed. This is due to legacy reasons and +// should be fixed. +// +// Differences from [span.deduct]: +// - The deduction guides from a contiguous range are folded into a single one, +// and treat borrowed ranges correctly. +// - Add deduction guide from rvalue array. +// +// Other differences: +// - Using StrictNumeric instead of size_t where possible. +// +// Additions beyond the C++ standard draft +// - as_chars() function. +// - as_writable_chars() function. +// - as_byte_span() function. +// - as_writable_byte_span() function. +// - copy_from() method. +// - span_from_ref() function. +// - byte_span_from_ref() function. +// - span_from_cstring() function. +// - span_with_nul_from_cstring() function. +// - byte_span_from_cstring() function. +// - byte_span_with_nul_from_cstring() function. +// - split_at() method. 
+// - operator==() comparator function. +// - operator<=>() comparator function. +// - operator<<() printing function. +// +// Furthermore, all constructors and methods are marked noexcept due to the lack +// of exceptions in Chromium. +// +// Due to the lack of class template argument deduction guides in C++14 +// appropriate make_span() utility functions are provided for historic reasons. + +// [span], class template span +template +class GSL_POINTER span { + public: + using element_type = T; + using value_type = std::remove_cv_t; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = CheckedContiguousIterator; + using reverse_iterator = std::reverse_iterator; + static constexpr size_t extent = N; + + // [span.cons], span constructors, copy, assignment, and destructor + constexpr span() noexcept + requires(N == 0) + = default; + + // Constructs a span from a contiguous iterator and a size. + // + // # Checks + // The function CHECKs that `count` matches the template parameter `N` and + // will terminate otherwise. + // + // # Safety + // The iterator must point to the first of at least `count` many elements, or + // Undefined Behaviour can result as the span will allow access beyond the + // valid range of the collection pointed to by the iterator. + template + requires(internal::CompatibleIter) + UNSAFE_BUFFER_USAGE explicit constexpr span( + It first, + StrictNumeric count) noexcept + : // The use of to_address() here is to handle the case where the + // iterator `first` is pointing to the container's `end()`. In that + // case we can not use the address returned from the iterator, or + // dereference it through the iterator's `operator*`, but we can store + // it. We must assume in this case that `count` is 0, since the + // iterator does not point to valid data. Future hardening of iterators + // may disallow pulling the address from `end()`, as demonstrated by + // asserts() in libstdc++: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960. + // + // The span API dictates that the `data()` is accessible when size is + // 0, since the pointer may be valid, so we cannot prevent storing and + // giving out an invalid pointer here without breaking API + // compatibility and our unit tests. Thus protecting against this can + // likely only be successful from inside iterators themselves, where + // the context about the pointer is known. + // + // We can not protect here generally against an invalid iterator/count + // being passed in, since we have no context to determine if the + // iterator or count are valid. + data_(base::to_address(first)) { + // Guarantees that the N in the type signature is correct. + CHECK(N == count); + } + + // Constructs a span from a contiguous iterator and a size. + // + // # Checks + // The function CHECKs that `it <= end` and will terminate otherwise. + // + // # Safety + // The begin and end iterators must be for the same allocation or Undefined + // Behaviour can result as the span will allow access beyond the valid range + // of the collection pointed to by `begin`. 
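+  //
+  // Illustrative (assumed) safe use, with both iterators drawn from the same
+  // allocation so the precondition holds:
+  //
+  //   std::vector<int> v(4);
+  //   base::span<int, 4> s(v.begin(), v.end());  // CHECKs that begin <= end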
+ template + requires(internal::CompatibleIter && + std::sized_sentinel_for && + !std::convertible_to) + UNSAFE_BUFFER_USAGE explicit constexpr span(It begin, End end) noexcept + // SAFETY: The caller must guarantee that the iterator and end sentinel + // are part of the same allocation, in which case it is the number of + // elements between the iterators and thus a valid size for the pointer to + // the element at `begin`. + // + // We CHECK that `end - begin` did not underflow below. Normally checking + // correctness afterward is flawed, however underflow is not UB and the + // size is not converted to an invalid pointer (which would be UB) before + // we CHECK for underflow. + : UNSAFE_BUFFERS(span(begin, static_cast(end - begin))) { + // Verify `end - begin` did not underflow. + CHECK(begin <= end); + } + + // NOLINTNEXTLINE(google-explicit-constructor) + constexpr span(T (&arr)[N]) noexcept + // SAFETY: The std::ranges::size() function gives the number of elements + // pointed to by the std::ranges::data() function, which meets the + // requirement of span. + : UNSAFE_BUFFERS(span(std::ranges::data(arr), std::ranges::size(arr))) {} + + template > + requires(internal::CompatibleRange && (X == N || X == dynamic_extent)) + // NOLINTNEXTLINE(google-explicit-constructor) + explicit(X == dynamic_extent) constexpr span(R&& range) noexcept + // SAFETY: The std::ranges::size() function gives the number of elements + // pointed to by the std::ranges::data() function, which meets the + // requirement of span. + : UNSAFE_BUFFERS( + span(std::ranges::data(range), std::ranges::size(range))) {} + + // [span.sub], span subviews + template + constexpr span first() const noexcept + requires(Count <= N) + { + // SAFETY: span provides that data() points to at least `N` many elements. + // `Count` is non-negative by its type and `Count <= N` from the requires + // condition. So `Count` is a valid new size for `data()`. + return UNSAFE_BUFFERS(span(data(), Count)); + } + + template + constexpr span last() const noexcept + requires(Count <= N) + { + // SAFETY: span provides that data() points to at least `N` many elements. + // `Count` is non-negative by its type and `Count <= N` from the requires + // condition. So `0 <= N - Count <= N`, meaning `N - Count` is a valid new + // size for `data()` and it will point to `Count` many elements.` + return UNSAFE_BUFFERS(span(data() + (N - Count), Count)); + } + + // Returns a span over the first `count` elements. + // + // # Checks + // The function CHECKs that the span contains at least `count` elements and + // will terminate otherwise. + constexpr span first(StrictNumeric count) const noexcept { + CHECK_LE(size_t{count}, size()); + // SAFETY: span provides that data() points to at least `N` many elements. + // `count` is non-negative by its type and `count <= N` from the CHECK + // above. So `count` is a valid new size for `data()`. + return UNSAFE_BUFFERS({data(), count}); + } + + // Returns a span over the last `count` elements. + // + // # Checks + // The function CHECKs that the span contains at least `count` elements and + // will terminate otherwise. + constexpr span last(StrictNumeric count) const noexcept { + CHECK_LE(size_t{count}, N); + // SAFETY: span provides that data() points to at least `N` many elements. + // `count` is non-negative by its type and `count <= N` from the CHECK + // above. So `0 <= N - count <= N`, meaning `N - count` is a valid new size + // for `data()` and it will point to `count` many elements. 
+    return UNSAFE_BUFFERS({data() + (N - size_t{count}), count});
+  }
+
+  template <size_t Offset, size_t Count = dynamic_extent>
+  constexpr auto subspan() const noexcept
+    requires(Offset <= N && (Count == dynamic_extent || Count <= N - Offset))
+  {
+    constexpr size_t kExtent = Count != dynamic_extent ? Count : N - Offset;
+    // SAFETY: span provides that data() points to at least `N` many elements.
+    //
+    // If Count is dynamic_extent, kExtent becomes `N - Offset`. Since `Offset
+    // <= N` from the requires condition, then `Offset` is a valid offset for
+    // data(), and `Offset + kExtent = Offset + N - Offset = N >= Offset` is
+    // also a valid offset that is not before `Offset`. This makes a span at
+    // `Offset` with size `kExtent` valid.
+    //
+    // Otherwise `Count <= N - Offset` and `0 <= Offset <= N` by the requires
+    // condition, so `Offset <= N - Count` and `N - Count` can not underflow.
+    // Then `Offset` is a valid offset for data() and `kExtent` is `Count <= N -
+    // Offset`, so `Offset + kExtent <= Offset + N - Offset = N` which makes
+    // both `Offset` and `Offset + kExtent` valid offsets for data(), and since
+    // `kExtent` is non-negative, `Offset + kExtent` is not before `Offset` so
+    // `kExtent` is a valid size for the span at `data() + Offset`.
+    return UNSAFE_BUFFERS(span<T, kExtent>(data() + Offset, kExtent));
+  }
+
+  // Returns a span over the first `count` elements starting at the given
+  // `offset` from the start of the span.
+  //
+  // # Checks
+  // The function CHECKs that the span contains at least `offset + count`
+  // elements, or at least `offset` elements if `count` is not specified, and
+  // will terminate otherwise.
+  constexpr span<T> subspan(size_t offset,
+                            size_t count = dynamic_extent) const noexcept {
+    CHECK_LE(offset, N);
+    CHECK(count == dynamic_extent || count <= N - offset);
+    const size_t new_extent = count != dynamic_extent ? count : N - offset;
+    // SAFETY: span provides that data() points to at least `N` many elements.
+    //
+    // If `count` is dynamic_extent, `new_extent` becomes `N - offset`. Since
+    // `offset <= N` from the CHECK above, then `offset` is a valid offset for
+    // data(), and `offset + new_extent = offset + N - offset = N >= offset` is
+    // also a valid offset that is not before `offset`. This makes a span at
+    // `offset` with size `new_extent` valid.
+    //
+    // Otherwise `count <= N - offset` and `0 <= offset <= N` by the CHECKs
+    // above, so `offset <= N - count` and `N - count` can not underflow. Then
+    // `offset` is a valid offset for data() and `new_extent` is `count <= N -
+    // offset`, so `offset + new_extent <= offset + N - offset = N` which makes
+    // both `offset` and `offset + new_extent` valid offsets for data(), and
+    // since `new_extent` is non-negative, `offset + new_extent` is not before
+    // `offset` so `new_extent` is a valid size for the span at `data() +
+    // offset`.
+    return UNSAFE_BUFFERS({data() + offset, new_extent});
+  }
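[Reviewer aside: a sketch of how the compile-time and runtime subviews above differ in return type. Names are illustrative; the span types are as defined in this class.]

```cpp
void subview_sketch() {
  int buf[8] = {};
  base::span<int, 8> s(buf);

  base::span<int, 2> head = s.first<2>();       // extent checked at compile time
  base::span<int, 2> tail = s.last<2>();
  base::span<int, 4> mid  = s.subspan<2, 4>();  // offset 2, count 4
  base::span<int> dyn     = s.subspan(2);       // runtime offset -> dynamic extent

  (void)head; (void)tail; (void)mid; (void)dyn;
}
```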
+  // Splits a span into two at the given `offset`, returning two spans that
+  // cover the full range of the original span.
+  //
+  // Similar to calling subspan() with the `offset` as the length on the first
+  // call, and then the `offset` as the offset in the second.
+  //
+  // The split_at() overload allows construction of a fixed-size span from a
+  // compile-time constant. If the input span is fixed-size, both output spans
+  // will be. Otherwise, the first will be fixed-size and the second will be
+  // dynamic-size.
+  //
+  // This is a non-std extension that is inspired by the Rust slice::split_at()
+  // and split_at_mut() methods.
+  //
+  // # Checks
+  // The function CHECKs that the span contains at least `offset` elements and
+  // will terminate otherwise.
+  constexpr std::pair<span<T>, span<T>> split_at(size_t offset) const noexcept {
+    return {first(offset), subspan(offset)};
+  }
+
+  template <size_t Offset>
+    requires(Offset <= N)
+  constexpr std::pair<span<T, Offset>, span<T, N - Offset>> split_at()
+      const noexcept {
+    return {first<Offset>(), subspan<Offset>()};
+  }
+
+  // [span.obs], span observers
+  constexpr size_t size() const noexcept { return N; }
+  constexpr size_t size_bytes() const noexcept { return size() * sizeof(T); }
+  [[nodiscard]] constexpr bool empty() const noexcept { return size() == 0; }
+
+  // [span.elem], span element access
+  //
+  // # Checks
+  // The function CHECKs that the `idx` is inside the span and will terminate
+  // otherwise.
+  constexpr T& operator[](size_t idx) const noexcept {
+    CHECK_LT(idx, size());
+    // SAFETY: Since data() always points to at least `N` elements, the check
+    // above ensures `idx < N` and is thus in range for data().
+    return UNSAFE_BUFFERS(data()[idx]);
+  }
+
+  constexpr T& front() const noexcept
+    requires(N > 0)
+  {
+    // SAFETY: Since data() always points to at least `N` elements, the
+    // requires constraint above ensures `0 < N` and is thus in range for
+    // data().
+    return UNSAFE_BUFFERS(data()[0]);
+  }
+
+  constexpr T& back() const noexcept
+    requires(N > 0)
+  {
+    // SAFETY: Since data() always points to at least `N` elements, the
+    // requires constraint above ensures `N > 0` and thus `N - 1` does not
+    // underflow and is in range for data().
+    return UNSAFE_BUFFERS(data()[N - 1]);
+  }
+
+  // Returns a pointer to the first element in the span. If the span is empty
+  // (`size()` is 0), the returned pointer may or may not be null, and it must
+  // not be dereferenced.
+  //
+  // It is always valid to add `size()` to the pointer in C++ code, though it
+  // may be invalid in C code when the span is empty.
+  constexpr T* data() const noexcept { return data_; }
+
+  // [span.iter], span iterator support
+  constexpr iterator begin() const noexcept {
+    // SAFETY: span provides that data() points to at least `size()` many
+    // elements, and size() is non-negative. So data() + size() is a valid
+    // pointer for the data() allocation.
+    return UNSAFE_BUFFERS(iterator(data(), data() + size()));
+  }
+
+  constexpr iterator end() const noexcept {
+    // SAFETY: span provides that data() points to at least `size()` many
+    // elements, and size() is non-negative. So data() + size() is a valid
+    // pointer for the data() allocation.
+    return UNSAFE_BUFFERS(iterator(data(), data() + size(), data() + size()));
+  }
+
+  constexpr reverse_iterator rbegin() const noexcept {
+    return reverse_iterator(end());
+  }
+
+  constexpr reverse_iterator rend() const noexcept {
+    return reverse_iterator(begin());
+  }
+
+  // Bounds-checked copy from a non-overlapping span. The spans must be the
+  // exact same size or a hard CHECK() occurs. If the two spans overlap,
+  // Undefined Behaviour occurs.
+  //
+  // This is a non-std extension that is inspired by the Rust
+  // slice::copy_from_slice() method.
+  //
+  // # Checks
+  // The function CHECKs that the `other` span has the same size as itself and
+  // will terminate otherwise.
+  constexpr void copy_from(span<const T, N> other)
+    requires(!std::is_const_v<T>)
+  {
+    CHECK_EQ(size_bytes(), other.size_bytes());
+    // Verify non-overlapping in developer builds.
+    //
+    // SAFETY: span provides that data() points to at least size() many
+    // elements, so adding size() to the data() pointer is well-defined.
+    DCHECK(UNSAFE_BUFFERS(data() + size()) <= other.data() ||
+           data() >= UNSAFE_BUFFERS(other.data() + other.size()));
+    // When compiling with -Oz, std::ranges::copy() does not get inlined, which
+    // makes copy_from() very expensive compared to memcpy for small sizes (up
+    // to around 4x slower). We observe that this is because ranges::copy() uses
+    // begin()/end() and span's iterators are checked iterators, not just
+    // pointers. This additional complexity prevents inlining and breaks the
+    // ability for the compiler to eliminate code.
+    //
+    // See also https://crbug.com/1396134.
+    //
+    // We also see std::copy() (with pointer arguments! not iterators) optimize
+    // and inline better than memcpy() since memcpy() needs to rely on
+    // size_bytes(), which while computable at compile time when `other` has a
+    // fixed size, the optimizer stumbles on with -Oz.
+    //
+    // SAFETY: The copy() here does not check bounds, but we have verified that
+    // `this` and `other` have the same bounds above (and are pointers of the
+    // same type), so `data()` and `other.data()` both have at least
+    // `other.size()` elements.
+    UNSAFE_BUFFERS(
+        std::copy(other.data(), other.data() + other.size(), data()));
+  }
+
+  // Implicit conversion from std::span to base::span.
+  //
+  // We get other conversions for free from std::span's constructors, but it
+  // does not deduce N on its range constructor.
+  span(std::span<std::remove_const_t<T>, N> other)
+      :  // SAFETY: std::span contains a valid data pointer and size such
+         // that pointer+size remains valid.
+        UNSAFE_BUFFERS(
+            span(std::ranges::data(other), std::ranges::size(other))) {}
+  span(std::span<T, N> other)
+    requires(std::is_const_v<T>)
+      :  // SAFETY: std::span contains a valid data pointer and size such
+         // that pointer+size remains valid.
+        UNSAFE_BUFFERS(
+            span(std::ranges::data(other), std::ranges::size(other))) {}
+
+  // Implicit conversion from base::span to std::span.
+  //
+  // We get other conversions for free from std::span's constructors, but it
+  // does not deduce N on its range constructor.
+  operator std::span<T, N>() const { return std::span<T, N>(*this); }
+  operator std::span<const T, N>() const
+    requires(!std::is_const_v<T>)
+  {
+    return std::span<const T, N>(*this);
+  }
+
+  // Compares two spans for equality by comparing the objects pointed to by the
+  // spans. The operation is defined for spans of different types as long as the
+  // types are themselves comparable.
+  //
+  // For primitive types, this replaces the less safe `memcmp` function, where
+  // `memcmp(a.data(), b.data(), a.size()) == 0` can be written as `a == b` and
+  // can no longer go outside the bounds of `b`. Otherwise, it replaces
+  // std::equal or std::ranges::equal when working with spans, and when no
+  // projection is needed.
+  //
+  // If the spans are of different sizes, they are not equal. If both spans are
+  // empty, they are always equal (even though their data pointers may differ).
+  //
+  // # Implementation note
+  // The non-template overloads allow implicit conversions to span for
+  // comparison.
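[Reviewer aside: a sketch of the element-wise comparison enabled by the operators that follow. Illustrative only; the spans here deduce `const int, 3`.]

```cpp
bool span_eq_sketch() {
  const int a[] = {1, 2, 3};
  const int b[] = {1, 2, 3};
  // Element-wise and bounds-safe; replaces memcmp(a, b, sizeof(a)) == 0
  // for primitive types, and cannot read past either array.
  return base::span(a) == base::span(b);
}
```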
+ friend constexpr bool operator==(span lhs, span rhs) + requires(std::is_const_v && std::equality_comparable) + { + return internal::span_eq(span(lhs), span(rhs)); + } + friend constexpr bool operator==(span lhs, span rhs) + requires(!std::is_const_v && std::equality_comparable) + { + return internal::span_eq(span(lhs), span(rhs)); + } + template + requires((N == M || M == dynamic_extent) && + std::equality_comparable_with) + friend constexpr bool operator==(span lhs, span rhs) { + return internal::span_eq(span(lhs), span(rhs)); + } + + // Compares two spans for ordering by comparing the objects pointed to by the + // spans. The operation is defined for spans of different types as long as the + // types are themselves ordered via `<=>`. + // + // For primitive types, this replaces the less safe `memcmp` function, where + // `memcmp(a.data(), b.data(), a.size()) < 0` can be written as `a < b` and + // can no longer go outside the bounds of `b`. + // + // If both spans are empty, they are always equal (even though their data + // pointers may differ). + // + // # Implementation note + // The non-template overloads allow implicit conversions to span for + // comparison. + friend constexpr auto operator<=>(span lhs, span rhs) + requires(std::is_const_v && std::three_way_comparable) + { + return internal::span_cmp(span(lhs), span(rhs)); + } + friend constexpr auto operator<=>(span lhs, span rhs) + requires(!std::is_const_v && std::three_way_comparable) + { + return internal::span_cmp(span(lhs), span(rhs)); + } + template + requires((N == M || M == dynamic_extent) && + std::three_way_comparable_with) + friend constexpr auto operator<=>(span lhs, span rhs) { + return internal::span_cmp(span(lhs), span(rhs)); + } + + private: + // This field is not a raw_ptr<> since span is mostly used for stack + // variables. Use `raw_span` instead for class fields, which does use + // raw_ptr<> internally. + InternalPtrType data_ = nullptr; +}; + +// [span], class template span +template +class GSL_POINTER span { + public: + using element_type = T; + using value_type = std::remove_cv_t; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using iterator = CheckedContiguousIterator; + using reverse_iterator = std::reverse_iterator; + static constexpr size_t extent = dynamic_extent; + + constexpr span() noexcept = default; + + // Constructs a span from a contiguous iterator and a size. + // + // # Safety + // The iterator must point to the first of at least `count` many elements, or + // Undefined Behaviour can result as the span will allow access beyond the + // valid range of the collection pointed to by the iterator. + template + requires(internal::CompatibleIter) + UNSAFE_BUFFER_USAGE constexpr span(It first, + StrictNumeric count) noexcept + // The use of to_address() here is to handle the case where the iterator + // `first` is pointing to the container's `end()`. In that case we can + // not use the address returned from the iterator, or dereference it + // through the iterator's `operator*`, but we can store it. We must + // assume in this case that `count` is 0, since the iterator does not + // point to valid data. Future hardening of iterators may disallow + // pulling the address from `end()`, as demonstrated by asserts() in + // libstdc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93960. 
+ // + // The span API dictates that the `data()` is accessible when size is 0, + // since the pointer may be valid, so we cannot prevent storing and + // giving out an invalid pointer here without breaking API compatibility + // and our unit tests. Thus protecting against this can likely only be + // successful from inside iterators themselves, where the context about + // the pointer is known. + // + // We can not protect here generally against an invalid iterator/count + // being passed in, since we have no context to determine if the + // iterator or count are valid. + : data_(base::to_address(first)), size_(count) {} + + // Constructs a span from a contiguous iterator and a size. + // + // # Safety + // The begin and end iterators must be for the same allocation, and `begin <= + // end` or Undefined Behaviour can result as the span will allow access beyond + // the valid range of the collection pointed to by `begin`. + template + requires(internal::CompatibleIter && + std::sized_sentinel_for && + !std::convertible_to) + UNSAFE_BUFFER_USAGE constexpr span(It begin, End end) noexcept + // SAFETY: The caller must guarantee that the iterator and end sentinel + // are part of the same allocation, in which case it is the number of + // elements between the iterators and thus a valid size for the pointer to + // the element at `begin`. + // + // We CHECK that `end - begin` did not underflow below. Normally checking + // correctness afterward is flawed, however underflow is not UB and the + // size is not converted to an invalid pointer (which would be UB) before + // we CHECK for underflow. + : UNSAFE_BUFFERS(span(begin, static_cast(end - begin))) { + // Verify `end - begin` did not underflow. + CHECK(begin <= end); + } + + template + // NOLINTNEXTLINE(google-explicit-constructor) + constexpr span(T (&arr)[N]) noexcept + // SAFETY: The std::ranges::size() function gives the number of elements + // pointed to by the std::ranges::data() function, which meets the + // requirement of span. + : UNSAFE_BUFFERS(span(std::ranges::data(arr), std::ranges::size(arr))) {} + + template + requires(internal::LegacyCompatibleRange) + // NOLINTNEXTLINE(google-explicit-constructor) + constexpr span(R&& range) noexcept + // SAFETY: The std::ranges::size() function gives the number of elements + // pointed to by the std::ranges::data() function, which meets the + // requirement of span. + : UNSAFE_BUFFERS( + span(std::ranges::data(range), std::ranges::size(range))) {} + + // [span.sub], span subviews + template + constexpr span first() const noexcept { + CHECK_LE(Count, size()); + // SAFETY: span provides that data() points to at least `size()` many + // elements. `Count` is non-negative by its type and `Count <= size()` from + // the CHECK above. So `Count` is a valid new size for `data()`. + return UNSAFE_BUFFERS(span(data(), Count)); + } + + template + constexpr span last() const noexcept { + CHECK_LE(Count, size()); + // SAFETY: span provides that data() points to at least `size()` many + // elements. `Count` is non-negative by its type and `Count <= size()` from + // the check above. So `0 <= size() - Count <= size()`, meaning + // `size() - Count` is a valid new size for `data()` and it will point to + // `Count` many elements. + return UNSAFE_BUFFERS(span(data() + (size() - Count), Count)); + } + + // Returns a span over the first `count` elements. + // + // # Checks + // The function CHECKs that the span contains at least `count` elements and + // will terminate otherwise. 
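[Reviewer aside: a sketch of the bounds-checked runtime subviews defined just below. The function and parameter names are illustrative.]

```cpp
void dynamic_subview_sketch(base::span<const char> s) {
  // Both calls CHECK-fail (terminate) if s.size() < 2, so guard first.
  if (s.size() >= 2u) {
    base::span<const char> head = s.first(2u);
    base::span<const char> tail = s.last(2u);
    (void)head;
    (void)tail;
  }
}
```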
+ constexpr span first(StrictNumeric count) const noexcept { + CHECK_LE(size_t{count}, size()); + // SAFETY: span provides that data() points to at least `size()` many + // elements. `count` is non-negative by its type and `count <= size()` from + // the CHECK above. So `count` is a valid new size for `data()`. + return UNSAFE_BUFFERS({data(), count}); + } + + // Returns a span over the last `count` elements. + // + // # Checks + // The function CHECKs that the span contains at least `count` elements and + // will terminate otherwise. + constexpr span last(StrictNumeric count) const noexcept { + CHECK_LE(size_t{count}, size()); + // SAFETY: span provides that data() points to at least `size()` many + // elements. `count` is non-negative by its type and `count <= size()` from + // the CHECK above. So `0 <= size() - count <= size()`, meaning + // `size() - count` is a valid new size for `data()` and it will point to + // `count` many elements. + return UNSAFE_BUFFERS({data() + (size() - size_t{count}), count}); + } + + template + constexpr span subspan() const noexcept { + CHECK_LE(Offset, size()); + CHECK(Count == dynamic_extent || Count <= size() - Offset); + const size_t new_extent = Count != dynamic_extent ? Count : size() - Offset; + // SAFETY: span provides that data() points to at least `size()` many + // elements. + // + // If Count is dynamic_extent, `new_extent` becomes `size() - Offset`. Since + // `Offset <= size()` from the check above, then `Offset` is a valid offset + // for data(), and `Offset + new_extent = Offset + size() - Offset = size() + // >= Offset` is also a valid offset that is not before `Offset`. This makes + // a span at `Offset` with size `new_extent` valid. + // + // Otherwise `Count <= size() - Offset` and `0 <= Offset <= size()` by the + // check above, so `Offset <= size() - Count` and `size() - Count` can not + // underflow. Then `Offset` is a valid offset for data() and `new_extent` is + // `Count <= size() - Offset`, so `Offset + extent <= Offset + size() - + // Offset = size()` which makes both `Offset` and `Offset + new_extent` + // valid offsets for data(), and since `new_extent` is non-negative, `Offset + // + new_extent` is not before `Offset` so `new_extent` is a valid size for + // the span at `data() + Offset`. + return UNSAFE_BUFFERS(span(data() + Offset, new_extent)); + } + + // Returns a span over the first `count` elements starting at the given + // `offset` from the start of the span. + // + // # Checks + // The function CHECKs that the span contains at least `offset + count` + // elements, or at least `offset` elements if `count` is not specified, and + // will terminate otherwise. + constexpr span subspan(size_t offset, + size_t count = dynamic_extent) const noexcept { + CHECK_LE(offset, size()); + CHECK(count == dynamic_extent || count <= size() - offset); + const size_t new_extent = count != dynamic_extent ? count : size() - offset; + // SAFETY: span provides that data() points to at least `size()` many + // elements. + // + // If count is dynamic_extent, `new_extent` becomes `size() - offset`. Since + // `offset <= size()` from the check above, then `offset` is a valid offset + // for data(), and `offset + new_extent = offset + size() - offset = size() + // >= offset` is also a valid offset that is not before `offset`. This makes + // a span at `offset` with size `new_extent` valid. 
+    //
+    // Otherwise `count <= size() - offset` and `0 <= offset <= size()` by the
+    // checks above, so `offset <= size() - count` and `size() - count` can not
+    // underflow. Then `offset` is a valid offset for data() and `new_extent` is
+    // `count <= size() - offset`, so `offset + new_extent <= offset + size() -
+    // offset = size()` which makes both `offset` and `offset + new_extent`
+    // valid offsets for data(), and since `new_extent` is non-negative, `offset
+    // + new_extent` is not before `offset` so `new_extent` is a valid size for
+    // the span at `data() + offset`.
+    return UNSAFE_BUFFERS({data() + offset, new_extent});
+  }
+
+  // Splits a span into two at the given `offset`, returning two spans that
+  // cover the full range of the original span.
+  //
+  // Similar to calling subspan() with the `offset` as the length on the first
+  // call, and then the `offset` as the offset in the second.
+  //
+  // The split_at() overload allows construction of a fixed-size span from a
+  // compile-time constant. If the input span is fixed-size, both output spans
+  // will be. Otherwise, the first will be fixed-size and the second will be
+  // dynamic-size.
+  //
+  // This is a non-std extension that is inspired by the Rust slice::split_at()
+  // and split_at_mut() methods.
+  //
+  // # Checks
+  // The function CHECKs that the span contains at least `offset` elements and
+  // will terminate otherwise.
+  constexpr std::pair<span<T>, span<T>> split_at(size_t offset) const noexcept {
+    return {first(offset), subspan(offset)};
+  }
+
+  // An overload of `split_at` which returns a fixed-size span.
+  //
+  // # Checks
+  // The function CHECKs that the span contains at least `Offset` elements and
+  // will terminate otherwise.
+  template <size_t Offset>
+  constexpr std::pair<span<T, Offset>, span<T>> split_at() const noexcept {
+    CHECK_LE(Offset, size());
+    return {first<Offset>(), subspan(Offset)};
+  }
+
+  // [span.obs], span observers
+  constexpr size_t size() const noexcept { return size_; }
+  constexpr size_t size_bytes() const noexcept { return size() * sizeof(T); }
+  [[nodiscard]] constexpr bool empty() const noexcept { return size() == 0; }
+
+  // [span.elem], span element access
+  //
+  // # Checks
+  // The function CHECKs that the `idx` is inside the span and will terminate
+  // otherwise.
+  constexpr T& operator[](size_t idx) const noexcept {
+    CHECK_LT(idx, size());
+    // SAFETY: Since data() always points to at least `size()` elements, the
+    // check above ensures `idx < size()` and is thus in range for data().
+    return UNSAFE_BUFFERS(data()[idx]);
+  }
+
+  // Returns a reference to the first element in the span.
+  //
+  // # Checks
+  // The function CHECKs that the span is not empty and will terminate
+  // otherwise.
+  constexpr T& front() const noexcept {
+    CHECK(!empty());
+    // SAFETY: Since data() always points to at least `size()` elements, the
+    // check above ensures `0 < size()` and is thus in range for data().
+    return UNSAFE_BUFFERS(data()[0]);
+  }
+
+  // Returns a reference to the last element in the span.
+  //
+  // # Checks
+  // The function CHECKs that the span is not empty and will terminate
+  // otherwise.
+  constexpr T& back() const noexcept {
+    CHECK(!empty());
+    // SAFETY: Since data() always points to at least `size()` elements, the
+    // check above ensures `size() > 0` and thus `size() - 1` does not
+    // underflow and is in range for data().
+    return UNSAFE_BUFFERS(data()[size() - 1]);
+  }
+  // Returns a pointer to the first element in the span. If the span is empty
+  // (`size()` is 0), the returned pointer may or may not be null, and it must
+  // not be dereferenced.
+  //
+  // It is always valid to add `size()` to the pointer in C++ code, though it
+  // may be invalid in C code when the span is empty.
+  constexpr T* data() const noexcept { return data_; }
+
+  // [span.iter], span iterator support
+  constexpr iterator begin() const noexcept {
+    // SAFETY: span provides that data() points to at least `size()` many
+    // elements, and size() is non-negative. So data() + size() is a valid
+    // pointer for the data() allocation.
+    return UNSAFE_BUFFERS(iterator(data(), data() + size()));
+  }
+
+  constexpr iterator end() const noexcept {
+    // SAFETY: span provides that data() points to at least `size()` many
+    // elements, and size() is non-negative. So data() + size() is a valid
+    // pointer for the data() allocation.
+    return UNSAFE_BUFFERS(iterator(data(), data() + size(), data() + size()));
+  }
+
+  constexpr reverse_iterator rbegin() const noexcept {
+    return reverse_iterator(end());
+  }
+
+  constexpr reverse_iterator rend() const noexcept {
+    return reverse_iterator(begin());
+  }
+
+  // Bounds-checked copy from a non-overlapping span. The spans must be the
+  // exact same size or a hard CHECK() occurs. If the two spans overlap,
+  // Undefined Behaviour occurs.
+  //
+  // This is a non-std extension that is inspired by the Rust
+  // slice::copy_from_slice() method.
+  //
+  // # Checks
+  // The function CHECKs that the `other` span has the same size as itself and
+  // will terminate otherwise.
+  constexpr void copy_from(span<const T> other)
+    requires(!std::is_const_v<T>)
+  {
+    CHECK_EQ(size_bytes(), other.size_bytes());
+    // Verify non-overlapping in developer builds.
+    //
+    // SAFETY: span provides that data() points to at least size() many
+    // elements, so adding size() to the data() pointer is well-defined.
+    DCHECK(UNSAFE_BUFFERS(data() + size()) <= other.data() ||
+           data() >= UNSAFE_BUFFERS(other.data() + other.size()));
+    // When compiling with -Oz, std::ranges::copy() does not get inlined, which
+    // makes copy_from() very expensive compared to memcpy for small sizes (up
+    // to around 4x slower). We observe that this is because ranges::copy() uses
+    // begin()/end() and span's iterators are checked iterators, not just
+    // pointers. This additional complexity prevents inlining and breaks the
+    // ability for the compiler to eliminate code.
+    //
+    // See also https://crbug.com/1396134.
+    //
+    // We also see std::copy() (with pointer arguments! not iterators) optimize
+    // and inline better than memcpy() since memcpy() needs to rely on
+    // size_bytes(), which while computable at compile time when `other` has a
+    // fixed size, the optimizer stumbles on with -Oz.
+    //
+    // SAFETY: The copy() here does not check bounds, but we have verified that
+    // `this` and `other` have the same bounds above (and are pointers of the
+    // same type), so `data()` and `other.data()` both have at least
+    // `other.size()` elements.
+    UNSAFE_BUFFERS(
+        std::copy(other.data(), other.data() + other.size(), data()));
+  }
+
+  // Compares two spans for equality by comparing the objects pointed to by the
+  // spans. The operation is defined for spans of different types as long as the
+  // types are themselves comparable.
+  //
+  // For primitive types, this replaces the less safe `memcmp` function, where
+  // `memcmp(a.data(), b.data(), a.size()) == 0` can be written as `a == b` and
+  // can no longer go outside the bounds of `b`.
+  // Otherwise, it replaces std::equal or std::ranges::equal when working with
+  // spans, and when no projection is needed.
+  //
+  // If the spans are of different sizes, they are not equal. If both spans are
+  // empty, they are always equal (even though their data pointers may differ).
+  //
+  // # Implementation note
+  // The non-template overloads allow implicit conversions to span for
+  // comparison.
+  friend constexpr bool operator==(span lhs, span rhs)
+    requires(std::is_const_v<T> && std::equality_comparable<T>)
+  {
+    return internal::span_eq(span<const T>(lhs), span<const T>(rhs));
+  }
+  friend constexpr bool operator==(span<const T> lhs, span<const T> rhs)
+    requires(!std::is_const_v<T> && std::equality_comparable<T>)
+  {
+    return internal::span_eq(span<const T>(lhs), span<const T>(rhs));
+  }
+  template <typename U, size_t M>
+    requires(std::equality_comparable_with<const T, const U>)
+  friend constexpr bool operator==(span lhs, span<U, M> rhs) {
+    return internal::span_eq(span<const T>(lhs), span<const U, M>(rhs));
+  }
+
+  // Compares two spans for ordering by comparing the objects pointed to by the
+  // spans. The operation is defined for spans of different types as long as the
+  // types are themselves ordered via `<=>`.
+  //
+  // For primitive types, this replaces the less safe `memcmp` function, where
+  // `memcmp(a.data(), b.data(), a.size()) < 0` can be written as `a < b` and
+  // can no longer go outside the bounds of `b`.
+  //
+  // If both spans are empty, they are always equal (even though their data
+  // pointers may differ).
+  //
+  // # Implementation note
+  // The non-template overloads allow implicit conversions to span for
+  // comparison.
+  friend constexpr auto operator<=>(span lhs, span rhs)
+    requires(std::is_const_v<T> && std::three_way_comparable<T>)
+  {
+    return internal::span_cmp(span<const T>(lhs), span<const T>(rhs));
+  }
+  friend constexpr auto operator<=>(span<const T> lhs, span<const T> rhs)
+    requires(!std::is_const_v<T> && std::three_way_comparable<T>)
+  {
+    return internal::span_cmp(span<const T>(lhs), span<const T>(rhs));
+  }
+  template <typename U, size_t M>
+    requires(std::three_way_comparable_with<const T, const U>)
+  friend constexpr auto operator<=>(span lhs, span<U, M> rhs) {
+    return internal::span_cmp(span<const T>(lhs), span<const U, M>(rhs));
+  }
+
+ private:
+  // This field is not a raw_ptr<> since span is mostly used for stack
+  // variables. Use `raw_span` instead for class fields, which does use
+  // raw_ptr<> internally.
+  InternalPtrType data_ = nullptr;
+  size_t size_ = 0;
+};
+
+// [span.deduct], deduction guides.
+template <typename It, typename EndOrSize>
+  requires(std::contiguous_iterator<It>)
+span(It, EndOrSize) -> span<std::remove_reference_t<std::iter_reference_t<It>>>;
+
+template <
+    typename R,
+    typename T = std::remove_reference_t<std::ranges::range_reference_t<R>>>
+  requires(std::ranges::contiguous_range<R>)
+span(R&&)
+    -> span<std::conditional_t<std::ranges::borrowed_range<R>, T, const T>,
+            internal::ExtentV<R>>;
+
+// This guide prefers to let the contiguous_range guide match, since it can
+// produce a fixed-size span. Whereas, LegacyRange only produces a dynamic-sized
+// span.
+template <typename R>
+  requires(!std::ranges::contiguous_range<R> && internal::LegacyRange<R>)
+span(R&& r) noexcept
+    -> span<std::remove_pointer_t<decltype(std::data(std::declval<R&>()))>>;
+
+template <typename T, size_t N>
+span(const T (&)[N]) -> span<const T, N>;
+
+// span can be printed and will print each of its values, including in Gtests.
+//
+// TODO(danakj): This could move to a ToString() member method if gtest printers
+// were hooked up to base::ToString().
+template <typename T, size_t N>
+constexpr std::ostream& operator<<(std::ostream& l, span<T, N> r) {
+  return internal::span_stream(l, r);
+}
+
+// [span.objectrep], views of object representation
+template <typename T, size_t X>
+constexpr auto as_bytes(span<T, X> s) noexcept {
+  constexpr size_t N = X == dynamic_extent ? dynamic_extent : sizeof(T) * X;
+  // SAFETY: span provides that data() points to at least size_bytes() many
+  // bytes. So since `uint8_t` has a size of 1 byte, the size_bytes() value is
+  // a valid size for a span at data() when viewed as `uint8_t*`.
+  //
+  // The reinterpret_cast is valid as the alignment of uint8_t (which is 1) is
+  // always less-than or equal to the alignment of T.
+  return UNSAFE_BUFFERS(span<const uint8_t, N>(
+      reinterpret_cast<const uint8_t*>(s.data()), s.size_bytes()));
+}
+
+template <typename T, size_t X>
+  requires(!std::is_const_v<T>)
+constexpr auto as_writable_bytes(span<T, X> s) noexcept {
+  constexpr size_t N = X == dynamic_extent ? dynamic_extent : sizeof(T) * X;
+  // SAFETY: span provides that data() points to at least size_bytes() many
+  // bytes. So since `uint8_t` has a size of 1 byte, the size_bytes() value is a
+  // valid size for a span at data() when viewed as `uint8_t*`.
+  //
+  // The reinterpret_cast is valid as the alignment of uint8_t (which is 1) is
+  // always less-than or equal to the alignment of T.
+  return UNSAFE_BUFFERS(
+      span<uint8_t, N>(reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()));
+}
+
+// as_chars() is the equivalent of as_bytes(), except that it returns a
+// span of const char rather than const uint8_t. This non-std function is
+// added since chrome still represents many things as char arrays which
+// rightfully should be uint8_t.
+template <typename T, size_t X>
+constexpr auto as_chars(span<T, X> s) noexcept {
+  constexpr size_t N = X == dynamic_extent ? dynamic_extent : sizeof(T) * X;
+  // SAFETY: span provides that data() points to at least size_bytes() many
+  // bytes. So since `char` has a size of 1 byte, the size_bytes() value is a
+  // valid size for a span at data() when viewed as `char*`.
+  //
+  // The reinterpret_cast is valid as the alignment of char (which is 1) is
+  // always less-than or equal to the alignment of T.
+  return UNSAFE_BUFFERS(span<const char, N>(
+      reinterpret_cast<const char*>(s.data()), s.size_bytes()));
+}
+
+// as_string_view() converts a span over byte-sized primitives (holding chars or
+// uint8_t) into a std::string_view, where each byte is represented as a char.
+// It also accepts any type that can implicitly convert to a span, such as
+// arrays.
+//
+// If you want to view an arbitrary span type as a string, first explicitly
+// convert it to bytes via `base::as_bytes()`.
+//
+// For spans over byte-sized primitives, this is sugar for:
+// ```
+// std::string_view(as_chars(span).begin(), as_chars(span).end())
+// ```
+constexpr std::string_view as_string_view(span<const char> s) noexcept {
+  return std::string_view(s.begin(), s.end());
+}
+constexpr std::string_view as_string_view(span<const uint8_t> s) noexcept {
+  const auto c = as_chars(s);
+  return std::string_view(c.begin(), c.end());
+}
+
+// as_writable_chars() is the equivalent of as_writable_bytes(), except that
+// it returns a span of char rather than uint8_t. This non-std function is
+// added since chrome still represents many things as char arrays which
+// rightfully should be uint8_t.
+template <typename T, size_t X>
+  requires(!std::is_const_v<T>)
+auto as_writable_chars(span<T, X> s) noexcept {
+  constexpr size_t N = X == dynamic_extent ? dynamic_extent : sizeof(T) * X;
+  // SAFETY: span provides that data() points to at least size_bytes() many
+  // bytes. So since `char` has a size of 1 byte, the size_bytes() value is
+  // a valid size for a span at data() when viewed as `char*`.
+  //
+  // The reinterpret_cast is valid as the alignment of char (which is 1) is
+  // always less-than or equal to the alignment of T.
+  return UNSAFE_BUFFERS(
+      span<char, N>(reinterpret_cast<char*>(s.data()), s.size_bytes()));
+}
+
+// Type-deducing helper for constructing a span.
+//
+// # Safety
+// The contiguous iterator `it` must point to the first element of at least
+// `size` many elements or Undefined Behaviour may result as the span may give
+// access beyond the bounds of the collection pointed to by `it`.
+template <int&... ExplicitArgumentBarrier, typename It>
+UNSAFE_BUFFER_USAGE constexpr auto make_span(
+    It it,
+    StrictNumeric<size_t> size) noexcept {
+  using T = std::remove_reference_t<std::iter_reference_t<It>>;
+  // SAFETY: The caller guarantees that `it` is the first of at least `size`
+  // many elements.
+  return UNSAFE_BUFFERS(span<T>(it, size));
+}
+
+// Type-deducing helper for constructing a span.
+//
+// # Checks
+// The function CHECKs that `it <= end` and will terminate otherwise.
+//
+// # Safety
+// The contiguous iterator `it` and its end sentinel `end` must be for the same
+// allocation or Undefined Behaviour may result as the span may give access
+// beyond the bounds of the collection pointed to by `it`.
+template <int&... ExplicitArgumentBarrier,
+          typename It,
+          typename End,
+          typename = std::enable_if_t<!std::is_convertible_v<End, size_t>>>
+UNSAFE_BUFFER_USAGE constexpr auto make_span(It it, End end) noexcept {
+  using T = std::remove_reference_t<std::iter_reference_t<It>>;
+  // SAFETY: The caller guarantees that `it` and `end` are iterators of the
+  // same allocation.
+  return UNSAFE_BUFFERS(span<T>(it, end));
+}
+
+// make_span utility function that deduces both the span's value_type and extent
+// from the passed in argument.
+//
+// Usage: auto span = base::make_span(...);
+template <int&... ExplicitArgumentBarrier, typename Container>
+constexpr auto make_span(Container&& container) noexcept {
+  using T =
+      std::remove_pointer_t<decltype(std::data(std::declval<Container>()))>;
+  using Extent = internal::Extent<Container>;
+  return span<T, Extent::value>(std::forward<Container>(container));
+}
+
+// make_span utility function that allows callers to explicitly specify the
+// span's extent; the value_type is deduced automatically. This is useful when
+// passing a dynamically sized container to a method expecting static spans,
+// when the container is known to have the correct size.
+//
+// Note: This will CHECK that N indeed matches size(container).
+//
+// # Usage
+// As this function is unsafe, the caller must guarantee that the size is
+// correct for the iterator, and will not allow the span to reach out of bounds.
+// ```
+// // SAFETY: .
+// auto static_span = UNSAFE_BUFFERS(base::make_span(it, size));
+// ```
+//
+// # Safety
+// The contiguous iterator `it` must point to the first element of at least
+// `size` many elements or Undefined Behaviour may result as the span may give
+// access beyond the bounds of the collection pointed to by `it`.
+template <size_t N, int&... ExplicitArgumentBarrier, typename It>
+UNSAFE_BUFFER_USAGE constexpr auto make_span(
+    It it,
+    StrictNumeric<size_t> size) noexcept {
+  using T = std::remove_reference_t<std::iter_reference_t<It>>;
+  // SAFETY: The caller guarantees that `it` is the first of at least `size`
+  // many elements.
+  return UNSAFE_BUFFERS(span<T, N>(it, size));
+}
+
+// make_span utility function that allows callers to explicitly specify the
+// span's extent; the value_type is deduced automatically. This is useful when
+// passing a dynamically sized container to a method expecting static spans,
+// when the container is known to have the correct size.
+//
+// Note: This will CHECK that N indeed matches size(container).
+//
+// # Usage
+// As this function is unsafe, the caller must guarantee that the `end` is from
+// the same allocation as the `it` iterator.
+// ```
+// // SAFETY: .
+// auto static_span = UNSAFE_BUFFERS(base::make_span(it, end));
+// ```
+//
+// # Checks
+// The function CHECKs that `it <= end` and will terminate otherwise.
+// +// # Safety +// The contiguous iterator `it` and its end sentinel `end` must be for the same +// allocation or Undefined Behaviour may result as the span may give access +// beyond the bounds of the collection pointed to by `it`. +template >> +UNSAFE_BUFFER_USAGE constexpr auto make_span(It it, End end) noexcept { + using T = std::remove_reference_t>; + // SAFETY: The caller guarantees that `it` and `end` are iterators of the + // same allocation. + return UNSAFE_BUFFERS(span(it, end)); +} + +template +constexpr auto make_span(Container&& container) noexcept { + using T = + std::remove_pointer_t()))>; + // SAFETY: The std::size() function gives the number of elements pointed to by + // the std::data() function, which meets the requirement of span. + return UNSAFE_BUFFERS(span(std::data(container), std::size(container))); +} + +// `span_from_ref` converts a reference to T into a span of length 1. This is a +// non-std helper that is inspired by the `std::slice::from_ref()` function from +// Rust. +//template +//constexpr span span_from_ref( +// T& single_object ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept { +// // SAFETY: Given a valid reference to `single_object` the span of size 1 will +// // be a valid span that points to the `single_object`. +// return UNSAFE_BUFFERS(span(std::addressof(single_object), 1u)); +//} + +// `byte_span_from_ref` converts a reference to T into a span of uint8_t of +// length sizeof(T). This is a non-std helper that is a sugar for +// `as_writable_bytes(span_from_ref(x))`. +// +// Const references are turned into a `span` while mutable +// references are turned into a `span`. +//template +//constexpr span byte_span_from_ref( +// const T& single_object ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept { +// return as_bytes(span_from_ref(single_object)); +//} +//template +//constexpr span byte_span_from_ref( +// T& single_object ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept { +// return as_writable_bytes(span_from_ref(single_object)); +//} + +// Converts a string literal (such as `"hello"`) to a span of `char` while +// omitting the terminating NUL character. These two are equivalent: +// ``` +// base::span s1 = base::span_from_cstring("hello"); +// base::span s2 = base::span(std::string_view("hello")); +// ``` +// +// If you want to include the NUL terminator in the span, then use +// `span_with_nul_from_cstring()`. +// +// Internal NUL characters (ie. that are not at the end of the string) are +// always preserved. +//template +//constexpr span span_from_cstring( +// const char (&lit ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) +// ENABLE_IF_ATTR(lit[N - 1u] == '\0', "requires string literal as input") { +// return span(lit).template first(); +//} + +// Converts a string literal (such as `"hello"`) to a span of `char` that +// includes the terminating NUL character. These two are equivalent: +// ``` +// base::span s1 = base::span_with_nul_from_cstring("hello"); +// auto h = std::cstring_view("hello"); +// base::span s2 = +// UNSAFE_BUFFERS(base::span(h.data(), h.size() + 1u)); +// ``` +// +// If you do not want to include the NUL terminator, then use +// `span_from_cstring()` or use a view type (`base::cstring_view` or +// `std::string_view`) in place of a string literal. +// +// Internal NUL characters (ie. that are not at the end of the string) are +// always preserved. 
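[Reviewer aside: the cstring helpers in this region are commented out in this port, so the following is a hypothetical sketch of how they behave upstream when enabled, kept commented out to match:]

```cpp
// "hello" is a 6-element char array; span_from_cstring() drops the NUL:
// base::span<const char, 5> s = base::span_from_cstring("hello");
// ...while span_with_nul_from_cstring() keeps it:
// base::span<const char, 6> t = base::span_with_nul_from_cstring("hello");
```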
+//template +//constexpr span span_with_nul_from_cstring( +// const char (&lit ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) +// ENABLE_IF_ATTR(lit[N - 1u] == '\0', "requires string literal as input") { +// return span(lit); +//} + +// Converts a string literal (such as `"hello"`) to a span of `uint8_t` while +// omitting the terminating NUL character. These two are equivalent: +// ``` +// base::span s1 = base::byte_span_from_cstring("hello"); +// base::span s2 = base::as_byte_span(std::string_view("hello")); +// ``` +// +// If you want to include the NUL terminator in the span, then use +// `byte_span_with_nul_from_cstring()`. +// +// Internal NUL characters (ie. that are not at the end of the string) are +// always preserved. +//template +//constexpr span byte_span_from_cstring( +// const char (&lit ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) +// ENABLE_IF_ATTR(lit[N - 1u] == '\0', "requires string literal as input") { +// return as_bytes(span(lit).template first()); +//} + +// Converts a string literal (such as `"hello"`) to a span of `uint8_t` that +// includes the terminating NUL character. These two are equivalent: +// ``` +// base::span s1 = base::byte_span_with_nul_from_cstring("hello"); +// auto h = base::cstring_view("hello"); +// base::span s2 = base::as_bytes( +// UNSAFE_BUFFERS(base::span(h.data(), h.size() + 1u))); +// ``` +// +// If you do not want to include the NUL terminator, then use +// `byte_span_from_cstring()` or use a view type (`base::cstring_view` or +// `std::string_view`) in place of a string literal and `as_byte_span()`. +// +// Internal NUL characters (ie. that are not at the end of the string) are +// always preserved. +//template +//constexpr span byte_span_with_nul_from_cstring( +// const char (&lit ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) +// ENABLE_IF_ATTR(lit[N - 1u] == '\0', "requires string literal as input") { +// return as_bytes(span(lit)); +//} + +// Convenience function for converting an object which is itself convertible +// to span into a span of bytes (i.e. span of const uint8_t). Typically used +// to convert std::string or string-objects holding chars, or std::vector +// or vector-like objects holding other scalar types, prior to passing them +// into an API that requires byte spans. +template +requires requires(const T& arg) { + requires !std::is_array_v>; + make_span(arg); +} +constexpr span as_byte_span(const T& arg) { + return as_bytes(make_span(arg)); +} + +// This overload for arrays preserves the compile-time size N of the array in +// the span type signature span. +//template +//constexpr span as_byte_span( +// const T (&arr ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) { +// return as_bytes(make_span(arr)); +//} + +// This overload adds a compile-time size that must be explicitly given, +// checking that the size is correct at runtime. The template argument `N` is +// the number of _bytes_ in the input range, not the number of elements. +// +// This is sugar for `base::span(base::as_byte_span(x))`. +// +// Example: +// ``` +// std::string foo = "hello"; +// base::span s = base::as_byte_span<5>(foo); +// ``` +template +requires requires(const T& arg) { + requires !std::is_array_v>; + make_span(arg); +} +constexpr span as_byte_span(const T& arg) { + return span(as_byte_span(arg)); +} + +// Convenience function for converting an object which is itself convertible +// to span into a span of mutable bytes (i.e. span of uint8_t). 
Typically used +// to convert std::string or string-objects holding chars, or std::vector +// or vector-like objects holding other scalar types, prior to passing them +// into an API that requires mutable byte spans. +template +requires requires(T&& arg) { + requires !std::is_array_v>; + make_span(arg); + requires !std::is_const_v; +} +constexpr span as_writable_byte_span(T&& arg) { + return as_writable_bytes(make_span(arg)); +} + +// This overload for arrays preserves the compile-time size N of the array in +// the span type signature span. +//template +//requires(!std::is_const_v) +// constexpr span as_writable_byte_span( +// T (&arr ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) { +// return as_writable_bytes(make_span(arr)); +//} +//template +//requires(!std::is_const_v) +// constexpr span as_writable_byte_span( +// T (&&arr ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) { +// return as_writable_bytes(make_span(arr)); +//} + +// This overload adds a compile-time size that must be explicitly given, +// checking that the size is correct at runtime. The template argument `N` is +// the number of _bytes_ in the input range, not the number of elements. +// +// This is sugar for `base::span(base::as_byte_span(x))`. +// +// Example: +// ``` +// std::string foo = "hello"; +// base::span s = base::as_writable_byte_span<5>(foo); +// ``` +template +requires requires(T&& arg) { + requires !std::is_array_v>; + make_span(arg); + requires !std::is_const_v; +} +constexpr span as_writable_byte_span(T&& arg) { + return span(as_writable_byte_span(arg)); +} + +namespace internal { + +// Template helper for implementing operator==. +template +requires((N == M || N == dynamic_extent || M == dynamic_extent) && + std::equality_comparable_with) + constexpr bool span_eq(span l, span r) { + return l.size() == r.size() && std::equal(l.begin(), l.end(), r.begin()); +} + +// Template helper for implementing operator<=>. +//template +//requires((N == M || N == dynamic_extent || M == dynamic_extent) && +// std::three_way_comparable_with) +// constexpr auto span_cmp(span l, span r) +// -> decltype(l[0u] <=> r[0u]) { +// return std::lexicographical_compare_three_way(l.begin(), l.end(), r.begin(), +// r.end()); +//} + +// Template helper for implementing printing. +template +constexpr std::ostream& span_stream(std::ostream& l, span r) { + l << "["; + if constexpr (!std::same_as, char>) { + if (!r.empty()) { + l << base::ToString(r.front()); + for (const T& e : r.subspan(1u)) { + l << ", "; + l << base::ToString(e); + } + } + } else { + l << '\"'; + l << as_string_view(r); + l << '\"'; + } + l << "]"; + return l; +} + +} // namespace internal + +} // namespace base + +template +inline constexpr bool + std::ranges::enable_borrowed_range> = true; + +template +inline constexpr bool std::ranges::enable_view> = true; + +// EXTENT returns the size of any type that can be converted to a |base::span| +// with definite extent, i.e. everything that is a contiguous storage of some +// sort with static size. Specifically, this works for std::array in a constexpr +// context. Note: +// * |std::size| should be preferred for plain arrays. +// * In run-time contexts, functions such as |std::array::size| should be +// preferred. 
+#define EXTENT(x)                                        \
+  ::base::internal::must_not_be_dynamic_extent<decltype( \
+      ::base::make_span(x))::extent>()
+
+#endif  // BASE_CONTAINERS_SPAN_H_
+
diff --git a/bridge/bindings/v8/base/containers/util.h b/bridge/bindings/v8/base/containers/util.h
new file mode 100644
index 0000000000..55e7fe5ec7
--- /dev/null
+++ b/bridge/bindings/v8/base/containers/util.h
@@ -0,0 +1,23 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_CONTAINERS_UTIL_H_
+#define BASE_CONTAINERS_UTIL_H_
+
+#include <stdint.h>
+
+namespace base {
+
+// TODO(crbug.com/40565371): What we really need is for checked_math.h to be
+// able to do checked arithmetic on pointers.
+template <typename T>
+inline uintptr_t get_uintptr(const T* t) {
+  return reinterpret_cast<uintptr_t>(t);
+}
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_UTIL_H_
+
diff --git a/bridge/bindings/v8/base/dcheck_is_on.h b/bridge/bindings/v8/base/dcheck_is_on.h
new file mode 100644
index 0000000000..4f011e5c5c
--- /dev/null
+++ b/bridge/bindings/v8/base/dcheck_is_on.h
@@ -0,0 +1,12 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef WEBF_DCHECK_IS_ON_H
+#define WEBF_DCHECK_IS_ON_H
+
+#define DCHECK_IS_ON() false
+#define EXPENSIVE_DCHECKS_ARE_ON() false
+
+#endif  // WEBF_DCHECK_IS_ON_H
diff --git a/bridge/bindings/v8/base/is_empty.h b/bridge/bindings/v8/base/is_empty.h
new file mode 100644
index 0000000000..63f2934103
--- /dev/null
+++ b/bridge/bindings/v8/base/is_empty.h
@@ -0,0 +1,17 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_MACROS_IS_EMPTY_H_
+#define BASE_MACROS_IS_EMPTY_H_
+
+// A macro that substitutes with 1 if called without arguments, otherwise 0.
+#define BASE_IS_EMPTY(...) BASE_INTERNAL_IS_EMPTY_EXPANDED(__VA_ARGS__)
+#define BASE_INTERNAL_IS_EMPTY_EXPANDED(...) \
+  BASE_INTERNAL_IS_EMPTY_INNER(_, ##__VA_ARGS__)
+#define BASE_INTERNAL_IS_EMPTY_INNER(...) \
+  BASE_INTERNAL_IS_EMPTY_INNER_EXPANDED(__VA_ARGS__, 0, 1)
+#define BASE_INTERNAL_IS_EMPTY_INNER_EXPANDED(e0, e1, is_empty, ...) is_empty
+
+#endif  // BASE_MACROS_IS_EMPTY_H_
diff --git a/bridge/bindings/v8/base/location.cc b/bridge/bindings/v8/base/location.cc
new file mode 100644
index 0000000000..ecc7108471
--- /dev/null
+++ b/bridge/bindings/v8/base/location.cc
@@ -0,0 +1,153 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifdef UNSAFE_BUFFERS_BUILD
+// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
+#pragma allow_unsafe_buffers
+#endif
+
+#include "bindings/v8/base/location.h"
+
+#include "bindings/v8/base/compiler_specific.h"
+#include <cassert>
+//#include "bindings/v8/base/strings/string_number_conversions.h"
+//#include "bindings/v8/base/strings/stringprintf.h"
+//#include "bindings/v8/base/trace_event/base_tracing.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
+namespace base {
+
+namespace {
+
+// Returns the length of the given null-terminated C string.
+constexpr size_t StrLen(const char* str) {
+  size_t str_len = 0;
+  for (str_len = 0; str[str_len] != '\0'; ++str_len)
+    ;
+  return str_len;
+}
+
+// Finds the length of the build folder prefix from the file path.
+// TODO(ssid): Strip prefixes from stored strings in the binary. This code only
This code only +// skips the prefix while reading the file name strings at runtime. +constexpr size_t StrippedFilePathPrefixLength() { + constexpr char path[] = __FILE__; + // Only keep the file path starting from the src directory. +#if defined(__clang__) && defined(_MSC_VER) + constexpr char stripped[] = "base\\location.cc"; +#else + constexpr char stripped[] = "base/location.cc"; +#endif + constexpr size_t path_len = StrLen(path); + constexpr size_t stripped_len = StrLen(stripped); + static_assert(path_len >= stripped_len, + "Invalid file path for base/location.cc."); + return path_len - stripped_len; +} + +constexpr size_t kStrippedPrefixLength = StrippedFilePathPrefixLength(); + +// Returns true if the |name| string has |prefix_len| characters in the prefix +// and the suffix matches the |expected| string. +// TODO(ssid): With C++20 we can make base::EndsWith() constexpr and use it +// instead. +constexpr bool StrEndsWith(const char* name, + size_t prefix_len, + const char* expected) { + const size_t name_len = StrLen(name); + const size_t expected_len = StrLen(expected); + if (name_len != prefix_len + expected_len) + return false; + for (size_t i = 0; i < expected_len; ++i) { + if (name[i + prefix_len] != expected[i]) + return false; + } + return true; +} + +#if defined(__clang__) && defined(_MSC_VER) +static_assert(StrEndsWith(__FILE__, kStrippedPrefixLength, "base\\location.cc"), + "The file name does not match the expected prefix format."); +#else +static_assert(StrEndsWith(__FILE__, kStrippedPrefixLength, "base/location.cc"), + "The file name does not match the expected prefix format."); +#endif + +} // namespace + +Location::Location() = default; +Location::Location(const Location& other) = default; +Location::Location(Location&& other) noexcept = default; +Location& Location::operator=(const Location& other) = default; + +Location::Location(const char* file_name, const void* program_counter) + : file_name_(file_name), program_counter_(program_counter) {} + +Location::Location(const char* function_name, + const char* file_name, + int line_number, + const void* program_counter) + : function_name_(function_name), + file_name_(file_name), + line_number_(line_number), + program_counter_(program_counter) { +#if !BUILDFLAG(IS_NACL) + // The program counter should not be null except in a default constructed + // (empty) Location object. This value is used for identity, so if it doesn't + // uniquely identify a location, things will break. + // + // The program counter isn't supported in NaCl so location objects won't work + // properly in that context. 
+  assert(program_counter);
+#endif
+}
+
+std::string Location::ToString() const {
+  // TODO webf
+//  if (has_source_info()) {
+//    return std::string(function_name_) + "@" + file_name_ + ":" +
+//           NumberToString(line_number_);
+//  }
+//  return StringPrintf("pc:%p", program_counter_);
+
+  std::string result;
+  return result;
+}
+
+// TODO webf
+//void Location::WriteIntoTrace(perfetto::TracedValue context) const {
+//  auto dict = std::move(context).WriteDictionary();
+//  dict.Add("function_name", function_name_);
+//  dict.Add("file_name", file_name_);
+//  dict.Add("line_number", line_number_);
+//}
+
+#if defined(COMPILER_MSVC)
+#define RETURN_ADDRESS() _ReturnAddress()
+#elif defined(COMPILER_GCC) && !BUILDFLAG(IS_NACL)
+#define RETURN_ADDRESS() \
+  __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+#define RETURN_ADDRESS() nullptr
+#endif
+
+// static
+NOINLINE Location Location::Current(const char* function_name,
+                                    const char* file_name,
+                                    int line_number) {
+  return Location(function_name, file_name + kStrippedPrefixLength, line_number,
+                  RETURN_ADDRESS());
+}
+
+//------------------------------------------------------------------------------
+NOINLINE const void* GetProgramCounter() {
+  return RETURN_ADDRESS();
+}
+
+}  // namespace base
+
diff --git a/bridge/bindings/v8/base/location.h b/bridge/bindings/v8/base/location.h
new file mode 100644
index 0000000000..22691adf33
--- /dev/null
+++ b/bridge/bindings/v8/base/location.h
@@ -0,0 +1,114 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+
+#ifndef BASE_LOCATION_H_
+#define BASE_LOCATION_H_
+
+#include <compare>
+#include <string>
+
+//#include "base/base_export.h"
+//#include "bindings/v8/base/memory/raw_ptr_exclusion.h"
+//#include "base/trace_event/base_tracing_forward.h"
+#include "bindings/v8/for_build/build_config.h"
+
+namespace base {
+
+// Location provides basic info about where an object was constructed, or was
+// significantly brought to life.
+class Location {
+ public:
+  Location();
+  Location(const Location& other);
+  Location(Location&& other) noexcept;
+  Location& operator=(const Location& other);
+
+  static Location CreateForTesting(const char* function_name,
+                                   const char* file_name,
+                                   int line_number,
+                                   const void* program_counter) {
+    return Location(function_name, file_name, line_number, program_counter);
+  }
+
+  // Comparator for testing. The program counter should uniquely
+  // identify a location.
+  friend bool operator==(const Location& lhs, const Location& rhs) {
+    return lhs.program_counter_ == rhs.program_counter_;
+  }
+
+  // The program counter should uniquely identify a location. There is no
+  // guarantee that a program counter corresponds to unique function/file/line
+  // values, based on how it's constructed, and therefore equivalent locations
+  // could be distinguishable.
+  friend std::weak_ordering operator<=>(const Location& lhs,
+                                        const Location& rhs) {
+    return lhs.program_counter_ <=> rhs.program_counter_;
+  }
+
+  // Returns true if there is source code location info. If this is false,
+  // the Location object only contains a program counter or is
+  // default-initialized (the program counter is also null).
+  bool has_source_info() const { return function_name_ && file_name_; }
+
+  // Will be nullptr for default initialized Location objects and when source
+  // names are disabled.
+  // Will be nullptr for default initialized Location objects and when source
+  // names are disabled.
+  const char* function_name() const { return function_name_; }
+
+  // Will be nullptr for default initialized Location objects and when source
+  // names are disabled.
+  const char* file_name() const { return file_name_; }
+
+  // Will be -1 for default initialized Location objects and when source names
+  // are disabled.
+  int line_number() const { return line_number_; }
+
+  // The address of the code generating this Location object. Should always be
+  // valid except for default initialized Location objects, which will be
+  // nullptr.
+  const void* program_counter() const { return program_counter_; }
+
+  // Converts to the most user-readable form possible. If function and filename
+  // are not available, this will return "pc:<hex address>".
+  std::string ToString() const;
+
+  // Write a representation of this object into a trace.
+//  void WriteIntoTrace(perfetto::TracedValue context) const;
+
+  static Location Current(const char* function_name = __builtin_FUNCTION(),
+                          const char* file_name = __builtin_FILE(),
+                          int line_number = __builtin_LINE());
+
+ private:
+  // Only initializes the file name and program counter, the source information
+  // will be null for the strings, and -1 for the line number.
+  // TODO(http://crbug.com/760702) remove file name from this constructor.
+  Location(const char* file_name, const void* program_counter);
+
+  // Constructor should be called with a long-lived char*, such as __FILE__.
+  // It assumes the provided value will persist as a global constant, and it
+  // will not make a copy of it.
+  Location(const char* function_name,
+           const char* file_name,
+           int line_number,
+           const void* program_counter);
+
+  const char* function_name_ = nullptr;
+  const char* file_name_ = nullptr;
+  int line_number_ = -1;
+
+  // `program_counter_` is not a raw_ptr<...> for performance reasons (based on
+  // analysis of sampling profiler data and tab_search:top100:2020).
+  const void* program_counter_ = nullptr;
+};
+
+const void* GetProgramCounter();
+
+#define FROM_HERE ::base::Location::Current()
+
+}  // namespace base
+
+#endif  // BASE_LOCATION_H_
+
diff --git a/bridge/bindings/v8/base/logging_buildflags.h b/bridge/bindings/v8/base/logging_buildflags.h
new file mode 100644
index 0000000000..8eade48af5
--- /dev/null
+++ b/bridge/bindings/v8/base/logging_buildflags.h
@@ -0,0 +1,14 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_LOGGING_BUILDFLAGS_H_
+#define BASE_LOGGING_BUILDFLAGS_H_
+
+#include "bindings/v8/for_build/buildflag.h"  // IWYU pragma: export
+
+#define BUILDFLAG_INTERNAL_ENABLE_LOG_ERROR_NOT_REACHED() (0)
+
+#endif  // BASE_LOGGING_BUILDFLAGS_H_
+
diff --git a/bridge/bindings/v8/base/memory/raw_ptr.h b/bridge/bindings/v8/base/memory/raw_ptr.h
new file mode 100644
index 0000000000..ae7cf25801
--- /dev/null
+++ b/bridge/bindings/v8/base/memory/raw_ptr.h
@@ -0,0 +1,15 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_MEMORY_RAW_PTR_H_
+#define BASE_MEMORY_RAW_PTR_H_
+
+#include "bindings/v8/base/compiler_specific.h"
+
+// Although `raw_ptr` is part of the standalone PA distribution, it is
+// easier to use the shorter path in `//base/memory`. We retain this
+// facade header for ease of typing.
+
+#endif  // BASE_MEMORY_RAW_PTR_H_
\ No newline at end of file
diff --git a/bridge/bindings/v8/base/memory/scoped_policy.h b/bridge/bindings/v8/base/memory/scoped_policy.h
new file mode 100644
index 0000000000..934fb1a14b
--- /dev/null
+++ b/bridge/bindings/v8/base/memory/scoped_policy.h
@@ -0,0 +1,27 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef BASE_MEMORY_SCOPED_POLICY_H_
+#define BASE_MEMORY_SCOPED_POLICY_H_
+
+namespace base {
+namespace scoped_policy {
+
+// Defines the ownership policy for a scoped object.
+enum OwnershipPolicy {
+  // The scoped object takes ownership of an object by taking over an existing
+  // ownership claim.
+  ASSUME,
+
+  // The scoped object will retain the object and any initial ownership is
+  // not changed.
+  RETAIN
+};
+
+}  // namespace scoped_policy
+}  // namespace base
+
+#endif  // BASE_MEMORY_SCOPED_POLICY_H_
+
diff --git a/bridge/bindings/v8/base/memory/scoped_refptr.h b/bridge/bindings/v8/base/memory/scoped_refptr.h
new file mode 100644
index 0000000000..6b690c6a0d
--- /dev/null
+++ b/bridge/bindings/v8/base/memory/scoped_refptr.h
@@ -0,0 +1,399 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef WEBF_SCOPED_REFPTR_H
+#define WEBF_SCOPED_REFPTR_H
+
+#include <stddef.h>
+
+#include <compare>
+#include <concepts>
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+//#include "base/check.h"
+#include "../compiler_specific.h"
+//#include "bindings/v8/base/memory/raw_ptr_exclusion.h"
+//#include "base/memory/raw_ptr_exclusion.h"
+
+template <class T>
+class scoped_refptr;
+
+namespace base {
+
+template <class, typename>
+class RefCounted;
+template <class, typename>
+class RefCountedThreadSafe;
+template <class T>
+class RefCountedDeleteOnSequence;
+class SequencedTaskRunner;
+
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
+namespace subtle {
+
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
+template <typename TagType>
+struct RefCountPreferenceTagTraits;
+
+template <>
+struct RefCountPreferenceTagTraits<StartRefCountFromZeroTag> {
+  static constexpr StartRefCountFromZeroTag kTag = kStartRefCountFromZeroTag;
+};
+
+template <>
+struct RefCountPreferenceTagTraits<StartRefCountFromOneTag> {
+  static constexpr StartRefCountFromOneTag kTag = kStartRefCountFromOneTag;
+};
+
+template <typename T, typename Tag = typename T::RefCountPreferenceTag>
+constexpr Tag GetRefCountPreference() {
+  return RefCountPreferenceTagTraits<Tag>::kTag;
+}
+
+// scoped_refptr<T> is typically used with one of several RefCounted<T> base
+// classes or with custom AddRef and Release methods. These overloads dispatch
+// on which was used.
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(const T*,
+                                              const RefCounted<U, V>*) {
+  return !std::same_as<std::decay_t<decltype(GetRefCountPreference<T>())>,
+                       std::decay_t<decltype(GetRefCountPreference<U>())>>;
+}
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(
+    const T*,
+    const RefCountedThreadSafe<U, V>*) {
+  return !std::same_as<std::decay_t<decltype(GetRefCountPreference<T>())>,
+                       std::decay_t<decltype(GetRefCountPreference<U>())>>;
+}
+
+template <typename T, typename U>
+constexpr bool IsRefCountPreferenceOverridden(
+    const T*,
+    const RefCountedDeleteOnSequence<U>*) {
+  return !std::same_as<std::decay_t<decltype(GetRefCountPreference<T>())>,
+                       std::decay_t<decltype(GetRefCountPreference<U>())>>;
+}
+
+constexpr bool IsRefCountPreferenceOverridden(...) {
+  return false;
+}
+
+template <typename T, typename U, typename V>
+constexpr void AssertRefCountBaseMatches(const T*, const RefCounted<U, V>*) {
+  static_assert(std::derived_from<T, U>,
+                "T implements RefCounted<U>, but U is not a base of T.");
+}
+
+template <typename T, typename U, typename V>
+constexpr void AssertRefCountBaseMatches(const T*,
+                                         const RefCountedThreadSafe<U, V>*) {
+  static_assert(
+      std::derived_from<T, U>,
+      "T implements RefCountedThreadSafe<U>, but U is not a base of T.");
+}
+
+template <typename T, typename U>
+constexpr void AssertRefCountBaseMatches(const T*,
+                                         const RefCountedDeleteOnSequence<U>*) {
+  static_assert(
+      std::derived_from<T, U>,
+      "T implements RefCountedDeleteOnSequence<U>, but U is not a base of T.");
+}
+
+constexpr void AssertRefCountBaseMatches(...) {}
+
+}  // namespace subtle
+
+// Creates a scoped_refptr from a raw pointer without incrementing the
+// reference count. Use this only for a newly created object whose reference
+// count starts from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+  using Tag = std::decay_t<decltype(subtle::GetRefCountPreference<T>())>;
+  static_assert(std::same_as<subtle::StartRefCountFromOneTag, Tag>,
+                "Use AdoptRef only if the reference count starts from one.");
+
+  DCHECK(obj);
+  DCHECK(obj->HasOneRef());
+  obj->Adopted();
+  return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+  return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+  return AdoptRef(obj);
+}
+
+}  // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr<T>.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeRefCounted(Args&&... args) {
+  T* obj = new T(std::forward<Args>(args)...);
+  return subtle::AdoptRefIfNeeded(obj, subtle::GetRefCountPreference<T>());
+}
+
+// Takes an instance of T, which is a ref counted type, and wraps the object
+// into a scoped_refptr<T>.
+template <typename T>
+scoped_refptr<T> WrapRefCounted(T* t) {
+  return scoped_refptr<T>(t);
+}
+
+}  // namespace base
+
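+// Illustrative sketch (not part of the original header): for a ref-counted
+// type whose count starts at one, MakeRefCounted() dispatches to AdoptRef()
+// through the tag traits above, so both lines below end up holding a single
+// reference (|Foo| is a hypothetical RefCounted subclass):
+//
+//   scoped_refptr<Foo> a = base::MakeRefCounted<Foo>();
+//   scoped_refptr<Foo> b = base::AdoptRef(new Foo());
+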
+//
+// A smart pointer class for reference counted objects. Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference. Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//    private:
+//     friend class RefCounted<MyFoo>;  // Allow destruction by RefCounted<>.
+//     ~MyFoo();                        // Destructor must be private/protected.
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     ...
+//     foo.reset();  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references nullptr.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+// Also see Chromium's ownership and calling conventions:
+// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
+// Specifically:
+//   If the function (at least sometimes) takes a ref on a refcounted object,
+//   declare the param as scoped_refptr<T>. The caller can decide whether it
+//   wishes to transfer ownership (by calling std::move(t) when passing t) or
+//   retain its ref (by simply passing t directly).
+//   In other words, use scoped_refptr like you would a std::unique_ptr except
+//   in the odd case where it's required to hold on to a ref while handing one
+//   to another component (if a component merely needs to use t on the stack
+//   without keeping a ref: pass t as a raw T*).
+template <class T>
+class TRIVIAL_ABI scoped_refptr {
+ public:
+  typedef T element_type;
+
+  constexpr scoped_refptr() = default;
+
+  // Allow implicit construction from nullptr.
+  constexpr scoped_refptr(std::nullptr_t) {}
+
+  // Constructs from a raw pointer. Note that this constructor allows implicit
+  // conversion from T* to scoped_refptr<T> which is strongly discouraged. If
+  // you are creating a new ref-counted object please use
+  // base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
+  // should move or copy construct from an existing scoped_refptr<T> to the
+  // ref-counted object.
+  scoped_refptr(T* p) : ptr_(p) {
+    if (ptr_)
+      AddRef(ptr_);
+  }
+
+  // Copy constructor. This is required in addition to the copy conversion
+  // constructor below.
+  scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
+
+  // Copy conversion constructor.
+  template <typename U>
+    requires(std::convertible_to<U*, T*>)
+  scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
+
+  // Move constructor. This is required in addition to the move conversion
+  // constructor below.
+  scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
+
+  // Move conversion constructor.
+  template <typename U>
+    requires(std::convertible_to<U*, T*>)
+  scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
+    r.ptr_ = nullptr;
+  }
+
+  ~scoped_refptr() {
+    static_assert(!base::subtle::IsRefCountPreferenceOverridden(
+                      static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
+                  "It's unsafe to override the ref count preference."
+                  " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
+                  " from subclasses.");
+    if (ptr_)
+      Release(ptr_);
+  }
+
+  T* get() const { return ptr_; }
+
+  T& operator*() const {
+    DCHECK(ptr_);
+    return *ptr_;
+  }
+
+  T* operator->() const {
+    DCHECK(ptr_);
+    return ptr_;
+  }
+
+  scoped_refptr& operator=(std::nullptr_t) {
+    reset();
+    return *this;
+  }
+
+  scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
+
+  // Unified assignment operator.
+  scoped_refptr& operator=(scoped_refptr r) noexcept {
+    swap(r);
+    return *this;
+  }
+
+  // Sets managed object to null and releases reference to the previous managed
+  // object, if it existed.
+  void reset() { scoped_refptr().swap(*this); }
+
+  // Returns the owned pointer (if any), releasing ownership to the caller. The
+  // caller is responsible for managing the lifetime of the reference.
+  [[nodiscard]] T* release();
+
+  void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
+
+  explicit operator bool() const { return ptr_ != nullptr; }
+
+  template <typename U>
+  friend bool operator==(const scoped_refptr<T>& lhs,
+                         const scoped_refptr<U>& rhs) {
+    return lhs.ptr_ == rhs.ptr_;
+  }
+
+  // This operator is an optimization to avoid implicitly constructing a
+  // scoped_refptr when comparing scoped_refptr against raw pointer. If the
+  // implicit conversion is ever removed this operator can also be removed.
+  template <typename U>
+  friend bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+    return lhs.ptr_ == rhs;
+  }
+
+  friend bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+    return !static_cast<bool>(lhs);
+  }
+
+  template <typename U>
+  friend auto operator<=>(const scoped_refptr<T>& lhs,
+                          const scoped_refptr<U>& rhs) {
+    return lhs.ptr_ <=> rhs.ptr_;
+  }
+
+  friend auto operator<=>(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+    return lhs.ptr_ <=> static_cast<T*>(nullptr);
+  }
+
+ protected:
+  // RAW_PTR_EXCLUSION: scoped_refptr<> has its own UaF prevention mechanism.
+  // Given how widespread it is, it'll likely be a perf regression for no
+  // additional security benefit.
+  T* ptr_ = nullptr;
+
+ private:
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+  friend class ::base::SequencedTaskRunner;
+
+  scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
+  // Friend required for move constructors that set r.ptr_ to null.
+  template <typename U>
+  friend class scoped_refptr;
+
+  // Non-inline helpers to allow:
+  //   class Opaque;
+  //   extern template class scoped_refptr<Opaque>;
+  // Otherwise the compiler will complain that Opaque is an incomplete type.
+  static void AddRef(T* ptr);
+  static void Release(T* ptr);
+};
+
+template <typename T>
+T* scoped_refptr<T>::release() {
+  T* ptr = ptr_;
+  ptr_ = nullptr;
+  return ptr;
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+  base::subtle::AssertRefCountBaseMatches(ptr, ptr);
+  ptr->AddRef();
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+  base::subtle::AssertRefCountBaseMatches(ptr, ptr);
+  ptr->Release();
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+  return out << p.get();
+}
+
+template <typename T>
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
+  lhs.swap(rhs);
+}
+
+#endif  // WEBF_SCOPED_REFPTR_H
diff --git a/bridge/bindings/v8/base/memory/stack_allocated.h b/bridge/bindings/v8/base/memory/stack_allocated.h
new file mode 100644
index 0000000000..e57df0f8a7
--- /dev/null
+++ b/bridge/bindings/v8/base/memory/stack_allocated.h
@@ -0,0 +1,60 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef WEBF_STACK_ALLOCATED_H
+#define WEBF_STACK_ALLOCATED_H
+
+#include <stddef.h>
+
+#if defined(__clang__)
+#define STACK_ALLOCATED_IGNORE(reason) \
+  __attribute__((annotate("stack_allocated_ignore")))
+#else  // !defined(__clang__)
+#define STACK_ALLOCATED_IGNORE(reason)
+#endif  // !defined(__clang__)
+
+// If a class or one of its ancestor classes is annotated with STACK_ALLOCATED()
+// in its class definition, then instances of the class may not be allocated on
+// the heap or as a member variable of a non-stack-allocated class.
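+//
+// Illustrative sketch (not part of the original header), assuming a class
+// that must only ever live on the stack:
+//
+//   class Snapshot {
+//     STACK_ALLOCATED();  // new Snapshot(...) no longer compiles.
+//    public:
+//     int depth = 0;
+//   };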
+#define STACK_ALLOCATED() \ + public: \ + using IsStackAllocatedTypeMarker [[maybe_unused]] = int; \ + \ + private: \ + void* operator new(size_t) = delete; \ + void* operator new(size_t, ::base::NotNullTag, void*) = delete; \ + void* operator new(size_t, void*) = delete + +namespace base { + +// NotNullTag was originally added to WebKit here: +// https://trac.webkit.org/changeset/103243/webkit +// ...with the stated goal of improving the performance of the placement new +// operator and potentially enabling the -fomit-frame-pointer compiler flag. +// +// TODO(szager): The placement new operator which uses this tag is currently +// defined in third_party/blink/renderer/platform/wtf/allocator/allocator.h, +// in the global namespace. It should probably move to /base. +// +// It's unknown at the time of writing whether it still provides any benefit +// (or if it ever did). It is used by placing the kNotNull tag before the +// address of the object when calling placement new. +// +// If the kNotNull tag is specified to placement new for a null pointer, +// Undefined Behaviour can result. +// +// Example: +// +// union { int i; } u; +// +// // Typically placement new looks like this. +// new (&u.i) int(3); +// // But we can promise `&u.i` is not null like this. +// new (base::NotNullTag::kNotNull, &u.i) int(3); +enum class NotNullTag { kNotNull }; + +} // namespace base + +#endif // WEBF_STACK_ALLOCATED_H diff --git a/bridge/bindings/v8/base/not_fatal_until.h b/bridge/bindings/v8/base/not_fatal_until.h new file mode 100644 index 0000000000..84a2ce0351 --- /dev/null +++ b/bridge/bindings/v8/base/not_fatal_until.h @@ -0,0 +1,105 @@ +/* +* Copyright (C) 2019-2022 The Kraken authors. All rights reserved. +* Copyright (C) 2022-present The WebF authors. All rights reserved. +*/ + +#ifndef BASE_NOT_FATAL_UNTIL_H_ +#define BASE_NOT_FATAL_UNTIL_H_ + +namespace base { + +// Add new entries a few milestones into the future whenever necessary. +// M here refers to milestones, see chrome/VERSION's MAJOR field that updates +// when chromium branches. +// +// To clean up old entries remove the already-fatal argument from CHECKs as well +// as from this list. This generates better-optimized CHECKs in official builds. 
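+// Illustrative sketch (not part of the original header): a caller passes a
+// milestone to keep a failure non-fatal until that release ships, e.g.
+//
+//   NOTREACHED(base::NotFatalUntil::M130);  // Starts crashing in M130.
+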
+enum class NotFatalUntil { + NoSpecifiedMilestoneInternal = -1, + M120 = 120, + M121 = 121, + M122 = 122, + M123 = 123, + M124 = 124, + M125 = 125, + M126 = 126, + M127 = 127, + M128 = 128, + M129 = 129, + M130 = 130, + M131 = 131, + M132 = 132, + M133 = 133, + M134 = 134, + M135 = 135, + M136 = 136, + M137 = 137, + M138 = 138, + M139 = 139, + M140 = 140, + M141 = 141, + M142 = 142, + M143 = 143, + M144 = 144, + M145 = 145, + M146 = 146, + M147 = 147, + M148 = 148, + M149 = 149, + M150 = 150, + M151 = 151, + M152 = 152, + M153 = 153, + M154 = 154, + M155 = 155, + M156 = 156, + M157 = 157, + M158 = 158, + M159 = 159, + M160 = 160, + M161 = 161, + M162 = 162, + M163 = 163, + M164 = 164, + M165 = 165, + M166 = 166, + M167 = 167, + M168 = 168, + M169 = 169, + M170 = 170, + M171 = 171, + M172 = 172, + M173 = 173, + M174 = 174, + M175 = 175, + M176 = 176, + M177 = 177, + M178 = 178, + M179 = 179, + M180 = 180, + M181 = 181, + M182 = 182, + M183 = 183, + M184 = 184, + M185 = 185, + M186 = 186, + M187 = 187, + M188 = 188, + M189 = 189, + M190 = 190, + M191 = 191, + M192 = 192, + M193 = 193, + M194 = 194, + M195 = 195, + M196 = 196, + M197 = 197, + M198 = 198, + M199 = 199, + M200 = 200, +}; + +} // namespace base + +#endif // BASE_NOT_FATAL_UNTIL_H_ + diff --git a/bridge/bindings/v8/base/notreached.h b/bridge/bindings/v8/base/notreached.h new file mode 100644 index 0000000000..6429b5377f --- /dev/null +++ b/bridge/bindings/v8/base/notreached.h @@ -0,0 +1,82 @@ +/* +* Copyright (C) 2019-2022 The Kraken authors. All rights reserved. +* Copyright (C) 2022-present The WebF authors. All rights reserved. +*/ + +#ifndef BASE_NOTREACHED_H_ +#define BASE_NOTREACHED_H_ + +#include "bindings/v8/base/check.h" +#include "bindings/v8/base/dcheck_is_on.h" +#include "bindings/v8/base/logging_buildflags.h" +#include "bindings/v8/base/is_empty.h" + +namespace logging { + +// Migration in progress: For new code call either NOTREACHED_NORETURN() or +// NOTREACHED(base::NotFatalUntil::M*). Do not add new callers to NOTREACHED() +// without a parameter until this comment is updated. Existing NOTREACHED() +// instances will be renamed to NOTREACHED_IN_MIGRATION() ASAP, then +// NOTREACHED() without a parameter will refer to the [[noreturn]] +// always-fatal version which is currently spelled NOTREACHED_NORETURN(). +// +// NOTREACHED() annotates should-be unreachable code. When a base::NotFatalUntil +// milestone is provided the instance is non-fatal (dumps without crashing) +// until that milestone is hit. That is: `NOTREACHED(base::NotFatalUntil::M120)` +// starts crashing in M120. See base/check.h. +// +// Under the kNotReachedIsFatal experiment all NOTREACHED() without a milestone +// argument are fatal. As of 2024-03-19 this experiment is 50/50 enabled on M124 +// Canary and Dev with intent to roll out to stable in M124 absent any blocking +// issues that come up. +// +// TODO(crbug.com/40580068): After kNotReachedIsFatal is universally rolled out +// then move callers without a non-fatal milestone argument to +// NOTREACHED_NORETURN(). Then rename the [[noreturn]] version back to +// NOTREACHED(). +#if CHECK_WILL_STREAM() || BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED) +#define NOTREACHED_IN_MIGRATION() \ + LOGGING_CHECK_FUNCTION_IMPL(::logging::NotReachedError::NotReached(), false) +#else +#define NOTREACHED_IN_MIGRATION() \ + (true) ? 
::logging::NotReachedError::TriggerNotReached() \
+      : EAT_CHECK_STREAM_PARAMS()
+#endif
+
+
+// TODO(crbug.com/40580068): Migrate existing NOTREACHED() instances to
+// NOTREACHED_IN_MIGRATION() then replace the NOTREACHED_IN_MIGRATION() branch
+// here with the NOTREACHED_NORETURN() implementation.
+#define NOTREACHED(...)                                                   \
+  BASE_IF(BASE_IS_EMPTY(__VA_ARGS__), NOTREACHED_IN_MIGRATION(),          \
+          LOGGING_CHECK_FUNCTION_IMPL(                                    \
+              ::logging::NotReachedError::NotReached(__VA_ARGS__), false))
+
+// NOTREACHED_NORETURN() annotates paths that are supposed to be unreachable.
+// They crash if they are ever hit.
+// TODO(crbug.com/40580068): Rename back to NOTREACHED() once there are no
+// callers of the old non-CHECK-fatal macro.
+#if CHECK_WILL_STREAM()
+#define NOTREACHED_NORETURN() ::logging::NotReachedNoreturnError()
+#else
+// This function is used to be able to detect NOTREACHED() failures in stack
+// traces where this symbol is preserved (even if inlined). Its implementation
+// matches logging::CheckFailure() but intentionally uses a different signature.
+[[noreturn]] IMMEDIATE_CRASH_ALWAYS_INLINE void NotReachedFailure() {
+  base::ImmediateCrash();
+}
+
+#define NOTREACHED_NORETURN() \
+  (true) ? ::logging::NotReachedFailure() : EAT_CHECK_STREAM_PARAMS()
+#endif
+
+// The DUMP_WILL_BE_NOTREACHED_NORETURN() macro provides a convenient way to
+// non-fatally dump in official builds if ever hit. See DUMP_WILL_BE_CHECK for
+// suggested usage.
+#define DUMP_WILL_BE_NOTREACHED_NORETURN() \
+  ::logging::CheckError::DumpWillBeNotReachedNoreturn()
+
+}  // namespace logging
+
+#endif  // BASE_NOTREACHED_H_
+
diff --git a/bridge/bindings/v8/base/numerics/checked_math.h b/bridge/bindings/v8/base/numerics/checked_math.h
new file mode 100644
index 0000000000..92a1c50dbd
--- /dev/null
+++ b/bridge/bindings/v8/base/numerics/checked_math.h
@@ -0,0 +1,378 @@
+/*
+* Copyright (C) 2019-2022 The Kraken authors. All rights reserved.
+* Copyright (C) 2022-present The WebF authors. All rights reserved.
+*/
+
+#ifndef WEBF_CHECKED_MATH_H
+#define WEBF_CHECKED_MATH_H
+
+#include <stddef.h>
+#include <limits>
+#include <type_traits>
+
+#include "bindings/v8/base/numerics/checked_math_impl.h"  // IWYU pragma: export
+#include "bindings/v8/base/numerics/safe_conversions.h"
+#include "bindings/v8/base/numerics/safe_math_shared_impl.h"  // IWYU pragma: export
+
+namespace base {
+namespace internal {
+
+template <typename T>
+class CheckedNumeric {
+  static_assert(std::is_arithmetic_v<T>,
+                "CheckedNumeric<T>: T must be a numeric type.");
+
+ public:
+  template <typename Src>
+  friend class CheckedNumeric;
+
+  using type = T;
+
+  constexpr CheckedNumeric() = default;
+
+  // Copy constructor.
+  template <typename Src>
+  constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
+      : state_(rhs.state_.value(), rhs.IsValid()) {}
+
+  // Strictly speaking, this is not necessary, but declaring this allows class
+  // template argument deduction to be used so that it is possible to simply
+  // write `CheckedNumeric(777)` instead of `CheckedNumeric<int>(777)`.
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  constexpr CheckedNumeric(T value) : state_(value) {}
+
+  // This is not an explicit constructor because we implicitly upgrade regular
+  // numerics to CheckedNumerics to make them easier to use.
+  template <typename Src>
+    requires(std::is_arithmetic_v<Src>)
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  constexpr CheckedNumeric(Src value) : state_(value) {}
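+
+  // Illustrative sketch (not part of the original header): regular numerics
+  // upgrade implicitly, and overflow invalidates the value instead of being
+  // undefined behavior:
+  //
+  //   base::internal::CheckedNumeric<int> n = INT_MAX;
+  //   n += 1;                // |n| becomes invalid rather than wrapping.
+  //   CHECK(!n.IsValid());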
+  // This is not an explicit constructor because we want a seamless conversion
+  // from StrictNumeric types.
+  template <typename Src>
+  // NOLINTNEXTLINE(google-explicit-constructor)
+  constexpr CheckedNumeric(StrictNumeric<Src> value)
+      : state_(static_cast<Src>(value)) {}
+
+  // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter.
+  template <typename Dst = T>
+  constexpr bool IsValid() const {
+    return state_.is_valid() &&
+           IsValueInRangeForNumericType<Dst>(state_.value());
+  }
+
+  // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+  // and is within the range supported by the destination type. Returns true if
+  // successful and false otherwise.
+  template <typename Dst>
+#if defined(__clang__) || defined(__GNUC__)
+  __attribute__((warn_unused_result))
+#elif defined(_MSC_VER)
+  _Check_return_
+#endif
+  constexpr bool
+  AssignIfValid(Dst* result) const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? ((*result = static_cast<Dst>(state_.value())), true)
+               : false;
+  }
+
+  // ValueOrDie() - The primary accessor for the underlying value. If the
+  // current state is not valid it will CHECK and crash.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter, which will trigger a CHECK if the value is not in bounds for
+  // the destination.
+  // The CHECK behavior can be overridden by supplying a handler as a
+  // template parameter, for test code, etc. However, the handler cannot access
+  // the underlying value, and it is not available through other means.
+  template <typename Dst = T, class CheckHandler = CheckOnFailure>
+  constexpr StrictNumeric<Dst> ValueOrDie() const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : CheckHandler::template HandleFailure<Dst>();
+  }
+
+  // ValueOrDefault(T default_value) - A convenience method that returns the
+  // current value if the state is valid, and the supplied default_value for
+  // any other state.
+  // A range checked destination type can be supplied using the Dst template
+  // parameter. WARNING: This function may fail to compile or CHECK at runtime
+  // if the supplied default_value is not within range of the destination type.
+  template <typename Dst = T, typename Src>
+  constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
+    return BASE_NUMERICS_LIKELY(IsValid<Dst>())
+               ? static_cast<Dst>(state_.value())
+               : checked_cast<Dst>(default_value);
+  }
+
+  // Returns a checked numeric of the specified type, cast from the current
+  // CheckedNumeric. If the current state is invalid or the destination cannot
+  // represent the result then the returned CheckedNumeric will be invalid.
+  template <typename Dst>
+  constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+    return *this;
+  }
+
+  // This friend method is available solely for providing more detailed logging
+  // in the tests. Do not implement it in production code, because the
+  // underlying values may change at any time.
+  template <typename U>
+  friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
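+
+  // Illustrative sketch (not part of the original header): extracting the
+  // result via AssignIfValid forces the caller to handle invalid states
+  // explicitly (|a| and |b| are hypothetical inputs):
+  //
+  //   base::internal::CheckedNumeric<size_t> total = a;
+  //   total += b;
+  //   size_t out;
+  //   if (!total.AssignIfValid(&out))
+  //     return false;  // Overflowed; |out| was not written.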
+
+  // Prototypes for the supported arithmetic operator overloads.
+  template <typename Src>
+  constexpr CheckedNumeric& operator+=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator-=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator*=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator/=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator%=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator<<=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator>>=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator&=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator|=(const Src rhs);
+  template <typename Src>
+  constexpr CheckedNumeric& operator^=(const Src rhs);
+
+  constexpr CheckedNumeric operator-() const {
+    // Use an optimized code path for a known run-time variable.
+    if (!IsConstantEvaluated() && std::is_signed_v<T> &&
+        std::is_floating_point_v<T>) {
+      return FastRuntimeNegate();
+    }
+    // The negation of two's complement int min is int min.
+    const bool is_valid =
+        IsValid() &&
+        (!std::is_signed_v<T> || std::is_floating_point_v<T> ||
+         NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
+    return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
+  }
+
+  constexpr CheckedNumeric operator~() const {
+    return CheckedNumeric<decltype(InvertWrapper(T()))>(
+        InvertWrapper(state_.value()), IsValid());
+  }
+
+  constexpr CheckedNumeric Abs() const {
+    return !IsValueNegative(state_.value()) ? *this : -*this;
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
+      const U rhs) const {
+    return CheckMax(*this, rhs);
+  }
+
+  template <typename U>
+  constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
+      const U rhs) const {
+    return CheckMin(*this, rhs);
+  }
+
+  // This function is available only for integral types. It returns an unsigned
+  // integer of the same width as the source type, containing the absolute value
+  // of the source, and properly handling signed min.
+  constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
+  UnsignedAbs() const {
+    return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+        SafeUnsignedAbs(state_.value()), state_.is_valid());
+  }
+
+  constexpr CheckedNumeric& operator++() {
+    *this += 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator++(int) {
+    CheckedNumeric value = *this;
+    *this += 1;
+    return value;
+  }
+
+  constexpr CheckedNumeric& operator--() {
+    *this -= 1;
+    return *this;
+  }
+
+  constexpr CheckedNumeric operator--(int) {
+    // TODO(pkasting): Consider std::exchange() once it's constexpr in C++20.
+    const CheckedNumeric value = *this;
+    *this -= 1;
+    return value;
+  }
+
+  // These perform the actual math operations on the CheckedNumerics.
+  // Binary arithmetic operations.
+  template