From 973b426d163ba32a356c8ebf415ad3a94790c152 Mon Sep 17 00:00:00 2001 From: panlei-coder <62509266+panlei-coder@users.noreply.github.com> Date: Wed, 6 Mar 2024 16:57:27 +0800 Subject: [PATCH 01/33] feat:import braft (#130) * feat: import braft --------- Co-authored-by: century <919745273@qq.com> Co-authored-by: alexstocks Co-authored-by: dingxiaoshuai123 <2486016589@qq.com> --- CMakeLists.txt | 55 +++++++-- build.sh | 2 +- cmake/boost.cmake | 4 +- cmake/braft.cmake | 47 ++++++++ cmake/brpc.cmake | 49 ++++++++ cmake/double-conversion.cmake | 5 +- cmake/findTools.cmake | 43 ++++--- cmake/fmt.cmake | 4 +- cmake/folly.cmake | 42 +++---- cmake/gflags.cmake | 17 ++- cmake/glog.cmake | 13 +- cmake/gtest.cmake | 3 +- cmake/leveldb.cmake | 10 ++ cmake/libevent.cmake | 4 + cmake/llhttp.cmake | 6 + cmake/modules/glog/FindUnwind.cmake | 10 +- cmake/modules/glog/Findgflags.cmake | 9 +- cmake/modules/spdlog/fmtConfig.cmake | 8 +- cmake/openssl.cmake | 26 ++++ cmake/protobuf.cmake | 167 ++++++++++++++++++++++++++ cmake/rocksdb.cmake | 4 +- cmake/spdlog.cmake | 4 +- cmake/unwind.cmake | 30 ++--- cmake/utils.cmake | 48 ++++---- cmake/zlib.cmake | 41 +++++++ src/CMakeLists.txt | 25 +++- src/net/CMakeLists.txt | 4 + src/pstd/CMakeLists.txt | 1 + src/storage/CMakeLists.txt | 18 ++- src/storage/include/storage/storage.h | 2 + src/storage/src/base_filter.h | 2 + src/storage/src/debug.h | 3 - src/store.h | 2 + tests/hash_test.go | 1 - 34 files changed, 576 insertions(+), 133 deletions(-) create mode 100644 cmake/braft.cmake create mode 100644 cmake/brpc.cmake create mode 100644 cmake/openssl.cmake create mode 100644 cmake/protobuf.cmake create mode 100644 cmake/zlib.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 23e84d870..6eb2725c2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,3 +1,8 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. 
+# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. + CMAKE_MINIMUM_REQUIRED(VERSION 3.14) PROJECT(PikiwiDB) @@ -25,7 +30,7 @@ ELSEIF (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") ENDIF () -############# You should enable sanitizer if you are developing pika ############# +############# You should enable sanitizer if you are developing pikiwidb ############# # Uncomment the following two lines to enable AddressSanitizer to detect memory leaks and other memory-related bugs. # SET(CMAKE_BUILD_TYPE "Debug") # SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address -O0 -fno-omit-frame-pointer -fno-optimize-sibling-calls") @@ -91,25 +96,55 @@ SET(INSTALL_LIBDIR ${STAGED_INSTALL_PREFIX}/lib) SET(INSTALL_LIBDIR_64 ${STAGED_INSTALL_PREFIX}/lib64) SET(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${STAGED_INSTALL_PREFIX}) SET(BUILD_SUPPORT_DIR ${PROJECT_SOURCE_DIR}/build_support) +# make sure we use the same compiler for all dependencies +SET(CMAKE_POSITION_INDEPENDENT_CODE ON) MESSAGE(STATUS "${PROJECT_NAME} staged install: ${STAGED_INSTALL_PREFIX}") MESSAGE(STATUS "Current platform: ${OS_VERSION} ") CMAKE_HOST_SYSTEM_INFORMATION(RESULT CPU_CORE QUERY NUMBER_OF_LOGICAL_CORES) MESSAGE(STATUS "CPU core ${CPU_CORE}") +#openssl +FIND_PACKAGE(OpenSSL REQUIRED) + +MESSAGE(STATUS "ssl:" ${OPENSSL_SSL_LIBRARY}) +MESSAGE(STATUS "crypto:" ${OPENSSL_CRYPTO_LIBRARY}) + +ADD_LIBRARY(ssl SHARED IMPORTED GLOBAL) +SET_PROPERTY(TARGET ssl PROPERTY IMPORTED_LOCATION ${OPENSSL_SSL_LIBRARY}) + +ADD_LIBRARY(crypto SHARED IMPORTED GLOBAL) +SET_PROPERTY(TARGET crypto PROPERTY IMPORTED_LOCATION ${OPENSSL_CRYPTO_LIBRARY}) + +SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + +SET(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third-party) +SET(THIRD_PARTY_BUILD_TYPE Release) 
+SET(EXTERNAL_PROJECT_LOG_ARGS + LOG_DOWNLOAD 0 + LOG_UPDATE 1 + LOG_CONFIGURE 0 + LOG_BUILD 0 + LOG_TEST 1 + LOG_INSTALL 0) INCLUDE(FetchContent) -#include(cmake/CmakeLists.txt) -include(cmake/findTools.cmake) -include(cmake/libevent.cmake) -include(cmake/llhttp.cmake) -include(cmake/fmt.cmake) -include(cmake/spdlog.cmake) -include(cmake/gtest.cmake) -include(cmake/rocksdb.cmake) +INCLUDE(gflags) +INCLUDE(findTools) +INCLUDE(leveldb) +INCLUDE(libevent) +INCLUDE(llhttp) +INCLUDE(fmt) +INCLUDE(spdlog) +INCLUDE(gtest) +INCLUDE(rocksdb) +INCLUDE(zlib) +INCLUDE(protobuf) +INCLUDE(brpc) +INCLUDE(braft) -enable_testing() +ENABLE_TESTING() ADD_SUBDIRECTORY(src/pstd) ADD_SUBDIRECTORY(src/net) diff --git a/build.sh b/build.sh index a7d6c28d9..9b9990249 100755 --- a/build.sh +++ b/build.sh @@ -12,7 +12,7 @@ BUILD_TIME=${BUILD_TIME: 0: 10} COMMIT_ID=$(git rev-parse HEAD) SHORT_COMMIT_ID=${COMMIT_ID: 0: 8} -BUILD_TYPE=release +BUILD_TYPE=Release VERBOSE=0 CMAKE_FLAGS="" MAKE_FLAGS="" diff --git a/cmake/boost.cmake b/cmake/boost.cmake index cf7f239e8..565d4bee9 100644 --- a/cmake/boost.cmake +++ b/cmake/boost.cmake @@ -3,9 +3,9 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +INCLUDE(cmake/utils.cmake) FetchContent_DeclareGitHubWithMirror(pikiwidb-boost pikiwidb/boost boost-1.83.0 diff --git a/cmake/braft.cmake b/cmake/braft.cmake new file mode 100644 index 000000000..853266f52 --- /dev/null +++ b/cmake/braft.cmake @@ -0,0 +1,47 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. 
+ +INCLUDE(ExternalProject) + +SET(BRAFT_SOURCES_DIR ${THIRD_PARTY_PATH}/braft) +SET(BRAFT_INSTALL_DIR ${THIRD_PARTY_PATH}/install/braft) +SET(BRAFT_INCLUDE_DIR "${BRAFT_INSTALL_DIR}/include" CACHE PATH "braft include directory." FORCE) +SET(BRAFT_LIBRARIES "${BRAFT_INSTALL_DIR}/lib/libbraft.a" CACHE FILEPATH "braft library." FORCE) + +SET(prefix_path "${THIRD_PARTY_PATH}/install/brpc|${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build|${THIRD_PARTY_PATH}/install/protobuf|${THIRD_PARTY_PATH}/install/zlib|${CMAKE_CURRENT_BINARY_DIR}/_deps/leveldb-build|${CMAKE_CURRENT_BINARY_DIR}/_deps/leveldb-src") + +ExternalProject_Add( + extern_braft + ${EXTERNAL_PROJECT_LOG_ARGS} + DEPENDS brpc + GIT_REPOSITORY https://github.com/baidu/braft.git + GIT_TAG master + PREFIX ${BRAFT_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_INSTALL_PREFIX=${BRAFT_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR=${BRAFT_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_PREFIX_PATH=${prefix_path} + -DBRPC_WITH_GLOG=OFF + -DWITH_DEBUG_SYMBOLS=OFF + ${EXTERNAL_OPTIONAL_ARGS} + LIST_SEPARATOR | + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRAFT_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR:PATH=${BRAFT_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + BUILD_IN_SOURCE 1 + BUILD_COMMAND $(MAKE) -j ${CPU_CORE} braft-static + INSTALL_COMMAND mkdir -p ${BRAFT_INSTALL_DIR}/lib/ COMMAND cp ${BRAFT_SOURCES_DIR}/src/extern_braft/output/lib/libbraft.a ${BRAFT_LIBRARIES} COMMAND cp -r ${BRAFT_SOURCES_DIR}/src/extern_braft/output/include ${BRAFT_INCLUDE_DIR}/ +) +ADD_DEPENDENCIES(extern_braft brpc) +ADD_LIBRARY(braft STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET braft PROPERTY IMPORTED_LOCATION ${BRAFT_LIBRARIES}) +ADD_DEPENDENCIES(braft 
extern_braft) \ No newline at end of file diff --git a/cmake/brpc.cmake b/cmake/brpc.cmake new file mode 100644 index 000000000..13305f203 --- /dev/null +++ b/cmake/brpc.cmake @@ -0,0 +1,49 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. + +INCLUDE(ExternalProject) + +SET(BRPC_SOURCES_DIR ${THIRD_PARTY_PATH}/brpc) +SET(BRPC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/brpc) +SET(BRPC_INCLUDE_DIR "${BRPC_INSTALL_DIR}/include" CACHE PATH "brpc include directory." FORCE) +SET(BRPC_LIBRARIES "${BRPC_INSTALL_DIR}/lib/libbrpc.a" CACHE FILEPATH "brpc library." FORCE) + +# Reference https://stackoverflow.com/questions/45414507/pass-a-list-of-prefix-paths-to-externalproject-add-in-cmake-args +SET(prefix_path "${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build|${THIRD_PARTY_PATH}/install/protobuf|${THIRD_PARTY_PATH}/install/zlib|${CMAKE_CURRENT_BINARY_DIR}/_deps/leveldb-build|${CMAKE_CURRENT_BINARY_DIR}/_deps/leveldb-src") +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations") +# If minimal .a is need, you can set WITH_DEBUG_SYMBOLS=OFF +EXTERNALPROJECT_ADD( + extern_brpc + ${EXTERNAL_PROJECT_LOG_ARGS} + DEPENDS ssl crypto zlib protobuf leveldb gflags + URL "https://github.com/apache/brpc/archive/1.3.0.tar.gz" + URL_HASH SHA256=b9d638b76725552ed11178c650d7fc95e30f252db7972a93dc309a0698c7d2b8 + PREFIX ${BRPC_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_INSTALL_PREFIX=${BRPC_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR=${BRPC_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_PREFIX_PATH=${prefix_path} + 
-DWITH_GLOG=OFF + -DDOWNLOAD_GTEST=OFF + ${EXTERNAL_OPTIONAL_ARGS} + LIST_SEPARATOR | + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRPC_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR:PATH=${BRPC_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + BUILD_IN_SOURCE 1 + BUILD_COMMAND $(MAKE) -j ${CPU_CORE} brpc-static + INSTALL_COMMAND mkdir -p ${BRPC_INSTALL_DIR}/lib/ COMMAND cp ${BRPC_SOURCES_DIR}/src/extern_brpc/output/lib/libbrpc.a ${BRPC_LIBRARIES} COMMAND cp -r ${BRPC_SOURCES_DIR}/src/extern_brpc/output/include ${BRPC_INCLUDE_DIR}/ +) +ADD_DEPENDENCIES(extern_brpc ssl crypto zlib protobuf leveldb gflags) +ADD_LIBRARY(brpc STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET brpc PROPERTY IMPORTED_LOCATION ${BRPC_LIBRARIES}) +ADD_DEPENDENCIES(brpc extern_brpc) diff --git a/cmake/double-conversion.cmake b/cmake/double-conversion.cmake index 88b69768e..8e0662ae6 100644 --- a/cmake/double-conversion.cmake +++ b/cmake/double-conversion.cmake @@ -3,14 +3,15 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +INCLUDE(cmake/utils.cmake) FetchContent_DeclareGitHubWithMirror(double-conversion google/double-conversion v3.3.0 SHA256=4080014235f90854ffade6d1c423940b314bbca273a338235f049da296e47183 ) + FetchContent_MakeAvailableWithArgs(double-conversion BUILD_TESTING=OFF ) diff --git a/cmake/findTools.cmake b/cmake/findTools.cmake index 9b8bb651c..af55b7973 100644 --- a/cmake/findTools.cmake +++ b/cmake/findTools.cmake @@ -1,59 +1,64 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. 
+ FIND_PROGRAM(AUTOCONF autoconf PATHS /usr/bin /usr/local/bin) -IF(${AUTOCONF} MATCHES AUTOCONF-NOTFOUND) +IF (${AUTOCONF} MATCHES AUTOCONF-NOTFOUND) MESSAGE(FATAL_ERROR "not find autoconf on localhost") -ENDIF() +ENDIF () #set(CLANG_SEARCH_PATH "/usr/local/bin" "/usr/bin" "/usr/local/opt/llvm/bin" # "/usr/local/opt/llvm@12/bin") FIND_PROGRAM(CLANG_FORMAT_BIN NAMES clang-format HINTS ${CLANG_SEARCH_PATH}) -IF("${CLANG_FORMAT_BIN}" STREQUAL "CLANG_FORMAT_BIN-NOTFOUND") +IF ("${CLANG_FORMAT_BIN}" STREQUAL "CLANG_FORMAT_BIN-NOTFOUND") MESSAGE(WARNING "couldn't find clang-format.") -ELSE() +ELSE () MESSAGE(STATUS "found clang-format at ${CLANG_FORMAT_BIN}") -ENDIF() +ENDIF () FIND_PROGRAM(CLANG_TIDY_BIN NAMES clang-tidy clang-tidy-12 HINTS ${CLANG_SEARCH_PATH}) -IF("${CLANG_TIDY_BIN}" STREQUAL "CLANG_TIDY_BIN-NOTFOUND") +IF ("${CLANG_TIDY_BIN}" STREQUAL "CLANG_TIDY_BIN-NOTFOUND") MESSAGE(WARNING "couldn't find clang-tidy.") -ELSE() +ELSE () MESSAGE(STATUS "found clang-tidy at ${CLANG_TIDY_BIN}") -ENDIF() +ENDIF () FIND_PROGRAM(CPPLINT_BIN NAMES cpplint cpplint.py HINTS "${BUILD_SUPPORT_DIR}") -IF("${CPPLINT_BIN}" STREQUAL "CPPLINT_BIN-NOTFOUND") +IF ("${CPPLINT_BIN}" STREQUAL "CPPLINT_BIN-NOTFOUND") MESSAGE(WARNING "couldn't find cpplint.py") -ELSE() +ELSE () MESSAGE(STATUS "found cpplint at ${CPPLINT_BIN}") -ENDIF() +ENDIF () FIND_PROGRAM(CLANG_APPLY_REPLACEMENTS_BIN NAMES clang-apply-replacements clang-apply-replacements-12 HINTS ${CLANG_SEARCH_PATH}) -IF("${CLANG_APPLY_REPLACEMENTS_BIN}" STREQUAL "CLANG_APPLY_REPLACEMENTS_BIN-NOTFOUND") +IF ("${CLANG_APPLY_REPLACEMENTS_BIN}" STREQUAL "CLANG_APPLY_REPLACEMENTS_BIN-NOTFOUND") MESSAGE(WARNING "couldn't find clang-apply-replacements.") -ELSE() +ELSE () MESSAGE(STATUS "found clang-apply-replacements at ${CLANG_APPLY_REPLACEMENTS_BIN}") -ENDIF() +ENDIF () OPTION(WITH_COMMAND_DOCS "build with command docs support" OFF) -IF(WITH_COMMAND_DOCS) +IF (WITH_COMMAND_DOCS) ADD_DEFINITIONS(-DWITH_COMMAND_DOCS) -ENDIF() +ENDIF () 
-IF(${CMAKE_BUILD_TYPE} MATCHES "RELEASE") +IF (${CMAKE_BUILD_TYPE} MATCHES "RELEASE") MESSAGE(STATUS "make RELEASE version") ADD_DEFINITIONS(-DBUILD_RELEASE) SET(BuildType "Release") -ELSE() +ELSE () MESSAGE(STATUS "make DEBUG version") ADD_DEFINITIONS(-DBUILD_DEBUG) SET(BuildType "Debug") -ENDIF() +ENDIF () diff --git a/cmake/fmt.cmake b/cmake/fmt.cmake index 2e2ca4f73..52bcad694 100644 --- a/cmake/fmt.cmake +++ b/cmake/fmt.cmake @@ -3,9 +3,9 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +INCLUDE(cmake/utils.cmake) FetchContent_DeclareGitHubWithMirror(fmt fmtlib/fmt 10.1.1 diff --git a/cmake/folly.cmake b/cmake/folly.cmake index 8185434eb..7f8f8b7dd 100644 --- a/cmake/folly.cmake +++ b/cmake/folly.cmake @@ -3,33 +3,33 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -set(DEPS_FETCH_PROXY "" CACHE STRING +SET(DEPS_FETCH_PROXY "" CACHE STRING "a template URL to proxy the traffic for fetching dependencies, e.g. 
with DEPS_FETCH_PROXY = https://some-proxy/, https://example/some-dep.zip -> https://some-proxy/https://example/some-dep.zip") -cmake_host_system_information(RESULT CPU_CORE QUERY NUMBER_OF_LOGICAL_CORES) +CMAKE_HOST_SYSTEM_INFORMATION(RESULT CPU_CORE QUERY NUMBER_OF_LOGICAL_CORES) -if(CMAKE_GENERATOR STREQUAL "Ninja") - set(MAKE_COMMAND make -j${CPU_CORE}) -else() - set(MAKE_COMMAND $(MAKE) -j${CPU_CORE}) -endif() +IF (CMAKE_GENERATOR STREQUAL "Ninja") + SET(MAKE_COMMAND make -j${CPU_CORE}) +ELSE () + SET(MAKE_COMMAND $(MAKE) -j${CPU_CORE}) +ENDIF () -if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") - cmake_policy(SET CMP0135 NEW) -endif() +IF (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") + CMAKE_POLICY(SET CMP0135 NEW) +ENDIF () -include(cmake/utils.cmake) -include(cmake/boost.cmake) -include(cmake/unwind.cmake) -include(cmake/gflags.cmake) -include(cmake/glog.cmake) -include(cmake/double-conversion.cmake) -include(cmake/fmt.cmake) +INCLUDE(cmake/utils.cmake) +INCLUDE(cmake/boost.cmake) +INCLUDE(cmake/unwind.cmake) +INCLUDE(cmake/gflags.cmake) +INCLUDE(cmake/glog.cmake) +INCLUDE(cmake/double-conversion.cmake) +INCLUDE(cmake/fmt.cmake) -add_compile_definitions(FOLLY_NO_CONFIG) +ADD_COMPILE_DEFINITIONS(FOLLY_NO_CONFIG) -FetchContent_Declare(pikiwidb-folly +FETCHCONTENT_DECLARE(pikiwidb-folly URL https://github.com/pikiwidb/folly/archive/v2023.10.16.00.zip URL_HASH SHA256=EB29DC13474E3979A0680F624FF5820FA7A4E9CE0110607669AE87D69CFC104D PATCH_COMMAND patch -p1 -s -E -i ${PROJECT_SOURCE_DIR}/cmake/patches/folly_coroutine.patch @@ -37,5 +37,5 @@ FetchContent_Declare(pikiwidb-folly FetchContent_MakeAvailableWithArgs(pikiwidb-folly) -target_link_libraries(pikiwidb-folly pikiwidb-boost glog double-conversion fmt) -target_include_directories(pikiwidb-folly PUBLIC $) +TARGET_LINK_LIBRARIES(pikiwidb-folly pikiwidb-boost glog double-conversion fmt) +TARGET_INCLUDE_DIRECTORIES(pikiwidb-folly PUBLIC $) diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake index 
410b1b067..04f317de9 100644 --- a/cmake/gflags.cmake +++ b/cmake/gflags.cmake @@ -3,9 +3,12 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +INCLUDE(cmake/utils.cmake) + +SET(MY_BUILD_TYPE ${CMAKE_BUILD_TYPE}) +SET(CMAKE_BUILD_TYPE ${THIRD_PARTY_BUILD_TYPE}) FetchContent_DeclareGitHubWithMirror(gflags gflags/gflags v2.2.2 @@ -21,6 +24,12 @@ FetchContent_MakeAvailableWithArgs(gflags BUILD_TESTING=OFF ) -find_package(Threads REQUIRED) +FIND_PACKAGE(Threads REQUIRED) + +TARGET_LINK_LIBRARIES(gflags_static Threads::Threads) + +SET(GFLAGS_INCLUDE_PATH ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/include) +SET(GFLAGS_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) +SET(GFLAGS_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) -target_link_libraries(gflags_static Threads::Threads) +SET(CMAKE_BUILD_TYPE ${MY_BUILD_TYPE}) \ No newline at end of file diff --git a/cmake/glog.cmake b/cmake/glog.cmake index fbbde6fe9..4113ba48b 100644 --- a/cmake/glog.cmake +++ b/cmake/glog.cmake @@ -3,11 +3,11 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. 
-include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +INCLUDE(cmake/utils.cmake) -FetchContent_Declare(glog +FETCHCONTENT_DECLARE(glog URL https://github.com/google/glog/archive/v0.6.0.zip URL_HASH SHA256=122fb6b712808ef43fbf80f75c52a21c9760683dae470154f02bddfc61135022 PATCH_COMMAND patch -p1 -s -E -i ${PROJECT_SOURCE_DIR}/cmake/patches/glog_demangle.patch @@ -20,3 +20,10 @@ FetchContent_MakeAvailableWithArgs(glog BUILD_SHARED_LIBS=OFF WITH_UNWIND=ON ) + +SET(GLOG_INCLUDE_PATH ${CMAKE_CURRENT_BINARY_DIR}/_deps/glog-src/src CACHE BOOL "" FORCE) +IF (CMAKE_BUILD_TYPE STREQUAL "Release") + SET(GLOG_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/glog-build/libglog.a CACHE BOOL "" FORCE) +ELSEIF (CMAKE_BUILD_TYPE STREQUAL "Debug") + SET(GLOG_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/glog-build/libglogd.a CACHE BOOL "" FORCE) +ENDIF() diff --git a/cmake/gtest.cmake b/cmake/gtest.cmake index 6c6dfbcca..0c6140910 100644 --- a/cmake/gtest.cmake +++ b/cmake/gtest.cmake @@ -8,5 +8,6 @@ FETCHCONTENT_DECLARE( GIT_REPOSITORY https://github.com/google/googletest.git GIT_TAG v1.14.0 ) -set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + +SET(gtest_force_shared_crt ON CACHE BOOL "" FORCE) FETCHCONTENT_MAKEAVAILABLE(gtest) diff --git a/cmake/leveldb.cmake b/cmake/leveldb.cmake index 8ad9c008e..17a3cf034 100644 --- a/cmake/leveldb.cmake +++ b/cmake/leveldb.cmake @@ -1,3 +1,10 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. 
+ +INCLUDE_GUARD() + FETCHCONTENT_DECLARE( leveldb GIT_REPOSITORY https://github.com/google/leveldb.git @@ -7,3 +14,6 @@ SET(LEVELDB_BUILD_TESTS OFF CACHE BOOL "" FORCE) SET(LEVELDB_BUILD_BENCHMARKS OFF CACHE BOOL "" FORCE) SET(LEVELDB_INSTALL OFF CACHE BOOL "" FORCE) FETCHCONTENT_MAKEAVAILABLE(leveldb) + +SET(LEVELDB_INCLUDE_PATH ${CMAKE_CURRENT_BINARY_DIR}/_deps/leveldb-src/include) +SET(LEVELDB_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/leveldb-build/libleveldb.a) \ No newline at end of file diff --git a/cmake/libevent.cmake b/cmake/libevent.cmake index 0d7220e71..beaf76a62 100644 --- a/cmake/libevent.cmake +++ b/cmake/libevent.cmake @@ -1,3 +1,7 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. # libevent FETCHCONTENT_DECLARE( diff --git a/cmake/llhttp.cmake b/cmake/llhttp.cmake index b92290e38..a2a455625 100644 --- a/cmake/llhttp.cmake +++ b/cmake/llhttp.cmake @@ -1,3 +1,8 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. + # nodejs/llhttp FETCHCONTENT_DECLARE( llhttp @@ -16,4 +21,5 @@ FETCHCONTENT_DECLARE( -DBUILD_SHARED_LIBS=OFF BUILD_COMMAND make -j${CPU_CORE} ) + FETCHCONTENT_MAKEAVAILABLE(llhttp) \ No newline at end of file diff --git a/cmake/modules/glog/FindUnwind.cmake b/cmake/modules/glog/FindUnwind.cmake index 34a6aa33b..80ad9c562 100644 --- a/cmake/modules/glog/FindUnwind.cmake +++ b/cmake/modules/glog/FindUnwind.cmake @@ -3,9 +3,9 @@ # LICENSE file in the root directory of this source tree. 
An additional grant # of patent rights can be found in the PATENTS file in the same directory. -if(unwind_SOURCE_DIR) - message(STATUS "Found unwind in ${unwind_SOURCE_DIR}") +IF (unwind_SOURCE_DIR) + MESSAGE(STATUS "Found unwind in ${unwind_SOURCE_DIR}") - add_library(unwind::unwind ALIAS unwind) - install(TARGETS unwind EXPORT glog-targets) -endif() + ADD_LIBRARY(unwind::unwind ALIAS unwind) + INSTALL(TARGETS unwind EXPORT glog-targets) +ENDIF() diff --git a/cmake/modules/glog/Findgflags.cmake b/cmake/modules/glog/Findgflags.cmake index ffcec18c6..97701ee84 100644 --- a/cmake/modules/glog/Findgflags.cmake +++ b/cmake/modules/glog/Findgflags.cmake @@ -3,8 +3,9 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -if(gflags_SOURCE_DIR) - message(STATUS "Found gflags in ${gflags_SOURCE_DIR}") +IF (gflags_SOURCE_DIR) + MESSAGE(STATUS "Found gflags in ${gflags_SOURCE_DIR}") - install(TARGETS gflags_static EXPORT glog-targets) -endif() + # add_library(gflags_static::gflags_static ALIAS gflags_static) + INSTALL(TARGETS gflags_static EXPORT glog-targets) +ENDIF () diff --git a/cmake/modules/spdlog/fmtConfig.cmake b/cmake/modules/spdlog/fmtConfig.cmake index e7342c31e..22ef09f58 100644 --- a/cmake/modules/spdlog/fmtConfig.cmake +++ b/cmake/modules/spdlog/fmtConfig.cmake @@ -3,8 +3,8 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -if(fmt_SOURCE_DIR) - message(STATUS "Found fmt in ${fmt_SOURCE_DIR}") +IF (fmt_SOURCE_DIR) + MESSAGE(STATUS "Found fmt in ${fmt_SOURCE_DIR}") - add_library(fmt::fmt ALIAS fmt) -endif() + ADD_LIBRARY(fmt::fmt ALIAS fmt) +ENDIF() diff --git a/cmake/openssl.cmake b/cmake/openssl.cmake new file mode 100644 index 000000000..127c1aa70 --- /dev/null +++ b/cmake/openssl.cmake @@ -0,0 +1,26 @@ +# Copyright (c) 2023-present, Qihoo, Inc. 
All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. + +SET(OPENSSL_FETCH_INFO + URL https://www.openssl.org/source/openssl-1.1.1h.tar.gz + URL_HASH SHA256=5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9 + ) +SET(OPENSSL_USE_STATIC_LIBS ON) + +FETCHCONTENT_DECLARE( + openssl + GIT_REPOSITORY https://github.com/jc-lab/openssl-cmake.git + GIT_TAG 39af37e0964d71c516da5b1836849dd0a03df7a4 # Change to the latest commit ID +) + +FETCHCONTENT_GETPROPERTIES(openssl) +IF (NOT openssl_POPULATED) + FETCHCONTENT_POPULATE(openssl) + ADD_SUBDIRECTORY(${openssl_SOURCE_DIR} ${openssl_BINARY_DIR}) +ENDIF () + +SET(OPENSSL_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/_deps/openssl_src-src/include) +SET(OPENSSL_ROOT_DIR ${CMAKE_CURRENT_BINARY_DIR}/_deps/openssl_src-src) +SET(OPENSSL_CRYPTO_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/_deps/openssl_src-src) \ No newline at end of file diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake new file mode 100644 index 000000000..67ffd150e --- /dev/null +++ b/cmake/protobuf.cmake @@ -0,0 +1,167 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. 
+ +INCLUDE(ExternalProject) +# Always invoke `FIND_PACKAGE(Protobuf)` for importing function protobuf_generate_cpp +FIND_PACKAGE(Protobuf QUIET) +MACRO(UNSET_VAR VAR_NAME) + UNSET(${VAR_NAME} CACHE) + UNSET(${VAR_NAME}) +ENDMACRO() + +UNSET_VAR(PROTOBUF_INCLUDE_DIR) +UNSET_VAR(PROTOBUF_FOUND) +UNSET_VAR(PROTOBUF_PROTOC_EXECUTABLE) +UNSET_VAR(PROTOBUF_PROTOC_LIBRARY) +UNSET_VAR(PROTOBUF_LITE_LIBRARY) +UNSET_VAR(PROTOBUF_LIBRARY) +UNSET_VAR(PROTOBUF_INCLUDE_DIR) +UNSET_VAR(Protobuf_PROTOC_EXECUTABLE) + +# Print and set the protobuf library information, +# finish this cmake process and exit from this file. +MACRO(PROMPT_PROTOBUF_LIB) + SET(protobuf_DEPS ${ARGN}) + + MESSAGE(STATUS "Protobuf protoc executable: ${PROTOBUF_PROTOC_EXECUTABLE}") + MESSAGE(STATUS "Protobuf-lite library: ${PROTOBUF_LITE_LIBRARY}") + MESSAGE(STATUS "Protobuf library: ${PROTOBUF_LIBRARY}") + MESSAGE(STATUS "Protoc library: ${PROTOBUF_PROTOC_LIBRARY}") + MESSAGE(STATUS "Protobuf version: ${PROTOBUF_VERSION}") + INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR}) + + # Assuming that all the protobuf libraries are of the same type. 
+ IF (${PROTOBUF_LIBRARY} MATCHES ${CMAKE_STATIC_LIBRARY_SUFFIX}) + SET(protobuf_LIBTYPE STATIC) + ELSEIF (${PROTOBUF_LIBRARY} MATCHES "${CMAKE_SHARED_LIBRARY_SUFFIX}$") + SET(protobuf_LIBTYPE SHARED) + ELSE () + MESSAGE(FATAL_ERROR "Unknown library type: ${PROTOBUF_LIBRARY}") + ENDIF () + + ADD_LIBRARY(protobuf ${protobuf_LIBTYPE} IMPORTED GLOBAL) + SET_PROPERTY(TARGET protobuf PROPERTY IMPORTED_LOCATION ${PROTOBUF_LIBRARY}) + + ADD_LIBRARY(protobuf_lite ${protobuf_LIBTYPE} IMPORTED GLOBAL) + SET_PROPERTY(TARGET protobuf_lite PROPERTY IMPORTED_LOCATION ${PROTOBUF_LITE_LIBRARY}) + + ADD_LIBRARY(libprotoc ${protobuf_LIBTYPE} IMPORTED GLOBAL) + SET_PROPERTY(TARGET libprotoc PROPERTY IMPORTED_LOCATION ${PROTOC_LIBRARY}) + + ADD_EXECUTABLE(protoc IMPORTED GLOBAL) + SET_PROPERTY(TARGET protoc PROPERTY IMPORTED_LOCATION ${PROTOBUF_PROTOC_EXECUTABLE}) + SET(Protobuf_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE}) + + FOREACH (dep ${protobuf_DEPS}) + ADD_DEPENDENCIES(protobuf ${dep}) + ADD_DEPENDENCIES(protobuf_lite ${dep}) + ADD_DEPENDENCIES(libprotoc ${dep}) + ADD_DEPENDENCIES(protoc ${dep}) + ENDFOREACH () + + RETURN() +ENDMACRO() + +MACRO(SET_PROTOBUF_VERSION) + EXEC_PROGRAM(${PROTOBUF_PROTOC_EXECUTABLE} ARGS --version OUTPUT_VARIABLE PROTOBUF_VERSION) + STRING(REGEX MATCH "[0-9]+.[0-9]+" PROTOBUF_VERSION "${PROTOBUF_VERSION}") +ENDMACRO() + +SET(PROTOBUF_ROOT "" CACHE PATH "Folder contains protobuf") + +IF (NOT "${PROTOBUF_ROOT}" STREQUAL "") + MESSAGE("found system protobuf") + + FIND_PATH(PROTOBUF_INCLUDE_DIR google/protobuf/message.h PATHS ${PROTOBUF_ROOT}/include NO_DEFAULT_PATH) + FIND_LIBRARY(PROTOBUF_LIBRARY protobuf libprotobuf.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH) + FIND_LIBRARY(PROTOBUF_LITE_LIBRARY protobuf-lite libprotobuf-lite.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH) + FIND_LIBRARY(PROTOBUF_PROTOC_LIBRARY protoc libprotoc.lib PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH) + FIND_PROGRAM(PROTOBUF_PROTOC_EXECUTABLE protoc PATHS 
${PROTOBUF_ROOT}/bin NO_DEFAULT_PATH) + IF (PROTOBUF_INCLUDE_DIR AND PROTOBUF_LIBRARY AND PROTOBUF_LITE_LIBRARY AND PROTOBUF_PROTOC_LIBRARY AND PROTOBUF_PROTOC_EXECUTABLE) + MESSAGE(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.") + SET(PROTOBUF_FOUND TRUE) + SET_PROTOBUF_VERSION() + PROMPT_PROTOBUF_LIB() + ELSE () + MESSAGE(WARNING "Cannot find protobuf library in ${PROTOBUF_ROOT}") + ENDIF () +ENDIF () + +FUNCTION(build_protobuf TARGET_NAME) + STRING(REPLACE "extern_" "" TARGET_DIR_NAME "${TARGET_NAME}") + SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/${TARGET_DIR_NAME}) + SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/${TARGET_DIR_NAME}) + + SET(${TARGET_NAME}_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" PARENT_SCOPE) + SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" PARENT_SCOPE) + SET(${TARGET_NAME}_LITE_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite${CMAKE_STATIC_LIBRARY_SUFFIX}" + PARENT_SCOPE) + SET(${TARGET_NAME}_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf${CMAKE_STATIC_LIBRARY_SUFFIX}" + PARENT_SCOPE) + SET(${TARGET_NAME}_PROTOC_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotoc${CMAKE_STATIC_LIBRARY_SUFFIX}" + PARENT_SCOPE) + SET(${TARGET_NAME}_PROTOC_EXECUTABLE + "${PROTOBUF_INSTALL_DIR}/bin/protoc${CMAKE_EXECUTABLE_SUFFIX}" + PARENT_SCOPE) + + set(prefix_path "${THIRD_PARTY_PATH}/install/zlib") + + # Make sure zlib's two headers are in your /Path/to/install/include path, + # and delete libz.so which we don't need + IF (CMAKE_SYSTEM_NAME MATCHES "Darwin") + FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh + "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/*.dylib && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' 
-DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + ) + ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Linux") + FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh + "rm ${THIRD_PARTY_PATH}/install/zlib/lib/libz.so* -f && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + ) + ELSE () + MESSAGE(FATAL_ERROR "only support linux or macOS") + ENDIF () + + ExternalProject_Add( + ${TARGET_NAME} + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${PROTOBUF_SOURCES_DIR} + UPDATE_COMMAND "" 
+ DEPENDS zlib + URL "https://github.com/protocolbuffers/protobuf/archive/v3.18.0.tar.gz" + CONFIGURE_COMMAND mv ../config.sh . COMMAND sh config.sh + CMAKE_CACHE_ARGS + -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR} + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + ${OPTIONAL_CACHE_ARGS} + ) + +ENDFUNCTION() + +SET(PROTOBUF_VERSION 3.18.0) + +IF (NOT PROTOBUF_FOUND) + MESSAGE("build protobuf") + + build_protobuf(extern_protobuf) + + SET(PROTOBUF_INCLUDE_DIR ${extern_protobuf_INCLUDE_DIR} + CACHE PATH "protobuf include directory." FORCE) + SET(PROTOBUF_LITE_LIBRARY ${extern_protobuf_LITE_LIBRARY} + CACHE FILEPATH "protobuf lite library." FORCE) + SET(PROTOBUF_LIBRARY ${extern_protobuf_LIBRARY} + CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_LIBRARIES ${extern_protobuf_LIBRARY} + CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY} + CACHE FILEPATH "protoc library." FORCE) + + SET(PROTOBUF_PROTOC_EXECUTABLE ${extern_protobuf_PROTOC_EXECUTABLE} + CACHE FILEPATH "protobuf executable." FORCE) + PROMPT_PROTOBUF_LIB(extern_protobuf zlib) +ENDIF (NOT PROTOBUF_FOUND) diff --git a/cmake/rocksdb.cmake b/cmake/rocksdb.cmake index 14a0b6a42..9ddb4dfab 100644 --- a/cmake/rocksdb.cmake +++ b/cmake/rocksdb.cmake @@ -3,9 +3,9 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -FetchContent_Declare( +FETCHCONTENT_DECLARE( rocksdb GIT_REPOSITORY https://github.com/facebook/rocksdb.git GIT_TAG v8.3.3 diff --git a/cmake/spdlog.cmake b/cmake/spdlog.cmake index c7e701428..3697830f0 100644 --- a/cmake/spdlog.cmake +++ b/cmake/spdlog.cmake @@ -3,9 +3,9 @@ # LICENSE file in the root directory of this source tree. 
An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +INCLUDE(cmake/utils.cmake) FetchContent_DeclareGitHubWithMirror(spdlog gabime/spdlog v1.12.0 diff --git a/cmake/unwind.cmake b/cmake/unwind.cmake index 43f7e4f28..4ff2e342c 100644 --- a/cmake/unwind.cmake +++ b/cmake/unwind.cmake @@ -3,33 +3,33 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(cmake/utils.cmake) +INCLUDE(cmake/utils.cmake) FetchContent_DeclareGitHubWithMirror(unwind libunwind/libunwind v1.7.2 SHA256=f39929bff6ebd4426e806f0e834e077f2dc3c16fa19dbfb8996a1c93b3caf8cb ) -FetchContent_GetProperties(unwind) -if(NOT unwind_POPULATED) - FetchContent_Populate(unwind) - - execute_process(COMMAND autoreconf -i +FETCHCONTENT_GETPROPERTIES(unwind) +IF (NOT unwind_POPULATED) + FETCHCONTENT_POPULATE(unwind) + + EXECUTE_PROCESS(COMMAND autoreconf -i WORKING_DIRECTORY ${unwind_SOURCE_DIR} ) - execute_process(COMMAND ${unwind_SOURCE_DIR}/configure CC=${CMAKE_C_COMPILER} -C --enable-static=yes --enable-shared=no --enable-minidebuginfo=no --enable-zlibdebuginfo=no --disable-documentation --disable-tests + EXECUTE_PROCESS(COMMAND ${unwind_SOURCE_DIR}/configure CC=${CMAKE_C_COMPILER} -C --enable-static=yes --enable-shared=no --enable-minidebuginfo=no --enable-zlibdebuginfo=no --disable-documentation --disable-tests WORKING_DIRECTORY ${unwind_BINARY_DIR} - ) - add_custom_target(make_unwind + ) + ADD_CUSTOM_TARGET(make_unwind COMMAND ${MAKE_COMMAND} WORKING_DIRECTORY ${unwind_BINARY_DIR} BYPRODUCTS ${unwind_BINARY_DIR}/src/.libs/libunwind.a ) -endif() +ENDIF () -add_library(unwind INTERFACE) -target_include_directories(unwind INTERFACE $ $) -target_link_libraries(unwind INTERFACE $) -add_dependencies(unwind make_unwind) +ADD_LIBRARY(unwind INTERFACE) 
+TARGET_INCLUDE_DIRECTORIES(unwind INTERFACE $ $) +TARGET_LINK_LIBRARIES(unwind INTERFACE $) +ADD_DEPENDENCIES(unwind make_unwind) diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 79502e0d2..2241392d0 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -3,45 +3,45 @@ # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. -include_guard() +INCLUDE_GUARD() -include(FetchContent) +INCLUDE(FetchContent) -macro(parse_var arg key value) - string(REGEX REPLACE "^(.+)=(.+)$" "\\1;\\2" REGEX_RESULT ${arg}) - list(GET REGEX_RESULT 0 ${key}) - list(GET REGEX_RESULT 1 ${value}) -endmacro() +MACRO(parse_var arg key value) + STRING(REGEX REPLACE "^(.+)=(.+)$" "\\1;\\2" REGEX_RESULT ${arg}) + LIST(GET REGEX_RESULT 0 ${key}) + LIST(GET REGEX_RESULT 1 ${value}) +ENDMACRO() -function(FetchContent_MakeAvailableWithArgs dep) - if(NOT ${dep}_POPULATED) - FetchContent_Populate(${dep}) +FUNCTION(FetchContent_MakeAvailableWithArgs dep) + IF (NOT ${dep}_POPULATED) + FETCHCONTENT_POPULATE(${dep}) - foreach(arg IN LISTS ARGN) + FOREACH(arg IN LISTS ARGN) parse_var(${arg} key value) - set(${key}_OLD ${${key}}) - set(${key} ${value} CACHE INTERNAL "") - endforeach() + SET(${key}_OLD ${${key}}) + SET(${key} ${value} CACHE INTERNAL "") + ENDFOREACH() - add_subdirectory(${${dep}_SOURCE_DIR} ${${dep}_BINARY_DIR} EXCLUDE_FROM_ALL) + ADD_SUBDIRECTORY(${${dep}_SOURCE_DIR} ${${dep}_BINARY_DIR}) - foreach(arg IN LISTS ARGN) + FOREACH(arg IN LISTS ARGN) parse_var(${arg} key value) - set(${key} ${${key}_OLD} CACHE INTERNAL "") - endforeach() - endif() -endfunction() + SET(${key} ${${key}_OLD} CACHE INTERNAL "") + ENDFOREACH() + ENDIF () +ENDFUNCTION() -function(FetchContent_DeclareWithMirror dep url hash) +FUNCTION(FetchContent_DeclareWithMirror dep url hash) FetchContent_Declare(${dep} URL ${DEPS_FETCH_PROXY}${url} URL_HASH ${hash} ) -endfunction() +ENDFUNCTION() 
-function(FetchContent_DeclareGitHubWithMirror dep repo tag hash) +FUNCTION(FetchContent_DeclareGitHubWithMirror dep repo tag hash) FetchContent_DeclareWithMirror(${dep} https://github.com/${repo}/archive/${tag}.zip ${hash} ) -endfunction() +ENDFUNCTION() diff --git a/cmake/zlib.cmake b/cmake/zlib.cmake new file mode 100644 index 000000000..7e5963a1c --- /dev/null +++ b/cmake/zlib.cmake @@ -0,0 +1,41 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. + +INCLUDE(ExternalProject) + +SET(ZLIB_SOURCES_DIR ${THIRD_PARTY_PATH}/zlib) +SET(ZLIB_INSTALL_DIR ${THIRD_PARTY_PATH}/install/zlib) +SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE) +SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE) + +ExternalProject_Add( + extern_zlib + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/madler/zlib.git" + GIT_TAG "v1.2.8" + PREFIX ${ZLIB_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR=${ZLIB_INSTALL_DIR}/lib + -DBUILD_SHARED_LIBS=OFF + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_MACOSX_RPATH=ON + -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + ${EXTERNAL_OPTIONAL_ARGS} + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ZLIB_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR:PATH=${ZLIB_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} +) + +SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." 
FORCE) + +ADD_LIBRARY(zlib STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET zlib PROPERTY IMPORTED_LOCATION ${ZLIB_LIBRARIES}) +ADD_DEPENDENCIES(zlib extern_zlib) \ No newline at end of file diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d08915c3b..efea214ed 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,3 +1,7 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. AUX_SOURCE_DIRECTORY(. PIKIWIDB_SRC) @@ -11,7 +15,24 @@ TARGET_INCLUDE_DIRECTORIES(pikiwidb PRIVATE ${PROJECT_SOURCE_DIR}/src/storage/include ${rocksdb_SOURCE_DIR}/ ${rocksdb_SOURCE_DIR}/include -) + PRIVATE ${BRAFT_INCLUDE_DIR} + PRIVATE ${BRPC_INCLUDE_DIR} + ) -TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb) +IF (CMAKE_SYSTEM_NAME MATCHES "Darwin") + FIND_LIBRARY(COREFOUNDATION_LIBRARY CoreFoundation) + FIND_LIBRARY(CSSERVICES_LIBRARY CoreServices) + FIND_LIBRARY(CFN_LIBRARY CFNetwork) + FIND_LIBRARY(SCY_LIBRARY Security) + FIND_LIBRARY(COREGRAPHICS_LIBRARY CoreGraphics) + FIND_LIBRARY(CORETEXT_LIBRARY CoreText) + FIND_LIBRARY(Foundation_LIBRARY Foundation) + LIST(APPEND MAC_LIBRARY ${COREFOUNDATION_LIBRARY} ${CSSERVICES_LIBRARY} ${CFN_LIBRARY} ${SCY_LIBRARY} ${COREGRAPHICS_LIBRARY} ${CORETEXT_LIBRARY} ${Foundation_LIBRARY}) +ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Linux") + SET(MAC_LIBRARY "") +ELSE () + MESSAGE(FATAL_ERROR "only support linux or macOS") +ENDIF () + +TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; braft brpc ssl crypto zlib protobuf leveldb gflags "${MAC_LIBRARY}") SET_TARGET_PROPERTIES(pikiwidb PROPERTIES LINKER_LANGUAGE CXX) diff --git a/src/net/CMakeLists.txt b/src/net/CMakeLists.txt index 2a9bbc76f..8aceed319 100644 --- a/src/net/CMakeLists.txt +++ b/src/net/CMakeLists.txt @@ -1,3 +1,7 @@ +# 
Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. AUX_SOURCE_DIRECTORY(. NET_SRC) AUX_SOURCE_DIRECTORY(./lzf NET_SRC) diff --git a/src/pstd/CMakeLists.txt b/src/pstd/CMakeLists.txt index a67210604..002b84aa8 100644 --- a/src/pstd/CMakeLists.txt +++ b/src/pstd/CMakeLists.txt @@ -11,6 +11,7 @@ ADD_SUBDIRECTORY(tests) TARGET_INCLUDE_DIRECTORIES(pstd PRIVATE ${rocksdb_SOURCE_DIR}/include + PRIVATE ${GLOG_INCLUDE_DIR} ) TARGET_LINK_LIBRARIES(pstd; spdlog pthread) diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index 87f0b3ca8..babd7f5d2 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -1,4 +1,8 @@ -#AUX_SOURCE_DIRECTORY(./src STORAGE_SRC) +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. 
+ FILE(GLOB STORAGE_SRC "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cc" "${CMAKE_CURRENT_SOURCE_DIR}/src/*.h" @@ -9,12 +13,14 @@ ADD_LIBRARY(storage ${STORAGE_SRC}) TARGET_INCLUDE_DIRECTORIES(storage PUBLIC ${CMAKE_SOURCE_DIR}/src - PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} - PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include - PRIVATE ${rocksdb_SOURCE_DIR} + PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} + PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE ${rocksdb_SOURCE_DIR}/ PRIVATE ${rocksdb_SOURCE_DIR}/include - ) + PRIVATE ${BRAFT_INCLUDE_DIR} + PRIVATE ${BRPC_INCLUDE_DIR} +) -TARGET_LINK_LIBRARIES(storage pstd rocksdb) +TARGET_LINK_LIBRARIES (storage pstd braft brpc ssl crypto zlib protobuf leveldb gflags rocksdb) SET_TARGET_PROPERTIES(storage PROPERTIES LINKER_LANGUAGE CXX) diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 487f5165e..479167697 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -25,6 +25,8 @@ #include "pstd/pstd_mutex.h" #include "storage/slot_indexer.h" +#include "braft/raft.h" + namespace storage { inline constexpr double ZSET_SCORE_MAX = std::numeric_limits::max(); diff --git a/src/storage/src/base_filter.h b/src/storage/src/base_filter.h index d2c7a629f..222fdf0ee 100644 --- a/src/storage/src/base_filter.h +++ b/src/storage/src/base_filter.h @@ -16,6 +16,8 @@ #include "src/base_meta_value_format.h" #include "src/debug.h" +#include "braft/raft.h" + namespace storage { class BaseMetaFilter : public rocksdb::CompactionFilter { diff --git a/src/storage/src/debug.h b/src/storage/src/debug.h index 882b718d1..8d2888a67 100644 --- a/src/storage/src/debug.h +++ b/src/storage/src/debug.h @@ -7,10 +7,7 @@ #ifndef NDEBUG # define TRACE(M, ...) fprintf(stderr, "[TRACE] (%s:%d) " M "\n", __FILE__, __LINE__, ##__VA_ARGS__) -# define DEBUG(M, ...) fprintf(stderr, "[Debug] (%s:%d) " M "\n", __FILE__, __LINE__, ##__VA_ARGS__) #else # define TRACE(M, ...) \ {} -# define DEBUG(M, ...) 
\ - {} #endif // NDEBUG diff --git a/src/store.h b/src/store.h index ae8fe14f9..e3e7b1639 100644 --- a/src/store.h +++ b/src/store.h @@ -18,6 +18,8 @@ #include #include +#include "braft/raft.h" + namespace pikiwidb { class PStore { diff --git a/tests/hash_test.go b/tests/hash_test.go index caf90762f..2bffb0231 100644 --- a/tests/hash_test.go +++ b/tests/hash_test.go @@ -12,7 +12,6 @@ import ( "log" "strconv" "time" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/redis/go-redis/v9" From 65e299c6563230d7ce08425276e6bc5724a1a31a Mon Sep 17 00:00:00 2001 From: k <40883262+KKorpse@users.noreply.github.com> Date: Fri, 8 Mar 2024 17:17:28 +0800 Subject: [PATCH 02/33] feat: Implementing Redis-Raft commands to build a cluster. (#136) * feat: Implementing Redis-Raft commands to build a cluster Co-authored-by: panlei-coder Co-authored-by: century <919745273@qq.com> Co-authored-by: panlei-coder <62509266+panlei-coder@users.noreply.github.com> Co-authored-by: alexstocks Co-authored-by: dingxiaoshuai123 <2486016589@qq.com> Co-authored-by: longfar --- CMakeLists.txt | 2 +- cmake/gflags.cmake | 8 +- cmake/glog.cmake | 2 +- cmake/leveldb.cmake | 6 +- cmake/modules/glog/Findgflags.cmake | 2 +- cmake/protobuf.cmake | 2 +- cmake/utils.cmake | 2 +- pikiwidb.conf | 3 + src/CMakeLists.txt | 21 +- src/base_cmd.h | 9 +- src/client.cc | 24 +- src/client.h | 3 + src/cmd_admin.cc | 75 ++++++ src/cmd_admin.h | 11 + src/cmd_raft.cc | 198 +++++++++++++++ src/cmd_raft.h | 38 +++ src/cmd_table_manager.cc | 8 + src/config.cc | 1 + src/config.h | 1 + src/pikiwidb.cc | 3 +- src/pikiwidb.h | 5 + src/praft/CMakeLists.txt | 33 +++ src/praft/praft.cc | 361 ++++++++++++++++++++++++++++ src/praft/praft.h | 154 ++++++++++++ src/praft/praft.proto | 13 + 25 files changed, 959 insertions(+), 26 deletions(-) create mode 100644 src/cmd_raft.cc create mode 100644 src/cmd_raft.h create mode 100644 src/praft/CMakeLists.txt create mode 100644 src/praft/praft.cc create mode 100644 
src/praft/praft.h create mode 100644 src/praft/praft.proto diff --git a/CMakeLists.txt b/CMakeLists.txt index 6eb2725c2..426847bb8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -148,6 +148,7 @@ ENABLE_TESTING() ADD_SUBDIRECTORY(src/pstd) ADD_SUBDIRECTORY(src/net) +ADD_SUBDIRECTORY(src/praft) ADD_SUBDIRECTORY(src/storage) ADD_SUBDIRECTORY(src) @@ -200,4 +201,3 @@ ADD_CUSTOM_TARGET(cpplint echo '${LINT_FILES}' | xargs -n12 -P8 --linelength=120 --filter=-legal/copyright,-build/header_guard,-runtime/references ) - diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake index 04f317de9..5b45360b9 100644 --- a/cmake/gflags.cmake +++ b/cmake/gflags.cmake @@ -7,9 +7,6 @@ INCLUDE_GUARD() INCLUDE(cmake/utils.cmake) -SET(MY_BUILD_TYPE ${CMAKE_BUILD_TYPE}) -SET(CMAKE_BUILD_TYPE ${THIRD_PARTY_BUILD_TYPE}) - FetchContent_DeclareGitHubWithMirror(gflags gflags/gflags v2.2.2 SHA256=19713a36c9f32b33df59d1c79b4958434cb005b5b47dc5400a7a4b078111d9b5 @@ -22,6 +19,7 @@ FetchContent_MakeAvailableWithArgs(gflags BUILD_gflags_LIB=ON BUILD_gflags_nothreads_LIB=OFF BUILD_TESTING=OFF + CMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} ) FIND_PACKAGE(Threads REQUIRED) @@ -30,6 +28,4 @@ TARGET_LINK_LIBRARIES(gflags_static Threads::Threads) SET(GFLAGS_INCLUDE_PATH ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/include) SET(GFLAGS_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) -SET(GFLAGS_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) - -SET(CMAKE_BUILD_TYPE ${MY_BUILD_TYPE}) \ No newline at end of file +SET(GFLAGS_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) \ No newline at end of file diff --git a/cmake/glog.cmake b/cmake/glog.cmake index 4113ba48b..e694689f0 100644 --- a/cmake/glog.cmake +++ b/cmake/glog.cmake @@ -26,4 +26,4 @@ IF (CMAKE_BUILD_TYPE STREQUAL "Release") SET(GLOG_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/glog-build/libglog.a CACHE BOOL "" FORCE) ELSEIF (CMAKE_BUILD_TYPE STREQUAL "Debug") SET(GLOG_LIB 
${CMAKE_CURRENT_BINARY_DIR}/_deps/glog-build/libglogd.a CACHE BOOL "" FORCE) -ENDIF() +ENDIF() \ No newline at end of file diff --git a/cmake/leveldb.cmake b/cmake/leveldb.cmake index 17a3cf034..939df493b 100644 --- a/cmake/leveldb.cmake +++ b/cmake/leveldb.cmake @@ -6,9 +6,9 @@ INCLUDE_GUARD() FETCHCONTENT_DECLARE( - leveldb - GIT_REPOSITORY https://github.com/google/leveldb.git - GIT_TAG main + leveldb + GIT_REPOSITORY https://github.com/google/leveldb.git + GIT_TAG main ) SET(LEVELDB_BUILD_TESTS OFF CACHE BOOL "" FORCE) SET(LEVELDB_BUILD_BENCHMARKS OFF CACHE BOOL "" FORCE) diff --git a/cmake/modules/glog/Findgflags.cmake b/cmake/modules/glog/Findgflags.cmake index 97701ee84..b9b83ac84 100644 --- a/cmake/modules/glog/Findgflags.cmake +++ b/cmake/modules/glog/Findgflags.cmake @@ -8,4 +8,4 @@ IF (gflags_SOURCE_DIR) # add_library(gflags_static::gflags_static ALIAS gflags_static) INSTALL(TARGETS gflags_static EXPORT glog-targets) -ENDIF () +ENDIF () \ No newline at end of file diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake index 67ffd150e..2717ef5aa 100644 --- a/cmake/protobuf.cmake +++ b/cmake/protobuf.cmake @@ -164,4 +164,4 @@ IF (NOT PROTOBUF_FOUND) SET(PROTOBUF_PROTOC_EXECUTABLE ${extern_protobuf_PROTOC_EXECUTABLE} CACHE FILEPATH "protobuf executable." FORCE) PROMPT_PROTOBUF_LIB(extern_protobuf zlib) -ENDIF (NOT PROTOBUF_FOUND) +ENDIF (NOT PROTOBUF_FOUND) \ No newline at end of file diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 2241392d0..71aea8ba5 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -44,4 +44,4 @@ FUNCTION(FetchContent_DeclareGitHubWithMirror dep repo tag hash) https://github.com/${repo}/archive/${tag}.zip ${hash} ) -ENDFUNCTION() +ENDFUNCTION() \ No newline at end of file diff --git a/pikiwidb.conf b/pikiwidb.conf index e720a2133..3affebcc5 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -7,6 +7,9 @@ daemonize no # port 0 is not permitted. 
port 9221 +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 + # If you want you can bind a single interface, if the bind option is not # specified all the interfaces will listen for incoming connections. # diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index efea214ed..c5451e69f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -8,15 +8,17 @@ AUX_SOURCE_DIRECTORY(. PIKIWIDB_SRC) ADD_EXECUTABLE(pikiwidb ${PIKIWIDB_SRC}) SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) -TARGET_INCLUDE_DIRECTORIES(pikiwidb PRIVATE - ${PROJECT_SOURCE_DIR}/src - ${PROJECT_SOURCE_DIR}/src/pstd - ${PROJECT_SOURCE_DIR}/src/net - ${PROJECT_SOURCE_DIR}/src/storage/include - ${rocksdb_SOURCE_DIR}/ - ${rocksdb_SOURCE_DIR}/include +TARGET_INCLUDE_DIRECTORIES(pikiwidb + PRIVATE ${PROJECT_SOURCE_DIR}/src + PRIVATE ${PROJECT_SOURCE_DIR}/src/pstd + PRIVATE ${PROJECT_SOURCE_DIR}/src/net + PRIVATE ${PROJECT_SOURCE_DIR}/src/storage/include + PRIVATE ${rocksdb_SOURCE_DIR}/ + PRIVATE ${rocksdb_SOURCE_DIR}/include PRIVATE ${BRAFT_INCLUDE_DIR} PRIVATE ${BRPC_INCLUDE_DIR} + PRIVATE ${GFLAGS_INCLUDE_PATH} + PRIVATE ${PROJECT_SOURCE_DIR}/src/praft ) IF (CMAKE_SYSTEM_NAME MATCHES "Darwin") @@ -34,5 +36,6 @@ ELSE () MESSAGE(FATAL_ERROR "only support linux or macOS") ENDIF () -TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; braft brpc ssl crypto zlib protobuf leveldb gflags "${MAC_LIBRARY}") -SET_TARGET_PROPERTIES(pikiwidb PROPERTIES LINKER_LANGUAGE CXX) +TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; pstd braft brpc ssl crypto zlib protobuf leveldb gflags rt crypto dl z praft "${MAC_LIBRARY}") + +SET_TARGET_PROPERTIES(pikiwidb PROPERTIES LINKER_LANGUAGE CXX) \ No newline at end of file diff --git a/src/base_cmd.h b/src/base_cmd.h index f7b0f0b6c..5fb96e0d9 100644 --- a/src/base_cmd.h +++ b/src/base_cmd.h @@ -26,6 +26,10 @@ const std::string kCmdNameDel = "del"; const std::string kCmdNameExists = 
"exists"; const std::string kCmdNamePExpire = "pexpire"; +// raft cmd +const std::string kCmdNameRaftCluster = "raft.cluster"; +const std::string kCmdNameRaftNode = "raft.node"; + // string cmd const std::string kCmdNameSet = "set"; const std::string kCmdNameGet = "get"; @@ -62,6 +66,7 @@ const std::string kCmdNameFlushdb = "flushdb"; const std::string kCmdNameFlushall = "flushall"; const std::string kCmdNameAuth = "auth"; const std::string kCmdNameSelect = "select"; +const std::string kCmdNameInfo = "info"; // hash cmd const std::string kCmdNameHSet = "hset"; @@ -119,6 +124,7 @@ enum CmdFlags { kCmdFlagsProtected = (1 << 12), // Don't accept in scripts kCmdFlagsModuleNoCluster = (1 << 13), // No cluster mode support kCmdFlagsNoMulti = (1 << 14), // Cannot be pipelined + kCmdFlagsRaft = (1 << 15), // raft }; enum AclCategory { @@ -142,7 +148,8 @@ enum AclCategory { kAclCategoryDangerous = (1 << 17), kAclCategoryConnection = (1 << 18), kAclCategoryTransaction = (1 << 19), - kAclCategoryScripting = (1 << 20) + kAclCategoryScripting = (1 << 20), + kAclCategoryRaft = (1 << 21), }; /** diff --git a/src/client.cc b/src/client.cc index 4296a7b96..0434489dc 100644 --- a/src/client.cc +++ b/src/client.cc @@ -15,6 +15,7 @@ #include "pstd_string.h" #include "slow_log.h" #include "store.h" +#include "praft.h" namespace pikiwidb { @@ -130,6 +131,10 @@ void CmdRes::SetRes(CmdRes::CmdRet _ret, const std::string& content) { case kInvalidCursor: AppendStringRaw("-ERR invalid cursor"); break; + case kWrongLeader: + AppendStringRaw("-ERR wrong leader"); + AppendStringRaw(content); + AppendStringRaw(CRLF); default: break; } @@ -268,6 +273,15 @@ int PClient::handlePacket(const char* start, int bytes) { } } + if (isJoinCmdTarget()) { + // Proccees the packet at one turn. 
+ auto [len, is_disconnect] = PRAFT.ProcessClusterJoinCmdResponse(this, start, bytes); + if (is_disconnect) { + conn->ActiveClose(); + } + return len; + } + auto parseRet = parser_.ParseRequest(ptr, end); if (parseRet == PParseResult::kError) { if (!parser_.IsInitialState()) { @@ -410,7 +424,7 @@ PClient::PClient(TcpConnection* obj) int PClient::HandlePackets(pikiwidb::TcpConnection* obj, const char* start, int size) { int total = 0; - + LOG(INFO) << start; while (total < size) { auto processed = handlePacket(start + total, size - total); if (processed <= 0) { @@ -437,6 +451,9 @@ void PClient::OnConnect() { if (g_config.masterauth.empty()) { SetAuth(); } + } else if (isJoinCmdTarget()) { + SetName("ClusterJoinCmdConnection"); + PRAFT.SendNodeInfoRequest(this); } else { if (g_config.password.empty()) { SetAuth(); @@ -509,6 +526,10 @@ bool PClient::isPeerMaster() const { return repl_addr.GetIP() == PeerIP() && repl_addr.GetPort() == PeerPort(); } +bool PClient::isJoinCmdTarget() const { + return PRAFT.GetJoinCtx().GetPeerIp() == PeerIP() && PRAFT.GetJoinCtx().GetPort() == PeerPort(); +} + int PClient::uniqueID() const { if (auto c = getTcpConnection(); c) { return c->GetUniqueId(); @@ -673,6 +694,7 @@ void PClient::FeedMonitors(const std::vector& params) { } } } + void PClient::SetKey(std::vector& names) { keys_ = std::move(names); // use std::move clear copy expense } diff --git a/src/client.h b/src/client.h index e4aefe02e..16e470495 100644 --- a/src/client.h +++ b/src/client.h @@ -48,6 +48,7 @@ class CmdRes { kErrOther, KIncrByOverFlow, kInvalidCursor, + kWrongLeader, }; CmdRes() = default; @@ -209,6 +210,8 @@ class PClient : public std::enable_shared_from_this, public CmdRes { bool isPeerMaster() const; int uniqueID() const; + bool isJoinCmdTarget() const; + // TcpConnection's life is undetermined, so use weak ptr for safety. 
std::weak_ptr tcp_connection_; diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 34f72ca31..adc463a94 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -7,6 +7,8 @@ #include "cmd_admin.h" #include "store.h" +#include "braft/raft.h" +#include "praft.h" namespace pikiwidb { @@ -78,4 +80,77 @@ void SelectCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kOK); } +InfoCmd::InfoCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsAdmin | kCmdFlagsReadonly, kAclCategoryAdmin) {} + +bool InfoCmd::DoInitial(PClient* client) { return true; } + +/* +* INFO raft +* Querying Node Information. +* Reply: +* raft_node_id:595100767 + raft_state:up + raft_role:follower + raft_is_voting:yes + raft_leader_id:1733428433 + raft_current_term:1 + raft_num_nodes:2 + raft_num_voting_nodes:2 + raft_node1:id=1733428433,state=connected,voting=yes,addr=localhost,port=5001,last_conn_secs=5,conn_errors=0,conn_oks=1 +*/ +// @todo The info raft command is only supported for the time being +void InfoCmd::DoCmd(PClient* client) { + if (client->argv_.size() <= 1) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + auto cmd = client->argv_[1]; + if (!strcasecmp(cmd.c_str(), "RAFT")) { + if (client->argv_.size() != 2) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + if (!PRAFT.IsInitialized()) { + return client->SetRes(CmdRes::kErrOther, "don't already cluster member"); + } + + auto node_status = PRAFT.GetNodeStatus(); + if (node_status.state == braft::State::STATE_END) { + return client->SetRes(CmdRes::kErrOther, "Node is not initialized"); + } + + std::string message(""); + message += "raft_group_id:" + PRAFT.GetGroupId() + "\r\n"; + message += "raft_node_id:" + PRAFT.GetNodeId() + "\r\n"; + if (braft::is_active_state(node_status.state)) { + message += "raft_state:up\r\n"; + } else { + message += "raft_state:down\r\n"; + } + message += "raft_role:" + std::string(braft::state2str(node_status.state)) + "\r\n"; + // 
message += "raft_is_voting:" + node_status.is_voting + "\r\n"; + message += "raft_leader_id:" + node_status.leader_id.to_string() + "\r\n"; + message += "raft_current_term:" + std::to_string(node_status.term) + "\r\n"; + // message += "raft_num_nodes:" + std::to_string(node_status.num_nodes) + "\r\n"; + // message += "raft_num_voting_nodes:" + std::to_string(node_status.num_voting_nodes) + "\r\n"; + + if (PRAFT.IsLeader()) { + std::vector peers; + auto status = PRAFT.GetListPeers(&peers); + if (!status.ok()) { + return client->SetRes(CmdRes::kErrOther, status.error_str()); + } + + for (int i = 0; i < peers.size(); i++) { + message += "raft_node" + std::to_string(i) + ":addr=" + butil::ip2str(peers[i].addr.ip).c_str() + ",port=" + std::to_string(peers[i].addr.port) + "\r\n"; + } + } + + client->AppendString(message); + } else { + client->SetRes(CmdRes::kErrOther, "ERR the cmd is not supported"); + } +} + } // namespace pikiwidb \ No newline at end of file diff --git a/src/cmd_admin.h b/src/cmd_admin.h index 9b20bca3f..476660e01 100644 --- a/src/cmd_admin.h +++ b/src/cmd_admin.h @@ -84,4 +84,15 @@ class SelectCmd : public BaseCmd { void DoCmd(PClient* client) override; }; +class InfoCmd : public BaseCmd { + public: + InfoCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + } // namespace pikiwidb diff --git a/src/cmd_raft.cc b/src/cmd_raft.cc new file mode 100644 index 000000000..08b17b4e1 --- /dev/null +++ b/src/cmd_raft.cc @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#include +#include +#include + +#include "braft/configuration.h" +#include "client.h" +#include "cmd_raft.h" +#include "event_loop.h" +#include "pikiwidb.h" +#include "praft.h" +#include "pstd_status.h" +#include "pstd_string.h" + +#define VALID_NODE_ID(x) ((x) > 0) + +namespace pikiwidb { + +RaftNodeCmd::RaftNodeCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsRaft, kAclCategoryRaft) {} + +bool RaftNodeCmd::DoInitial(PClient* client) { return true; } + +/* RAFT.NODE ADD [id] [address:port] + * Add a new node to the cluster. The [id] can be an explicit non-zero value, + * or zero to let the cluster choose one. + * Reply: + * -NOCLUSTER || + * -LOADING || + * -CLUSTERDOWN || + * -MOVED : || + * *2 + * : + * : + * + * RAFT.NODE REMOVE [id] + * Remove an existing node from the cluster. + * Reply: + * -NOCLUSTER || + * -LOADING || + * -CLUSTERDOWN || + * -MOVED : || + * +OK + */ +void RaftNodeCmd::DoCmd(PClient* client) { + // Check whether it is a leader. If it is not a leader, return the leader information + if (!PRAFT.IsLeader()) { + return client->SetRes(CmdRes::kWrongLeader, PRAFT.GetLeaderId()); + } + + auto cmd = client->argv_[1]; + if (!strcasecmp(cmd.c_str(), "ADD")) { + if (client->argv_.size() != 4) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + // RedisRaft has nodeid, but in Braft, NodeId is IP:Port. + // So we do not need to parse and use nodeid like redis; + auto s = PRAFT.AddPeer(client->argv_[3]); + if (s.ok()) { + client->SetRes(CmdRes::kOK); + } else { + client->SetRes(CmdRes::kErrOther); + } + } else if (!strcasecmp(cmd.c_str(), "REMOVE")) { + if (client->argv_.size() != 3) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + // (KKorpse)TODO: Redirect to leader if not leader. 
+ auto s = PRAFT.RemovePeer(client->argv_[2]); + if (s.ok()) { + client->SetRes(CmdRes::kOK); + } else { + client->SetRes(CmdRes::kErrOther); + } + } else { + client->SetRes(CmdRes::kErrOther, "ERR RAFT.NODE supports ADD / REMOVE only"); + } +} + +RaftClusterCmd::RaftClusterCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsRaft, kAclCategoryRaft) {} + +bool RaftClusterCmd::DoInitial(PClient* client) { return true; } + +// The endpoint must be in the league format of ip:port +std::string GetIpFromEndPoint(std::string& endpoint) { + auto pos = endpoint.find(':'); + if (pos == std::string::npos) { + return ""; + } + + return endpoint.substr(0, pos); +} + +// The endpoint must be in the league format of ip:port +int GetPortFromEndPoint(std::string& endpoint) { + auto pos = endpoint.find(':'); + if (pos == std::string::npos) { + return 0; + } + + int ret = 0; + pstd::String2int(endpoint.substr(pos + 1), &ret); + return ret; +} + +/* RAFT.CLUSTER INIT + * Initializes a new Raft cluster. + * is an optional 32 character string, if set, cluster will use it for the id + * Reply: + * +OK [dbid] + * + * RAFT.CLUSTER JOIN [addr:port] + * Join an existing cluster. + * The operation is asynchronous and may take place/retry in the background. 
+ * Reply: + * +OK + */ +void RaftClusterCmd::DoCmd(PClient* client) { + if (client->argv_.size() < 2) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + if (PRAFT.IsInitialized()) { + return client->SetRes(CmdRes::kErrOther, "ERR Already cluster member"); + } + + auto cmd = client->argv_[1]; + if (!strcasecmp(cmd.c_str(), "INIT")) { + if (client->argv_.size() != 2 && client->argv_.size() != 3) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + std::string cluster_id; + if (client->argv_.size() == 3) { + cluster_id = client->argv_[2]; + if (cluster_id.size() != RAFT_DBID_LEN) { + return client->SetRes(CmdRes::kInvalidParameter, + "ERR cluster id must be " + std::to_string(RAFT_DBID_LEN) + " characters"); + } + } else { + cluster_id = pstd::RandomHexChars(RAFT_DBID_LEN); + } + auto s = PRAFT.Init(cluster_id, false); + if (!s.ok()) { + return client->SetRes(CmdRes::kErrOther, s.error_str()); + } + client->SetRes(CmdRes::kOK); + } else if (!strcasecmp(cmd.c_str(), "JOIN")) { + if (client->argv_.size() < 3) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + // (KKorpse)TODO: Support multiple nodes join at the same time. + if (client->argv_.size() > 3) { + return client->SetRes(CmdRes::kInvalidParameter, "ERR too many arguments"); + } + + auto addr = client->argv_[2]; + if (braft::PeerId(addr).is_empty()) { + return client->SetRes(CmdRes::kInvalidParameter, "ERR invalid ip::port: " + addr); + } + + auto on_new_conn = [](TcpConnection* obj) { + if (g_pikiwidb) { + g_pikiwidb->OnNewConnection(obj); + } + }; + auto fail_cb = [&](EventLoop* loop, const char* peer_ip, int port) { + PRAFT.OnJoinCmdConnectionFailed(loop, peer_ip, port); + }; + + auto loop = EventLoop::Self(); + auto peer_ip = GetIpFromEndPoint(addr); + auto port = GetPortFromEndPoint(addr); + // FIXME: The client here is not smart pointer, may cause undefined behavior. + // should use shared_ptr in DoCmd() rather than raw pointer. 
+ auto ret = PRAFT.GetJoinCtx().Set(client, peer_ip, port); + if (!ret) { // other clients have joined + client->SetRes(CmdRes::kErrOther, "other clients have joined"); + } else { + loop->Connect(peer_ip.c_str(), port, on_new_conn, fail_cb); + // Not reply any message here, we will reply after the connection is established. + client->Clear(); + } + } else { + client->SetRes(CmdRes::kErrOther, "ERR RAFT.CLUSTER supports INIT / JOIN only"); + } +} +} // namespace pikiwidb \ No newline at end of file diff --git a/src/cmd_raft.h b/src/cmd_raft.h new file mode 100644 index 000000000..26652d269 --- /dev/null +++ b/src/cmd_raft.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#pragma once + +#include "braft/raft.h" +#include "brpc/server.h" +#include "base_cmd.h" + +namespace pikiwidb { + +class RaftNodeCmd : public BaseCmd { + public: + RaftNodeCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class RaftClusterCmd : public BaseCmd { + public: + RaftClusterCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +} // namespace pikiwidb \ No newline at end of file diff --git a/src/cmd_table_manager.cc b/src/cmd_table_manager.cc index 74cce77b5..159ba3da4 100644 --- a/src/cmd_table_manager.cc +++ b/src/cmd_table_manager.cc @@ -13,6 +13,7 @@ #include "cmd_kv.h" #include "cmd_list.h" #include "cmd_set.h" +#include "cmd_raft.h" #include "cmd_table_manager.h" namespace pikiwidb { @@ -42,6 +43,13 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(Flushall, 1); ADD_COMMAND(Select, 2); + 
// info + ADD_COMMAND(Info, -1); + + // raft + ADD_COMMAND(RaftCluster, -1); + ADD_COMMAND(RaftNode, -2); + // keyspace ADD_COMMAND(Del, -2); ADD_COMMAND(Exists, -2); diff --git a/src/config.cc b/src/config.cc index dbbe20979..592df73c5 100644 --- a/src/config.cc +++ b/src/config.cc @@ -91,6 +91,7 @@ bool LoadPikiwiDBConfig(const char* cfgFile, PConfig& cfg) { cfg.ip = parser.GetData("bind", cfg.ip); cfg.port = parser.GetData("port"); + cfg.raft_port_offset = parser.GetData("raft-port-offset"); cfg.timeout = parser.GetData("timeout"); cfg.dbpath = parser.GetData("db-path"); diff --git a/src/config.h b/src/config.h index 14aecf70e..1c5728523 100644 --- a/src/config.h +++ b/src/config.h @@ -27,6 +27,7 @@ struct PConfig { PString ip; unsigned short port; + unsigned short raft_port_offset; int timeout; diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index e0c9f5ba7..369cfa333 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -256,6 +256,8 @@ void PikiwiDB::Run() { } void PikiwiDB::Stop() { + pikiwidb::PRAFT.ShutDown(); + pikiwidb::PRAFT.Join(); slave_threads_.Exit(); worker_threads_.Exit(); } @@ -293,7 +295,6 @@ static void closeStd() { int main(int ac, char* av[]) { [[maybe_unused]] rocksdb::DB* db; g_pikiwidb = std::make_unique(); - if (!g_pikiwidb->ParseArgs(ac - 1, av + 1)) { Usage(); return -1; diff --git a/src/pikiwidb.h b/src/pikiwidb.h index 4dfdb685b..389096e24 100644 --- a/src/pikiwidb.h +++ b/src/pikiwidb.h @@ -10,9 +10,14 @@ #include "event_loop.h" #include "io_thread_pool.h" #include "tcp_connection.h" +#include "praft/praft.h" #define kPIKIWIDB_VERSION "4.0.0" +namespace pikiwidb { +class PRaft; +} // namespace pikiwidb + class PikiwiDB final { public: PikiwiDB() = default; diff --git a/src/praft/CMakeLists.txt b/src/praft/CMakeLists.txt new file mode 100644 index 000000000..30d7c4505 --- /dev/null +++ b/src/praft/CMakeLists.txt @@ -0,0 +1,33 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. 
+# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. + +FILE(GLOB PRAFT_PROTO "${CMAKE_CURRENT_SOURCE_DIR}/*.proto") +EXECUTE_PROCESS( + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out=${CMAKE_CURRENT_SOURCE_DIR} -I=${CMAKE_CURRENT_SOURCE_DIR} ${PRAFT_PROTO} +) + +FILE(GLOB PRAFT_SRC + "${CMAKE_CURRENT_SOURCE_DIR}/*.cc" + "${CMAKE_CURRENT_SOURCE_DIR}/*.h" +) +SET(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) +ADD_LIBRARY(praft ${PRAFT_SRC}) + +TARGET_INCLUDE_DIRECTORIES(praft + PRIVATE ${PROJECT_SOURCE_DIR}/src + PRIVATE ${PROJECT_SOURCE_DIR}/src/pstd + PRIVATE ${PROJECT_SOURCE_DIR}/src/net + PRIVATE ${PROJECT_SOURCE_DIR}/src/storage/include + PRIVATE ${rocksdb_SOURCE_DIR}/ + PRIVATE ${rocksdb_SOURCE_DIR}/include + PRIVATE ${BRAFT_INCLUDE_DIR} + PRIVATE ${BRPC_INCLUDE_DIR} + PRIVATE ${GFLAGS_INCLUDE_PATH} + PRIVATE ${PROJECT_SOURCE_DIR}/src/praft +) + +TARGET_LINK_LIBRARIES(praft net; dl; fmt; storage; pstd braft brpc ssl crypto zlib protobuf leveldb gflags rocksdb rt crypto dl z) + +SET_TARGET_PROPERTIES(praft PROPERTIES LINKER_LANGUAGE CXX) \ No newline at end of file diff --git a/src/praft/praft.cc b/src/praft/praft.cc new file mode 100644 index 000000000..595fb3f08 --- /dev/null +++ b/src/praft/praft.cc @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +// +// praft.cc + +#include "praft.h" +#include +#include +#include +#include "client.h" +#include "config.h" +#include "pstd_string.h" +#include "braft/configuration.h" +#include "event_loop.h" +#include "pikiwidb.h" + +namespace pikiwidb { + +PRaft& PRaft::Instance() { + static PRaft store; + return store; +} + +butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { + if (node_ && server_) { + return {0, "OK"}; + } + + server_ = std::make_unique(); + DummyServiceImpl service(&PRAFT); + auto port = g_config.port + pikiwidb::g_config.raft_port_offset; + // Add your service into RPC server + if (server_->AddService(&service, + brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + LOG(ERROR) << "Fail to add service"; + return {EINVAL, "Fail to add service"}; + } + // raft can share the same RPC server. Notice the second parameter, because + // adding services into a running server is not allowed and the listen + // address of this server is impossible to get before the server starts. You + // have to specify the address of the server. + if (braft::add_service(server_.get(), port) != 0) { + LOG(ERROR) << "Fail to add raft service"; + return {EINVAL, "Fail to add raft service"}; + } + + // It's recommended to start the server before Counter is started to avoid + // the case that it becomes the leader while the service is unreacheable by + // clients. + // Notice the default options of server is used here. Check out details from + // the doc of brpc if you would like change some options; + if (server_->Start(port, nullptr) != 0) { + LOG(ERROR) << "Fail to start Server"; + return {EINVAL, "Fail to start Server"}; + } + + // It's ok to start PRaft; + assert(group_id.size() == RAFT_DBID_LEN); + this->dbid_ = group_id; + + // FIXME: g_config.ip is default to 127.0.0.0, which may not work in cluster. 
+ raw_addr_ = g_config.ip + ":" + std::to_string(port); + butil::ip_t ip; + auto ret = butil::str2ip(g_config.ip.c_str(), &ip); + if (ret != 0) { + LOG(ERROR) << "Fail to covert str_ip to butil::ip_t"; + return {EINVAL, "Fail to covert str_ip to butil::ip_t"}; + } + butil::EndPoint addr(ip, port); + + // Default init in one node. + /* + initial_conf takes effect only when the replication group is started from an empty node. + The Configuration is restored from the snapshot and log files when the data in the replication group is not empty. + initial_conf is used only to create replication groups. + The first node adds itself to initial_conf and then calls add_peer to add other nodes. + Set initial_conf to empty for other nodes. + You can also start empty nodes simultaneously by setting the same inital_conf(ip:port of multiple nodes) for multiple nodes. + */ + std::string initial_conf(""); + if (!initial_conf_is_null) { + initial_conf = raw_addr_ + ":0,"; + } + if (node_options_.initial_conf.parse_from(initial_conf) != 0) { + LOG(ERROR) << "Fail to parse configuration, address: " << raw_addr_; + return {EINVAL, "Fail to parse address."}; + } + + // node_options_.election_timeout_ms = FLAGS_election_timeout_ms; + node_options_.fsm = this; + node_options_.node_owns_fsm = false; + // node_options_.snapshot_interval_s = FLAGS_snapshot_interval; + std::string prefix = "local://" + g_config.dbpath + "_praft"; + node_options_.log_uri = prefix + "/log"; + node_options_.raft_meta_uri = prefix + "/raft_meta"; + node_options_.snapshot_uri = prefix + "/snapshot"; + // node_options_.disable_cli = FLAGS_disable_cli; + node_ = std::make_unique("pikiwidb", braft::PeerId(addr)); // group_id + if (node_->init(node_options_) != 0) { + node_.reset(); + LOG(ERROR) << "Fail to init raft node"; + return {EINVAL, "Fail to init raft node"}; + } + + return {0, "OK"}; +} + +bool PRaft::IsLeader() const { + if (!node_) { + LOG(ERROR) << "Node is not initialized"; + return false; + } + return 
node_->is_leader(); +} + +std::string PRaft::GetLeaderId() const { + if (!node_) { + LOG(ERROR) << "Node is not initialized"; + return std::string("Fail to get leader id"); + } + return node_->leader_id().to_string(); +} + +std::string PRaft::GetNodeId() const { + if (!node_) { + LOG(ERROR) << "Node is not initialized"; + return std::string("Fail to get node id"); + } + return node_->node_id().to_string(); +} + +std::string PRaft::GetGroupId() const { + if (!node_) { + LOG(ERROR) << "Node is not initialized"; + return std::string("Fail to get cluster id"); + } + return dbid_; +} + +braft::NodeStatus PRaft::GetNodeStatus() const { + braft::NodeStatus node_status; + if (!node_) { + LOG(ERROR) << "Node is not initialized"; + } else { + node_->get_status(&node_status); + } + + return node_status; +} + +butil::Status PRaft::GetListPeers(std::vector* peers) { + if (!node_) { + LOG(ERROR) << "Node is not initialized"; + } else { + return node_->list_peers(peers); + } +} + +// Gets the cluster id, which is used to initialize node +void PRaft::SendNodeInfoRequest(PClient *client) { + assert(client); + + UnboundedBuffer req; + req.PushData("INFO raft", 9); + req.PushData("\r\n", 2); + client->SendPacket(req); +} + +void PRaft::SendNodeAddRequest(PClient *client) { + assert(client); + + // Node id in braft are ip:port, the node id param in RAFT.NODE ADD cmd will be ignored. 
+ int unused_node_id = 0; + auto port = g_config.port + pikiwidb::g_config.raft_port_offset; + auto raw_addr = g_config.ip + ":" + std::to_string(port); + UnboundedBuffer req; + req.PushData("RAFT.NODE ADD ", 14); + req.PushData(std::to_string(unused_node_id).c_str(), std::to_string(unused_node_id).size()); + req.PushData(" ", 1); + req.PushData(raw_addr.data(), raw_addr.size()); + req.PushData("\r\n", 2); + client->SendPacket(req); +} + +std::tuple PRaft::ProcessClusterJoinCmdResponse(PClient* client, const char* start, int len) { + assert(start); + auto join_client = join_ctx_.GetClient(); + if (!join_client) { + LOG(WARNING) << "No client when processing cluster join cmd response."; + return std::make_tuple(0, true); + } + + bool is_disconnect = true; + std::string reply(start, len); + if (reply.find("+OK") != std::string::npos) { + LOG(INFO) << "Joined Raft cluster, node id:" << PRAFT.GetNodeId() << "dbid:" << PRAFT.dbid_; + join_client->SetRes(CmdRes::kOK); + join_client->SendPacket(join_client->Message()); + is_disconnect = false; + } else if (reply.find("-ERR wrong leader") != std::string::npos) { + // Resolve the ip address of the leader + pstd::StringTrimLeft(reply, "-ERR wrong leader"); + pstd::StringTrim(reply); + braft::PeerId peerId; + peerId.parse(reply); + + // Establish a connection with the leader and send the add request + auto on_new_conn = [](TcpConnection* obj) { + if (g_pikiwidb) { + g_pikiwidb->OnNewConnection(obj); + } + }; + auto fail_cb = [&](EventLoop* loop, const char* peer_ip, int port) { + PRAFT.OnJoinCmdConnectionFailed(loop, peer_ip, port); + }; + + auto loop = EventLoop::Self(); + auto peer_ip = std::string(butil::ip2str(peerId.addr.ip).c_str()); + auto port = peerId.addr.port; + // FIXME: The client here is not smart pointer, may cause undefined behavior. + // should use shared_ptr in DoCmd() rather than raw pointer. 
+ PRAFT.GetJoinCtx().Set(join_client, peer_ip, port); + loop->Connect(peer_ip.c_str(), port, on_new_conn, fail_cb); + + // Not reply any message here, we will reply after the connection is established. + join_client->Clear(); + } else if (reply.find("raft_group_id") != std::string::npos) { + std::string prefix = "raft_group_id:"; + std::string::size_type prefix_length = prefix.length(); + std::string::size_type group_id_start = reply.find(prefix); + group_id_start += prefix_length; // 定位到raft_group_id的起始位置 + std::string::size_type group_id_end = reply.find("\r\n", group_id_start); + if (group_id_end != std::string::npos) { + std::string raft_group_id = reply.substr(group_id_start, group_id_end - group_id_start); + // initialize the slave node + auto s = PRAFT.Init(raft_group_id, true); + if (!s.ok()) { + join_client->SetRes(CmdRes::kErrOther, s.error_str()); + join_client->SendPacket(join_client->Message()); + return std::make_tuple(len, is_disconnect); + } + + PRAFT.SendNodeAddRequest(client); + is_disconnect = false; + } else { + LOG(ERROR) << "Joined Raft cluster fail, because of invalid raft_group_id"; + join_client->SetRes(CmdRes::kErrOther, "Invalid raft_group_id"); + join_client->SendPacket(join_client->Message()); + } + } else { + LOG(ERROR) << "Joined Raft cluster fail, " << start; + join_client->SetRes(CmdRes::kErrOther, std::string(start, len)); + join_client->SendPacket(join_client->Message()); + } + + return std::make_tuple(len, is_disconnect); +} + +butil::Status PRaft::AddPeer(const std::string& peer) { + if (!node_) { + LOG(ERROR) << "Node is not initialized"; + return {EINVAL, "Node is not initialized"}; + } + + braft::SynchronizedClosure done; + node_->add_peer(peer, &done); + done.wait(); + + if (!done.status().ok()) { + LOG(WARNING) << "Fail to add peer " << peer << " to node " << node_->node_id() << ", status " << done.status(); + return done.status(); + } + + return {0, "OK"}; +} + +butil::Status PRaft::RemovePeer(const std::string& peer) { + 
if (!node_) { + LOG(ERROR) << "Node is not initialized"; + return {EINVAL, "Node is not initialized"}; + } + + braft::SynchronizedClosure done; + node_->remove_peer(peer, &done); + done.wait(); + + if (!done.status().ok()) { + LOG(WARNING) << "Fail to remove peer " << peer << " from node " << node_->node_id() << ", status " << done.status(); + return done.status(); + } + + return {0, "OK"}; +} + +void PRaft::OnJoinCmdConnectionFailed([[maybe_unused]] EventLoop* loop, const char* peer_ip, int port) { + auto cli = join_ctx_.GetClient(); + if (cli) { + cli->SetRes(CmdRes::kErrOther, + "ERR failed to connect to cluster for join, please check logs " + std::string(peer_ip) + ":" + std::to_string(port)); + cli->SendPacket(cli->Message()); + } +} + +// Shut this node and server down. +void PRaft::ShutDown() { + if (node_) { + node_->shutdown(nullptr); + } + + if (server_) { + server_->Stop(0); + } +} + +// Blocking this thread until the node is eventually down. +void PRaft::Join() { + if (node_) { + node_->join(); + } + + if (server_) { + server_->Join(); + } +} + +void PRaft::Apply(braft::Task& task) { + if (node_) { + node_->apply(task); + } +} + +// @braft::StateMachine +void PRaft::on_apply(braft::Iterator& iter) { + // A batch of tasks are committed, which must be processed through + // |iter| + for (; iter.valid(); iter.next()) { + } +} + +void PRaft::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) {} + +int PRaft::on_snapshot_load(braft::SnapshotReader* reader) { return 0; } + +void PRaft::on_leader_start(int64_t term) { + LOG(WARNING) << "Node " << node_->node_id() << "start to be leader, term" << term; +} + +void PRaft::on_leader_stop(const butil::Status& status) {} + +void PRaft::on_shutdown() {} +void PRaft::on_error(const ::braft::Error& e) {} +void PRaft::on_configuration_committed(const ::braft::Configuration& conf) {} +void PRaft::on_stop_following(const ::braft::LeaderChangeContext& ctx) {} +void PRaft::on_start_following(const 
::braft::LeaderChangeContext& ctx) {} + +} // namespace pikiwidb \ No newline at end of file diff --git a/src/praft/praft.h b/src/praft/praft.h new file mode 100644 index 000000000..41d87283a --- /dev/null +++ b/src/praft/praft.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "braft/configuration.h" +#include "braft/raft.h" +#include "braft/util.h" +#include "brpc/server.h" +#include "brpc/controller.h" +#include "butil/status.h" +#include "client.h" +#include "common.h" +#include "config.h" +#include "event_loop.h" +#include "gflags/gflags.h" +#include "praft.pb.h" +#include "tcp_connection.h" + +namespace pikiwidb { + +#define RAFT_DBID_LEN 32 + +#define PRAFT PRaft::Instance() + +extern PConfig g_config; + +class JoinCmdContext { + friend class PRaft; + + public: + JoinCmdContext() = default; + ~JoinCmdContext() = default; + + bool Set(PClient* client, const std::string& peer_ip, int port) { + std::unique_lock lck(mtx_); + if (client_ != nullptr) { + return false; + } + assert(client); + client_ = client; + peer_ip_ = peer_ip; + port_ = port; + return true; + } + + void Clear() { + std::unique_lock lck(mtx_); + client_ = nullptr; + peer_ip_.clear(); + port_ = 0; + } + + // @todo the function seems useless + bool IsEmpty() { + std::unique_lock lck(mtx_); + return client_ == nullptr; + } + + PClient* GetClient() { return client_; } + braft::PeerId GetPeerIp() { return peer_ip_; } + int GetPort() { return port_; } + + private: + std::mutex mtx_; + PClient* client_ = nullptr; + std::string peer_ip_; + int port_ = 0; +}; + +class PRaft : public braft::StateMachine { + public: + PRaft() : server_(nullptr), 
node_(nullptr) {} + + ~PRaft() override = default; + + static PRaft& Instance(); + + //===--------------------------------------------------------------------===// + // Braft API + //===--------------------------------------------------------------------===// + butil::Status Init(std::string& cluster_id, bool initial_conf_is_null); + butil::Status AddPeer(const std::string& peer); + butil::Status RemovePeer(const std::string& peer); + butil::Status RaftRecvEntry(); + + void ShutDown(); + void Join(); + void Apply(braft::Task& task); + + //===--------------------------------------------------------------------===// + // ClusterJoin command + //===--------------------------------------------------------------------===// + JoinCmdContext& GetJoinCtx() { return join_ctx_; } + void SendNodeInfoRequest(PClient *client); + void SendNodeAddRequest(PClient *client); + std::tuple ProcessClusterJoinCmdResponse(PClient* client, const char* start, int len); + void OnJoinCmdConnectionFailed(EventLoop*, const char* peer_ip, int port); + + bool IsLeader() const; + std::string GetLeaderId() const; + std::string GetNodeId() const; + std::string GetGroupId() const; + braft::NodeStatus GetNodeStatus() const; + butil::Status GetListPeers(std::vector* peers); + + bool IsInitialized() const { return node_ != nullptr && server_ != nullptr; } + + private: + void on_apply(braft::Iterator& iter) override; + void on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) override; + int on_snapshot_load(braft::SnapshotReader* reader) override; + + void on_leader_start(int64_t term) override; + void on_leader_stop(const butil::Status& status) override; + + void on_shutdown() override; + void on_error(const ::braft::Error& e) override; + void on_configuration_committed(const ::braft::Configuration& conf) override; + void on_stop_following(const ::braft::LeaderChangeContext& ctx) override; + void on_start_following(const ::braft::LeaderChangeContext& ctx) override;; + + private: + 
std::unique_ptr server_; // brpc + std::unique_ptr node_; + braft::NodeOptions node_options_; // options for raft node + std::string raw_addr_; // ip:port of this node + + JoinCmdContext join_ctx_; // context for cluster join command + std::string dbid_; // dbid of group, +}; + +class DummyServiceImpl : public DummyService { +public: + explicit DummyServiceImpl(PRaft* praft) : praft_(praft) {} + void DummyMethod(::google::protobuf::RpcController* controller, + const ::pikiwidb::DummyRequest* request, + ::pikiwidb::DummyResponse* response, + ::google::protobuf::Closure* done) {} +private: + PRaft* praft_; +}; + +} // namespace pikiwidb \ No newline at end of file diff --git a/src/praft/praft.proto b/src/praft/praft.proto new file mode 100644 index 000000000..61a495f21 --- /dev/null +++ b/src/praft/praft.proto @@ -0,0 +1,13 @@ +syntax="proto3"; +package pikiwidb; +option cc_generic_services = true; + +message DummyRequest { +}; + +message DummyResponse { +}; + +service DummyService { + rpc DummyMethod(DummyRequest) returns (DummyResponse); +}; From 45c44093006f7a9fc608dd4de87d0978e07dfb01 Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Sat, 9 Mar 2024 18:37:15 +0800 Subject: [PATCH 03/33] =?UTF-8?q?fix:=20return=20error=20status=20in=20Get?= =?UTF-8?q?ListPeers=20and=20add=20check=20for=20return-typ=E2=80=A6=20(#1?= =?UTF-8?q?93)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: return error status in GetListPeers and add check for return-type in cmake * ci: run actions when pull request to import-braft * Revert "ci: run actions when pull request to import-braft" This reverts commit 727c84d0b996e04332b0c0f16fbc57dd4028883b. 
--- CMakeLists.txt | 4 ++-- src/praft/praft.cc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 426847bb8..f5b81d952 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,7 +6,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.14) PROJECT(PikiwiDB) -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated -g -D'GIT_COMMIT_ID=\"${GIT_COMMIT_ID}\"'") +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror=return-type -D'GIT_COMMIT_ID=\"${GIT_COMMIT_ID}\"'") # Avoid warning about DOWNLOAD_EXTRACT_TIMESTAMP in CMake 3.24: IF (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") @@ -73,7 +73,7 @@ ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Linux") set(CMAKE_THREAD_LIBS_INIT "-lpthread") ELSEIF (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") SET(CMAKE_EXE_LINKER_FLAGS "-static-libgcc -static-libstdc++") - SET(CMAKE_CXX_FLAGS "-pthread -Wl,--no-as-needed -ldl -Wno-restrict") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -Wl,--no-as-needed -ldl") ENDIF () ADD_DEFINITIONS(-DOS_LINUX) ELSE () diff --git a/src/praft/praft.cc b/src/praft/praft.cc index 595fb3f08..4759466c9 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -156,9 +156,9 @@ braft::NodeStatus PRaft::GetNodeStatus() const { butil::Status PRaft::GetListPeers(std::vector* peers) { if (!node_) { LOG(ERROR) << "Node is not initialized"; - } else { - return node_->list_peers(peers); + return {EINVAL, "Node is not initialized"}; } + return node_->list_peers(peers); } // Gets the cluster id, which is used to initialize node From ff0180be74e0eda40f2ac6e596660132ef22b77e Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Sun, 10 Mar 2024 12:44:36 +0800 Subject: [PATCH 04/33] fix: an unknown bug in master branch of braft causes failure of leader to add follower (#197) --- cmake/braft.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/braft.cmake b/cmake/braft.cmake index 853266f52..ffb421901 100644 --- 
a/cmake/braft.cmake +++ b/cmake/braft.cmake @@ -16,8 +16,8 @@ ExternalProject_Add( extern_braft ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS brpc - GIT_REPOSITORY https://github.com/baidu/braft.git - GIT_TAG master + URL "https://github.com/baidu/braft/archive/v1.1.2.tar.gz" + URL_HASH SHA256=bb3705f61874f8488e616ae38464efdec1a20610ddd6cd82468adc814488f14e PREFIX ${BRAFT_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} From ad055b91ad5b88a6b61eee4e5edd21b1139930ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Mon, 11 Mar 2024 15:52:08 +0800 Subject: [PATCH 05/33] fix build on mac (#201) --- src/CMakeLists.txt | 31 ++++++++++++++++--------------- src/praft/CMakeLists.txt | 6 +++++- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c5451e69f..d0b2df684 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -21,21 +21,22 @@ TARGET_INCLUDE_DIRECTORIES(pikiwidb PRIVATE ${PROJECT_SOURCE_DIR}/src/praft ) -IF (CMAKE_SYSTEM_NAME MATCHES "Darwin") - FIND_LIBRARY(COREFOUNDATION_LIBRARY CoreFoundation) - FIND_LIBRARY(CSSERVICES_LIBRARY CoreServices) - FIND_LIBRARY(CFN_LIBRARY CFNetwork) - FIND_LIBRARY(SCY_LIBRARY Security) - FIND_LIBRARY(COREGRAPHICS_LIBRARY CoreGraphics) - FIND_LIBRARY(CORETEXT_LIBRARY CoreText) - FIND_LIBRARY(Foundation_LIBRARY Foundation) - LIST(APPEND MAC_LIBRARY ${COREFOUNDATION_LIBRARY} ${CSSERVICES_LIBRARY} ${CFN_LIBRARY} ${SCY_LIBRARY} ${COREGRAPHICS_LIBRARY} ${CORETEXT_LIBRARY} ${Foundation_LIBRARY}) -ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Linux") - SET(MAC_LIBRARY "") -ELSE () - MESSAGE(FATAL_ERROR "only support linux or macOS") -ENDIF () -TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; pstd braft brpc ssl crypto zlib protobuf leveldb gflags rt crypto dl z praft "${MAC_LIBRARY}") +IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + SET(LIB rt) +ELSEIF(CMAKE_SYSTEM_NAME 
STREQUAL "Darwin") + SET(LIB + "-framework CoreFoundation" + "-framework CoreGraphics" + "-framework CoreData" + "-framework CoreText" + "-framework Security" + "-framework Foundation" + "-Wl,-U,_MallocExtension_ReleaseFreeMemory" + "-Wl,-U,_ProfilerStart" + "-Wl,-U,_ProfilerStop") +ENDIF() + +TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; pstd braft brpc ssl crypto zlib protobuf leveldb gflags z praft "${LIB}") SET_TARGET_PROPERTIES(pikiwidb PROPERTIES LINKER_LANGUAGE CXX) \ No newline at end of file diff --git a/src/praft/CMakeLists.txt b/src/praft/CMakeLists.txt index 30d7c4505..c2b059b89 100644 --- a/src/praft/CMakeLists.txt +++ b/src/praft/CMakeLists.txt @@ -28,6 +28,10 @@ TARGET_INCLUDE_DIRECTORIES(praft PRIVATE ${PROJECT_SOURCE_DIR}/src/praft ) -TARGET_LINK_LIBRARIES(praft net; dl; fmt; storage; pstd braft brpc ssl crypto zlib protobuf leveldb gflags rocksdb rt crypto dl z) +IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + SET(PRAFT_LIB ${PRAFT_LIB} rt) +ENDIF() + +TARGET_LINK_LIBRARIES(praft net; dl; fmt; storage; pstd braft brpc ssl crypto zlib protobuf leveldb gflags rocksdb z ${PRAFT_LIB}) SET_TARGET_PROPERTIES(praft PROPERTIES LINKER_LANGUAGE CXX) \ No newline at end of file From 7adc3d81f9635d66b6e52349fd2208ba5662a850 Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:03:07 +0800 Subject: [PATCH 06/33] fix: disappearing roles bug (#204) * fix: disappearing roles bug * fix: can not find libgflags.a for build in debug version * fix: add download prefix of braft * fix: rm -f xxx --- CMakeLists.txt | 2 +- cmake/braft.cmake | 39 +++++++++++++++++---------------------- cmake/brpc.cmake | 33 +++++++++++++++------------------ cmake/gflags.cmake | 4 ++-- cmake/protobuf.cmake | 8 ++++---- 5 files changed, 39 insertions(+), 47 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f5b81d952..6d0831901 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -119,7 +119,7 @@ 
SET_PROPERTY(TARGET crypto PROPERTY IMPORTED_LOCATION ${OPENSSL_CRYPTO_LIBRARY}) SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") SET(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third-party) -SET(THIRD_PARTY_BUILD_TYPE Release) +SET(THIRD_PARTY_BUILD_TYPE ${CMAKE_BUILD_TYPE}) SET(EXTERNAL_PROJECT_LOG_ARGS LOG_DOWNLOAD 0 LOG_UPDATE 1 diff --git a/cmake/braft.cmake b/cmake/braft.cmake index ffb421901..051d0089c 100644 --- a/cmake/braft.cmake +++ b/cmake/braft.cmake @@ -16,32 +16,27 @@ ExternalProject_Add( extern_braft ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS brpc - URL "https://github.com/baidu/braft/archive/v1.1.2.tar.gz" - URL_HASH SHA256=bb3705f61874f8488e616ae38464efdec1a20610ddd6cd82468adc814488f14e - PREFIX ${BRAFT_SOURCES_DIR} + GIT_REPOSITORY https://github.com/baidu/braft.git + GIT_TAG master + PREFIX ${BRAFT_SOURCES_DIR} UPDATE_COMMAND "" - CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} - -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} - -DCMAKE_INSTALL_PREFIX=${BRAFT_INSTALL_DIR} - -DCMAKE_INSTALL_LIBDIR=${BRAFT_INSTALL_DIR}/lib - -DCMAKE_POSITION_INDEPENDENT_CODE=ON - -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} - -DCMAKE_PREFIX_PATH=${prefix_path} - -DBRPC_WITH_GLOG=OFF - -DWITH_DEBUG_SYMBOLS=OFF - ${EXTERNAL_OPTIONAL_ARGS} - LIST_SEPARATOR | - CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRAFT_INSTALL_DIR} - -DCMAKE_INSTALL_LIBDIR:PATH=${BRAFT_INSTALL_DIR}/lib - -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON - -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + CMAKE_ARGS + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + ${EXTERNAL_OPTIONAL_ARGS} + LIST_SEPARATOR | + CMAKE_CACHE_ARGS + -DCMAKE_PREFIX_PATH:PATH=${prefix_path} + -DCMAKE_INSTALL_PREFIX:PATH=${BRAFT_INSTALL_DIR} + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} # Set build type + 
-DBRPC_WITH_GLOG:BOOL=OFF + -DBUILD_SHARED_LIBS:BOOL=OFF BUILD_IN_SOURCE 1 BUILD_COMMAND $(MAKE) -j ${CPU_CORE} braft-static - INSTALL_COMMAND mkdir -p ${BRAFT_INSTALL_DIR}/lib/ COMMAND cp ${BRAFT_SOURCES_DIR}/src/extern_braft/output/lib/libbraft.a ${BRAFT_LIBRARIES} COMMAND cp -r ${BRAFT_SOURCES_DIR}/src/extern_braft/output/include ${BRAFT_INCLUDE_DIR}/ ) ADD_DEPENDENCIES(extern_braft brpc) ADD_LIBRARY(braft STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET braft PROPERTY IMPORTED_LOCATION ${BRAFT_LIBRARIES}) -ADD_DEPENDENCIES(braft extern_braft) \ No newline at end of file +ADD_DEPENDENCIES(braft extern_braft) diff --git a/cmake/brpc.cmake b/cmake/brpc.cmake index 13305f203..6cb88b001 100644 --- a/cmake/brpc.cmake +++ b/cmake/brpc.cmake @@ -22,26 +22,23 @@ EXTERNALPROJECT_ADD( URL_HASH SHA256=b9d638b76725552ed11178c650d7fc95e30f252db7972a93dc309a0698c7d2b8 PREFIX ${BRPC_SOURCES_DIR} UPDATE_COMMAND "" - CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} - -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} - -DCMAKE_INSTALL_PREFIX=${BRPC_INSTALL_DIR} - -DCMAKE_INSTALL_LIBDIR=${BRPC_INSTALL_DIR}/lib - -DCMAKE_POSITION_INDEPENDENT_CODE=ON - -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} - -DCMAKE_PREFIX_PATH=${prefix_path} - -DWITH_GLOG=OFF - -DDOWNLOAD_GTEST=OFF - ${EXTERNAL_OPTIONAL_ARGS} - LIST_SEPARATOR | - CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRPC_INSTALL_DIR} - -DCMAKE_INSTALL_LIBDIR:PATH=${BRPC_INSTALL_DIR}/lib - -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON - -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + CMAKE_ARGS + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + ${EXTERNAL_OPTIONAL_ARGS} + LIST_SEPARATOR | + CMAKE_CACHE_ARGS + -DCMAKE_PREFIX_PATH:PATH=${prefix_path} + -DCMAKE_INSTALL_PREFIX:PATH=${BRPC_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR:PATH=${BRPC_INSTALL_DIR}/lib + 
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + -DWITH_GLOG:BOOL=OFF + -DDOWNLOAD_GTEST:BOOL=OFF + -DBUILD_SHARED_LIBS:BOOL=OFF BUILD_IN_SOURCE 1 BUILD_COMMAND $(MAKE) -j ${CPU_CORE} brpc-static - INSTALL_COMMAND mkdir -p ${BRPC_INSTALL_DIR}/lib/ COMMAND cp ${BRPC_SOURCES_DIR}/src/extern_brpc/output/lib/libbrpc.a ${BRPC_LIBRARIES} COMMAND cp -r ${BRPC_SOURCES_DIR}/src/extern_brpc/output/include ${BRPC_INCLUDE_DIR}/ ) ADD_DEPENDENCIES(extern_brpc ssl crypto zlib protobuf leveldb gflags) ADD_LIBRARY(brpc STATIC IMPORTED GLOBAL) diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake index 5b45360b9..68960ceed 100644 --- a/cmake/gflags.cmake +++ b/cmake/gflags.cmake @@ -19,7 +19,7 @@ FetchContent_MakeAvailableWithArgs(gflags BUILD_gflags_LIB=ON BUILD_gflags_nothreads_LIB=OFF BUILD_TESTING=OFF - CMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + CMAKE_BUILD_TYPE=Release ) FIND_PACKAGE(Threads REQUIRED) @@ -28,4 +28,4 @@ TARGET_LINK_LIBRARIES(gflags_static Threads::Threads) SET(GFLAGS_INCLUDE_PATH ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/include) SET(GFLAGS_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) -SET(GFLAGS_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) \ No newline at end of file +SET(GFLAGS_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake index 2717ef5aa..ee4b5f0db 100644 --- a/cmake/protobuf.cmake +++ b/cmake/protobuf.cmake @@ -115,11 +115,11 @@ FUNCTION(build_protobuf TARGET_NAME) # and delete libz.so which we don't need IF (CMAKE_SYSTEM_NAME MATCHES "Darwin") FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh - "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/*.dylib && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} 
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/*.dylib && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" ) ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Linux") FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh - "rm ${THIRD_PARTY_PATH}/install/zlib/lib/libz.so* -f && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp 
${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/libz.so* && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" ) ELSE () MESSAGE(FATAL_ERROR "only support linux 
or macOS") @@ -135,7 +135,7 @@ FUNCTION(build_protobuf TARGET_NAME) CONFIGURE_COMMAND mv ../config.sh . COMMAND sh config.sh CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR} - -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_BUILD_TYPE:STRING=Release -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON ${OPTIONAL_CACHE_ARGS} @@ -164,4 +164,4 @@ IF (NOT PROTOBUF_FOUND) SET(PROTOBUF_PROTOC_EXECUTABLE ${extern_protobuf_PROTOC_EXECUTABLE} CACHE FILEPATH "protobuf executable." FORCE) PROMPT_PROTOBUF_LIB(extern_protobuf zlib) -ENDIF (NOT PROTOBUF_FOUND) \ No newline at end of file +ENDIF (NOT PROTOBUF_FOUND) From d497ead714130e871f905fe3297b79cf609e878c Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Wed, 13 Mar 2024 22:10:22 +0800 Subject: [PATCH 07/33] Revert "fix: disappearing roles bug (#204)" for build failure on mac (#206) This reverts commit 7adc3d81f9635d66b6e52349fd2208ba5662a850. 
--- CMakeLists.txt | 2 +- cmake/braft.cmake | 39 ++++++++++++++++++++++----------------- cmake/brpc.cmake | 33 ++++++++++++++++++--------------- cmake/gflags.cmake | 4 ++-- cmake/protobuf.cmake | 8 ++++---- 5 files changed, 47 insertions(+), 39 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6d0831901..f5b81d952 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -119,7 +119,7 @@ SET_PROPERTY(TARGET crypto PROPERTY IMPORTED_LOCATION ${OPENSSL_CRYPTO_LIBRARY}) SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") SET(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third-party) -SET(THIRD_PARTY_BUILD_TYPE ${CMAKE_BUILD_TYPE}) +SET(THIRD_PARTY_BUILD_TYPE Release) SET(EXTERNAL_PROJECT_LOG_ARGS LOG_DOWNLOAD 0 LOG_UPDATE 1 diff --git a/cmake/braft.cmake b/cmake/braft.cmake index 051d0089c..ffb421901 100644 --- a/cmake/braft.cmake +++ b/cmake/braft.cmake @@ -16,27 +16,32 @@ ExternalProject_Add( extern_braft ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS brpc - GIT_REPOSITORY https://github.com/baidu/braft.git - GIT_TAG master - PREFIX ${BRAFT_SOURCES_DIR} + URL "https://github.com/baidu/braft/archive/v1.1.2.tar.gz" + URL_HASH SHA256=bb3705f61874f8488e616ae38464efdec1a20610ddd6cd82468adc814488f14e + PREFIX ${BRAFT_SOURCES_DIR} UPDATE_COMMAND "" - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} - -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} - ${EXTERNAL_OPTIONAL_ARGS} - LIST_SEPARATOR | - CMAKE_CACHE_ARGS - -DCMAKE_PREFIX_PATH:PATH=${prefix_path} - -DCMAKE_INSTALL_PREFIX:PATH=${BRAFT_INSTALL_DIR} - -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} # Set build type - -DBRPC_WITH_GLOG:BOOL=OFF - -DBUILD_SHARED_LIBS:BOOL=OFF + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_INSTALL_PREFIX=${BRAFT_INSTALL_DIR} + 
-DCMAKE_INSTALL_LIBDIR=${BRAFT_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_PREFIX_PATH=${prefix_path} + -DBRPC_WITH_GLOG=OFF + -DWITH_DEBUG_SYMBOLS=OFF + ${EXTERNAL_OPTIONAL_ARGS} + LIST_SEPARATOR | + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRAFT_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR:PATH=${BRAFT_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} BUILD_IN_SOURCE 1 BUILD_COMMAND $(MAKE) -j ${CPU_CORE} braft-static + INSTALL_COMMAND mkdir -p ${BRAFT_INSTALL_DIR}/lib/ COMMAND cp ${BRAFT_SOURCES_DIR}/src/extern_braft/output/lib/libbraft.a ${BRAFT_LIBRARIES} COMMAND cp -r ${BRAFT_SOURCES_DIR}/src/extern_braft/output/include ${BRAFT_INCLUDE_DIR}/ ) ADD_DEPENDENCIES(extern_braft brpc) ADD_LIBRARY(braft STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET braft PROPERTY IMPORTED_LOCATION ${BRAFT_LIBRARIES}) -ADD_DEPENDENCIES(braft extern_braft) +ADD_DEPENDENCIES(braft extern_braft) \ No newline at end of file diff --git a/cmake/brpc.cmake b/cmake/brpc.cmake index 6cb88b001..13305f203 100644 --- a/cmake/brpc.cmake +++ b/cmake/brpc.cmake @@ -22,23 +22,26 @@ EXTERNALPROJECT_ADD( URL_HASH SHA256=b9d638b76725552ed11178c650d7fc95e30f252db7972a93dc309a0698c7d2b8 PREFIX ${BRPC_SOURCES_DIR} UPDATE_COMMAND "" - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} - -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} - ${EXTERNAL_OPTIONAL_ARGS} - LIST_SEPARATOR | - CMAKE_CACHE_ARGS - -DCMAKE_PREFIX_PATH:PATH=${prefix_path} - -DCMAKE_INSTALL_PREFIX:PATH=${BRPC_INSTALL_DIR} - -DCMAKE_INSTALL_LIBDIR:PATH=${BRPC_INSTALL_DIR}/lib - -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} - -DWITH_GLOG:BOOL=OFF - -DDOWNLOAD_GTEST:BOOL=OFF - -DBUILD_SHARED_LIBS:BOOL=OFF + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} 
+ -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_INSTALL_PREFIX=${BRPC_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR=${BRPC_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_PREFIX_PATH=${prefix_path} + -DWITH_GLOG=OFF + -DDOWNLOAD_GTEST=OFF + ${EXTERNAL_OPTIONAL_ARGS} + LIST_SEPARATOR | + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRPC_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR:PATH=${BRPC_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} BUILD_IN_SOURCE 1 BUILD_COMMAND $(MAKE) -j ${CPU_CORE} brpc-static + INSTALL_COMMAND mkdir -p ${BRPC_INSTALL_DIR}/lib/ COMMAND cp ${BRPC_SOURCES_DIR}/src/extern_brpc/output/lib/libbrpc.a ${BRPC_LIBRARIES} COMMAND cp -r ${BRPC_SOURCES_DIR}/src/extern_brpc/output/include ${BRPC_INCLUDE_DIR}/ ) ADD_DEPENDENCIES(extern_brpc ssl crypto zlib protobuf leveldb gflags) ADD_LIBRARY(brpc STATIC IMPORTED GLOBAL) diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake index 68960ceed..5b45360b9 100644 --- a/cmake/gflags.cmake +++ b/cmake/gflags.cmake @@ -19,7 +19,7 @@ FetchContent_MakeAvailableWithArgs(gflags BUILD_gflags_LIB=ON BUILD_gflags_nothreads_LIB=OFF BUILD_TESTING=OFF - CMAKE_BUILD_TYPE=Release + CMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} ) FIND_PACKAGE(Threads REQUIRED) @@ -28,4 +28,4 @@ TARGET_LINK_LIBRARIES(gflags_static Threads::Threads) SET(GFLAGS_INCLUDE_PATH ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/include) SET(GFLAGS_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) -SET(GFLAGS_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) +SET(GFLAGS_LIB ${CMAKE_CURRENT_BINARY_DIR}/_deps/gflags-build/libgflags.a) \ No newline at end of file diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake index ee4b5f0db..2717ef5aa 100644 --- a/cmake/protobuf.cmake +++ b/cmake/protobuf.cmake @@ -115,11 +115,11 @@ FUNCTION(build_protobuf TARGET_NAME) # and delete libz.so which we don't need IF 
(CMAKE_SYSTEM_NAME MATCHES "Darwin") FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh - "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/*.dylib && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/*.dylib && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} 
-DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" ) ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Linux") FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh - "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/libz.so* && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + "rm ${THIRD_PARTY_PATH}/install/zlib/lib/libz.so* -f && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON 
-DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" ) ELSE () MESSAGE(FATAL_ERROR "only support linux or macOS") @@ -135,7 +135,7 @@ FUNCTION(build_protobuf TARGET_NAME) CONFIGURE_COMMAND mv ../config.sh . COMMAND sh config.sh CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR} - -DCMAKE_BUILD_TYPE:STRING=Release + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON ${OPTIONAL_CACHE_ARGS} @@ -164,4 +164,4 @@ IF (NOT PROTOBUF_FOUND) SET(PROTOBUF_PROTOC_EXECUTABLE ${extern_protobuf_PROTOC_EXECUTABLE} CACHE FILEPATH "protobuf executable." FORCE) PROMPT_PROTOBUF_LIB(extern_protobuf zlib) -ENDIF (NOT PROTOBUF_FOUND) +ENDIF (NOT PROTOBUF_FOUND) \ No newline at end of file From 7ff06eefa475d68bc87da4a8ccfdd180e55f0cc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Sat, 16 Mar 2024 19:16:39 +0800 Subject: [PATCH 08/33] update brpc & cmake (#209) fix:update brpc & cmake --- CMakeLists.txt | 2 +- cmake/braft.cmake | 1 - cmake/brpc.cmake | 4 ++-- cmake/gflags.cmake | 2 +- cmake/protobuf.cmake | 9 +++++---- src/CMakeLists.txt | 4 +++- src/client.cc | 1 - src/praft/praft.h | 3 ++- 8 files changed, 14 insertions(+), 12 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f5b81d952..6d0831901 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -119,7 +119,7 @@ SET_PROPERTY(TARGET crypto PROPERTY IMPORTED_LOCATION ${OPENSSL_CRYPTO_LIBRARY}) SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") SET(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third-party) -SET(THIRD_PARTY_BUILD_TYPE Release) 
+SET(THIRD_PARTY_BUILD_TYPE ${CMAKE_BUILD_TYPE}) SET(EXTERNAL_PROJECT_LOG_ARGS LOG_DOWNLOAD 0 LOG_UPDATE 1 diff --git a/cmake/braft.cmake b/cmake/braft.cmake index ffb421901..87aff68a1 100644 --- a/cmake/braft.cmake +++ b/cmake/braft.cmake @@ -30,7 +30,6 @@ ExternalProject_Add( -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} -DCMAKE_PREFIX_PATH=${prefix_path} -DBRPC_WITH_GLOG=OFF - -DWITH_DEBUG_SYMBOLS=OFF ${EXTERNAL_OPTIONAL_ARGS} LIST_SEPARATOR | CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRAFT_INSTALL_DIR} diff --git a/cmake/brpc.cmake b/cmake/brpc.cmake index 13305f203..fbace60c8 100644 --- a/cmake/brpc.cmake +++ b/cmake/brpc.cmake @@ -18,8 +18,8 @@ EXTERNALPROJECT_ADD( extern_brpc ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS ssl crypto zlib protobuf leveldb gflags - URL "https://github.com/apache/brpc/archive/1.3.0.tar.gz" - URL_HASH SHA256=b9d638b76725552ed11178c650d7fc95e30f252db7972a93dc309a0698c7d2b8 + URL https://github.com/apache/brpc/archive/refs/tags/1.8.0.tar.gz + URL_HASH SHA256=13ffb2f1f57c679379a20367c744b3e597614a793ec036cd7580aae90798019d PREFIX ${BRPC_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake index 5b45360b9..ff4f9efae 100644 --- a/cmake/gflags.cmake +++ b/cmake/gflags.cmake @@ -19,7 +19,7 @@ FetchContent_MakeAvailableWithArgs(gflags BUILD_gflags_LIB=ON BUILD_gflags_nothreads_LIB=OFF BUILD_TESTING=OFF - CMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + CMAKE_BUILD_TYPE=Release ) FIND_PACKAGE(Threads REQUIRED) diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake index 2717ef5aa..9a8539bb9 100644 --- a/cmake/protobuf.cmake +++ b/cmake/protobuf.cmake @@ -115,11 +115,11 @@ FUNCTION(build_protobuf TARGET_NAME) # and delete libz.so which we don't need IF (CMAKE_SYSTEM_NAME MATCHES "Darwin") FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh - "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/*.dylib && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp 
${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/*.dylib && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" ) ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Linux") 
FILE(WRITE ${PROTOBUF_SOURCES_DIR}/src/config.sh - "rm ${THIRD_PARTY_PATH}/install/zlib/lib/libz.so* -f && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE='${THIRD_PARTY_BUILD_TYPE}' -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" + "rm -f ${THIRD_PARTY_PATH}/install/zlib/lib/libz.so* && mkdir -p ${THIRD_PARTY_PATH}/install/protobuf/include && cp ${THIRD_PARTY_PATH}/install/zlib/include/* ${THIRD_PARTY_PATH}/install/protobuf/include/ && ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_C_FLAGS='${CMAKE_C_FLAGS}' -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG} -DCMAKE_C_FLAGS_RELEASE='${CMAKE_C_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS='${CMAKE_CXX_FLAGS}' -DCMAKE_CXX_FLAGS_RELEASE='${CMAKE_CXX_FLAGS_RELEASE}' -DCMAKE_CXX_FLAGS_DEBUG='${CMAKE_CXX_FLAGS_DEBUG}' -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_SKIP_RPATH=ON -Dprotobuf_WITH_ZLIB=ON -DZLIB_INCLUDE_DIR=${THIRD_PARTY_PATH}/install/zlib/include -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} 
-DCMAKE_INSTALL_LIBDIR=lib -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=${prefix_path} ${EXTERNAL_OPTIONAL_ARGS}" ) ELSE () MESSAGE(FATAL_ERROR "only support linux or macOS") @@ -135,7 +135,7 @@ FUNCTION(build_protobuf TARGET_NAME) CONFIGURE_COMMAND mv ../config.sh . COMMAND sh config.sh CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR} - -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_BUILD_TYPE:STRING=Release -DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON ${OPTIONAL_CACHE_ARGS} @@ -164,4 +164,5 @@ IF (NOT PROTOBUF_FOUND) SET(PROTOBUF_PROTOC_EXECUTABLE ${extern_protobuf_PROTOC_EXECUTABLE} CACHE FILEPATH "protobuf executable." FORCE) PROMPT_PROTOBUF_LIB(extern_protobuf zlib) -ENDIF (NOT PROTOBUF_FOUND) \ No newline at end of file +ENDIF (NOT PROTOBUF_FOUND) + diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d0b2df684..508680ac2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -26,6 +26,7 @@ IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") SET(LIB rt) ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Darwin") SET(LIB + pthread "-framework CoreFoundation" "-framework CoreGraphics" "-framework CoreData" @@ -34,7 +35,8 @@ ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Darwin") "-framework Foundation" "-Wl,-U,_MallocExtension_ReleaseFreeMemory" "-Wl,-U,_ProfilerStart" - "-Wl,-U,_ProfilerStop") + "-Wl,-U,_ProfilerStop" + "-Wl,-U,__Z13GetStackTracePPvii") ENDIF() TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; pstd braft brpc ssl crypto zlib protobuf leveldb gflags z praft "${LIB}") diff --git a/src/client.cc b/src/client.cc index 0434489dc..60b7b6d04 100644 --- a/src/client.cc +++ b/src/client.cc @@ -424,7 +424,6 @@ PClient::PClient(TcpConnection* obj) int PClient::HandlePackets(pikiwidb::TcpConnection* obj, const char* start, int size) { int total = 0; - LOG(INFO) << start; while (total < size) { auto processed = handlePacket(start + total, size - total); if (processed <= 0) { diff 
--git a/src/praft/praft.h b/src/praft/praft.h index 41d87283a..934285e92 100644 --- a/src/praft/praft.h +++ b/src/praft/praft.h @@ -12,6 +12,7 @@ #include #include #include +#include #include "braft/configuration.h" #include "braft/raft.h" @@ -68,7 +69,7 @@ class JoinCmdContext { } PClient* GetClient() { return client_; } - braft::PeerId GetPeerIp() { return peer_ip_; } + const std::string& GetPeerIp() { return peer_ip_; } int GetPort() { return port_; } private: From c9edc87d41b24a92ef5acd91d380ca567accfdbf Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Sat, 16 Mar 2024 19:19:12 +0800 Subject: [PATCH 09/33] style: use spdlog and better format of praft (#200) * style: use spdlog and format codes --- src/cmd_raft.cc | 246 ++++++++++++++++++++------------------------- src/cmd_raft.h | 47 ++++++++- src/praft/praft.cc | 111 +++++++++++--------- src/praft/praft.h | 37 ++----- 4 files changed, 227 insertions(+), 214 deletions(-) diff --git a/src/cmd_raft.cc b/src/cmd_raft.cc index 08b17b4e1..27f349fa9 100644 --- a/src/cmd_raft.cc +++ b/src/cmd_raft.cc @@ -5,21 +5,18 @@ * of patent rights can be found in the PATENTS file in the same directory. */ -#include #include +#include #include -#include "braft/configuration.h" #include "client.h" #include "cmd_raft.h" #include "event_loop.h" +#include "log.h" #include "pikiwidb.h" #include "praft.h" -#include "pstd_status.h" #include "pstd_string.h" -#define VALID_NODE_ID(x) ((x) > 0) - namespace pikiwidb { RaftNodeCmd::RaftNodeCmd(const std::string& name, int16_t arity) @@ -27,27 +24,6 @@ RaftNodeCmd::RaftNodeCmd(const std::string& name, int16_t arity) bool RaftNodeCmd::DoInitial(PClient* client) { return true; } -/* RAFT.NODE ADD [id] [address:port] - * Add a new node to the cluster. The [id] can be an explicit non-zero value, - * or zero to let the cluster choose one. 
- * Reply: - * -NOCLUSTER || - * -LOADING || - * -CLUSTERDOWN || - * -MOVED : || - * *2 - * : - * : - * - * RAFT.NODE REMOVE [id] - * Remove an existing node from the cluster. - * Reply: - * -NOCLUSTER || - * -LOADING || - * -CLUSTERDOWN || - * -MOVED : || - * +OK - */ void RaftNodeCmd::DoCmd(PClient* client) { // Check whether it is a leader. If it is not a leader, return the leader information if (!PRAFT.IsLeader()) { @@ -55,33 +31,42 @@ void RaftNodeCmd::DoCmd(PClient* client) { } auto cmd = client->argv_[1]; + pstd::StringToUpper(cmd); if (!strcasecmp(cmd.c_str(), "ADD")) { - if (client->argv_.size() != 4) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); - } - - // RedisRaft has nodeid, but in Braft, NodeId is IP:Port. - // So we do not need to parse and use nodeid like redis; - auto s = PRAFT.AddPeer(client->argv_[3]); - if (s.ok()) { - client->SetRes(CmdRes::kOK); - } else { - client->SetRes(CmdRes::kErrOther); - } + DoCmdAdd(client); } else if (!strcasecmp(cmd.c_str(), "REMOVE")) { - if (client->argv_.size() != 3) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); - } + DoCmdRemove(client); + } else { + client->SetRes(CmdRes::kErrOther, "RAFT.NODE supports ADD / REMOVE only"); + } +} - // (KKorpse)TODO: Redirect to leader if not leader. - auto s = PRAFT.RemovePeer(client->argv_[2]); - if (s.ok()) { - client->SetRes(CmdRes::kOK); - } else { - client->SetRes(CmdRes::kErrOther); - } +void RaftNodeCmd::DoCmdAdd(PClient* client) { + if (client->argv_.size() != 4) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + // RedisRaft has nodeid, but in Braft, NodeId is IP:Port. 
+ // So we do not need to parse and use nodeid like redis; + auto s = PRAFT.AddPeer(client->argv_[3]); + if (s.ok()) { + client->SetRes(CmdRes::kOK); } else { - client->SetRes(CmdRes::kErrOther, "ERR RAFT.NODE supports ADD / REMOVE only"); + client->SetRes(CmdRes::kErrOther, fmt::format("Failed to add peer: {}", s.error_str())); + } +} + +void RaftNodeCmd::DoCmdRemove(PClient* client) { + if (client->argv_.size() != 3) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + // (KKorpse)TODO: Redirect to leader if not leader. + auto s = PRAFT.RemovePeer(client->argv_[2]); + if (s.ok()) { + client->SetRes(CmdRes::kOK); + } else { + client->SetRes(CmdRes::kErrOther, fmt::format("Failed to remove peer: {}", s.error_str())); } } @@ -90,109 +75,100 @@ RaftClusterCmd::RaftClusterCmd(const std::string& name, int16_t arity) bool RaftClusterCmd::DoInitial(PClient* client) { return true; } -// The endpoint must be in the league format of ip:port -std::string GetIpFromEndPoint(std::string& endpoint) { - auto pos = endpoint.find(':'); - if (pos == std::string::npos) { - return ""; +void RaftClusterCmd::DoCmd(PClient* client) { + // parse arguments + if (client->argv_.size() < 2) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + auto cmd = client->argv_[1]; + + if (PRAFT.IsInitialized()) { + return client->SetRes(CmdRes::kErrOther, "Already cluster member"); } - - return endpoint.substr(0, pos); + + pstd::StringToUpper(cmd); + if (cmd == kInitCmd) { + DoCmdInit(client); + } else if (cmd == kJoinCmd) { + DoCmdJoin(client); + } else { + client->SetRes(CmdRes::kErrOther, "RAFT.CLUSTER supports INIT/JOIN only"); + } +} + +void RaftClusterCmd::DoCmdInit(PClient* client) { + if (client->argv_.size() != 2 && client->argv_.size() != 3) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + std::string cluster_id; + if (client->argv_.size() == 3) { + cluster_id = client->argv_[2]; + if (cluster_id.size() != RAFT_DBID_LEN) { + return 
client->SetRes(CmdRes::kInvalidParameter, + "Cluster id must be " + std::to_string(RAFT_DBID_LEN) + " characters"); + } + } else { + cluster_id = pstd::RandomHexChars(RAFT_DBID_LEN); + } + auto s = PRAFT.Init(cluster_id, false); + if (!s.ok()) { + return client->SetRes(CmdRes::kErrOther, fmt::format("Failed to init node: ", s.error_str())); + } + client->SetRes(CmdRes::kOK); } -// The endpoint must be in the league format of ip:port -int GetPortFromEndPoint(std::string& endpoint) { +static inline std::optional> GetIpAndPortFromEndPoint(const std::string& endpoint) { auto pos = endpoint.find(':'); if (pos == std::string::npos) { - return 0; + return std::nullopt; } - int ret = 0; + int32_t ret = 0; pstd::String2int(endpoint.substr(pos + 1), &ret); - return ret; + return {{endpoint.substr(0, pos), ret}}; } -/* RAFT.CLUSTER INIT - * Initializes a new Raft cluster. - * is an optional 32 character string, if set, cluster will use it for the id - * Reply: - * +OK [dbid] - * - * RAFT.CLUSTER JOIN [addr:port] - * Join an existing cluster. - * The operation is asynchronous and may take place/retry in the background. - * Reply: - * +OK - */ -void RaftClusterCmd::DoCmd(PClient* client) { - if (client->argv_.size() < 2) { +void RaftClusterCmd::DoCmdJoin(PClient* client) { + if (client->argv_.size() < 3) { return client->SetRes(CmdRes::kWrongNum, client->CmdName()); } - if (PRAFT.IsInitialized()) { - return client->SetRes(CmdRes::kErrOther, "ERR Already cluster member"); + // (KKorpse)TODO: Support multiple nodes join at the same time. 
+ if (client->argv_.size() > 3) { + return client->SetRes(CmdRes::kInvalidParameter, "Too many arguments"); } - auto cmd = client->argv_[1]; - if (!strcasecmp(cmd.c_str(), "INIT")) { - if (client->argv_.size() != 2 && client->argv_.size() != 3) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); - } - - std::string cluster_id; - if (client->argv_.size() == 3) { - cluster_id = client->argv_[2]; - if (cluster_id.size() != RAFT_DBID_LEN) { - return client->SetRes(CmdRes::kInvalidParameter, - "ERR cluster id must be " + std::to_string(RAFT_DBID_LEN) + " characters"); - } - } else { - cluster_id = pstd::RandomHexChars(RAFT_DBID_LEN); - } - auto s = PRAFT.Init(cluster_id, false); - if (!s.ok()) { - return client->SetRes(CmdRes::kErrOther, s.error_str()); - } - client->SetRes(CmdRes::kOK); - } else if (!strcasecmp(cmd.c_str(), "JOIN")) { - if (client->argv_.size() < 3) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); - } - - // (KKorpse)TODO: Support multiple nodes join at the same time. - if (client->argv_.size() > 3) { - return client->SetRes(CmdRes::kInvalidParameter, "ERR too many arguments"); - } - - auto addr = client->argv_[2]; - if (braft::PeerId(addr).is_empty()) { - return client->SetRes(CmdRes::kInvalidParameter, "ERR invalid ip::port: " + addr); - } + auto addr = client->argv_[2]; + if (braft::PeerId(addr).is_empty()) { + return client->SetRes(CmdRes::kErrOther, fmt::format("Invalid ip::port: {}", addr)); + } - auto on_new_conn = [](TcpConnection* obj) { - if (g_pikiwidb) { - g_pikiwidb->OnNewConnection(obj); - } - }; - auto fail_cb = [&](EventLoop* loop, const char* peer_ip, int port) { - PRAFT.OnJoinCmdConnectionFailed(loop, peer_ip, port); - }; - - auto loop = EventLoop::Self(); - auto peer_ip = GetIpFromEndPoint(addr); - auto port = GetPortFromEndPoint(addr); - // FIXME: The client here is not smart pointer, may cause undefined behavior. - // should use shared_ptr in DoCmd() rather than raw pointer. 
- auto ret = PRAFT.GetJoinCtx().Set(client, peer_ip, port); - if (!ret) { // other clients have joined - client->SetRes(CmdRes::kErrOther, "other clients have joined"); - } else { - loop->Connect(peer_ip.c_str(), port, on_new_conn, fail_cb); - // Not reply any message here, we will reply after the connection is established. - client->Clear(); + auto on_new_conn = [](TcpConnection* obj) { + if (g_pikiwidb) { + g_pikiwidb->OnNewConnection(obj); } - } else { - client->SetRes(CmdRes::kErrOther, "ERR RAFT.CLUSTER supports INIT / JOIN only"); + }; + auto on_fail = [&](EventLoop* loop, const char* peer_ip, int port) { + PRAFT.OnJoinCmdConnectionFailed(loop, peer_ip, port); + }; + + auto loop = EventLoop::Self(); + auto ip_port = GetIpAndPortFromEndPoint(addr); + if (!ip_port.has_value()) { + return client->SetRes(CmdRes::kErrOther, fmt::format("Invalid ip::port: {}", addr)); + } + auto& [peer_ip, port] = *ip_port; + // FIXME: The client here is not smart pointer, may cause undefined behavior. + // should use shared_ptr in DoCmd() rather than raw pointer. + auto ret = PRAFT.GetJoinCtx().Set(client, peer_ip, port); + if (!ret) { // other clients have joined + return client->SetRes(CmdRes::kErrOther, "Other clients have joined"); } + loop->Connect(peer_ip.c_str(), port, on_new_conn, on_fail); + INFO("Sent join request to leader successfully"); + // Not reply any message here, we will reply after the connection is established. + client->Clear(); } + } // namespace pikiwidb \ No newline at end of file diff --git a/src/cmd_raft.h b/src/cmd_raft.h index 26652d269..a5e8f924d 100644 --- a/src/cmd_raft.h +++ b/src/cmd_raft.h @@ -7,12 +7,33 @@ #pragma once -#include "braft/raft.h" -#include "brpc/server.h" +#include + #include "base_cmd.h" namespace pikiwidb { +/* RAFT.NODE ADD [id] [address:port] + * Add a new node to the cluster. The [id] can be an explicit non-zero value, + * or zero to let the cluster choose one. 
+ * Reply: + * -NOCLUSTER || + * -LOADING || + * -CLUSTERDOWN || + * -MOVED : || + * *2 + * : + * : + * + * RAFT.NODE REMOVE [id] + * Remove an existing node from the cluster. + * Reply: + * -NOCLUSTER || + * -LOADING || + * -CLUSTERDOWN || + * -MOVED : || + * +OK + */ class RaftNodeCmd : public BaseCmd { public: RaftNodeCmd(const std::string &name, int16_t arity); @@ -22,8 +43,25 @@ class RaftNodeCmd : public BaseCmd { private: void DoCmd(PClient *client) override; + void DoCmdAdd(PClient *client); + void DoCmdRemove(PClient *client); + + static constexpr std::string_view kAddCmd = "ADD"; + static constexpr std::string_view kRemoveCmd = "REMOVE"; }; +/* RAFT.CLUSTER INIT + * Initializes a new Raft cluster. + * is an optional 32 character string, if set, cluster will use it for the id + * Reply: + * +OK [dbid] + * + * RAFT.CLUSTER JOIN [addr:port] + * Join an existing cluster. + * The operation is asynchronous and may take place/retry in the background. + * Reply: + * +OK + */ class RaftClusterCmd : public BaseCmd { public: RaftClusterCmd(const std::string &name, int16_t arity); @@ -33,6 +71,11 @@ class RaftClusterCmd : public BaseCmd { private: void DoCmd(PClient *client) override; + void DoCmdInit(PClient *client); + void DoCmdJoin(PClient *client); + + static constexpr std::string_view kInitCmd = "INIT"; + static constexpr std::string_view kJoinCmd = "JOIN"; }; } // namespace pikiwidb \ No newline at end of file diff --git a/src/praft/praft.cc b/src/praft/praft.cc index 4759466c9..d68480c70 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -9,18 +9,37 @@ // praft.cc #include "praft.h" + #include #include #include + #include "client.h" #include "config.h" -#include "pstd_string.h" -#include "braft/configuration.h" #include "event_loop.h" +#include "log.h" #include "pikiwidb.h" +#include "praft.pb.h" +#include "pstd_string.h" + +#define ERROR_LOG_AND_STATUS(msg) \ + ({ \ + ERROR(msg); \ + butil::Status(EINVAL, msg); \ + }) namespace pikiwidb { +class 
DummyServiceImpl : public DummyService { + public: + explicit DummyServiceImpl(PRaft* praft) : praft_(praft) {} + void DummyMethod(::google::protobuf::RpcController* controller, const ::pikiwidb::DummyRequest* request, + ::pikiwidb::DummyResponse* response, ::google::protobuf::Closure* done) {} + + private: + PRaft* praft_; +}; + PRaft& PRaft::Instance() { static PRaft store; return store; @@ -35,18 +54,15 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { DummyServiceImpl service(&PRAFT); auto port = g_config.port + pikiwidb::g_config.raft_port_offset; // Add your service into RPC server - if (server_->AddService(&service, - brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { - LOG(ERROR) << "Fail to add service"; - return {EINVAL, "Fail to add service"}; + if (server_->AddService(&service, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + return ERROR_LOG_AND_STATUS("Failed to add service"); } // raft can share the same RPC server. Notice the second parameter, because // adding services into a running server is not allowed and the listen // address of this server is impossible to get before the server starts. You // have to specify the address of the server. if (braft::add_service(server_.get(), port) != 0) { - LOG(ERROR) << "Fail to add raft service"; - return {EINVAL, "Fail to add raft service"}; + return ERROR_LOG_AND_STATUS("Failed to add raft service"); } // It's recommended to start the server before Counter is started to avoid @@ -55,8 +71,7 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { // Notice the default options of server is used here. 
Check out details from // the doc of brpc if you would like change some options; if (server_->Start(port, nullptr) != 0) { - LOG(ERROR) << "Fail to start Server"; - return {EINVAL, "Fail to start Server"}; + return ERROR_LOG_AND_STATUS("Failed to start server"); } // It's ok to start PRaft; @@ -68,27 +83,26 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { butil::ip_t ip; auto ret = butil::str2ip(g_config.ip.c_str(), &ip); if (ret != 0) { - LOG(ERROR) << "Fail to covert str_ip to butil::ip_t"; - return {EINVAL, "Fail to covert str_ip to butil::ip_t"}; + return ERROR_LOG_AND_STATUS("Failed to convert str_ip to butil::ip_t"); } butil::EndPoint addr(ip, port); // Default init in one node. /* - initial_conf takes effect only when the replication group is started from an empty node. - The Configuration is restored from the snapshot and log files when the data in the replication group is not empty. - initial_conf is used only to create replication groups. - The first node adds itself to initial_conf and then calls add_peer to add other nodes. - Set initial_conf to empty for other nodes. - You can also start empty nodes simultaneously by setting the same inital_conf(ip:port of multiple nodes) for multiple nodes. + initial_conf takes effect only when the replication group is started from an empty node. + The Configuration is restored from the snapshot and log files when the data in the replication group is not empty. + initial_conf is used only to create replication groups. + The first node adds itself to initial_conf and then calls add_peer to add other nodes. + Set initial_conf to empty for other nodes. + You can also start empty nodes simultaneously by setting the same inital_conf(ip:port of multiple nodes) for multiple + nodes. 
*/ - std::string initial_conf(""); + std::string initial_conf; if (!initial_conf_is_null) { initial_conf = raw_addr_ + ":0,"; } if (node_options_.initial_conf.parse_from(initial_conf) != 0) { - LOG(ERROR) << "Fail to parse configuration, address: " << raw_addr_; - return {EINVAL, "Fail to parse address."}; + return ERROR_LOG_AND_STATUS("Failed to parse configuration"); } // node_options_.election_timeout_ms = FLAGS_election_timeout_ms; @@ -100,11 +114,10 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { node_options_.raft_meta_uri = prefix + "/raft_meta"; node_options_.snapshot_uri = prefix + "/snapshot"; // node_options_.disable_cli = FLAGS_disable_cli; - node_ = std::make_unique("pikiwidb", braft::PeerId(addr)); // group_id + node_ = std::make_unique("pikiwidb", braft::PeerId(addr)); // group_id if (node_->init(node_options_) != 0) { node_.reset(); - LOG(ERROR) << "Fail to init raft node"; - return {EINVAL, "Fail to init raft node"}; + return ERROR_LOG_AND_STATUS("Failed to init raft node"); } return {0, "OK"}; @@ -112,7 +125,7 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { bool PRaft::IsLeader() const { if (!node_) { - LOG(ERROR) << "Node is not initialized"; + ERROR("Node is not initialized"); return false; } return node_->is_leader(); @@ -120,24 +133,24 @@ bool PRaft::IsLeader() const { std::string PRaft::GetLeaderId() const { if (!node_) { - LOG(ERROR) << "Node is not initialized"; - return std::string("Fail to get leader id"); + ERROR("Node is not initialized"); + return "Failed to get leader id"; } return node_->leader_id().to_string(); } std::string PRaft::GetNodeId() const { if (!node_) { - LOG(ERROR) << "Node is not initialized"; - return std::string("Fail to get node id"); + ERROR("Node is not initialized"); + return "Failed to get node id"; } return node_->node_id().to_string(); } std::string PRaft::GetGroupId() const { if (!node_) { - LOG(ERROR) << "Node is not initialized"; - return 
std::string("Fail to get cluster id"); + ERROR("Node is not initialized"); + return "Failed to get cluster id"; } return dbid_; } @@ -145,7 +158,7 @@ std::string PRaft::GetGroupId() const { braft::NodeStatus PRaft::GetNodeStatus() const { braft::NodeStatus node_status; if (!node_) { - LOG(ERROR) << "Node is not initialized"; + ERROR("Node is not initialized"); } else { node_->get_status(&node_status); } @@ -155,14 +168,13 @@ braft::NodeStatus PRaft::GetNodeStatus() const { butil::Status PRaft::GetListPeers(std::vector* peers) { if (!node_) { - LOG(ERROR) << "Node is not initialized"; - return {EINVAL, "Node is not initialized"}; + return ERROR_LOG_AND_STATUS("Node is not initialized"); } return node_->list_peers(peers); } // Gets the cluster id, which is used to initialize node -void PRaft::SendNodeInfoRequest(PClient *client) { +void PRaft::SendNodeInfoRequest(PClient* client) { assert(client); UnboundedBuffer req; @@ -171,7 +183,7 @@ void PRaft::SendNodeInfoRequest(PClient *client) { client->SendPacket(req); } -void PRaft::SendNodeAddRequest(PClient *client) { +void PRaft::SendNodeAddRequest(PClient* client) { assert(client); // Node id in braft are ip:port, the node id param in RAFT.NODE ADD cmd will be ignored. 
@@ -191,14 +203,14 @@ std::tuple PRaft::ProcessClusterJoinCmdResponse(PClient* client, cons assert(start); auto join_client = join_ctx_.GetClient(); if (!join_client) { - LOG(WARNING) << "No client when processing cluster join cmd response."; + WARN("No client when processing cluster join cmd response."); return std::make_tuple(0, true); } bool is_disconnect = true; std::string reply(start, len); if (reply.find("+OK") != std::string::npos) { - LOG(INFO) << "Joined Raft cluster, node id:" << PRAFT.GetNodeId() << "dbid:" << PRAFT.dbid_; + INFO("Joined Raft cluster, node id: {}, dbid: {}", PRAFT.GetNodeId(), PRAFT.dbid_); join_client->SetRes(CmdRes::kOK); join_client->SendPacket(join_client->Message()); is_disconnect = false; @@ -248,12 +260,12 @@ std::tuple PRaft::ProcessClusterJoinCmdResponse(PClient* client, cons PRAFT.SendNodeAddRequest(client); is_disconnect = false; } else { - LOG(ERROR) << "Joined Raft cluster fail, because of invalid raft_group_id"; + ERROR("Joined Raft cluster fail, because of invalid raft_group_id"); join_client->SetRes(CmdRes::kErrOther, "Invalid raft_group_id"); join_client->SendPacket(join_client->Message()); } } else { - LOG(ERROR) << "Joined Raft cluster fail, " << start; + ERROR("Joined Raft cluster fail, str: {}", start); join_client->SetRes(CmdRes::kErrOther, std::string(start, len)); join_client->SendPacket(join_client->Message()); } @@ -263,8 +275,7 @@ std::tuple PRaft::ProcessClusterJoinCmdResponse(PClient* client, cons butil::Status PRaft::AddPeer(const std::string& peer) { if (!node_) { - LOG(ERROR) << "Node is not initialized"; - return {EINVAL, "Node is not initialized"}; + return ERROR_LOG_AND_STATUS("Node is not initialized"); } braft::SynchronizedClosure done; @@ -272,7 +283,7 @@ butil::Status PRaft::AddPeer(const std::string& peer) { done.wait(); if (!done.status().ok()) { - LOG(WARNING) << "Fail to add peer " << peer << " to node " << node_->node_id() << ", status " << done.status(); + WARN("Failed to add peer {} to node {}, 
status: {}", peer, node_->node_id().to_string(), done.status().error_str()); return done.status(); } @@ -281,8 +292,7 @@ butil::Status PRaft::AddPeer(const std::string& peer) { butil::Status PRaft::RemovePeer(const std::string& peer) { if (!node_) { - LOG(ERROR) << "Node is not initialized"; - return {EINVAL, "Node is not initialized"}; + return ERROR_LOG_AND_STATUS("Node is not initialized"); } braft::SynchronizedClosure done; @@ -290,7 +300,8 @@ butil::Status PRaft::RemovePeer(const std::string& peer) { done.wait(); if (!done.status().ok()) { - LOG(WARNING) << "Fail to remove peer " << peer << " from node " << node_->node_id() << ", status " << done.status(); + WARN("Failed to remove peer {} from node {}, status: {}", peer, node_->node_id().to_string(), + done.status().error_str()); return done.status(); } @@ -300,8 +311,8 @@ butil::Status PRaft::RemovePeer(const std::string& peer) { void PRaft::OnJoinCmdConnectionFailed([[maybe_unused]] EventLoop* loop, const char* peer_ip, int port) { auto cli = join_ctx_.GetClient(); if (cli) { - cli->SetRes(CmdRes::kErrOther, - "ERR failed to connect to cluster for join, please check logs " + std::string(peer_ip) + ":" + std::to_string(port)); + cli->SetRes(CmdRes::kErrOther, "ERR failed to connect to cluster for join, please check logs " + + std::string(peer_ip) + ":" + std::to_string(port)); cli->SendPacket(cli->Message()); } } @@ -347,7 +358,7 @@ void PRaft::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done int PRaft::on_snapshot_load(braft::SnapshotReader* reader) { return 0; } void PRaft::on_leader_start(int64_t term) { - LOG(WARNING) << "Node " << node_->node_id() << "start to be leader, term" << term; + WARN("Node {} start to be leader, term={}", node_->node_id().to_string(), term); } void PRaft::on_leader_stop(const butil::Status& status) {} @@ -358,4 +369,4 @@ void PRaft::on_configuration_committed(const ::braft::Configuration& conf) {} void PRaft::on_stop_following(const 
::braft::LeaderChangeContext& ctx) {} void PRaft::on_start_following(const ::braft::LeaderChangeContext& ctx) {} -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/praft/praft.h b/src/praft/praft.h index 934285e92..8d8b1976a 100644 --- a/src/praft/praft.h +++ b/src/praft/praft.h @@ -7,7 +7,6 @@ #pragma once -#include #include #include #include @@ -17,15 +16,12 @@ #include "braft/configuration.h" #include "braft/raft.h" #include "braft/util.h" -#include "brpc/server.h" #include "brpc/controller.h" +#include "brpc/server.h" #include "butil/status.h" + #include "client.h" -#include "common.h" -#include "config.h" #include "event_loop.h" -#include "gflags/gflags.h" -#include "praft.pb.h" #include "tcp_connection.h" namespace pikiwidb { @@ -34,8 +30,6 @@ namespace pikiwidb { #define PRAFT PRaft::Instance() -extern PConfig g_config; - class JoinCmdContext { friend class PRaft; @@ -81,7 +75,7 @@ class JoinCmdContext { class PRaft : public braft::StateMachine { public: - PRaft() : server_(nullptr), node_(nullptr) {} + PRaft() : server_(nullptr), node_(nullptr) {} ~PRaft() override = default; @@ -90,7 +84,7 @@ class PRaft : public braft::StateMachine { //===--------------------------------------------------------------------===// // Braft API //===--------------------------------------------------------------------===// - butil::Status Init(std::string& cluster_id, bool initial_conf_is_null); + butil::Status Init(std::string& group_id, bool initial_conf_is_null); butil::Status AddPeer(const std::string& peer); butil::Status RemovePeer(const std::string& peer); butil::Status RaftRecvEntry(); @@ -98,13 +92,13 @@ class PRaft : public braft::StateMachine { void ShutDown(); void Join(); void Apply(braft::Task& task); - + //===--------------------------------------------------------------------===// // ClusterJoin command //===--------------------------------------------------------------------===// JoinCmdContext& GetJoinCtx() { return 
join_ctx_; } - void SendNodeInfoRequest(PClient *client); - void SendNodeAddRequest(PClient *client); + void SendNodeInfoRequest(PClient* client); + void SendNodeAddRequest(PClient* client); std::tuple ProcessClusterJoinCmdResponse(PClient* client, const char* start, int len); void OnJoinCmdConnectionFailed(EventLoop*, const char* peer_ip, int port); @@ -129,10 +123,10 @@ class PRaft : public braft::StateMachine { void on_error(const ::braft::Error& e) override; void on_configuration_committed(const ::braft::Configuration& conf) override; void on_stop_following(const ::braft::LeaderChangeContext& ctx) override; - void on_start_following(const ::braft::LeaderChangeContext& ctx) override;; + void on_start_following(const ::braft::LeaderChangeContext& ctx) override; private: - std::unique_ptr server_; // brpc + std::unique_ptr server_; // brpc std::unique_ptr node_; braft::NodeOptions node_options_; // options for raft node std::string raw_addr_; // ip:port of this node @@ -141,15 +135,4 @@ class PRaft : public braft::StateMachine { std::string dbid_; // dbid of group, }; -class DummyServiceImpl : public DummyService { -public: - explicit DummyServiceImpl(PRaft* praft) : praft_(praft) {} - void DummyMethod(::google::protobuf::RpcController* controller, - const ::pikiwidb::DummyRequest* request, - ::pikiwidb::DummyResponse* response, - ::google::protobuf::Closure* done) {} -private: - PRaft* praft_; -}; - -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb From 3bb3f4ca9ba9f7b7423a98dd941e998a83acf31b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Mon, 25 Mar 2024 11:03:01 +0800 Subject: [PATCH 10/33] fix: Rebase import-braft to unstable (#232) rebase to unstable branch --- .github/workflows/issue-translator.yml | 15 ++ CMakeLists.txt | 2 +- README.md | 6 +- README_CN.md | 6 +- docs/images/pikiwidb-logo.png | Bin 0 -> 99671 bytes src/base_cmd.cc | 9 + 
src/base_cmd.h | 29 ++- src/cmd_hash.cc | 33 +-- src/cmd_keys.cc | 99 +++++++- src/cmd_keys.h | 44 ++++ src/cmd_kv.cc | 59 +++-- src/cmd_list.cc | 125 +++++++++- src/cmd_list.h | 54 ++++ src/cmd_set.cc | 92 ++++++- src/cmd_set.h | 33 +++ src/cmd_table_manager.cc | 21 ++ src/cmd_zset.cc | 326 +++++++++++++++++++++++++ src/cmd_zset.h | 70 ++++++ src/db.cpp | 32 +++ src/db.h | 58 +++++ src/storage/include/storage/storage.h | 7 +- src/storage/src/storage.cc | 15 +- src/store.cc | 36 +-- src/store.h | 13 +- tests/key_test.go | 89 +++++++ tests/list_test.go | 106 ++++++++ tests/set_test.go | 60 +++++ tests/string_test.go | 6 +- tests/zset_test.go | 147 ++++++++++- 29 files changed, 1479 insertions(+), 113 deletions(-) create mode 100644 .github/workflows/issue-translator.yml create mode 100644 docs/images/pikiwidb-logo.png create mode 100644 src/cmd_zset.cc create mode 100644 src/cmd_zset.h create mode 100644 src/db.cpp create mode 100644 src/db.h diff --git a/.github/workflows/issue-translator.yml b/.github/workflows/issue-translator.yml new file mode 100644 index 000000000..8eeb05f68 --- /dev/null +++ b/.github/workflows/issue-translator.yml @@ -0,0 +1,15 @@ +name: Issue Translator +on: + issue_comment: + types: [created] + issues: + types: [opened] + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: usthe/issues-translate-action@v2.7 + with: + IS_MODIFY_TITLE: false + CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. 
\ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 6d0831901..506a07586 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -88,7 +88,7 @@ ENDIF () SET(EP_BASE_SUFFIX "buildtrees") SET_PROPERTY(DIRECTORY PROPERTY EP_BASE ${CMAKE_CURRENT_SOURCE_DIR}/${EP_BASE_SUFFIX}) -LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake/modules/") +SET(CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake/modules/") SET(STAGED_INSTALL_PREFIX ${CMAKE_CURRENT_SOURCE_DIR}/deps) SET(CMAKE_UTILS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/utils) SET(INSTALL_INCLUDEDIR ${STAGED_INSTALL_PREFIX}/include) diff --git a/README.md b/README.md index 459745a64..6bce1e508 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # PikiwiDB - +![](docs/images/pikiwidb-logo.png) [中文](README_CN.md) -A C++11 implementation of Redis Server, use Leveldb for persist storage.(not including cluster yet) +A C++11 implementation of Redis Server, use RocksDB for persist storage.(not including cluster yet) ## Requirements * C++11 @@ -13,7 +13,7 @@ A C++11 implementation of Redis Server, use Leveldb for persist storage.(not inc I added three commands(ldel, skeys, hgets) for demonstration. ## Persistence: Not limited to memory - Leveldb can be configured as backend for PikiwiDB. + RocksDB can be configured as backend for PikiwiDB. ## Fully compatible with redis You can test PikiwiDB with redis-cli, redis-benchmark, or use redis as master with PikiwiDB as slave or conversely, it also can work with redis sentinel. 
diff --git a/README_CN.md b/README_CN.md index 16b4004b8..9f41a66a2 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,8 +1,8 @@ # PikiwiDB - +![](docs/images/pikiwidb-logo.png) [Click me switch to English](README.en.md) -C++11实现的增强版Redis服务器,使用Leveldb作为持久化存储引擎。(集群支持尚正在计划中) +C++11实现的增强版Redis服务器,使用RocksDB作为持久化存储引擎。(集群支持尚正在计划中) ## 环境需求 * C++11、CMake @@ -39,7 +39,7 @@ C++11实现的增强版Redis服务器,使用Leveldb作为持久化存储引擎 这些特性PikiwiDB都有:-) ## 持久化:内存不再是上限 - Leveldb可以配置为PikiwiDB的持久化存储引擎,可以存储更多的数据。 + RocksDB可以配置为PikiwiDB的持久化存储引擎,可以存储更多的数据。 ## 命令列表 diff --git a/docs/images/pikiwidb-logo.png b/docs/images/pikiwidb-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..69dd9060ef600fc8958efceed59a8f3a4f191d4e GIT binary patch literal 99671 zcmeEuXHb*f_iZeQAcFKNs5I#!(viFtKtMoRLXT1fq_?1fC{m$}{XR4-JmN!SRAN$cN^07t&*`~&`2~eV#U-WHHMMp1 z4UJ9Bon7eep5E_${i9>!6O&WFre`o%-0$U;)wT5v!tUPw0r8M@bo{Sf=KvS}x6A&A zVgH9+R41G}fAQjli?sjRb?$rs^?iZ)BF#1-UVg=5I~{a27EdHmLBl; z^4A4_SHa(n@HZCx4Tpab;V&BgMZ>>n_!kZTqTydO{ELQv(eN)C{zb#TX!sWm|Dxev zH2jN(f6?$S8vf^@;e`%$HPU-x;?H|oERg9g45AYm$L+;i+ss1WS3R5#|@xE_L6iyB5?6F;GZ5mWc@T2y04}6JTOfLdGar(&(h} zm^FUz+2!_SaVYq}Scyy8DV!b9#Zr6tB!Hj`frYz37UXzlHFFLI%l zY9}2HNE&dv#c630r^5$$=jMd`!zjL##78Br{1;hrG&MRu%dagacHau;+$nD4m5f+6que;wFh-3-FT4}q z7>DBokv21Hi}h%S%>F?sWow+MgKydZzszYad3QvM|5I z{a9DvPm}0->ZEy4!mYYpxXQ8ifZnDd!yx-D^;aS~x#@}eCl$A~OpfN-B?Ni}bS(nT z05STN{C#JDM$Z*mUN?@?iK@fb3JrRFYg6Ko>N5ad7ZM3<&_XZ1TM(asyPpAQ7N7>b z%>nTq_l7Uc|7pMW6;B)0`T%3L-=^-oBagc5oSN0 zD}O;ToZ(cOWVv?KnSH~a1kCFRdZdh*Ghlqyy_)QOvcru2tZA94%NZG_!o~21(~|uI z(x{!DO#*i(smFGbu{Jm5rB3bFMvpbf#l3h|xonLDay<(!CQwZSU;{im_s`$Uf4Km*Og11b z665ilT?5yhh&Cia%wjzJ+A2Xat7ws=ibgT}BEnNG;9)CYC!EIBG#INYO3gMDL9fFq zlZq-odc1N>lrpj;6?}5uaR#naSbpg9E;>E9qTqAQ)i*KNNWy#L8{FJsfidX37e_3- zAb?}Yzlie;z=Mmt+gLK#X4!*JfqIu&xS1w9pBNYCPMiU_y1j-+RkCYOc>3%F0?z<_ z#wm8ZG2mC~e)?dzBVON9Q}HR`egJXd3=o1h^B@CHSko}Cls<@W))HtVt5kQUZ&JEQ 
z>ztoiPO|MK>ZV*h8smRhBi^KuXvk`^YVA|&!0xfbS=dDNTprp((#c6lgHfo$|oVner^KTsN^I;kz>mnJz`9q1ZH&P1)vla+AhP_p;$l)#iVTxa`bX^+i6P&zZl&4BGp`r4 z3*$Mb&Hzh=D7^LM3l&rED7T^3?{vMvL}1({?XG+IcGe`n;M_n#kY;RhyqK;Q&l85a z!@z(MEh22ZCJ)UgoFP(RD2lT$6*G2zcsM{pFrH@Gk0&P?WEF_%Nd0p(2aZ8$L#+)Y zhrSv-f%gG&$19i50J7#9kih&gJ(&2q=nlUu65e3~T&nq*ngk++5wsqKJsk}N));i@ z@hhd$@I&vSI(`*FmoDCGhwXl7aJOw@|91woYZ9tjG+kE0pY%#Sz0Uw2%DT;_^~OD> z$qBf?v6#|(RjB zUl4f)=m5c&ibiO8cD5u4#!pUX9?!PQV<1fIV8@q3Zm^q0$q9Wo9WUx^oTn8prCnHF zd^DVEwRMWXKrY(_t?OCJ5ALZp=I2*fs-X8Y#UwfwnBCe5{W+b%{WSDsTD4Xtbz^oIpq9QIC-``gZD1bpKW+~anYgUi|6zMp_SM0_dWOfDGVBI%Qt^|9544sA2 z62Lw2>}3!{JT$>(ifA6X_Nu&M(^NIEzuggUnHb4BrV!dsgctJt?EQ=^aQ_{8k$| zV1_b&2Dtbf@6-#4L$KBjUpsk(PYZWfyMRS~u#qPa-{YiMjVHG%4uY8b&Hue)O&e&z0fCsgvWmu&Y~*F!F#vx;(NTQ&s36jcQIWw4>9Mi4}=c{N<^ zmBzY@3{@$$&YpXp4UQZ1;Tya1=|!3C3qDPrc1}*4Hc{=76myl6gluLXTjz;9@!(P5 zS_EWI8eX3BU-0oCj;9xd5|#1-9em?%kK`w6a|wC%ysc?zxmNt)OO4z$w)5HmQ_Z`X zd6zYfGk@oCbt~%_CDqz-l{JXWXba7{f7dH661$h1~O_e-a zBjAXl2|w|`?wN$*nJ^6uimOzAu!y{F-=C;mnBk*+Z!AD*1Db`YlXfsh^V3DgcMBYp z3HpGwkzRJ0^J99#9Gg2;gY)nX%`?FBXid{JBKkBlvr+OYvKx76p%{Q$d2=7+_h_xs z^uzD-%1EQM5%@Z%xgyg=H3J>#;TgPR;Df^vaL1UpjhCJj(kC%k8fvXqAahMgWAR9C zu08jt+3$B+UXAh_1WDZBgmq|CRtQx$+))Vfd}|4fMcVf(iCtu8)7ngc`3 zyu5VxiT#fUu%FLf`v!*#=SyJaXR20zPnxZh}}p5AP}`_A@k#-ns9?FkvAMprpEZV+T9qifrW zL%;#g**!Y};DZf1;VGDB#}X;F%78f6iGqF3W6-2tS0`WJ?G~Ub8piqH&omgT0rqQ3Pj{|}{fv&ko8_*I-yU|=N=@T1 zE#bbm?SnZTH18CZ7DzT-S23$Z^H=m+aU2DE#_@H0yU_0V{p=pr@BGd zikmhVXTzFSQ{%FCT<`Wl?_QG4;JClbx@tk6@w)2Lrg9VX{dJ4kH)`*dt!AVIw>(Yi zXBU>i`coi}j3%m6DO9H_e{o=j_G10ko@={$gazNwc*v;GFv~L%d0(^JuNOocbs!cz z#FT&fcy3pn^~+ebDHJ8;ZsYn(ynnzWu$u^?dbAK9r?C7(EN2{Asnarua8Gnepc+ zC&kgiu=SsVBvqNBKK5;|dS{&slhYaBanH{h+Hb0oe(TdSMIO1!3)u^1WuF0nz!lFb;tIh!V zq>Ymtk!|)=4)$`(A?P07OF-M;f{V`^{i}lnkX17td388ju`R7(FsESFA0giMgXt>O z0|D@*Pe3NW1_Ec7$22WAEmWQy9q3kBcLun6!hV{4aN!f{ScK=t>zlPF+yLw6sL7|(p7 zoZsZ}S{CcFgD~(P>+>8)om)8F$+b#v;NL(z)u-1zw36zUqh{b~@nVHdev^dWi75~N z$qDj|>)-;s;|K8k&w*0}Sq^623}Cvk0IlgDifGQXgyvoxhkfMrWC-`}XOAkejox^6 
znoPN=oAgIr!qZJ;mYCfoGDP4j-5%l}HG{mIsd3TC%5d2dTD)#h2xO@AaOh~F<_T#t zcGbsVfAGq(X`o;G+!V+kO#PjxNL>V5u#2Z<6a|vg5~u{<)q|it(Z!YIkHMJAUDH>h zu|UQ@>bITUSc3Ac4-x` z(th)WQq3*gtFnk3J`nuf!0t&f`16u?pGlZ_Qz3uctwQlQ#jEd=amE7dHes(6I1%hT zekF#L>p}4mi`wm+xNM;g{NSrJ+3&KSGqd;_m?O~`0`#e-l%{bwOa8iM4F15ixDk_4 zWRztwS8J7{JZ5}4gxp)Br2VadQfr{d0B7f73u`mT%uPd#_kHpoydp6_B%O6a}y8Ln(ui|=L=ShmWZ{b%A&NK2YGpx{Y@hu%K zqvake)Ax#59}mq_(ib;$&i`JbpJm}w0J>J9lP|1appsx*WRy7;gHh5}>Su}eO14i= zbh|eO1n$6>k`Z|QmNNiw_%yMdt(W+j$6USJu5`V{Jb&-Wd$Ou&9a-{CqnR7*Qz~$- z^m4Ec@I{>g9Qmqr=XKK_W&DeiYX3&uo$J|2x0A^9h%(3<{kL+5Vwi{!AqVovqU=lmTMo z^7bqd5)I*(^g+1?oJM0~Q5ms$9vLV{OUL=0mWjN5;<5S9mEQ#S+NKJ)TSqp(t<=C9 zZ*nk8NnZ;}R7N3lY8iw@jP!QAPF-1QcA07rd4t|>sHY68L@3PI5GN;FQ1XG)@x-Y{ zI#@+s1fHv_P=$q!PoLP5zOM*N8YBr2BjR@6#hhp3ew?^<;cn`H04QSX@-f(GnV5a+ zxvW~1p$qDwr&{2A;%eGN;~U2DjEQ3J&aD|bb7>WXrpKkCU>i>KXiGB@_5dxD0>Ri! z$(0ouyh#UnYiLHnC_Vppy<2Ao63~ z+0}U1M+Av%BhO4+KiC<_Jq$p}QR(TWe*siY)Rfs-fa_IMqVCLze%5pc*G%W0X3H~L z!z#m?i4WK|3rKC=x}08vn#V=3p1F9dsNU@(_o(|6JrJFoi?!@u5Dws$r7kj>%auJ(;$1#~9`ye@yImCW!* zE!$kEDUF2*6u+f(tU2oGGjJk@_SPIVZM0L2Dn1GfEIJ4l`7(~&p=^!C-))c)ZT1>J z7KU1?yIUD>7_~I}?!99DJxPqO>vs{U*e3Vu&j;2i4!(@UfbvhlRTovKGT zM3~XT%5z-_et4t6Sy7s_;b&f9)0tBxTEzUI8zluinPQl$@YmaZO7Iv#1tcEn(3`wo zCdEp0JAn1_Q9h{iXgBb!#+Iz-+kO7+9Tjv+;Y&uOuiAi=GpK{r5H*4jwxmU}mL)DA zgIJ-9Nm)7Bny%?5dD$&*b#2eD?94Sz*X|q?Qzn>r>;|_;7dnsA8bqr)3D%6(M@>zn zJm$e5DR+HK)jex(!tmOgZ~6s1(wR4?6)2;H%hzVl1mCg_599zy5YUMB-xkdal($HU=#bf6KP z;I8Xvu3+bB8$;r!Rs%^7|1aY+Agisib``{&<3P$bw^-L}%HKTV0m1y#!>6yHs&TLdo;U4d zVjUB;6)nr?(G@URB4t+Jy-4}rrP@opFy+V|5=%ut8!UaywDMZBW zalOFXt0r5As7LA4$ol@p@AHmTr}fOUfp;DRkX5+8#eBb0WBrIQ^w9d2sC4Rc?OX4b z(a7Y3mz2pT4KyF{9NONL-2wsDWW?P#d>)V~=Vd?K(R4Nx(yh?8Esl7^xuaquSh%y@ zQ$%N}yCv2eX;XUN^uEQ6*!sWOTQ&b(trrgD=TJw?c?X_9OYLQjnUHf53_PWuh~VvURU%3%t>O2?Fr;=B$_D#9H^mX-yNWt8**bF!aj5Ml6; z^!hlMs!fJRjp2~^bl^Nna6{%-0Oj&ERE@zF^AaQ;$R4b+j$p=;ASJz6kIN+0&IP7% zH6YDSv|CP-o`A`eU+g36)+|ZbeJJs`yD-Dfsl|A_q5?$Rl~P&Q=rTD;T0ckD5{PQ& 
z_F{^F31GHm!dAN*wH?eS12a~kulfrGM9PekdN6l48F{61^vz<1LS2^1Y^&I}!a66) z@%y5YCA!R^WP_{0(XQ_s8ziwYdy&N}gg#eE#|ttY4hNo9CGnw-Uk?aoQH|+|CioOS z*=u<^S${yG>^cbrbfXogH^4`}aGQh&UNqyn_iIP2{vp@vU5vbEP#2{zE$FB@v(f)c zl%c@jMpSe1hEYZlD0AH;wkgNxZYa(-AIU8&46U7*EGewD?=j3Y$+V;0R2|tgorqQ+xMyw&nE{#{&w0reE?oJ;-{XR+LA^&0L>46CIBAE4BCetA4(PmCdUZu1 zTWsxB2guDoI8L%mJyAN@J_YWyb!&Z3;1f@5!wF|`S}L{PL!oQV6`H9(_J( z1YIVdn$BI56~f=pu*y~6TAYMZhp4#SS;@Pud9)k=W|cnICNz3z|J85QE7|~4WJLWV zuR0WG8B@W9)*NsryMqJ0ZsdJp#)QiLe8yg~Y2t|LV#xR_Do8KY#kLQYQo7tnQvvHE ze>!!a2Qr{$W?UYA3+i5w=4~k5@2|;=yN!xw_Z)C`C~|dfP`q ziJQm#))sJ`T7)qCs%6R__hYpO`P3y9G{>_g3(Gv#{gGS==)Q2Cik{Cz8dn7iJ3ZzP}AWV`q$f!mqbxf+Bj zQE%k?C&nFoTW4x1sTI4Os0;b+^TO|HE9>iCORYC_ar#Y|E%{xBFa2(Js&63)pU_M0 zACPEB-+4{zPONy5hpffOye^Z|)O6!At@vh?M`W-dEO(L-Iqw3*CM=uAk6plq>GQ6& zWeInl0fPQ??Hrymf7?)i<9%$4Qrt>O>Y5)f5VKT(zrL%{SnJO>*QX~WZqOt41Cs?6 zxP@lA*6`H%J6uV-Z^a+L>P0nHHB>C&Ph+@rxVjNG+1S!uqUr6b%N-4AxWABqf%& zPz~D9K$RPTzf8&w|+A)?+71vOcI1@M! z_Tve0<#)(6O}H`A&KCLhZ&&-@ajzEg{Y~YOLeN$QQxrKA3M?Mp4@*?2%+?c zRxWpOjcGLvB4~dn3cq*t;icuKjtD+YQH*)n8wU3o5I$&Cj8X6puo~?(stXFfJviPx zs*T?LV|;Aqva%N+yiVq6Wh$7gHw&cf^mS_vn{0bFr&3ukt-wj@p;7Qpc-I{_MKB3? 
z2GF}k+dQ%ZUyyR&gAXh701xyakHWes0~IbK5#eplbx|E&4TttKd9gX`E(sBP8oD^G z&ld5cdr$NT5N(@S*!w|OwfhE^-2*kQ7$+LI9d!&Qj#MnQ1ow&o-D3Sp{<@mjWiN7> zwe*@~IlPTQf0QXhQW=@sWeaq4c~+pKPG{l1j`eIzolJIXu>vFq?#a34ByV-#B`i)G53Y_~& z8vBm?)`P_AOa-Fgt!@--y=y_b`|wqx{fUK0??NP7ld%?E$vRcF8eLH>0WKA(R=ij6 z9Rw3&FL#XaVXb-aTD}j%=yO(HF2Y2h-tj} zw81xSI@?mqmNy_WXJWoQnYW+WR(RsrDt!joSpGiP-N@N}_I1s_Yo7yY)CS}^sP+@@jjQvo);G3TE3YJiw9XZ;L-3|y z=f}Wf@TDApl6hEto)cN?Z9jL4@U^+#y{r8gh%;}>TjfjbNmpWy`B96{t00<%*tzRg zQt&JL{`${|$pv{edOc)$KgW3KIwn3~giUPnU=G|W8K|m?AKwd=(T8liN!7-MD8Siw-?<-kpjpUBRT|&L{M-JT9x&nSH zWwAj>Hsuc9B}_`${BdKtOWoe`(5T>>l#D?7jf4YN zrkMvIe1CO)o-08av|ey=sG2wVyw45CyWx^nvND%8$xuefq=IX8LtEdmDw@=X~jkCb`v-V`TZ!)!YV-NZI5H{7eQ_``a0`8fD zS8vRCbZaG^=iNMxMMRrpoc`%(E&DW}X`=w5eJL4pxjzA-5#Hm;V7KK7t-KcFBGOjL z)2@>z&wX#|zLew7MXJ%-xRqj(aZ>r2fiA}cPe0B1lhlT0WVmhvR)`=cLOlty^s}nV+U*MQh zHN8O0lU6fC&FaYwei4JHF&)3#C+B27D6H!)k+64jMx@oC>7VOdg&G{DZsETXE=2G= zZ;6BKc@Q(F@UxVgmy91NXY0wZpeEG{tpqLyfvmI{>Pfv>{jfx{V=Glj(R$NmL$&t6 z2I8wg8<}7I0u#W;r7%-U3(n=aU7VHJX-cg5?}DvVQ%T4+hfT`VL{YlHduf(Ii^!RSqYOXU$ALM zUdkxWul{AF>-@qFy5?~!Y`LKUxPBLHn{ZQ1O-gig?&?02P=jtwOk?pBjo{Nlf4=(4 z0Y)P^b>U&Cr>U%&`9rR#XBCm?XVE62N}UxNS6{&x2%^m zpFPAbY7qwKKHrzUS`;SoDIP>u2mcU7-hezr6bPGpYjrH#)E``^RE${bHQ1RP66-uo zlV5y1oPmhXzV^UJU3q|qvdqw*cLw0u6~uP{}U zu3)z|-ZH8?){x+y#X;=-ReW!?fuAF&~7PZ+*k8bnd+Rq+j8 zOZ(_a1pP`V!j!2A{(*k0V4wEb!ZcnDCS05{Q)i=qUx}%uEW67Tsrtj}gLsu!_dnuo zm{JJ&mXwtT-SV6i8iqu}Bbz@ZVWKCTS`@mE8N?I#M8R~6M;hJ-_hiq1l~-*Mpb<+t zf(^@X^AdgHHGws_^%v5XB#-xDTfy#1OIm$FM@>qJ0n2{|1{+#}Wcn#d{0wEYQLg<{ zb&c>3bq(&XN<2o1dsPu$RLNnD3Z$-G+)#=HX=tmOHpv5@guV6s!N#}wHHDv;H!IH; z=RwGaX`7qJ#^Y&{jjFeD%NOt4$YIj1t60R@fdYc=4+OkiL5#ldSSu^vlPx|2q;fO9 zLLhhUe2qh3QTDdf{1gsc%B&}=A-PYUwlB5>Ob4@*BGoEdyii+(!>WhZ+aaH0V^NOA z0YL)O9s7uq!QlkYLV?st;v6JZH~SDJ^OZQcT#W+Cpqm2g8q_Gs3(P%pO>sU(hbR&4 zXi!}PF?%>TwS9;6ghpH=lswaSj+w`rco;bGZjAJbomMBg8`1jMR$ijaNr7{>xbFUKl?YycY_o5 zo_Y6>-1Pc1>eNLCd5hjy8%*QpAI~YI)%aMbgM}TB3ifO9)zYAxZsWR5T=MhLlzOj~ zLEF>q%zhc`S1+g>hnAnx`3ya(wInd}dVR|M#`|xOtEu1t8dbgF9$x6BpStx&Ny|o@ 
zK3kxDG0MPj{=JnvX+eB?vh|Kp5i>zI^i>Afr14gTp2f1&*VGD|kru1bV{4KfSABK+ zy$sNyUCQqb154*&^tHH*mcEy@E!FTxFlFkBXnqOjc5{=VFjG1f@z2fxyz2qkmXDAf zcwmP`Kv;g9gH{-pU^>+ujwc63-+FRkgE{`j5jp0cuP4Kil(43V3!sV!oI$?UNXCus zpDV@#rCyYvs6m{#yH37S(&*B}XK66E#xADtG?(bVTCHAloABo<`c4t}CcdjXraw$q z37gjy=*P!uq}^TayEhdYpm*P;cm5DLP_iF}*fJ$Q*cj3B$%l8SoB^D1uLxQV*!9w%nE0y31vSzRMovM&djW`p@iyw18EkFj!NI*;_qjy2|H#L$4BjDo25_y=oSa|2Kw*|LUfMxlnHG$9y`-m# z*@;&(_mil85-MD36+*H1(w5M;DPaBa^!mwNEca;M`(rbxRCopG4vIL*8cT8yb&x15 zh{D=j?4RJv=AQ`VL(NIxDXxYEPJUWqKL`^- zGp{y@9?x)F$As(0Q;!UXWMCWx*DmD6Z-AW0C7r*Xm`#?@Vy>6poPF%P&$ROyN3 z&jDQ46;qJpt}mJ4eNFA?#UF0=4UvlI4CF97*H09Ochh5wS77(UyH}yw_p(ydG-0gbi&;JDpx6oY(gy>%kXENvVnEOlGW;Y+KZQnjuHqZ73+16u;&&NShg5OAdWg*kBn zBFLGRY&y?0Ew9McoUN`Y*U-iVwc5~7AIWTeT==_nPH@d(8R4!JrTuO%&G=oZ-R ziB8-%3NB(zjkw}ZCH%=u=X0X#HLbCmHV~aY3$+O>@uQ9tkG1$wT&2rz!rcjFdWTptr zxbwqAZM+rB4o&tdwx=n8JN{}4bN5n)2w2i2;_g?#-6Q+2_B#dS+fWDXW{TWu-Oc_j z9;@qVBZ(;!8OeJa>pfHYYLWG0DVwIUC%0c5$dlMu&9-%sa4pD7Gj?8>*;6l_ znYXi|Dy%4Oo+pFI%@%w=SH$snBVjSh-98w;oSCrhU&c9*u%{sdR5VCFG(JskWy}+{ z>>iTHTDL`}wa7WK!9-nc!u*;L)o6k?CuyRgo?1oEH~CdKO*qI1TJy$vSm4yVuj0TErg zG^a(0j&kUct||0$QA-xQQnOh(haz2Mmlk_uVJW7-DD_r__{W4(FHFFPPcp2H22AK* zPp?01E}E?34t|j~URot!^5afbfl-ADG=*N5S<}~xO=7fkZ8Ry&lzS``Clpiw2^Gi< z`wwoN=6A(XH#3lya??d%WxJGXe1iWXHOKg}Kvk z#il0T+C8-}*T`wROXgRCK-#@lZe3;`CHPe?0_ef}V{{eVv`;BQvH$C$MPd=RZBRHZ z=fku~VIGq_@wxVqQs1$Z99}4i!v9^)qJ8n+_gz>&s>UR!9w^yiCr)PjJt5=#A;&0q zP0%!j!4QcO?e~ zWx&+P7y;R1VQeY}ZNR#TC!i$P2|Mf$y_&uJlYLBuq4inWCAIrbDbnYb%)5JFoSnwe zbvS9hzGY4)KVQ6?>b>+mu&gxd#qh5}vePWwZfDwkuYZ0ysEim)((_v0nMFBeU__@; zw(hgBvtZ>J*+s4ObGRXaj9<`M>@wpUAF==ov>%};brjenSG?!Fr)ooNsoz{dgtzle zG}JCJ(v=Ln@xfPWsM<_5D4d%ig6oOaF_!Tp(Zx++zniHBzo=3w=!Ucug$8!7HvU54 z&0752o%b6Zii!kQ>;3jb_!Aey-|=xaQu6*gV6?P&g%liRDPN-MZ#8MnqP&U$ha1C& zyB4k(kTd0y#vmkyJ6`caQ$bI)QA-){uR>cAz|A-0`z4=Rlp-(@;Zoz=bW+Pg9L(ha z`%SaQu2fy+#xvR+rUIE!mGqGK*mmJM8nz}M-+F0(nh<=YRDJG~+MW99 z*m5qn>=~@sSjxHjCoO5Wr!^ha!+-_7qc+LoIGn-tCQYKLoD+zruBSMn6%H2%0{r>-))~+){NlV2k^_T3LVKcs&S7I4JSP6wqns?eW7( 
zA!9G3)mmuem0PL41dWU*G2b`dO@C~zA8FE8&p5YVd<+452f`!OZ`oP4S`?)j_2y%f z(+yLNtu?i^mA&&fC-z_N%BnL#1@&ag@yIKf(Fq@HQFz;S%1qCgn?gFcx!HewFP;>T zcEXG>ncS>33+AH?jr}<;=&lRT$aOvgaMzjDNj=Bh*Ts-sH5-R{Y#N1dGdsDjes1-rEOvUbteNBP<2Gbsz#?k?o)df!+>mDS$U+#4!@UTV1W$P4< z^)Kx~R1MZ=_mTN|T%?vb7mkKnOz@@h`K;4*Q%q!kFl-xap_(_IHZt2$(xlj-d$0?T{Um(vk1at?SZjzvry1^3#w1m;5><@cZeqh?r_eJKl?Avn0H^bA z#9cVLtb38!Pllb8p&ipilQSl7`NY!xLEbH{hrvNu&QXtE6x&*>C@(1wJ)I3v3GS~q zep?ZSX-#8gG!-EJNDIHyq+lAGlB828Lu-+{l9k_|L3t`qfWQt(_ zQ~l|#r^DSmSGPouwSOx+vP$)Um*T`qLi09(bxQH!1R1$*O{q7^*p1)jJz8NE7MU#6 znh^`TgwO`o0m3Xcm9}JFG-BLY-pdJLe~7qdPpB}S2{Movbe0t6m}=v>_T!Fnn3-{M z2BL;IbSHROSU=HZDw|qBXh40f=n{*@YHjJ-vt0vC^&+lVTP0-;5{E;kUR_>OxGSwv zqBb>VbXvUlM8~-V-w$V~SY19A zc^PfZ8xRXg8(&gvF5}bve|W;T$({lTlHXN;vu!?bqqs21Ys(P_ddR_%9h7l z>D`ieUM*Rb1}ILTo4e0NnULWe&t0XaxVs^VD=}c!PJPA0Wc$78;=Zy1QM&vCuaxp5 z$j_Y+$s=f9RC4*|Y(I}dp0mSLmncFyw=b12S_|sR4zKm;Yz+28qo|D1 zs`Y;{$~B+P*x;RLQn>VS6UKYGjLIl=GTOF~avK#DQ;Mb35MGBk?@PipB8Xo}VfnM7 zpQczg+OOd~V{tJO4QX;4^~@d1J=OQ(u%73@;-#^u@eXPe#Wlj+6!Bn2&&j)G%O;rj zLwmiq`}MkgjN_P?#WP2}3}dVneJ5rUmwzMo?_;90^{sp|o`tj|mo42T-z%Tb|1)Y> z-Qu?A`&wv+e4Q)>r6oQ*4OORm2|V9=f#CDa!sdP6;)X?gRV8!^s}=HvAQ06GF4N_j zI5>Un6<9ExaTHcjm_gH4kW$N`;+Rq7Jkk@~x1xP=AG^a$n%a-R&Gn+C1((_dyH}Jp z)j4UX&3pVD!=(;^>INa9*;k|nN}9Qv&j8@}WtQwl6)p!ZwzAN2eQ_h3*;L?9E7lY| zQ@EO97$y^`-oJ+NJa7f0ZGHF_rQ>9-$03CeVnE_HqhUZ-VR~Pxn1$gnc{!S!L=1qx zAOU;T=}8f}MUiUfa7_ps*k zP1}o6`R^bfR$)Md=a9NNgvI_difP5OK)-f(b4RLY*^D$&o{ zV@k2MN03N{TIAK;e)^hNhubUlIo5ilQn)dJH>k?cVoHbYjIYvfg=ok3Abp zMl(H0pI@wuOZ0?+=__MqVomwA@8u8oc(4S2N8rt^ALR4EPuK-IjC;O`$CI3xB5c@h znjH0?`*|W9y!N;|*I8{fRoBl@GEwY~%%OrQ4*b)DA@2Z@s}Q8GU4I6^y(Fl6W(4Cr z$d8n}dUO-Sigz`cix-7#r)uHDW<%$XADVg-ZRD3n^~~WtHpK%ISv0>L2j8>|BMtZ=+=w+{9t{`ET zk~cD3-qZWwvbb;a@&T2cg2mYJS`ezG)D@$~il#PQ%8+C<`d(2M719x(+qg&`#LuMV zFEp$Nb`U}35K+XR4WMEBoBIsBd)agBq+=9@n0e@upE^iku zvN~qY%?;Km;FNk(E4D79(t4m8`(HBPgj(mxwy|Rbx0f`#uy7fu-_yS?QBR_~r7KYG zTdOXWOnsvg-PHxURO-=|7y>f7D_ojsD~xb{&5GNsLp%XXRaWOL1hOGY;;k1&R+H|; 
zk5nTIiZ+jPHWviC7$StcoV6(T(yos9HnEh@o&o4Ssp%ymS{QX3$E_v}Yll;Npi=v3 zB|l0D{&w;D)JlKM89*KZlv_@u7RHb|hjG;_)n5KB!Rt%>!qkcAQNy*<_F)?_0SD)7 z+5lpTTUot#gJy%XzBB4ZX!!YhZ zVL(ZjWoieQ0$k9|z_d;c*r{DPeyqP;z%amn2H30{C|nph1AJ=Th@ak`YOZL@g))7h ziUS|vc$2K%EI84f(EfF8Ma!NPj~_sj^WXCN`33>89EBo&PkXOuTBoyqihk?^$yx$i81>d6l)~r z8}ed3&&-3(x5oz7#0Qw;df;DdSfRpQHeAn3u!8jFD0Wc6t6;cIOyIi8$K|rn{+mL} z?>fvI^-9$AWz20|-YDuaJvyLAB^u0x{MY}W*d30wIY&@LFNSA};XOhqqR_IX#ppUq z>QWAM#cI8`8>_5~XK;NMd}z=XqzJBW9;?feDl&;S8`ey_S{w@d|a+ zTT;9jMqRvL?#E&1Zm?3HCzQIfT=4aQlBSZ3Zf{GrZC`4!hm>itzv~fq8>WfP-;h)g zzgG(Cvc+#yax_Gu18o$Q`>L9Pt+VVJ-w4=CB_Ld z$#a6DGo2@L3_ELqS#U1uo>4_DwI~+)k5N~0Zt(Sq=R#YSOfhiI6L$Emu*+dLN7^|a zIiI`zw$WPe;dk9j{!Smy0EI^aqprl)&xo$9`}RNZek@>UHtkX-nTcaIIk_kfdo)>` z1 z;Y*(d@Yb#?8C%p6Ic-eq%Sx{52jmy9r-`)Odyi#1uo8p(w{c8r1AMoXzQmO0Jk%Wd z47BKLPZg+{Wz0E%(|M~!c9-cCg;&rb@Q%g@gvK&d^O z!AYi8ZAczR?x=v~c!5=EDVSBMQu8NhL7>h^t!;&bxjg)nnl&}ReWJ#df%*K{QmAVW zMKOLQ(Hd1d^-i9}^JyH#;6w0jGXuFKw+ow5Vj>BHjgJGFQ)MQrA%&2ij?qz_j!l;y zNC{pc8KRX;Y7JuocZXQe#-VPY{JP^^^+~WRHcYuu`x=r@aRHfIHQzY$p%cMdeGu8d-HIK+}A-CVXix)$Mf## z)O6s47ef{gruJf8guNilVrhGr0HiYjhKbtM#Y!^J#@A;p?R;ofs8~96>Q+}7lV3xA zA+V#~*Zpyoa#+?hQCUq_FE@ca1 z+U2oxMan%Zr1HIR*l)Pv&26d4(D>ZMVTXZ<* zEGD?-^u@mMQm`_nwJUOuTJVt=yvvZr`z|LWFKvbN`Y2eC_wz`%X1W;db=Z-083WVdIzX(4>C; zUQyg&6-ljzhG$|8^KyVQ9&Ox-$pf=SW=X2^Cslyx3tk_HjpT7`m}cJO*7mWJ_4VXZ z1F5pC@~L0B3gNUKD^eLV@!T)4C<_xe%(AF?7Mkfi7E`47vQt`5lvF}J<=-iQH^}19 z{ZLUzn#Iz&HHgJOA=zCoj~C|zzcWU+3*yOI;?-_1y#E_{?-|w9+O~_LE=5H|lnxS? 
z2uKZ8S|Cdi5D+3rZxIoY8j47eKy37;RH<1=lN#w#BVD?5=_C=5o=`#{#XZ+M_BiMJ z-m%X)-x+(4bAIeU{9?>G^UP=7&wXFlbzk>2bJqt0XOmnHpFc z8L28UqBinxlkMEqoUI+66r3q|wx%AJKVwW@VLs~$k%CBcsC8c^+_%dOx%s4oz1Lo~ zAhru{ljuH-NfA3-Sh!t|ZUd}cD!1x0T0vBK+P(Gdgt}Yf`|$7=LRU^S^Pg%XgKe`p zBenU09YsX0T)(@qTtE8=>YNrg5hU!~)=u_+3&2vcjlRhtx0X%0QztGYl3e2|iaB|YrI|JrqQ$K!3;Vmh4UKdh-4sK&pr$}nH+k__#8CMr=< zv=4@_L@^xx&1~SrUeA^ZO`i&-=u$-p!wHy65Je)6iwbU2c-qm)1HV|)!fQ>wT2R+e zI{nL{G5Uwc!gW$t(hu$8^;>dqBSvAl+tqJPiO?$T00^iMc^mH%NYn#8Sr8&Ecyvtb z24nPucIWi25ap>t6}5J_({Q=+aAa1K^k5}t=kU3(PP2}V^*DR$et4X=QF!Y1PcFVxU}V8mAxpWUY7)5MZe3 zk!>vjdMWug%aaf2pC?Efl$h~WKPUm=NE*{V(y}jShLJfsL2CWW>g=Uafs3Ig=nRpgUWRTrD|gk_0tHF5 z^({do02m9*e0;EIl$z1BwxW)u3yqN)xtwyDeOg-MPfq=`wg z`a#5~T}6Zc=WGLynpQO{?IyL(4?ZI-@3SJHa+t#rrU-eKxDwqM=eY6Cc4I6D2JhU$BU$*ynb%9A$I? zU<V{t7)wjpz}+MS7mJ zv>1uwKs`qB63;^98sCw^YxdT71SZq5$QLp;8z#81%PVZXTi_av7F)|NpTAV{!e=hq zx?7CgWLYfA_V{yrdM^L;(Cn3T89u+){D&LSm+yjueL9R)k3FKx zAjN=d!*TDtDuf%N1JTYqcskKOkqz5`o@##BNiA-tHuo#^|8hR6Gyi~gb>VCaMtpi~ z_j$jP%h&4ORJKix8PU{i-Lb?HD`{FVyk#eKN1pK)_GdNa-T$r{O!->GMJgH|+g0() z=2k5yOuDDkE!8I0Md5~h82DQHbFU2N+1SU{XXLhWHicc&okv&u_bxm?iJF`H0dWvv zllAD@^M6pxI*7czaM*mE*;lWL61&3H;A>^4C3dT}bfn1I*4)D^HlQo5dkh|0OPOviDI zBZnhNS#>jumIK8z3wmLZORX2X7B^7h9ag0~2eC0y?@W}*J_cy(Do_foL>FSfiQkM<8$mcXunxt-%!EFmgYZG^#~Dh z)~&_u(}&!?7n}#O@(N`u9Qlg`ntS#2RU5sebTQy>E?PoJ<%`ZAd=gGPs&Cod#7Mgj ziqrWVvpn%G6B(#^JM1nf?l+550`=O#moLD{J;X6B*da=EPSM-C3AyyEWR@c_1J+f} ziBCfHy(*s&l_mWx`u4BTiFZgmO?#aN-jvKya+#|#W~W**a6B`OtEnn)1RiYM2^kLl&$R&{%!uQ1$~Ee2HO%IeW*CX@U%9Q zeUxTW6GPTlAA3}WR)1tkM*XCGF@98`I7G;Q=Ivk z1&kswasnlr2J+H6ONi~`DmH#-Jsf6z(blHq^&7x0+ajj3TDr+?6D>>dd=B-**qQhvRBFN}=K(>A@*T>}>n3iKa?Fzb06X%4T zzO1Q70Bufx>@A1W^}d(Ig03jTd4IFC2l^LNf$V5=rVitZ4{@JQ(~0S@dC&7AWB%zR zzP7-Se@B^(DpQ8S0AdPZX5xYhbwXO@0<|bU{uD?1QCI+wV+MhbTMW3AZh%_KjSEZlC|MH z5hOKQ^t$tQnL^uSt@<1l3BR18W>#t;SI~dV1|N)A|Bn#J}n5;%W>3T3O{Kl zVn17?gCeXo=H6Rhx99T-Zn2tc2?R@ z(anDPAX%QT+4+U}8oqd<|a488s#j8GfZaW!b#M zqTqGglG_A@Lt)_RS=x*5rIDr`k4RQQBc;*BFi!Ev+zT2MOgg;EmCoC5OWx@$=WU1A 
zh`$LHw5FZ`AlkXqBnPsShZg9yn^XKQp?%Mwsd9{% z!&I8AkjWTbJ#OlTPY=Gt4{`UnItaLCJ%mI0<+#aYJX12&{DU4e{BlA~>6?DV3s~9S zR91!Jvj%1okDn56666(%B9cEVm6VL*vLWinKE8&8Sga^HFib(Z6!4sS=965_1Qn$} zvVckJm>a8Rh4O+vtjhH~`ea<7N@&GKwm<&-!GmxW;(RYy05`f)p6_Nu$qvlh$Z)}w zt*O7&1qf=xiL&zAtLl`K+I-%di0c_ME7$w-dz6gde!B4lV67!TBj;S~qx0hW$;hmW zD(uP+IbzzTO2fJw*Kdp#>v7)kDK&7rm7J^EH(0uk&Nu({pv$Bo9`_!`N z{J6Vbs7>;^1R#qs9*waPmM9JF!&OmQ?Q0jOaZ6$~PkKHCAl@u;1YD*D_%<&fpFjQ) zU@2G~Veb960BryiD=f3zCj}-OcG{gxX#b4z>il6YSj5}Esi5{(%}62nf6X{I z)A!)rGtDc;l2kQot|;!$>up~h557S27e~cUj^z>~qWx=lX z<&SrGusm6|dNJ^JI2)Olk^}NVd6(1TLH%+~rB6ve+;4??m>n1&=fDVHswgl0iYd}z z$DWbOZoUxS$n19+dT^U#i1;y5m$2((cK}qx^;-ZqEQ655Hc06|SU|%$2lX zDx2ms9dw&G^W>TX4mAvN7wPSH<#}cg_4?f1cD>epMl}jsA<&6Q_XTpmv<-JeoR5>WTbufm_;M zjfCSJDUa-xH`b4|2K{?;!PlV4y`VEfrItJz748mLle`kZR_|31r zJcxE`?A>2|YOVU}&#KRx6qp!4D<03X%RN70mz2UkQtDN5v!NXwyKoCQLghlo9i$B7 za3o9EOchCs5+ZIaUM|CD0w$-d5aeO@}c)hIXqbjpCmPnTQFm2X-;(k-^GVP&V=UA6(nGFNr-TX+!brl$ zKzBaC?e+c>@Q$*eUbGxJ79o}XQBZ#{Dt{YAM0uJz8q>k9?nDQpHNAb5-={y6>CaYOf1zP|JRR z#(};35y7`2^e3UhUd>?0r?+ zYIL-XuGJmo`7!+Ea;Tvxfyhzi2>l+Z{)lWt{KUUP- z-s&GD^-&IknuTB^wp}H>6e~!KpjT^own^CG>_+nE>{QBp&}^?Vn0g&b=OP+4MNi{g zios4f0k5UzRS5NMhO>8UU7XYUsBx6cdt9vowzD42M|Ks#ZuAlpl51_7ITr!F5G*+Xl6{wJQ93Cx-6YJ8$V11DyYgvW4B*Po6G-+S5-Pr! 
zv)|w!iqwHyo4w_UpSs5!1xTtKpgk=f>WuiYxRme}6Q#b~iX)=`_?0n>3}5MHdclmw zb|CtMd32W>eqQ#Nz$e-;MpyKzxc#?np9s&U5@FKT)2K?Es=uF321n2zk$V!Fweg;< z75493sq~ZmZja^mNgv9LE?~v_{eoPO&6<)ae;QuNnCM@3QFa{My3!>Y*(x&A2mgh6 zJ13s*qZcu!(!t*?##h!a)A86r_St%0t$?}w2fBrt<4yA{CyCLvjw?daR~tZ8-r2=| zqLh@-OQF&K5%U5K9MXAl6g_Gt8Dv3%Z=G72KV?M*zpWV#?0i>DXd4KPk)g=3BpCOK0ygDwCxuzMD;PBfU-PyRS6Q(qd4&S3*AWaL9qp&w3%Y8{W+qU zs2-~=yWxf`$SQZ3dXB~u+J)oBJq5*N=)G)aD}=k_W(C5t9RX1C#}9jb{P0QYW7}VF zOENU_fZtTZ1GyiqCPQxRY*py*+LeHQa)eHORhgp!f{ep zJJZF_l5&jVkDK?-b1P;M3V0#H`md8yq>UZpI&U{}-E{QXE4NTueYnB5-j^!KW`K-p z&xY+=)V~IvP3Ggb=moyNW99pUe6M$>YYt8DYYr;|d3}nzLAgPStM`lQY<+8Me`90Q zs$UiJ@0ojJMFM7PqhUu9?MlSJ!2YwJx_3&eY;x?$J$OsH>`s^im}>i*g^h#Y+K!;=Jfmh(tH^M{OlHZlYDC4_c3eD;#&TU3tAcu!}>49}iEoRv*k=p^O~2 zI)%4CE{8g8E)&SIdd#XPNO}^@R_z-TXPgYJMWO{6*_n9F)a$9&srOy0OE+7vRTRFk z&BfaGiE>>sC99vD&;ugfnI!uU-vlDj8|JjiLu2~|xDfIyb$-DGX{WEjuHCVv#^~GW z7tzQEx1ipYGe$aq%hMo6H@dAla3*sjwbWo{^2*TQfOjq?o)#tk>W{iwNIs-M-bY#MbZw4+Hb3$OO}}h5oA@ep%Ub z`E!+*hG*xuHdXvmwl|UCO#qwxo%!8j8K5Qk8Tnf5>q|U80v0)_vmFI#?pv}-G$@y+ zXiWHnC+ADb_+wIFt^XVd_2Cn0=vtI4G4=`o=ME242@MTxuY<3^Z?5S3)y%}ry7%g+ zp%%n{`FHu?#;Lf7(j7z@-d|dD-vU=v6jxW@O3~M7nno@Fism!Rp$h`mPKDGSK!0)_ z)h%B@J?y!a#C@@i=ib3$*zsMHfj;hOQ^M0Eq>0A6d$d>_1;Y@QcayfzBcz(kaJANV zWI=7tO{=N)%M#{1m;ad6@5>U~3N%*H8wL)f1@Q;C$^lhvzS{cFgNo{YweuB~*0REq z7S(RXCYJSz=ZERHrYawJLu%-dY+XsvqSCtQOhK=)KL zr2Zex1v$D9GqX!$6tfTpSeb#hD>5tvEj(NzMt13}e+;{U|7wVotVYVq{|;MCKm zB}k?uF|vynk*m6m4YRq_qWgx!Dn@m?h{^SN$S;Ea@(h=_YO$=X#>`H&NB#S(l5Uy8 z>?x1uIR8~qAlU&1S7em+73nYvA1JcexAIWUN>2J%zeX7bO12nJxO_b_k3WGeHDm59 z{$^ppcwHQ&r5#~v0D-`k7Y!OZ13JH{r4Tvgwj)jAJh&7c zsE95=@ln2cfZ^D9?mKr|z7|&#uQ4kXV`OQ7c`&k57QmL}eD3y)!SLQ5thnq+ens}k ze%QVCm#T}A1HGif9`D$1YfFn)s;Uyp${Iz<78ZPxeSckheOBap#{3KH0LSiy-z;I% zAPgX8^t)Y}G^+2KR8(BGrLf3N-wV`}G_-9bRkN zX6`XAqL*f|oX9s_Akuvd+j^_mR6qi!djdIuFvn5s(U`rB4JCDyx@}yL3Hk){^%=RF zADO4X6?>VB#pBJT#80CZBc1^!R4q@&W*PgP|5LaF)FB*kP;bO+(NYop%Hq;?3b}(g zSRc1d=PyGZ>QMb%IbJ@K;Ey; zf3r}}gw9+oI0Wx$wKC$-OJCrhdAF*`V#7;^9WWteeH;2pZBYjF995gJ8uqa5&_OA6 
z=EwZ72R_82$AZin0j^aJ6ML$pk?r0OY>7kmBDXZ(&S4XE?rRmJ9;L>V14(5OVDbt5 z7=u-Q|6h_8urBpde?mhIM}(FDZsZ1; z<4tYpAgiU9*zpG8BE~nUD3iv|?DbcU%TMa4(tWKktGmHrs;}=ioj=(kT$-Cy$~99x zGV46H64aGOzsd@TIlecx)YRs5P~MMSYiO*m&opVFTK=u?T-2phERr^-2})0zjFNdjSfV=6HHC|pNvA_%YYO0z77lh0%`-M zAhLbItp?8dYgjI>pyo^B@Pn}cHmNV=2katNqdSfn{br-ehPFm}u*N8SLtWgE{W^esZK#EmSWa5AX*^ zPpUmPE|aXaEGnsus+J#CesrD3o=aUyftL}Po*%;6Q&=@%@mDT|~TjJ;LM3&wVU0fl6n?*W=MXaGkt zM$alts`g9b5>S5zEI~i$Zt{3O4HdD1eaN*s^R6s-`6wMy_?zXnY9IyusCwuwGil+B zMRKAyL?e5YAS0CM%{!n_uiUqtofl*yAU=f4@Q3A>qj2fia}e#m}{-N-?5O{Nv4J#ImLZPHW796lxR;*yiHsKXvuTW^b>C# zO)7vt++#^X_Qs(DdeP3tCecL~jHP;tU918nbm2dm&xM6)bkz_qI2$YWw42%h2T+5p zZ*^5BeFwDP-D8qmY(m02x{LC(;yTR>q%;wdoZmi=>aXX~=0l|zDN8fIS?EGonyFnn z6wgCtC9�#N6~)C5T5Egl@e)iYr2Pw7PB1b68u`6+|=uf12Hpj_koJ~-j!i-y> zp4`{Ph3e-fjbpk;X_3eX=0*O0QZo;oq0HYb=T+J@hRl$``0|5qv0mK(!Tc86l_P$` zb(MjCUw^@Sc6Vyg8B?wOYwG3l@f7{;h4sF{6_j~I!EpHwcoUT=p+5@?(6KEz&_eL; zensb!RVh)3j3_)~0C z>c?fEvd2b@Kj^2H7XLub#;Jn914zBdFN$J0A>lobza*&)$_16QnH6nSl|AQG>n8jv zX-JXiXM34*YCXUy-3&0%5J)NxFF#;jW?VwlpFy<!x*9zDd5j{g8d%-z?R_)|IJ{ME6zG0Fbi&P=2C`v7ev2{D|(Z*R;&#>1Se>!j$`s zIbVDAe$vrVQ+d9bXo;{mMEKdf{y$WuPz-to&O8s;XP-t=17$zw`~kej$LwrIpA+$q zCx1=`U}ub+E8{9ox|g!H0q>T*{7d4zW7AXbKQ@ZOp0!*&H_c2!n(uoYbCV%*rUXzy z(tx~GJLqftG=jUze6_pMAQl}7&ODd3ZP4g@LI%H0_`F73V~8gvqOoa zS>~u3T^YieOVBGSuL5==Fr?J3V%rS_A)D)$`bM=e^K9eTek9zY+KPywg*@A+6w58J zhk=*T_jg!P9HfZIFH+~0aC|+HYp&_H9HR-+#l9EqI~RY9wNHH2yV(QOK+9xNVmLp^cVgH`@1qiSN)tP?DcGom-JO+_n?NoU7=Tz zF?h{SB;gd|PAn5jvZxOHn>uTU=?Z)i>mWo8&fjSCshyz9f3$i!^VoxVy?LbeCKd$< zj?Rf;J`Rh!dj!UgMxzgE*nTLdZo-ZuQmQEaE24(;?qZF?sYlVaFg8fS83D1E*#0xHWJQ zU_XbwCtqPkEDE6U4=d2S0X+6;b1(njT$x=HOayh!Uv%>QNH?P<r`;}eJ2QU50g%)abXKhBIYc>x zG>q?wl6{8I(tdQjl0wM#xL-BiRf&+0C4QbnY}o|dzVWWFvPq$f&*8qq_Yh#@o5%~n z{t?iejPF&T>l5CYa3mh838?PCUC?8v*1~#+(!Eo3AE_x-;MqoSO!jQx+3b;)%zkJ$ z2Sa1+VD&LNn!)??Uzs)YmDH&R|4FiO>Im?Rep7^77D96XwDFb}5J}f7sZ;ibPh-^rQ?YXv3Ji!3y6J5X&yTJ>uUflA+$HdU0 zp2_eMd0*AGfQ^Fq%gbt^oJiT-ew(J(3I;v0xnB74%TnPFF1&%gdH(!@51|zRNFok^ 
z>ppuhV>lR!`-!$m>GKBCOIAEDK<6^g>ytT{=MHJgNYH)i)lX{~;2zivTj%j%ShFa> z=*|$k0kJKVgOrox7EHwQ|FUu30yu*Z0=xPSoD>>S$ZoShXzznv)+(XY`>&DyqJ1wc z(ceSKS9>%CgN+8(jk?^LX8W6&N|tQp?oCtKP!kwKlf1Snr~a0T2>d19!Duo$H! zH&v10qQJS_npbAWhWvQB`8kEM!QE0L^V7DA>)N#)ljs?F_zl0hg)N+g51Le^bshbk zMugEN1BRK#MgN4F+dv^FLJ((gv;v=adl4Xp{<$vQ1a4qKw|}EX{bteLiGz}gr=Umb zF|z&Je}Vq(8XxwkuoN>ru1<=?~9(u zno{M9YOCRilN{;Kr-5aoYqC38Yu+edsRJ6OsF-oq2#7v@k?)rPIGQekeF_A|`Y5G- z8AQi|C~2kGsp6>sC9c@EC$Px@f>pq9|=r<81)K$mRVKCbb+2w{|06co!0TL>pR z2ld*EKgpS@oeeqERd$*9n4LL+6QRDrQL$u0^X7)}09cd*bi9_W*Cx1W1MnO`M(^&f zyU@n$=Iz`SV32aUsln!IL&bs>1vojQG5;U$x&Mppd?q!M{D!a)CxN*~EgsFqEdaaR4?(4Uxv zS_xyrEocj-5?+O)Up+^H)e+%p0M@IWigB8+tTuTs-qCGLLQ}b0eki)l=<7Y|L3QbJz4aa z;YnWsR9GH1#+aX;d#&@Ire}h*yobSqw7Vlmp@0|9Aa$|j)~erXHAk8YUn{I+N&W-I zZE%x`4a3@Ek}@-srrYi2nyXfpTN<2$tv{G|Ieyv;_S89VSGPi`EgoB5scmS;FApw- z2bX&L*5mlKF1eLGG@+J~7GaJe?G}eUT70#R(odqUUk@==xN+y~v+m_<;itH_zb|&wQ2{;_d)ThK% zZt0UU&1;=Rlq>5d4`|Py>Q1_ZPjwuUlOnEPw^TeyxVm&pD*=2AVn~XZTYN?8bX%$S z>PVJUoRa;NqW3JtU^-RUqWNJ4Y|dS_NH!uZii~Sp<#3H6dPX;2osEzc=RQNs^OkF$ zu!RP~f*owvQPYG)>dipOsPIdhVBLf*)HiDJ*to<%{%SdL!?_B z-+COB;~0y-??KJq@Nv<&8tV0%WhJM4V9~6CmIpvA+{}qZlDBu|YDgY_xXre?5lc74 zR7G@hoI84nVElwmquEjN%h&b;|b6*(b% zFUNA5R;$&}WnB-!NY<0DW5GfG2EYyCiI|UCBAJ+*_}=w28pkP@FM>N|A>pvs%Qj7P znYFyCI?QGh?J>ITOeiQ zP^rhK)v^;MZ_EFogJnU+XgOUO!fkh4ai-uV;nVV7aa`v_*Tars+4jf2$-h2rwK-f1 z;ZracX}83e34=riD@}8*aAXn{%4V`JNLNZc@I78ow~BQB-l^*yci$lH{DVC87;c-C zb7qKRO$$qN=SO{C6{_BuC&z}HDxAIcle6ugDK}sNx0*4dTN&aY_J*fPo7-vewB9PyYRDjvRL?6|c`Cd0Z19yhjGCAg?@bmPXVJ3`A$?R2iEh{Q zNztX<=07CHy9gGbE726WQkxv%%M?C!)!6%Er50K6>O3v8xz+x&eTI8dK)dDZb#$BJ zzW;R_y5CguZE4tYvyBP>;>3LZ;zJWUGezgl?L<{4cIv<8I&$uPusAV|-aI_EZ~|KC zIyReR-Vz7-iC<=47nV&a*;j4Sx{x7_W$n+n@D$o#$gGM@b`Jb-gjfHTu46s(G8}|NvM=BD zXfC`u-r9L_4d5m>dyn;q0>?v&9jHP|(|{^Q@l)ROBpk)^q6=c)sCT#6jkiC@uZvC* zUvtGfv(Y4l%;ReJJDmCMIrO#w%jd{bSJhJMPOZ&uGK#yB)0}%+3y4>hIN!w7x(`2n*h+*1AG`QI#F&>0RrMounOh6oLJxJLGVk6olH+Th5tX@qhOi*bm2E*y!m8m+zO 
z`K(;b{z4q9{sdMUaW^BSWqk6^kfWRCMC)&sLFPN?Us_<2NQYfuls~aKCPrZ{Q!o|3 zH+Kdk+4YlJCMzxjsIf>u>%g0!Dtwd{3 zWV2-iUX`@%l=QfgiZZur;)Vau_okzS7H<|%zf88_-Ko1GS0w|TG*r8U#&LGBp z9*>AHiSf3Fy?f1e4IbYMqVkE6?aLEBeye#l$u*s{3cIA`ELT3d2LTwk@Q`_K>m!0W z&`6yivR9PgNbxAe=&1pV?k6y+AxaSRth*QxC$HgMH}p#mz$y}5+Ln^V9Narxt3!rmMCWR%IGhe;JeYY)=aC^7je= z@2-vp|EllA>E0)Xk*tR~o$X9g$f0cVozDCf_TkvHNvo_>Hh@irg zNYq_<^R>romfqv;qoukaELD%dd`OsNRl&aMmrvJKywU0C=JXj^QbU%bSWyYiud&+P z23#pH_s_@=6Gf6rtGds&cG`WiaQCE+d>F`oX&<43v&@S|0Gv{Sf&l|coJmfFMwbrD(cS@0Iwp~JBc2f*8 zx)vXzInh@ssVSskoNVcOg9#KqV(|v0i=odwwhj06@W3LWZQvxg2znND%LCBHIJH5K zvkdJ%;KGHmySH-5hGPfNMgfBQotLV(DA2AkJ@_}vV|X-rzbC}VyTx*amAYhE;d7b= zork6iYW!xo35>C1q@rU%$AM!TL!Cetat!{qo<~AWGfrBZ`H$@d0DQ9t04l6%C@nH9 z4!4v(SVlzvrX^*qymU0xiOkx~?j>k&>>7sAB@e&En7zNmOM?I?SbQ^-ec+Wr8+*r` zfhk}9dv&=%$&7;-!^t&fxOocx*BAh+!S+63|6107h5u#&+Q&`P_*9eXqoz3JrSP#~7AYI|gQRNzX@i*_GND@BKvlA!7y zcVOFqzrVz9mPBQmGkV&XYWbVx6E@l-hMTNC!gST)sNH42HmVxf{~=D8f8v(5%I~;o zNie%?9`evM(_U&RtaGw&n;8{TW4>C9z30^6jCQ*^7k3PJgNqtC$eT51Fw6X|cr+xAA8dHuMM849I0fa#jf~LZq$;O<7lG*aErW)^J zU)Nx7>n&wX1&UONp8k0G_Wq#3b29H;gF=IAzn{_uCTTcz7l>WBr%Hzw1+I9?WwaG)U;gsfl{Hi6jPl#?o8>^9=(2w z(g-Np5b#QwBaj$%URqK*jYy|2iO`z0!7= zBc;w|y0lISZeG0yQ!I%3N+=yE`~`NraDPKxKC8Ifp^r;qgq{}%5yrQw4_f$UtTbD{ z?qoQG3r;^dVtDd!Bu3OMC;KE7MCKXiw-oJ&8Yqp=na!-G=)(vh2qyaqBko|Df4fya zbgHuS#t1~4koFRh7wo2geImY^6l0b=+bOJbZy?=KIw$!4;Fr*Ymy80Y>N44+;4AZd zZ}A;>^m+m({%1*>=vLxaa^dCkWm~)we!uV_m%mdIhUB3-92}K04P;i(jq#5}Mt#bJ z@nssv20y!-pYE1uNDxLbDG<_r$Yd{NBt4mDWRr&}QMCg)mRt20b_5wZT&KGWIZ?PB z>8yD6$EpVbAD95%cp5sv!S949M~2{25Fsj?H+?Mx_V=4DYY6wv3mF?`fzgeO*=5v% z3@mr`WD`wTjPN74G1wqo6CP}<(w}0|{QU6Q3uqAB_!ck;i#?lq^-uIBZU#Z9{U8dd zMB_Qmy6c$T+&h&Sw2zgm_P*ICayDN|`YjK_pMz!r`6~fDNx!+Yer%YzezXt}*3`7@ zOQ|7e6-!BoEf0*jb!mXLu-}Q{*^C7K)nrP2?bkHQy6@6-=@<9Ot{Mzx%xgbb0^#6O)&_TjzgX{@jzr0o>F|@e7uM{iCX6 zI8~w_tX9fw>As|&AwFz0b;x&a&pqUVM*$7P>1G7Lx>v%)_5Gh& zCU9De9Hdm>QnG-uoGu6^n*w;oG`iNcgVugB=rN!}tdHFPK42B}Y7lTRwtrJOv{K%&7c~8f=mfBlcB{C0T@qz;o-6 
z*i9r+vbeUT0v3}O7=q}H#d=81Of`)ZG!hX&+?|2X*r;@;(e9uwG#mXqgR3M{GHCK0 zp7kV3iAvJDhXdCx-hKP8l{JS*CIXgSq_Iwc%gZ*7WFfU^yQv9(^8pefb=EL-Ypt&y za*WfikcS;YEIZ7(4@(V|K5i|1Z?~N4qwJ|R0hs5lDawxsf44;*ob$#{v-hv(0E^@D zx4YcE@v7fZSDW6TUY!P1AX*GH)aNYai6Zb?Yv9e_jgzUQL;jWO8sc}ah(I= zN*QP?eC$WNn4^LN2eHltaFMuDQe=@Hl~d2}LZIw+n5 zqYHM>{5OOZzt^Az76Bty#G8r$4P>)6kqseGA6YP3Ve$qH__ETvG#|vGAWo?e& z@nStT<5>5p(*6L0HB}s0d_nVUy@1l6>hVg&I<>|bfL0bdDlI0=2ay##XKXGGV~fa(08Co~B!Sr6 zo$PFp2S~7WC5F5lOub?L4qxK`eVot5U|Ui`7{nkP$_bnn3)ghD`Hc`dV*zIJiEndl zX>M(IoMKZ%yiG1DbBo7Fa62azDvE$CS*lcb+4f;g`r!7EXdyz zSXI4q>1|yVRtqKulQzE+a=J)*Ckb2y3HXawu>8SQso-n9>%|Vt&1}ybNi7QrP20oi z5NjHsj+R_eD2i%5&HQE~$O=>Ts@Y*d%I7_v*6EJ(|5#(2e(y)qsh8i(DweSmy9i;e z4_xD-DabIQ#iJ~^NNF_T{(-*DAb{wa_7qQR>}h;yGFY3&@W+!@+s>&;yIvq& zL$p`nf+1&1_2_JugT}by2=mAH8@zsQ^42i_^DX(inoYfIFCboB==;O`3IY&tw;DS3 zzw!3w;ZVPC`?%gMT1^qM&O5SXo02WdOdCQdLe{AyA%=v(m}#>wQwn993fakCw#l9? z*|VDw+09tSi&^^KeV*s@Y`^C?e#i4Yp5yxuhsev^bKlo}o#%C4=Y_lV{AyEohkzB& z&dzrsxe@R7u^nm^@33@&Q?-7`#b)bi3=L))rdeb+t4azL+hdG_xfx^SD^M zEklS>`&mh%PV-~hwU%M?xAdWu8x(e0M6~_|m1&LfbMzlO0|2KJ*6?dvs_p9P6e<$8Qfh~r75u3aJ2W&Uap~1q8 zSW*DNB9*sv$pOm2u3H}B-QB9Hsz;Yr99nv*xNEe}7p^waS9V!EHBBUKx4gVBQU5jp1>YPln58^K@s`;*kMue`Gwp8w$ir@nu zG0M$5c0AvSF1X{cpRM_Tqx;+wU<_-;3?9=QI*;EASsiDA&n3ycq4Mr)fjPYHY8mh7 zXNlrK`)${y+)=I5GKR#s$2)C>Di3{#|EnsF>E=_`O~J!~K5X(4N}Lt+9(S4s^3gl& zi_PPTWPAB@J*B9SfWihhcUy~EjNFJ(Z&^G1AXdL&%B(gyqde}GK(<)_*y!_Nm*=hC zD%Vi6*71Cd=UAVH*Bph;HS{ss5RXv^wxdPpwU+}d!gZ2{_35$_jA*9|mHWI2vVs|F ze(JbHi%rvua<&!J=*c|WGUHiR8!37%@m8^&0PL~r%#H1w5P=X8>B5)wH30?R)AadF zcR#8Rbd4>QHH-E!^AUCFj!aGy-0r2p7*5aurV`|MjYjwa4-prIoGEN)bI#C!;zO0{ z)CH@)ficC~RsB9emMhl=RAnm4qb}Y{s90-q7BMW(-kS*S)@BDuY#|j9>0d6WMTbY8 zF_G{1s9~Bl>2{a*ok`4rz4L~pvy-$Nni}_c_$_i}%t}Gv`d0l)%*D7~lw3ut*h3@l zzM1R9QjDEky{{xuF>v~DgIsK?MDden&JHyJabEn%e}8t#iE|QmX+5u>Xtn9eTHn6; z4=u%SE_DoaR-I#(O-FHB6Y7ZU^Lg^!4TOCGSd0C`aroiC>vsI<2q=nE7xX&YFU zH=oB4bV%1lHuH-I6wN5fBj+=E>p0m&TH<#Wm8u->8 
zanQXtQ%rQX+M3=BEeTzH3$zhZggSZDYzGp^Stc7=;H-gbOF6%}v}ZCVjJ#GS9I+l#9QryMX_t?!T?$8fm?{`_2CtQHSGiyo? zy-WHkTU=}7^t@`>wZ?)m(J8a_7CZsn^qcDqqzeGD86dncwE{RP2u>FPiv4h$?v$+{ zWJSrhZRVVT_@?AH=)tLlltcSbOYkGEXc)`dn_+r*ZaX35;ejB1o1}*E;{*Xy>OF<& zvK}8DE%TUO)Beqja1X;XMpjNzA8Oq&sUha+u^#HZwuFTBt|`~5gMZXDCBW4hYR$r3 z124sC7SG?0kgl$T_~@ZjRDGn?Xa5r^4{d|L<7MWK>^q_0Fz^i#wmKJYm@ZXQKoU!L zEgH!x)Z)#zVi}-1@u3!i*Z?&$x{vdC0Z4LSU`Mzjexxz1*rnBHGqARdR0u&qM+)D4 zv>A&HSg?F~qq|z4GSFAs=5ri~1F%-tzvFD*N~bYI5KDX4qdnTRR*G#8WV|{rJ{&cD z$Pq}{@2^W=i>k2VRFQTP8g(XUml4}4XjT`g6lk)MiM+M5Q4KWmwA-afRvzPz@6_z0*MPPx9m5U*u8QGaXRP-U@2fCk!qa2?)8PKZ0jL* z0u>R%vUhU)IbM{sB&Jo&%R z@a#c^vDE*enHUV#wtqYOubPQ5zD8h@ES~UB%|xq9)|~J_bavf8H5287<$?pfe>4*( z&NQ9yXuGtO)4h2PZwTZGv~y(W1R>lflI?wsOPXIlV<1t4ZH0AVKo!#R8ZbBzeJqw6n%jlE0%mGXNs}f z0uif#&oQN>-lYr56&V z-p>uABgUs1k%#Fu1#zOicDiYVdhba^xBBr@q)Sg`X6ENdbUzJD_v9B8tSGrJPNgnW z4Lnq3U?Il-_XbCo!(TZn_7H$h0jUh~@<8(BbM*Q>@)il@vcd^cCEUno0jflSu*Dk` zhni-gw@6<2R@6trVg~cTy>VqQX{wM`$l>>RLFYE_DI*CE6Y9O_lFh%lhPUIxU3{EA z*s;^7h$wA)pIZz!+mYARDavlL(rbRBx(eYU=_U_Sl2ew-!|?|E$C}lrEo-+cwn+AU zNB;=6Xn=I!b|bXjWk-`2U$d_N=IYC>?;i_>oPm~2bYqj4hI*kne`-H?LoUIdqG=u# zEI#gL!d@glw!t29r%MNUH<1oiAEMgdy@x!79T*0#b$$)P4%u1lhpMgKYhlx?D&I_n zoZ>xk{P6B;cCaJY)M~~=Y{d*;37d2|^{o@3kd}W$(@QN>KaMy-6v#FadHtM~QSml0 z66gawL}c-naKee8&qn)Cqx8>tWQi4%>=`KQOTZEu)lh#TwKR&4Ok0CMB!8K;-HBdi z*DbyS#$%vq;s=1sAQZo5nSk7_Yl_G*Nn20_?H1g{q0XD3tc!rD-j(;8>%Ajg>o*q+ z6$6x9#D9PModr0}otwowWnLB-h}es}LtD>_dBW6(>himMM9y9)mT+U(OuxXEe>}&) z2=Such(6PE1b<&Oev)x&joI~-qe-ayFtK+-eu{PeAgvxB1F;_}pk4G1qJB;%zT`L6z2;Gxkg2~Kfa|Uia%*E-&6ClPH`Av;^6kG=5W4(0 zJRr=zioqd41Go_csP9~@Df@t9XbPE5ZWS%7Rpsz+m24FhJe6Ee;_Tz5B~e;hP!Lce zg1J~})L+k3h~liXbkJC0(sli(^>nw@04`}rC%>NWIb|;d5b1BJp{io!h1H?Qd z(zY_`DI8Mh2j|vHzdV<<%4`^F*t$w+cmrZc0&3j}wFgDB&IQ4U`k!-yGhjz55oX)) z2MLoUfL@+%5+5#HYhQsm!=}&w=HhV%cP3C`biZPO?4j;%A!sd&_z&p;1`WP1PN+Ls zZM3U=o4DvqV+@~tf6u-Yb{J^Vk||>Q`S`nntKHJ<>SVd)wRu~VktYlU&Iu>xTLe|C zm9V}CY7pdDgbt27YO;htI|mPe7bkp~IKmkq74NB-IziKaRIvUTDaV4-L*8mzNrzK9 z{jAHn6k0^neZ;! 
z?zR^AFJ+~F)$#pLKjH7=`+yi8_G6840ZPciNkUCjxVR0pVAQLUVzXlW9WM&Kv?x{^ z?tc{!9acIpT~XBSBgj0;=ov+(5PUFh3ZN46DOKmfIAdUNJE`9ET}$-1Dy={yT0>HS zGCZmi*)-+((94?0H+@5MzTmIwkDo$qckKTZg%?1Mr`#~Rk*6aZ_s(>rH&a2?hChdc zYEC^15x$&lx*r;L+CTk_=FxjrDWXc(q6&>0_>3*nv#Cz|Zv=D=wecopSb9FoSo|er z$$uOcqe_A_S{MgLIoWKIqxsSMSKyjefqq+dS|Xr)4)&Ui(H?0)rTi6u)IZpbT{DX+sBb$+_^l7nu2K`Noa-cx?Fejl1Ism6_2^!^1;kDiVX^7)3vLc?qVqu)p?(*uju!l*#X}~!{~9Pz?w2fGA(4#2r>5a zM~(Vm>%EoCSFRu`c%vH)$Uk>7Pe2Ict-Zgw!pTk1-nLtuKc<+IBpB-{?A5%V(h2)e z!jDO^6YL-m1iv6gZ7s`>qnE*Sz)77k4>Eyt*I{kbT``VSx%?TYpx-O6mcT6h%c<`n zVMS4B40_Ili>DD%dKP@vAh4h<92k&pNW%L4d4qWZc_X&bE1|Y(LkwsG6r&z_i!&8R+ zi>exbA8=BQUM5L3^{JP!ve(6k| zN$xnDI|M-AfkKL3B=Q)cz7ZnP(m1?TL7EdB0q!~yKhTFCq02W+3=Mxv$=ga5qU--q zMJ-nJp-RZbKIrw6wL|YF<^u@JwS&L(8KOG@Mw^EV-Wj#!?No zpxAXSXoy;JC#bgJi-NaDJtu6omtmXh?2uc);~)t9a4P~9u57wNJkfH8JUld@zcFhT z$|!_6l1sd!WQrTFHKMtr85T8>xr;%;gt7MFTR8(6%S;oeEk!B@l4xO_5NRjWT|!|N zVm&31(oiet?NRDmy>g$JbV#B0*V)}yZeD!7zX<8cX>Y3q3Gq<5(QNy=XL{KKYpb0E zCkMNEgF;3WTJe3}O2_EZHI)QOmW6Fq zT<|kc1-Iz_>zJNFV<{i!z`{KPsL3I%q|d_CUdCwZ{B6l|l`UWPNk7l|skSi7Q7gGg zXxHGhKK_arC+mLU^v_?vooz|uNs)ChtL1JC>x~{CXlQ?AR>d@946n4|Y9(h~G^4+f z!g-TREgL;^UlxyV2=c1W){r{Zg7i6u5}@qE@FCQn(#2Hp*FL-mmT0siQjK<~BHSk` z_-e2%SJj!5*Y^u?(he}KG%gTUYGGc|a4Sp>-NY)gr#+6dDOYD*(UDkHY>+8EOe9*( zx~H^2qF!;=N1)G0rI-po==AI8xV-K1{nTB^|Ic z-qkj^aw6GxLSl6xhzA;0-zFdREVIvFB9~>^PuQ+HL6L!#nfVh{cWS1(%21FxjImgB zZ~%{FzutJ!Z`RO4Ix_i+cuMx5D*Bn`LqjGk@ynI3{MMHGK{*pf!R#!8e%?jH3Lb3H z;}r#c9$L+1nYNLVI(mV$hq_%jQ)R^EQ`T6aTtzou%l&BS6CwMEhn5VPxQNwllXoqv~g*TNRgt1r2qp$3&+urDmbk(Z(+4J13@n*Q&# zk2j{;PIGrt2rj7Zq{C@TMWgPIX26Iy&lKxV_V(Zn6Nxg7V;28>Fi4ppR{LAjMx-kA zkfq^&H`*{5XZs{Oot-dww`MVwGv!G`v#u;@kbmvOPiR&&uIzOgM}60p0@j}kg+IEe zreOjTN2;X?#%2?B;?{Y`9q!p6g9+L5o5Ksk>4k6WRD)~YkZ=}!^<=tKRKs0BnCvBj#PFL=LDh}NlwTb?&?wW@ZKulquz^L;{n z#Cb#HG8M$P1iRgugtrF%ig646a%TuLkZmNC*r5L9w(G5r>+^aRa<@K0uE03uw9=qjzd>f~hi zNBERQ?y&x?oO$0JS_?Q(- z*vVgu(@84xP3=mu#l#?wRtKNma;~NESF0li;o1Q~S+lU^Et#l)wk>JOx+AU=tX2Qf 
zu=KAwQ*J@JB*$Qa2woT6l=FHeJAW{T_2oC0=>Cmwz*J!!)CHe96Q%_nK$I`;qCfr3 zWwIUQ{{!e3b+^Q><6%jtw}xN({)hGd|CWS6g%TkxTaJ{&3RNK7`@P$+@#3lKk6XF5E!I6scSG*>RO`N- zkg6@BEV&Nto+0L056!E^yc~I#6P0Z9%07`(OHoO5S?pTPVONvykN^ipE+aI0kc7F``8}e6LupxCETAy?v#&Ute@jmy%OeW7Rwt&Jb0c#ODfES$ZjnL5a@W z8!nrSfc)V2>`c2R|IB&I_(6ucr`oJX-pX;KNZZ%8(EHSVyf;QFPpSS)X{zJ`ZzgOF z?D40W0HR5fVC2Of4iG!de}e`cLIfcU3$v=Ph>0cy}wBeH2}jSO~|tF554)%zOjkw%FZbOt$S)v( z3tjx2*v>6=OTxZt>%d#S5!kgn%u;@#dn?7cBARGeNoWj(rncTY(~MY%RaC1(jd_^t zPBt72E)1LlJ7F>&zw>dk;1#Hh9@7kelXSY3`Z$~KX+55K^_4sN)f8dew@{0ZWi?Bd z%pumTIj_{*bp78!P`WzRg4a|8Pc?b%T$(sYPwz$!@KmL}uoiL4DgG&{93uM2b*?Yl z&^1HuWrmshbjn#nzUbb_()WFF)M4L%553(lWt+Xck{vaw9Fz^O`TALz%nhXvCVu`U z+){^ria*{3sKRCj{I;uIn3$t1BV*0;}c&SJ%7P&uWaN+N;ggr@|>{3gB41go@eBE z6m&39vTcL#Ba(4?u&kw z8J$GXIwwD;EeO1sasV{O6K61tMoH48`XW=FwYU4-3;QTQ2^T(d9InDO>vylH_?%1X z^@=L!oclT|zx;rh;oK9E{7M-VS1-1hD9OhbAj^H^t5&o1#V!i}oNkvtDbv}uS%M4{ znVfj0Y;vP^F9q;}6Dp^^f038B&kW5DHIUC89;jT#HBq}DX%g$m4L#dNJU1jo>72dd z-NMwz_SPqCFZ44nLbbHVTW#$}i| z2>!HgQB9GrH>uc7&C&bKRTb%(EM1dXCEJ$!0P662O4c_*3&zvNc^3@yBTGaN6?VBu znIhA;G5#5{ZHR>HeY|J{MCR<92B2bpDU3mmA?;-eQw>mFnVg~F6A9WM&5!zmL@#<{ z;mz*PsS>`8s}gw_&&3cu0vJz}-m)Z;-@b@LPF-15I?bMu`BGi4QjSTrC|-Q3lp$z7 z)G7b%^veg)8rcuBSeB@I!uv5Trit{|2b+G&J#^T?Up1evpSF&d!@Nw?(k+ekcDE;6 zvtt4MG8s<(Xi27`j^KpZ=^TNsDYT%CK4s}*Ga%WrVKEy??z@+#YBhP@_-71%h)$bb za~K}6cNG*U_cRTZ$_QnYr+JBGMCj6M&)lj_C11{~kZ;%cRsT&X^ZC!Rj5m~Gh1_Ry zo{T*Oj>5_SrQf5@-kxt4i)k@`XOl0gs#!K~-q2hwIghn5%MW|S9^aL_?nNW>M>Wwl zkKKvwC|V!jEpUwbHuZ*jJlIFle7@B6+LHzfZ8dwZNgl;SIcu(mv5_hK@D<^qwiZ>9 zyJ?sQ=-dvtX*!2ht5Ow}+ikx4ED10s%z9K)B+V*fR!sZWJ1_JYxMS{P+?`^m!!zUh z5%*H;k7e&6cO`czbUd(Y5qotP5_PanoPJ%cxASy+5ft#r&VySdI9E<1SxNqo&8ggr zM=s>an1Dg{O0;$hlppRcZWBx-SyA>*vPhBKv{0b?+X+Pay;EgIaLu_&Uiul zl%Nfu+Nw6*kNq`-mF>S#k8_zkKlz0&fBAy}*{fh{m-22Ww9ZBUryU&x)TV$g z6D98Tl}vTg=FS;5B>b?u$gXNvd*649*-z!2^U`%{m30KHe;uiremZbAOb?tkK zh?DH8s#gc!?fx0KZ`|k^h)BhE$i?NUj{KH|Jv-$KjrCTo5r4Iuj;t&$m#}lca600p z$?~($36-zxJ}1;qab7>Duh@%qi^`NA(k2|PmbV;Vk3WB{!YwyY*L``(nJPTU5^bkP 
z_!1y}f)hC;(dH$yC?Al^nrm`1NU^+!q(k96yBZ$kIU>9p7~BjqnyxB*2_>kJ zE3mYp4da;>dVm=pkUtXDDQYpQ;(_s86WPsH*5Q#=NztNUx(SIpFGg z)}drq(Bm@P);T0|w^~8JC$}jP)HRy!Xy_1T;8Gb%L{moPk9-04a@>fO>Gol8t0;gm z8XQ&p82k_tF~WM-s}e~Pz`+Mu$DJ`#OWc=<=Zfg~lqp-(5xW-0z_g*}O5@rp%4eLq zB-+kJVqo}}>7dFj=>#{gg9!n|t^D+EJ#kpkaCQ3c zxbM6CB`kbRU&U$^+&42kRcvHcTyZf#aclwf8&ud(HUIiwt48C3nJ%1Bbjjf25gML3 zCAh>*o$fB3aZ7^l#p)Yg_JN7v^ys3e{OOQeo#swE+u97=2t|Ex(;PT|AFA%2fI=;M z7GcA#lzrEIBQ}BrlcdeASz%Zc?>OFo4a6)90We z>F>UnA3Tr|=;-t?V{tM${eA2z`oY>sZ-XIwvN`)0XvzSVfxo#9f_R3)3x5Qr7YII< z2m7t|t<2%VMq)(j$pN2y!}{;HMZ&d3XcGnl2qoKw@dHKG;df$}oy7m9J1w5fbt@jR z)qM{IHjMiiCa+vd<1aQqB(1TI^zVI;zdXLO?tp%?9cdj;IE$uv0}{L1fMN>C5Fh!V z4q%_!(gxDO5KHHVNc}#QLpGpLRvT!I7slOprIQ_Kw0G>kE665=Wnhr%yNK%$(8Nms*qowU3Ba1g1TiQBJ4PN30#uh z+Pd!xNCiKNA|x0AQ9QT9ilT#V$QqtbQ1CHWtR;9g$v2hxrIr7B_TQaZB%tpV@IQ?w z{5DC$g)HF)rkxwhwyL6qM&GB<$dL-} zD+s65>C%aV*vN*;-BbG7WT~#0A`b_hIc{;eeHyLu(s@f`uC?pGxpuyfDo94>us~6c zG!XnC=^xBC_e=$g-y1-xUczKDsTdVrt3Kiq;2e&ZavL5NxCY(E*XcE`l-1=&6! zo8t}_A$Y8eh%mzG_;*tTpq?pB0AWeMO?}hbK_8TP_tkz*!@B98nN+O^sXqR8Y%W!R z*7PLH!d8s?a_`co(eBcy3XezenJHhYyER2x0fDt!OKZ z2xx4w>&Ok$gN*IW&{|n+ukk18eY+wL_0;9%sWZ$(Grp06#5$vY!v0w846L^ERqR!D zJ~q@i>S8W)9QfO`zCU$4(@bqQBS@VRK0BzS63D>kIHOqyU}2 zW)(80irU;cDfS^L&tY`7viT)-O#O=KVpWi3^N3aPQ=?)!e|+QznpMW)&wN@tM!EtG zKn>S?Ceo*1fq+7-fm;DTXSwI`pElqMDH8dHn&1wmuhYi@9!(zzKg=$xmt z-&_VY-DLv3sx|6$<3foDQS8c%;!h!$mwU#j-Rp?G=$d!dW^m!Z*Cb&tyF)`&jcTr2 za4?sgEG?reD=__}UoD_$QSzU?Nc+eBaaY9Hf@7mu_!l(A2#3IQX>ObrjnLQ?p;-}$ z191(8Mbr;p$0O-iUz*e55k7%BuuHvDG8P(vohI7@Lp(gf=e#aP@wzvw8LsGl_rn;~ z;JP-_p(C_{hDl5v8f^GI>tlMG)+G>Nn(D=O8)M8<-SSNxk{UGmV_y0dx+Yj5q?*2E z7T6DW<+`e$xxCsH=&xPSlsp(G(<#j?XKUB$yILCf8i*HOcHRQ2xH==e63(iJF!^qx zV$YilmI)O6VXhor=G>BGBnCWsGs6`Y@8{oy5~v!J<)x(+eQ8gIK@+88#4*RqQ@Z;j zj_z>C&}ixnt*qUR(Z}eakXXn(%2@! 
zpIrc+oQ*$CCqyA(_#b@OiYA8>^h*OM-de#XCq!6Tr>Sjh&)I4O)h|)Ys&Bc#W=!gi z-U|uYg0C5mSpja= zIsMdsbgJTrp(Yn6o(}s)(~}s65Z%e;D99WSH~M9?On ziJvhx6T+aL!x~}@TAhEf-3#~ud7u^f$Y}->+BLD@YYRl9a2uJHIk)|O4(GMS0e9rp zoHd>=|6CYY7ZH*Jg>M)I&f>!`2IzNdvTWx*PT;>y@UnJin0nH)UN*sxvb6kZz4ewK z(?44mC{-I!JC%;z9H#1F-Wz zeQZ{F78HRE(NDLZJ=Z=-Eet~mm(8B=d8OEA2?k4)O&oYbRpyO}M1{w8s14&%bqF> z1%KaF%y1yX52!g&`lUq5uZB*GN!r~P-WhZ4nWp~fBeRJU&wOObWf24y*ONh6u7a?f zQF~LdpT4A99(0p+NUO?4ruwY;<{6`aB$Or!PwA5kmA1)+JZOqUch!f;Vjp z49kteDRyG9M8$U@0K45zv`Vp+iL{K`Qr*lzv;j$+zPMJCHwq%!p~H;Jv!~{q`Ikp( zrmfiS0dog9zpTXFRg7jx08Q7D%085cg0p+*7kA{PqF1pfL2H&#d5neoVs|@`5IBcd z=-C+T8;yd!UNl$w!pV*0^bq!VDnBDr?Q`yY23;z&I6VzqTT&iy)NX0nt`sBly&7@u1nD|7yqIKdKeW8R66ow@>z zFg4r+d926Ef&!^AZxqtS_r)VZG`n4d&VfN+br*3TjMIYfDUaCMcy1dNI&L>LW>2!& zwHpdm&%8WC125pLAC;P8D8?feBFxOz+xSqFJwJ`7?C z>NvfqG~bggBj4$QO0_FO_0Ybo`N0}Ci8M8yeKIh|Mm5lo?jS{YkBNANS)BK*m77?b zQg3YDmdPA=FpBz6-j9sObd(XGzXw@IbB&b`pq@D#rgL3c($sJ23i%%XaXGD>Gfo!> z)4P!OwDSlq_HPX@3H=Q$VpwY3s!5iKSNT>v?q=_ZMMSu2xCpKgoSj~;FOEe1^d3+y zn`%Fa{XtBFMX6vGS4H*yg(Mr{UIsbw*5Ow}&uM%CnPY9xl z2P)*Na^zhm4s&?sX-_zo2S!~`M-c8)=YL7hort@3`lK&f0)v?UIAKD5AEU&ZZPYZn zOY+!f)o@CxdYwsoDyy_8`OJm%jAbpoQxD=OZKTr7&Z+)wEvQ}3@XTcLl_VD#BQ*-^ zlE;=`%E94u-p#m?SflYOyW$E42g#TxgPa|AgC&D+a-?0VRJ19RK`$O zjjfDn*>vYfRTAHMijkO!bYLJQT{lN=V$@nC(c6vB)#yW)UuJ&UbPHs?UPRXq#yQd_ zqBS<3(8}(4F5g+j32^g=H)xB?YjB!dKc2O?GIb<4(ZX{@L|YOY;M3UQCg zcy|m#m;Gi#D0#QKIqMK~7jYqYKR+go*5>f{e*SX?eYdU?_D?OHf?r07D_1FSM6j`jD08*kS0Oi(c`E0zvkna$mdG5O?Pq^)(J}!Lo*kN2bZ6+;4MTQjjPPWbS z`jZ`nC0~M0Azxa%MK(ZP&2gf_ZVONc(lw=g#`=Tcu@6<**)k=@;sP>tuzf`aIn~_H zTM{?7%l3a#FLZvEf4-%)d0;lkh-N-1Q%4?ZuY1ALfy8LP8c>I{#S;dFlPggndH8P? 
z!x=qIV$IpPXp>Fh_>%1Qg940BaMTkjNO#K0O8Fw)Y_VyqP@=Z(6p3$b$O{oBYMSI_ z=}TLuJ#V@DiRem$43!>aKC7e5HKz*hd=_jN=bER+6TDx1IN$YKQm#$7@W0iwDgKPzU0=IZ9Y_gwD0rry@mFLkvfzA zmJ6);g0kLA!n7QT52#1q;DGw3Pq@BDRGpTP%lc+W7FkEXfaGo8kSb9$4|P^X=p+W% z=8BLv%%I|rjf8#MEo&+n^g`0luJ!kY8VmnMRJ$p^FR7A~IC=N(aSIDQ>o+lflt*#{s2`;xDt?<_N_}tPqTNheXcym(u-cBaK)=#fU zKF5C}l_fW;A8w`R*1bs2IHtF%J9)dOnHZf%&--2)w(pL$qXO3pZ$Lez{Tua^xBV~F zlQNM1>2u7OOnbuE&fi=Ew;)A_csK;QX)DO<*`jfTa74mSVzb^l@j-6CSEIJ`CpaBJiSqra+sVMlv|W-9#VI)++b$s9}x=g5O)?AM>%D%sxkXG{S~1~r+5rdh5$BGHU7 ze~Gw1xPd>;`93b+?XiLs!twQZ)Q^v{nQmx)>^4UXBr0K80g>%PkGU zL2K5oo5RS`Y{RrA@nW{1A6r>Q6{g-zAidytq02It2=q0^2oPuE+}ZDNZa&m50oTBM zl@zrdQp9I0KFZH{gZnrs5UIA-MzDbmiGHt}weF<`F6k{Br8wI6sRjl7<}&NY=p4In z$}*yug-!~xZasNciwt2-;99RsE)#yp0NWZSU;*ncauBwxQ_ykBOr&nlbWtVrl5S-C zWCpPVcZ$Xyjg_0nOZ45o;8AA#v!c?J2Ww?=#;sPOvf>j{;qY9%aC3e}h$4-Y4LBSXSa8kH8nm$F9gI?6u6GN8^&g>AYx||Hu@H#j`AmT zZa9xy>;~2#suQ$_o2)b<5Xb|o*akG)5+%Hld4Q$Q0Vp`*QgcN;xsfbQNL}(vZUK%$ z97CB$nJP3MwFVe zgfPs2)G1TCIz{*gWNWT$boeiUPf$=GjiN~XbWO1_(KkcojLC~meiB~5>M3!xMhDIk zVgFKfa*+Xk^PJtp8G*_xrh|phapxr&KzRYWbN4AW$tUIFwIEbITEIuE>k(3=;eN!z z6)2nGF5hs{mw7`fF0?Th^Vfo(nOa|rqHL>Vx@*VjLUGpz%Qe%h6SU6{$39fAGXd29 z0jBaMu9^G`K;Y~L|1ysn_U7=d45v|Gi#6{jwkl;7*|cp_0jOX|uziRwNL~5iBUM_d zMI0uSAmOvMJbenHIgtd&uuQgB+Irt|eRKB%j}dJ(UL_CFs2B;^Xjm zurpDV2b>AedY~f#hRz}R*$GJaTqMisiGFL-#Bu7$uh zF{JR;?K@U2??Q-*S{pCPt@p_0@g>#ytXz5k`8&sJ0Z1GcL3DaRxVwk_7B!ic%0nM7 z{Ght>gm4#q$PF&E5$;^3Jhwvq@dWEg`H;G~>nNCkPPeRpN2_FL#9GjF)>;~QkAz6r zl@2c`BJBx>kau6D`4_aKS6Mz=@HyBY{(T3>gbZv~foaIn;BPKx7dm*WEd9de`!iXZ zgh?VNHs<7?@*f{-h*oATvk3!h`U=>83$sB_awZO7!al1e)h0m510Z(hedzEOK67Kp zE`$PT76J@T^Q~aTV}6WqT8T}jY{P0r`yc2K0u}w@j~mOGExv*@0LvK%L0j|BapYg1 zpl}Sb2NXXt*x^#`NzkIWOsuaF{28bNY=Hh9o^h{&aKP;{bW!FDVQA+y_lbLD&}TYt z8{%_CE1Bl;QhTdSaExqRK2p|yx+>u8YDW+jxb;xbZ-Rr;=DR34RVJQqVH*I=JTW5x z-SjOoQ_b`lc{_Y=W4fPn0yu(v`++<12mBJh%XPf{$9w!gXq8wZKr*IFsua|Z2}Tce zglPo*I!70b&^}4aMGtPcJD#V#?zL^yjMA)lf-|OsIri=;3i{yk;l}btdtf-eKgfgR 
z9O53;vlvZHx1-4Bi`WMlKKpzpX$~}@00*^5ZU#W=X%E;ResisK)ss0`i`ESwl2swV zSJfHaONa(m14p8c9Zi@W1@mHp(UTU!7MKS?Ci-6gxgp_5m8wS}nmGP=-t8oXh6kaa>>rf!9MUcIu|RkP!y zwyHhU3+uXQZ2#ee({a21v}y9jhrt_-DFcCW)@fKL$a z5Ut&AaJ|(hAFf9iNRx1_*uJE$JXD^^lj4l=^Hs1}JrH@r<4&-|r_be|pNwV1k2{(6 zlro~vfs=z z4D;pUjvlwve`ly3F3r zCOz+GQv%aMQftxx8}5z-JHplFrU+>`x8CfhJfX3wp3J-PH1zJZ7wK4O=*j-rwsUvP z3MdxeE9N2eJMNxL!3+fVT9)e|)6%Q{lVU8!&Lb$)*g97=J*Cgq%LV6-NrycxMItr6 z7-1$Sy`%J&3b(uxt}o?`$*bS#hk}ea2VMXq*z|E+g8hDZ!$5MaLH`3w44QmEqaBg1 zfxMjD5vfBrq$2zt7k!?NQI457AIv6A7>Sz7lR}PqzDOu>y-}b(xqG?G-|(o$`b403^q!OGgh*E_x5RlOjIKnV%`lhEZb%UV%I+Pf2sAyej6SA~p6{?$^n;`*^K zcDZ`Lrgrx?3)Nb#DLOg{-OR5i5e_gFaRJ%R5mShLo~`Lyx0302i9DZvL-{8z=9M-D zHa*63K9y^(Yy?Xqx?eOhTCOa}=~3&+^K)sY?3>ADK@@?Ck{^>GW=>9nHXr3bemEtC z&Fx?I``-okB26jL+VJUdFHD|mt+nOok?UW@&fUG&DofT4Wyd+l{;|W6@#N=r0d_dt zjT*UHwHAU_yPwV&Tep^pTMio<*l)$*Usp>2I~=31d3TH(S|Ke+l`Jh$legmLuwdE0 z_$gYS!{Y||@xDyQw<1+)3FEhX;1S#6@tM1Ec{LSJe4eO3X})rN_3PEzd$-Le>N#!V z(n)B0I!U6TWvr^6=6fqLAhYXUf7eHY)v5=~_BU!1Qn7d^%m+(zerC+B4p*Ir`KgpF zrT76M56K|Rhj`1Gn!!VV7WrFG!bm!&=|$d6;`ltOR7C4*#vc8lrP>1$^_Iq8)Q2;9 z9-Y2^SB?y})63`6wP#xV&#JIH)I)MkN6z@-2MWbnJ2i&N7PD7d%lxJMX8O0C4H@q! 
zJw{tObX)gaL=lgep_(7_L1jttP(a~g4YergzZ)(WTxfr3S_j4t?uqHw#F!knJ#q(!F4ydH6-MHeQTn489 zVH5d3Vx6?*bIs(+41FBUo{3~-4@`TZK2!qdKjPw_eBs~V=|3{W|Ib%DS!3kMpA%b# znT@}>n9}GysEs`z3-u4Nq~^gr7hBj2Mq=Ri56n0(6ImkBKqY<)68Yme*u28NUaOFI z8eWetVs)Oxx&7KY!~hI60#r1BYaix%>da)ONfyS#)hGPgM{Th&YLQE_t% zj^%6`566MQ?4^C?Rqpo&7*^vm-iZ;ebM1G?T@~|$H`4+7e_!R^21!$0KH-oD>o|x| zbUG;p^F9s^CO8RArT7{6Ucd2ywh~;8m?Z2vN55H8g$P?m@avz5Y0JEQ>3QL+UOUg* zDf3dp@^p95qt&k)n3~TCegx#bV9_UsCbo13g9(cztec=}TT62A zm4OO@wWbC9=}gxw??+WsG73`QW(ktO^A>GkVPt0zGh$ZGK}k%DExiW&fn(?MaY#sX zO>$p4o$uo5a=J{TywmXgYCgW2_(;zM|EZJai>;ItxA)Q9r~s>LET_5YUzBFi~v)XhPL?i^mWZ$roVMxRF#Lba)5 z(YJ|rS4Bz5Fc=90n5C8!f?s?wt-e)D3F>S9iRwCcFM3S6>4mV_B3!626h9Pje)F9ih(S>DN1C3t#m`s=NH(UfPg@X!H zP8zzmnJjU{jlM2KUAKsD`Mx0uPS*cyM$G?@8z$%vu4^WjXyNeSFZF;1n*;Px6Yxsx z#Q#Ixo5w@l|Np}6k~FF8X}ThktwNGzE-i#KQH*_(B*avbkujgEWDi9sVy?1fo$OgB z#*!@|WW<;evd>t?hgq)s-Sz#Q-*TVdIrr~(?sM*QpMN}h2s88fyg#qy`FcK|FP`kJNuk#KAx zM6*Au*4T4k&#A#!Gk`+LBZu~K9(@JwAY@v~7=2DGH!7UY8~{gD9A9EyQst|#ZW1!@ zC+%G$_orr>9l?O1=OlSHI#yB5K)C7QmsnNT1Fae)Y$GFn!h6-dGvUnPG~t4*WZHzZ z_dF1&2#==eMBY-2=(tDKvf7FtkM4`RFl^oZHE)B_zKuP2WbdDK6JzjYiWWqD=YRg@M)RE$$rul{AyY!*a6IlU zNYJln4saU{BoPJ##1CyJ#Wji+GNFSgvRk4Tto z?@<9jMZ>45w*??nYVssVsa;b(mC=v?K(!7Cb_9aVwVw~NBG}x7f~G&S4?ipg6KymP zq=_EOCR8Mr38ffHMgfc(??mC-N!BW-~vhx&CD;2=<(B(VU%V&mUjwS0_1g~p&b zT`z(cPi4t{$;oNkub=;y)=X#sTarY@ej96%T(=_jV9O*MNsnyE4~+BEB>As1;2h7y z=N-q?Rtof(G0%fGJ2_19RRp{Frn5Ic-)~?3{cwaXIrOL#S%oOUH-6zfM^5-_1NMsm z)LWy`l#&XbSPG+<29r34;L1@lO=Rpl&7fiWfB_suHUi8QCiP-vh5y%qGq4j=PNm@? 
zg<323hHG8cHxV`(gtDyeeM%!%Gu3Wx|281*Ve`=9R7){TiTTT6j zsgK(247NnVyRw3s#pbxqom=G-&zbn$ylm3Pnmll~Md(beq1c$A3d8n#Zf`+E*si-0 zxaU+c`xkCIti4)Pn_a9uOBu>%Qq3Z-JaIeHltI(KJpd+9`U$%30TLFk&7K~<^rFO( zy!oYBf^M1enUtn4`3zxGWwYbO@usCTeT*gl5#&;EwZvJ_f-2^I8h-_@mI+l>FG4!W z!$Vz_r-{>h9n{maCu5uAv2ArrBFoY1zmLwo7o5VENi`joLN^+!lFgL$GG(lnJYO)$ zRhAfgy#hpP9s?JeFh7kCs`sw0IKpZ+zvNx!RoPo_jGh>-uz!6Lw82~ILAD}z{vkpd zM2tSM-2p82SwUGbcb;~fxSv?05ZB_n$L{OVO1E<&%Tn0P>>i9TPebOzx}%cJJcZj6Iceg@6ErwdNjQ*n4cc_J&myUtdTR=eWQ&_=)qA>Q5xD<>TY zI_&vp-!^yl3RLkl%qKtZZ+&HQm-G|1X>x0;rNb>12e5Cb#O~P4!@|zweXI#>XWq*+ z(N8YOkUXqVNP}5x9WIjB>hxhmqa$zGiS_tx>4~R3!3*XF9S$R_e7=2qyO-Tupk^3o zyGKy4((qDZdD2*64B{2HR*!k<)lpH#nbGNyU46*a^QN)t_nd0z$F}^s#WdIDcxpy2 zmHORlHE0$ia($Gp;!pWn(xDK<%~UyWcmCeJ>WT@QbtSyD<;Wi9ZNqiT&qrFlydb1T zA%7%!g)q?bUHuKfdA|!9C3U?Jk*Km=$&=SWx)3Hc47Fbq8ZmUF3rUPtuk*>*(;{M2 zejcNj$+t|~*v5#l;>G2BfbHRjst}uz_GHrj(}beAS)cV!OGo*;2`AG^fg|7=T3Ku_qI#lO-a50LN5wJ(U$AnwNvkitje8k_cfp4V`I!}=rS~3eV4_D5 z$BGjcA16S6HVFDq;-i0U7$Ru1R>hDHW<(q!j>Vo^<4;!>YOA~ZVEq;|dMB&yE_ns* z`f$>Gbw8%@>qu39b^^vp_kJh>QC>0Nq}J{oq{O)oPSZwWC#9sT8QRM8U}KsQa=eRS zjDAW6Ol}z2%ifm4pzMSRF!@|%B=;PJeYh{($#KT!p-;{hnauK7AKE!dOk(-_={Qe& z_o4eU3rTDgCl=0Tx^J4S5qfb4jkuEmlhm4TtkwA@GD|KFoUk)Ae8GZwCo#sw>E;`; z;!kp}2xDX|T|KSd*q5@NH!%63Du@|YQn!0Bj1DiJKh)yBmgc)ZTl}ot+cB3O_=IeKmJg0Ah`;!F8leeX~BHzPVw_zeyDUW zw$d)f{U-M{b?K3Qa`Nb{nW5E$v7D)sNe_}kG7I=2eYJDZb2T~-Fy-+kz8?M_zA~M^ z%2pA59f{T5RpvXp^;%SY3ZJ%pBqbFXm&=qn8Xih;G?Yi!WXde7TuC_k)ypbA#NOJ& z!ubAl?`yXmFDAv0n4W4?Og4Dv_nXb`JhMAFW*PEUsb=@Cgj}#tK6B@_!_zlM%8sC_F2-tWoi9OmdQeH ztu#Af=~L4l@0xDMEniUQzLoJIYG0-rhtS{UzZ z9@|mfr(NJYuI7FHd(-vuvhZq0`n06gS=X#@XMNvczdH=tcj4F1)a&_Ur&cn8n9vOs4#5&kIuf55>xyeil2SY%!d%19nMYuXr0uiQ|qJF&dXRSqaL ztH3NZpUSeYRYkE}{@Q?prmS>-Y}qTzbD7gO`+86Ghfuj`pn6)mAXkBmjF`RXRMf^9zg*Lp%f#iUE;*g z$zhRKEZ?&i8t0%!8JnW96sJwhVYS{1V6Ai1P8)&bSB=JsvzOTprY9@1&%7sxLB%WE z1pO6PS#9b|o;HJObS zs4kkzm*~goX;v5AREYC>*BJQ+>J18(r}K8VGZAxRP!{1HyowC3lO(oQc`xs^{hlG3 
zg>M;q(cddS>~Ua=&IN^@8kyXzA$vHK)9cWwSr^Mt_^N$DJKBW0eqK*i-^XBKoLa1Ts5FA=*ILiD7+WwjnKC#299rN z+Y^W~S=%URS@|Ky_c%Rp4$L^4Z{G--61GPFT*04x%Y#;?*FW?wco!Z_zU3BU zyz=(z+l+D#R1L$kv+#3E>=iNoBvhY=EG@=Qavb4K?lCXPpX-w!q3~xwR@`6QVdKi; zGQaEiTMgL`waQDo^yvFD==-pqnhr;syfvaqzkWEEIIpaZziV0gmGa7NoeWEGW0KXU zT1u*xAr_>$o>_S1+MkH#E?5IIrw!WcE>L@R!()ai?+%AOCF<0>at+{9(uAC>c&4L& zqCh+FUQ9_oha|GRuz%PAE!2sFg9QgP2?QtUM02Tnw$Nc{^TulS9$8tOa4QX z-#C-Jy%5I_Rpvj}#XZgwPHSBeMKmMDwM4q4hYnYN&*K@gg3ZU!;Ub6LSjw1=E+D)v zM`XC{w=$PKb$3zgYLdcX^&F4sWo*P{#~$-8tOb2E2rvk@PVq@$vs~~GL;jlr)%^Zm zDoJvoG&VL(R>h~k{T^wbnye+;uOX{@?>qRF`&f#L7DC}(RM4W9zhWT%`jg5WllEaq zjUpm$ZIA21QlRF^vCP{yp*Jl&S!u3WHpsKz5LU2q&Pc$>5&la<`58m;7@^l)cj$<^ z-?{?E=-cQWjtdc}3Y>l*V@8i4^|pxB`9AWD6jg{65uNJ`ouG8y0;_FvqiVc)WY$CS zz&t9w>R10H00Cii96`Io69J`)zbAdCmbX!Xz6%!(51fM?JQL^h0uhgv{SRotVeby! z6VLa#ecwr`{^3gz)lh_N?pwvNySVPqSZ(v=NJ^I*KY);CotloDc(4YlwDt+;b<;y3 z)lsg!mu6wbrs+xc1C~l-c2hHZ&3&YkIiCv^^JYB9nK?ICtl^V>!+SY{zjj#gK~yp zymz#l#$TztzAM4Y&VtTsHYBe)B)i)SsqrG29NE!P4uL;j^RJ^|&4FW4H)`es%487AfRS zpO%4ylk>u*9zW3si~IWY&2xUz4(FZC^zdJGAg}0MjsLXh;-L1=8ZG#1!?HWz zx5>r+0r6IDah540R??bKF=}yf-rq5)$mD?nmGa5p#a|ne*w_gqYm^SFKzwOV z8OZgnqoCt6us5jHnB*N67|jAp;K;u=0D4{6HgFvs{@U=ftBlOkLjp{4ZMu7I3PLe7 z77=d1))ljsFi#nMOlR3tUwGOhXh}PCe`j74HiEJl2mF^BC7`{9N-F#bu>|M*^9M$f zmdKUAY-~Z!*I-gQ)=*JYj4~Ju?%O{vpnQ0ppj$fFu%n{Zzw#1dKfP<_gti~Mp1oz~}dr{LA zz$~VWFWE#C8z>`-;IpX@Qxhu~3wvPSt94&i_(FEQKJwMQWkHg5y-HIFXXF}DuKoU_@C>7K4WhN?xv2a|XbQBA0_A4`H(WEn0TtP`h+7N4o~1?u2xmO9TVjcaK0)DsbvjZ_8zUcAmcWGcb4G z(Xrf7&7Bqj3a_*6I*-QteD$Cv?uPZ6k?&i0lEwVY7Pq6C7kkTHC=G)AiBQ9dn~`n9Y5-<@?+&~eoyAho6>%!2I}cMbwrKh%$=s2 z?7aP5#}QIcy&g1Lhfz~b;^eUDk=AEE9b^GY+9UR6v$E9Iv*mgQW=m9ww-?&?YrSub zD&ke{!a%bDNR~Hoxp$1U%OKSC4YI6mVDGN_u8My)%}){HV*=^f4>tA%4j)8RJE!<0$||J z585eIp=l}gr+YQ`gb91)5gfy=q@BN6FJ^ba$oP)ghDnZU{FK-7j*K@ux1oZIrCUE3 z$FrhP9~EjBBFjY{Pd>?(NPN9ARu$X0hN0d(02V)I{a{PoUOFCG`g%at?L$hbzIsLaikna_fq!>TJ9vH(8c)flcTjzh~;+_l9f(Ue7zR z8@motahBtC^nu0L%++q^{NKz~*}jk8YfY-#_7?2)G2JrO$v!KWbj@}9&pG9HO~ZC| 
zx9H5{b=zU8uo=E?M_18av-!)3NTgDrx2mDa?jI`{OYa50^okfS}sfL3LT6RgDMjw;RSFd(q?um5yK7|+CS43dPC>IFjV%@;ub^al|+ zgBo-2E*qAf)Yn$*9`n>%)#-Lt!EG(JlqrLa zk6rfqIY)hcr9mKbt$0K=8!G~ioh$nl(IL|NdT~vj*sMV)Vz0%_nSzT~kH1{VE1x_g z_328UjCHzIG^&4x;1tUD{~;3nM+faL{}y~7q!g2Rd+D4ee(6fPffC0bxb|aZJbb?g z=+x;~zXyrRi;)-}q2S?nvWSgnw8(<3guUgTeLtCi%Nh1sEjq98(B}C&r!5uugbQbx zan25uO3$mV%D2gry$h&nK;2dVGmy;;VC!Av0=KaB?0O82rYoJ+V7@M**ibV#Cjd`a@6)ZovilT zP~H;s!3b_3Xo8-% zV&J$n;sxIIf~Q?Gz}XPw)%Y-e0Wdkk0}}9+2(Nfa7PU`c{MAtTAu)UPtfR?=%m01% zw!b6(=9MwQ_H33f2MLdJJz(>Xzzk$EqHvinF>BY#N~)KMtj{K#hO^mfe%nJ+MoHn$ zM|6suJj#=D%1SkMsnynz#%i(dp$jWBCdutoqDEWE>ucX7^QM#TQw)dJ^0MBJdk~V0 zxu_Xz{a+ihAEabW(N3niWOztO*xSTs+SyN4IVWY&>Rq!vs{Edc(FSd8?d)5`%Tg0k zOD!E}w4tW`5-gMbmd4NDzFXSWF!x`kFaPtH+P}ELkYLvz{8#7(+)y?Zh181LTF&a4 zGh)r4`*8E}v?5cs663ZE?lHl7kCDvLGsXA|yi;GtpT(_KC1&OfU$A%8JiyZ;3mqte zg8PU%x?U<~z+Imro;~immR}ez&zTX3F7PuUMk-}A3n_fR6Me-3)qpJB^{SAtk*z~g z9P+PahkaYYZtymF{+q*K-C?|cgUnS)RT3a#`O8&Mhy{QtAq=s%U<^S+jN)d~-%P8oqw2|9oOl=6Wd*g3-rhCN0Il9(hZo?~F1#rE#nXOKU9E8*{HaCVJcFx&o_y>IOlqC; zlOpv^@?;XY`UnX@53*EfP9t|Ppvu3#E3^trCGYIizi=HZMX>EVTvPj1)y-uhsNw=E z>9Mwx`VZp=E$(9*6esXrN#ZKT$LHtyk~~RR))EsRl*9x*lhP{bB2*99TZ?GM!XSo| z-FsW?X0IQ#kFbdylkx6iNqrv85$JRMZq4s|B>IAeYMk1qw{c?H_Okg&B@vBCBxjLF z`f2UQY6ts+zY31dyq`p+W8sr$ISJ@s3hS~6J=nf>@;QS{gBZ;fqeJ)~uj@TYy9q+# zEwMbgfIA(8qyQ}^ot6Xp--rHQ+?9h*`z#yqB$ner>%&?fM={;1LdUK`zD zV2#kVY)ICh{-y5F?}Bx^IIodX0K&46AAJz}P7P2{+$dpH{-NpzBWMnOK>8q~g4mBA znGiNw6wwHyGv*%!?lbH1&Vlc(k1$=e0JrR?OZqKezrR`K7{1eDzkdQHfvAC54{BM0 zoVJ-PWbrRs?jAVwTL$={iMAsrQ6-25;Z2{OK6Op0U%&C? 
z)8-9k=KCU4ajQ3?yT2w}LnmS2ctG#bb77hAR2B^Zmf7~JTm4V0^+ef2z)@i_5&4ZS zXhF*W(;Yb24w!iHQCFQbVo<VYW;OTZ zHxq*^jK{umIXS5RH6|n*e@7BZ6s*8jAFMEeAv$9QF5f=_Ahf1 z)P`hKlUSA$o^*~4dW|7`fBX5U=5_THW`K6;p4QvTL*8hr`<31~{Z4nQ-Tp4Qx=O-& zAM&#M3d)vy7j*NCR7_Qr(m|gTypC++`$?sBadp8&201&B8Wd7Q4p{I~s+9_ho7N40uJw9#g zk@#$irj|MW*+>1x4j(03RmSGk0kOnr>@rHEMr}+(3Z`!TSR?cYb;9v%40Y`8i#u>l z%htrpF_Ic@R4=KQUA$yiw!6dOzdEYocouOg%JuJ65bw-y|Y0r2W{+ zt;?xqr*0dorx!%L%dyOsIrY#_DWG#G*-UGUlT}Ljfs90rj%j=|KfgyYjG;k8HfwG! zSLz{%jPg&k3U12&NNI$`v;se+AFY1mblT$tJ0}9dy9)Z-pqr9CvdDH-sxGTbJ{a@* z?(4Iu%$b(+^UY}(suXQ+!f-14SV+2J7QYYmkxem=-Faug_O_73%{?^rP`jk=td)nA zu9~+EF6609nW3*%+SE|=@Fc;mTd>2e!=U=o*}4OJW)m3In=qnoG!_xw4eL4B21lb236e+P~XH}ipR#-u_PZ z!?|(8o%}?i?9b!R2v69kM#NsYa?Cq&U%O($TlnzJO%@&2hyExQ|Gn+%Cwdlm7fFxb zHH#Rq10#K<6>HPm@HpjG*TWt2Z}#6r8Q$?ZY1-zerZ9dpIdV-O*Q#{-Zx?GFUqRBP0UrY3W16Wq_;Rg^2A99=p#qq=d5aGHkBlT*I*(a6EqRA zS_PPoLQd!%Giefn!S`RZWgQCi8f9WCgEGBNJE!OUoZCklN_yrcF)*MZo|SgLlGw+4 zwcN2T^Mh{8%MSTa9SoCny72C(&TQ(34y0Iqon13Fg>SO=m5Y~Ux#@p z?Du9y7DW6qD=d`X#T1lSoeH;3d8!;J)Wwa$POkBf{Bvmf_ny9u0v|f-9*&oJg$eQF zHG+}r!M2$6P{E!R=v6Cbj2N|u+Un)2xsoK=$m<4FTK?;JK4|CKY+Sh51+jiv92Sp-u}O!FB*pZb|MTb(HbJ&TbW< zJ+4;lg=PacosEC!*J7D!%WlU4o{|$=CT^&1%>2iPisL;4I;|u&G!b+dC=`VO7PEy9 zn+EL4W&ux-ll<4q7S8?F$-Ryw2Kq_a9I@>x;Dh@uh?O*5kZ!TfFv3a9HQ^_8p_{$-jGE zCjV}8Zupzu#kcPL$I{nu`OMXo)eUyjh+V%4e*Zp5U^u| zJcE3|h{XFaRlF}4rXs^u#2ug$Hx?DSrb`%wx31zZX3iiIE$BVmB$<10Rb{mokwaGm z+dH|wf_l1WzM%%sgM|*&hC+L%*|(Bgq70?sS2gb0@M!`2ZPW#vRI^s+$a}E(#2-r3jFygA zI_VE2`SoU{+`p0sJwQ9hQsS4EW@acWEB_D&f(Kn_$q5u&XsGR!0N}fWG=etppmAjv zR^6dALZi_7)r(#P72wR{zBWG)(D8r-sL4Ox;zdAZBLYn=HpdUHqdp-xDDuIDUuDwT zJSZI!P&co_0I}b)&}XPTb^4wY+`_rWt_xwS_uP7OVj3|D?RGpn8XJDvyLyv^%8`PV zR7A6Vo(|HD^}E~FZtJRJJ>xk(aW-r7l=cZXYiG$hayxmA7rT^7{(by3QW&4@%+iUh z(Hz6=nAW@{IKl#^KDd1_4pTnFiPmst3DUjuZmmbxt=w43_Y*M>d|9EQ4kz7v{`@S) z!XWMG$*+HHa9S9Q8jA_znhQSD!2tReV%J-vOx`uvZFB*7`0;Vq3}-m(Txnh3vb1Rc zs;35(*BbQJ>5)RNdcLBFR_>PQA8q=@F9J`-{FslQFATi&26D}`b)HU69j*;c$)f$5 
z#t+=fkVv#Sd8f8%c=+q0c#Le|E%`W)%;yR6(bC^SwtRj1*9HZRp;%62H6NT*FFFm$ ziKtBUxGgxsvt%n)FfuBmnIDQ-YN?;T2o6kL$%u?m?Bua|xi{jrW7V@6C?t%g)@ifqGTxk-J|M@o ziFGghnbanpdS-a4g46p);q;c*n!C_(eJSYsvjUTQ{un1ec{ps0BFa-^3B>^GPo{ut|+h*LBi>^I{Dw{ptoQ=enP_j`bZ(k zWl!#ToSQcexAX4VeRs(P`_jdMZDCN0h`;x$Key{hme4xj%achZdqA%g#=~%pykzb< z!0BK;WJF+4)^KghD3rW?m{ZnoU`J-ZexPeC{1v7344M!Q`P~z}%`w z@(=I70%r=8)=C(j#prpYKonqIaoSurXz&t@0YYMOq*s(U`)s$s{XC*8poTfsc< zI12>I%RxJ8A_!KX;uuDTJkqfT6^`w`C*Bi9dRSr_TiA|H_DJ^Z#Ptb`(HHe zMZ6k}WjL4{t`I*NZmfi_a1{j|NRb8a~=w-`t>f^@(6V9%ob&Y3ybk zU^UTTsTz~JXj=Jwo2&E1;YpxdiQA`oT1Tx^aBqk}9yligdiYyc2L_gjjYoA6 zt3LzM0Y{Yo`W1=u5Z*Q40pk2NqGXBYo{o-@?O)n8kGI~uZmJMxs;m9OocYU6MgPGO!m6k zI~Z)5m0a~uYsJFw=^}bBZv%`N^ZkuYsz*H`s%wfCllJm-SUz=bPWdS}1by9wtI_%G zp`U0Sr}wDLE|T`>pZ%l@z5lQpeZtuU58Ys23P`%AmyPG6JDTsdwX@0bb(2UFu)SFaM}2~dBjhsO$A$0Dxe09 zQ74W1Ej|`uE-d?7+YEKFs)Yl5CPU!KM=7sk_zJ}s$zV3Q@#y#R3Qomt=F&<=WzvZ* zw^zzu6zv71-^gjCd^F3eAwP&u*}&@51KKOA_9Nrfc^>@N@5>j<@>lGq2{TLx{te*g zI#|u`XIV%qR(_B00f*RK1Pn~}mf1s@aV_a1xaWC1XgH{&ddIfONB6|JHSP5WSGd0m z5?=Y2WTV86RW=@CNLh8B(hJUQGeteNW(7zfB&|EEb+0yF%4*Y$0Nq+TIv8K*(Gse1 z?Z*7L{(-3?GZWfBjZEG~pQ6^-&7evofvxB!5GV9i33fsae{B$hB|lcRJaukH zNx)>LPLub6r6!q#te30)GR|?UG(mxfcji>0MZn!-?|W|F|3<#CSXg!(^7Zo8N69j~ zB0-~0&-g5_nA22=-FvFnQQUuEyz0r`|3N{jk?wkbBCu~*w&s8)y>dm5W zalcL;&RLN@84%Bw*p%q;{N~I3boBz|hBO_G#-1jZ@zc1(Oq7SQ)l<(aN!3=b3r?Av zHfd7GuMIB$wk+H43vzMe#xN4U3Ugf z!tvyLnsEh_awggh^}rrr8fME&^L1YBiJ68@80s4IaaMrtTI(+iKKLhM4s=LY0R5z2 zKm+)X9!H5^uI~z2Z`8XzK&ODrX%|70 z9mYs%L_ew1D)U+p;-OinNC6U`+xbAKlC|)(R0ZG%Gsl|UOkM46^p?qnM2zTMeU`WH z_Vr|Mt^TR3+RNJa2Z9ib5^x{umt=So@fg7hRvEiMh?O8H!jAwH5k~yYQaVOi8z(d~ zKocGBe6PxwB_A&Gs+Jy$v-MG(c+3>e3_bU}_Ey=E9JOTh!YTU_uacq(tw!k2B%aI+ z*DN*?`~I&DpfzwGw&HfN5wk7qqa6ChG7qsIVT_ih)-tpVeDBQ3{O>T0&UC8^(^R$L zaGtjuZQe6-VCO||ZS^>8WVb-#^sVY9@;DScuvEE<GAiRi&tfp9w4XvPM!;KHOw zJ+z?Af>!fEsH^u~^&+;tUZ+yy3414n2!pcu1Qv6sVx2=Ucf_nrFY_VKR%V!9j3!4I?} zvObrLp>}%+4)nfa+MMY&@q2e(VK7eH_W58&w2`01V%A8j&gv$Nhk7ABKu|~D$=e7z 
z#}K5b00|22RLY>f8f&2bz&@5xXu%z!!5UC8ZlhAGwClG0zIcD`G);fg??X)iYUM+L zW|>51q~F)9OSy^lHn=VoEB$-cNOIUby&mnfsyN%Yknw7%?{=(tGJBo;Wg1 zv%8jja;vkBoBWoZ&Ad>GC;hPw*uv{RcO0PI2i{g}LO+2d2!5mHy7S>4Wumgbvqb@g zaV7erI$TyOtTpF6+?H-oyL_p;x$*7#mn1D$^(oX6R`E`&UD+TB)dv`40N{aj+`+Vp zwpk#Y!=h7Vq9)VynE&jolI_N>HL=hf@IFZ()QS9RygzfNKj>1toFI} z^Vms1C_p{S(=#3(u5&2iF;%*CFI9g$upM}<)8$|l1|@iwQd;A{7HX>7&oT)qofBY( zcqYu+>s?0Osx=K#O0+UPuB}dj9-;HwsV2qQlt6`{wN01@PWMp*@f-rI!{{0u`=#%E zWViZscF#y0*~8^<4_^y^RCm2lTE| z`h9+Ua6({t-jmiQe!BN`unYPIy;hxYQWvL_oa~qDApMYSZy(j1Fme@(Wn(G zo>4x4Lw68Kx#~$RNMKXAvV(kV_Fo#q&1=617ln04V91ysi*lz2_c;BUW;u+3D*k|m z+!P0KjBClFP|;y^C%RX-Kj*%N4N8=-uxiT65GaZ-WA$Zzc zy=pbTu&Mzov8I{lmZkw6WhyO#|e6|1FR8U*7otfMD~V_HG@+yfd+}t( zK(L`YJTQul&_8?NWikZPSlHWk!5+%46w&L{D<5P6YA;I|SkXRvE3Osz+}1dsF!!;% z?oq4S>Vf@tZ(4rjxy7>$1M342;dah;s*XkaN{w}NE>d&7S!C~%EcVf0|HM57c-Ldo zWvb^9`Iocj?yMXtT@xs`7S$L%cP)NeZPtvaL3ft(uxRnH&o;W&5S$~*J= zo%Nm6qiLTr){ZtUz)|eYg6*wbSA33K0f=y5+)|oY`lNAN!q^dk!m!=e?efq2y^ygt zX@|CPY}30pJo_|qqrSaWX>~x9e_DTc>$v!ppBTo?O{Nw#H%-(fGMy@vjx_Z);(h0C zmS4Kp!mY_m+@)U>yY#?oIe@)+e@bkuqf3PI!o248$-UV(@@Wa`M}EZGynRm_9>G*Y zy8u&JAWG*K*BC1xSp&#j1i~@r<751@%*>YoXMHRW3Od=@i6RQOZ(y|(LeCsOug2d< zC6#u4{QgVx+bGE-?)NvtPEW`_QCV#02TMBbBsI;f%ru^B&Hi|iO405lM%4Z0s`1wb z=?TkZp8-yR<9zCP;0*V5GwogZfF{Mn4SHGjE0F{Wc5F@$APo*sHtJ)F+Q=NC#vq@e zPRr^&Cg8b4Ki1cNa(kyRq0-Rpd(@IQ-(8KL1sTu!YxKXvN5wCl*WqV2z&mQ9hlZ0l z#!&Kz+4`cdiHV1S1{+TX4#Y15xiYUcVoE%MFglIYl+GZ3wfdjy&RtGc{UfqW1pFJF zx3Aa8CJ{9_J%FwFeHIwQFe&xi@{QY!hg8UN-=?ofN`HHAXVzHQzoX+3<@a_X32bWF z-}KJBv8vL~^wc}*iP8C!Z%HSU2hyqEo~rJU!AU-c{6-pEo%LdAFoP=QHhUvOYRq)Qs2WC)As#ZmYZKQc_RZRy~q{M6Vxl2QB5HF1j!O~nIr_>{dAbP(Y2z0es>eeu-37NH<5UmQ@3^A z<0Jd-lzF+NN3`f3KCP8|_5M$nJ0ty``Ps&?bJ5UNQg{8o;lBNAA>Ds)+O`VT?>7Ik zhUbAuS)?)MFUZA8*hs}PP_}}bFY2_Hc&`L6tS9CEEy?ch7y92a!VyG+n|XVI_jaa% z!m$$^gQn={f3uu%S=d(Q)p~h+rq{Yn_8YINo1}tU5Bo`h88fF(7Pxumc-WUGL2cg{kmpI?u;8;FlpSxL7vffsPX?@tj(bY(LHmuq`ffYD)S7OP z_(mv5h9{V%*LH&jmWS+^$U!HK#0+C_F!DLC5n~E~K~4$)dya$TzHk;oJb|tQZESFJ 
znsYXi;~z@g!h_fbGsT|oy2N9ivj&)1CHFeVr|-^(b~S({8=1C#M{Eovn{@6@-~BdC zYx#HPH=G-MIB){~|GSs{ zFa}Qg(X}*Oxb$+)P!?E5o?}V!W72?4Q}1}RQpqy(3^@xq8;$%u@kLI8KOv+^1V-Xu znELop6FcyHFSC?InKNLm8nb^y<0PNortH=~D9_S~QHwj)5qn1`wR(P06|K|2OS8C?417&Z){&pfK-MmTSm(&_k)&YCXezQShCD`( zQf5Yx9@F;{vFR^uf5NpzQ4933;Ovh{JErk`Ug zmfdxChy3?T#EsdfVe_Pm(Oy(aQo$P2~MpNC3|=37$l8 zo(dQgRW|~EKP@C|jBQNQXDgicNyx~IiKC^?wcF#-M#X)HVg#I#HoHQeWscDnD+h;` zyZT=;rj~T==UW||P@{s7tI4SafhzORX}>Qwh^tV21-Self9(a{s>;GVT~W~}!+UXj zSte0}6Vr!oJ)a#PNTeY)!AFOOTUrA>m}qHxIQ^0s^NL2uj~B#gX~5truqZ6{Ad(j= zI(p)21`;W6c}`u+phE^8%Ycc7hIQ#2FtQG@1}`DpE70($f(Cggt|{RNScg-naj1f8 zkWr_{l7hmVm5ZJVAJFxA%i=_gT6hZZV--t<`sXJs_9dwq40pW=O_&;5bHFO)@*KD^ z_H8{BKezz(=^rkq>)@UP?7jYqp*Z-eTPM%+`9iscfG-NwFFYNQVhz(g4aLCeXB@6# ztu`VyIeS9i1|&}-hw`%o9Rv3@#Af;>SJxrhg8bLp1igLz7S{$avtYaHkn zNZtvpZ0&6eCA{uW;063ZV$UKWz}Ix>K%FF-WyG^J6e38ow$8^hCfE;vXCsscFErcd zdOGW}MeCwDc}Yb@Nf&$c#s73D7-?_KV$BT?Zt9=z+x^C(H);aTSgCL;=FvNjT3F=W zPIBg>si(p6=4DXw-J#r6gn`I$s|;w}MI9~$QnnF0(!~4gj!nm#dnI&;znbo+rl+@heVj61P7ja(v}{KjhU9|S!%h_ZLUw_1}jC4o1sM|w=!Lsa6Kz%65Pu1v|MSUI?YX%bVmR@a>mS2PO_ ze&ty(MU8*I)bycGJ1`}$Jans}Q^ETL@44SZww79+iIAC(Giz0-mRB6=tEOm%WPt(C zunv$xf-Au4BT5hq*yuSr8)M{v+()?iMV~U+!Lt~Df%Se2eK>s zrw!u&x$QL^3U6;VCz(Z>#c@3d+xeM1-#_tDO$YYD*5sM>@}fWCiyj3`Ib%78dP7B4 z9 zfTd)W#9nB1l7R(L^XQND{J%CNqhi-&xwkk;3;}5V#I6aNDa$_CQaXR_*J!e9M{I7_ zS~}>Fo(2AY{xdhOK=t3CB4*%E4y7aK)p0e-48$%^ z$%n&L?SiT{gfvVvl}9SRk=1E87v}Z$wn8br^Eaya>8q4pfiUkqagbErH7CRk;JH-8 zecW^ST)Go`_j{+k)Z|?euKegx@VeJ!^HPSeBCy_>y~Em}2{_+?yZT9zt5sQ*xsGkC zRTH4%)^tpJc}A=6ZEax1gsbx37V`2x+KP}?f<|cL|JB}iMm3qX>pC_@iik8Rl93`H zB_dToGL8ZQ1CcH@Dk4ol1SBYsQF>R9Di9GNASHA}0+AL31O$XYkdh!EO%iE=l-ZBp z%sStkZ-3{kz4zIBt-aUrS62QcdEeacbKT{-u3H^K3g3Fo%3=SoILhxS4_4U{!)ul9 z%1N$g6I&~nDwkSxM?NLA-!3BlLbX8y8??xobhX;yg0HiD`diZaUG6GRL}tHMKO zl6@StkL6$5t@H5TYi0CjW&D4Cft|zON~X*4U{)SbOnKY+!JxN}m(Z;~z-35DqrqFj z32ROdC$qD}vs?0CXhfDFOExc&dOOlrG4YJP(;h&!tZ6lhfU!PJwT%>R)xp1Xm z)5n8tPT`GvLgx^PYBhOCC&tXhwkmoOK8YB2x>{LwShuqr_>uxsF$v*?tDfdtN9PJ0 
zc=Uo+%OTxhLp3;CyVCWzv&ivVV)VjW)>G;T2=kZdH~b>}o==drLESl2S?Mny8wX1LRJ*}O)i71L} zY{(_Uf%K07tNYp>RHIMLC=+-3$&A-5TV>Xh95Ey4DiB1W#W(R=LENr(7WX_{W8c-p z+uJug5t|COlao3_&-jl|FFEo(A#XK|*4(9-BvsR#J9a`(i-cAye zCvywfdl-v^c{#sq8eF&SFEn~pDsxZBySsKx5TYE-KKu<-VvF!aOY2AsIfCSryAh<`V$B_2H7W~66XD=&5Rq_*YrP;x8N0qr zo->Z}p*Nh)(>Y+>O3-?#cGe}`s#HW>?^hdyb3}GcVGl|766=(%{gs#MbBW!q2D$p$ znKCKlf@tji;Q&vorp`7VSqV?A?7*Wz3(d)^6Eo-zLpq-7Muv1|so60#WjlGJ^!CzY z*vA_m&o{nD5T*o&aKD2nx0-@zvI8J`zhz1()+HiCF4$uuhR-E@-d!54MMhC!u2$#ap!TT4yu#A6RAJt zyuX+K{$!p`+*_I{KhPIQ9r+m+iJ~h3UlDKietim-w7w-zt?L|U)k+Wbr!2JcNa!>% zz#QkdotmXQO{yX{Q8)h3WPd$yWfLeF_}|Kl^Gy&+3PHZ5nJWg=U4IKM$-dz3Tt%8Sj67B@Dzx zx3Mc{%h_Xy%2i@~%kj@j6-)#xOT7snFCko6^V>kV7yCelNhpkhji`MxrP~`{F3O`o z@Uv?bAnw-Z&+zEz4;Q4YnJ0+^wGX7+^)+&5APQJ|Hv?Vzj%(UuM^H}5>ps|vpZ*qM zyc=Sm9Jm2<2@L|;&`{tLEY)Q%PJ+e*4(s$0chXv9c^#KV{f7d&%1o&V1GaZ5A>(vs zuop){V6J~>-pR`Gr%G;ZHFTJBeF|l2i=;o(F_7)4;O@J$&8T&5&~_&bHZc$>6Kljd zm)$EBE`ZCkpvgY$$V#q*llZqJhgvj|I)*Kyy8a9~dipk@!6w_g1o!$C-JCLUpGS&v zpw zfI`!&y$qS!#b-2s>`U!{um_~crhZx$9fMX|$$LD!K-@q`y?o*=wwaDsMlYh+HsjjYu;J%kC?4qi`ID0^zG;*nMN<7v z?5%%)81BQjs6LOLLhp!gZYLM8cxR_xzx&sz$DLB0Q<61JmDbh=va*r{tqn|6kM7xX z)acDK-lZV+kDwAc$O<`6CsW{@VT1Sv-ta&aC?J0b=o-cu&46DOcH*HElz2)~rJm>0~($huAI_e>lr)0?2 z&b=uR^(lD`VmchKDrQzWq!!Pmq+m2uRvs`3H5v8>D-dfZwFy72-YQd6EO&_`_4o9X zbf=h!|w{vAkd;WGp!v<2>hYRZSAapr=yGLQ{kQja(LheW(I`b%1 z8YRHiJbxnb_3>1L&+RWCziqX+4@NG)it~xt|5|rBOf-7*54^GckGrIY47pWsHO7lT zFq#OsW>N>hdeZ~BlQCBM2eM-J90hx%+Q7{S0HtZFvnuOpz@*_8k@Z4XC5ViOM;yUH z{mAI>>BAxCua1&HZKo%(?2Vc7Y&k=dNgH!0=O8$%nOv!zgc)Q{P1sPkBf=&niWdVZ zy)4Kja)-6Ww&a)8rckjJT*iFBP-OE0UVmbMxUmU)Nt*l5G-LQ{aQu*BC|-rLcy1&R zGuSO2CvjHlw)NoP16xB2>$C^lhxQ-wERg@jJV!>(?5IV;^N+3~`cr2T(y~)L)P_yf z15Q3kO-S+c06aL4&%_JT9yxs{qgzC`Q+7-t>5~57gdUW6sGWv}K|7b*<7}8cW zJ=Gt_A@kd<-@1aFy_UZA!)9?AX`h(hosA~pIA^<#tp0szb-hTIt(b8?$thsih0sZz zK0Ld&xe_izSa)YPq+RtBUdGnv>nvAbqUhn@wnlLxbF)nmTQF9Rs0UYVgJ@9e+TPDW zmbwwbDfgRs^@l5NK6>1d((?tkdMy?4Yz}Ya`$E~`$!%v1{LFBmUE;FLT>ku#d 
zW$kmTPm-%{M8+8iZ>-1;B`-eCbIr^tRR)7XK7U<_YsN^=v@`GGd1Ky*y9s=%3?^_% zrF(~$_lmt76A$F1C95W0EL32op?(bE>ZJu#WmbrhLu5kKM%>$lGd~nY;T(*JZ;0=N zIGc5vw634tlyA5}St;KH#4`6V0-Ha%n~3tnk7Iup`uw;Y%)bcJQ8s6u@97n|Jl;*Q z$|PU+6F2|GY%J##MWbsL3E7MD>vtW$(@Ij6IEhj()9l=Zh(k&eGi_MoLyLqQ1upYn=*?tqQx|7&tMCpD966 z;YtT`eequCaR1EyQFbSdJ5Soe|JaULdwurvx3GJ7HP?*27jiDB!0X2=>gkRX~kkPs8<-zSw-*h0y$$Elyw#i!eYzA2fAzFOVz#+~h z#L*EaUuJbfvKN}_fTo``ixPpaN@v$DHwo=R+6EFxv7yC%UngbTKMl9s% zz!MKR8?)!dq$#4rYG0$W67!K#g%9?%Y_U4v?hXUy)6=Uk>H?gOr9!*x-IAF+#giWw z3v7TRS=Fn`k+On@bcX(DZK(4wd^F(P&3=U@qlIC0FD{+B`lgzuD|tg-MuYLWI0Nbc z5c5z%;2g-vc6y5#gGwr@BY`23wfYFERV;^Yq~Mv$^6##Zrra&}O+2jJEY+v2hWEKN@S6F2xgr5q&bJ zt-fekoNtk;v0V7lf>3w_k%9w0#gwz zOC5L+CgM)=jm49_m`A8+QXrHUX)$a%s)VlsOVfz)u5?0211R8Yr@iSQbhC6--4Z8_phpJ;o(Mq!#G>C*zeIbM5Y4hb+?Ftv9*$pJcXm%)5?2Nm8ia1$)JxlPYbH!SZ@1Q3}{Eq*1w)Q6r`3EKxRLL%=nIr_&e2s)*bDm@9Gw;#4 z7}}_|AS${GMJ>jY1!xGS()0s8VfF~fi4imKcHV+gHsrmnn1eKSI0M*|C5IQ=AcwVc7tbY(r)wPjd- z8`MNPJwq;&lOvhu$PS?5zA}9X=glf9^O!o7YJsa^K-;m$>94oCx~s2glGMe#bnH~; zI(2$8m(hF{{O0CqE0Y^um~4Y5`Em{K$}xevs=JL9mbafBb|H`g=dm*&@zM@}xqncH zb9?OuCgQUMoI~fnqy>3<^FGz-jyBBdB%vuM= zpx_oA0HU4gy@0#%nx5eTT;2_1d~oVu1`Z3yInBB6vJ2ewQiEAlGl(N}^v|fePJ!et z7h)>+idz6=Y9n^Qmrg9uzZk7Q)dpYlzcE@}0{2VkA_cxSQHArbc zrF*ASQt>gsUhCP%#{>fcWt@%)x=J58|BQE5fz3($Mp32LbWCYC(m|7YKbd+rITVtx z8JXIJs<}+#h{OrHkXP+l6&32hDPD!B>xBj052T>TFBra-!WpqXn+|u)AYR$<1d_lg zTa3Exs`!gZdclN;UDJ+^7fP==a5^ub4yXO4o`J))tU(D7dUO+;6BK0z;9<^M-vW1V zVb~$`@3;2SmNgIu0_(1S5>&}$=4=!*9l?ybe+u18$F9RFed$sGXsUH`r1Qb>EztXZ z7#TKj?}G$C)j6&b=t-WU$j-WM0z83Eis=No!6G`7l>HrIE5z`l8wPqc1$yLk43Qz=m6H|5B+2JA$8S0WuEW zRepvSyVIGMZp1hfq6?xpgT`gd%4hPSMvgdQd)fj6~)L4$4nb_lx^yL9XdDQRk>Fh^`$G;!gA66 z^!a}hki^Y6*Z|O}^nyJnr~kZb0qjE{e?)=&u|nH0fhkq$39CkoZ?K>py9WzG{eU=T zcI#%w+nVYTZ)KmGU!T$Y6o+;$cdX5|i>I_qRuwdIT4Id{ra!aa{oOw=7R5%P}GA+uOPCzDb-dS*L5pqGJ&5S_vYO4lsePe-Yv zc}Sl38rmD{l1rUR*`*^U0p(*KT4)w8ZqKhAc@Z z#Cd97Mu>Vb$;2J!H07jjQX11Dc_xbq~ADh69$5N!3+XR#w*~DR}89=(s;`sHc^xXfVXdqrl`D=}r0_B*xxOQlt4%r-F>R4j{EJFX4N? 
z80r=;Bk`#$$PQ~^mHq_*5uSHsGvoxHc-TA_IH+hOSzb8Alatd*%hr6{rZ zpd-(eYlPQZQeVHuZYOlUfz<>Z&#!`U+E>rrDvLFY*J*ujt*l>(1RTr*!q9^Y|C8yDubqLh%68S79q)2zRgv z%|Qp|o&qpm@XQ{eRiMd?rcnFgEfoFwZhZl{`QFx{{iEq1mktRowR6|hyr+9OmW3h1 zxj*L_RIXI^QD%z_gD^{(A37%0P1RWoyU-C9;c>yJ8QWwNMJ!>(p5L$WzTMlK8_=I= zhCn|ns?U9-hh%i`N#FkOjSbzpFXsDk;}Ifm7@)t-?K zrOfj*;&1BcAMH_Xe$;6KU@PwW#FANIy_Ss(!*=X3$T0`8G zAwx6Ai?f=-!uMrEMVIFvm&^6&e(WLE)xq&DyDAm_SHF#xfi+wigld`$B!s`B+;k98 zN0CXxZ%_id9H5788pcOwZ^Ki~D~Hq@sxnMRb=3wYf65(F5Yv=XSK zbDk*K@WCvRm4+%mm6U_^Dk>vxj>Wd;I^3*B5&h};v;n3V=vN%LN=?TG7JY{758LJZR~7}xqfKG|2yeXq3@i=*o`{}HgMag;jQ*#8QNgkR z3JtujGk)JADba)Z!!`2+$Pyj`^cn;+-1!hzCTeG6@GmgJ`|nC?KqUNQnymI0B){-S zTx6m;&P`Kor+3v5%mH)_07kuRy(&XMX83A{`g-Ee-0+{=4#<@PCeULj;&+bxiEj7Y zB2hCFF#0J>j4R(ar6&*u3v!)SrI*ga9zZ28ziQ{3NxVQ%^$vOTDx#7U^wRGMO|7Zn zGkW=|6=oc2gEv7x!|zAU48s$&b34nYSH&3COl@jloSMa?JL(`#mI8~hPyBeR;zlik zkKPw1Znr4>V+ekukh&IgRZ&L&!3XKSoGO!*PQ0yeD`H*ga_Ef~d-h!x{4;tBw1OWh zvfrSqc(yVq*l0Yb0|nA~PSW9iDQ`SpaM9t zTiCI|y3o{G{w3QZFN|~6%PsC{x{kZIgd+N~2{|s+Ol&c0f*$Y;@>|l(`;jJuad9be{@k#8 z^I3y^L%4e8iqEZ|KB%8_GakJf*X!sM)IKp-x-tR3(!BZ_JnI8g)sn*ju`1bTSVM#wteeP2y5*2W4(Om z>)x0Q{Nw;X+G)5#fRA>E6`ne}G_=tKc603lbrx(U3Q`%paz6A0efSd9dW|rhU~e@_ z+=2;)sq;WPGNvFSN^vH~7alNWy)om+3vH_QlNeG!v-5G;%e1gOKT&#xtBCG8_UvMq z^>`RIXeU?{1a^4^13!#eo)3F#bMnFekv;k!*V+DACv!;A_O)gQb@=W@4v^W{VlR?e z+ANC8i+vPi(uDpE8y!D zwf+&#MLVqDk*TQRlt8#4(&Cguz8&1cndF*wy-MV!GT71zIn%q0F%)}HZjse&uKi3yYG>xjAQ0 z=0i3&u(?~qyhsC{;ByQpeWWORb}NE~o$|g#xog;{H>*%|2biZfjanC$HZ?V~13^P=938cj1cw9oeqa* zdms&<5b`?gpqB166Q;vG@&uDXe-KS5ac_K4LP5fypKy)krX#s&SPQeQ9VYz!fUp=2 z5~FpmiKLY$S`uAf(cP0C6gyvCH`guJ#3-gsy0LjMtm3QdFtP-lMdi0@laFWI1ORb( zN6)EOb}R&-h5SK;7dFSib6$PwZfFa~Rteq6GJ^qcPv*Owk-D&(@XAePE~(ycm*Aha z-py!;|75&26;k?ifd;wIqL#=m>C}ZH;6clLeU%OOoM5gG2zSai9f{S02snfYik{?& z^-I$%bXnLyp4>ztehk4EV&XgR7IqiaW=dtWznZA?pTG6{o-0p-uA8F0GuXqZf+Wq1 zWi80j>UbckCW))0SCu?|#JP`;J<^kl?L0I{g=;%O8-u$SsoO&e9c5S@ zvLZ$#gF4N1!zB9aJ0CBDB#1_TFzr=Bn8NYc_Q4XTYp3H{B(17fq-p9e2?=iT<>Klg zu8t=myK8TJxm96cr$-$(kGHJ?dt|4>0JxDEm 
zG-WboY@#dBkEi*==+>G>XSRE}&yBb<{pssxVkYcQ#g`jhm$5k(WRs26R-3h{m1~50 zowCAMB%HENIGd~OUv8|LVxD&Cly$Qz%2 z8rNi=YJ++drcMVn;DP#Th9UC=zItHg8mihuh?{9cbFP{1@Sg)MH&0|dd33eOY%60W zQ8r6rOmgBRtgc#ucXz+-7Gfd~id>GGk8pz1+p9x(r8gGfOIdFGr0s31bJfX;Sc{-o zx`b<9sMI28;JH!Z(ShEwy&>zzW9E~`y(X@IeM%xV-q-ljJT{J&qEF9anBp!+d$v&3 zzHfMxxziky_al%Tj<;T^VAI?x!UiQIKb%fjmh&jP`|!87_Uc)@t0ElH_!>?hPJBX( z^z|h>O*pKbCgI#XVYEKl%RJM2nwSzFez^l-^ZZGU2sJo^KU9hG6!QkB^o!qr6iSDr&caE+0Ys-R1vJPwF$9v3ZpXw7{x>vdh^?sbwYq%& z=k$TTuVI775*zwNt{7$XeXU3g>wAo}V?aF#5L)I9k;EO>knIlV)7^720$(r?>?g+@ zcE4k9`cX5F3#oZ=di*N0yoDdd@nqgJZC}bWZnqdF8Lt@%Z-#U|&B$`FQ!t#GpL?oj zNJFnWfd6LmXCQY-BeBOw0-nc204?#R9aGlF)?dZ_OD%6i_OFM3xW2#2xb^2BkDWS& zT|Fxtj;~2^#E)&_c;88rJabWkiwo-f)keliE(B!HV{hpl*#mb!X5*fs4rI~>h8`o9 z-_tm_Q-ad;c+w9FH8okbeNvO$4*Qx~-#xAn76FnY;TBy`;%nNDaqPE@>w)1ipnt$}kwBkR1qE>+z(4P)=RJq+F3Kj)1NP%ZVe)qrYb zc=&Ow!-s3y@y|23kVa|q&SKY){BpD7euoj!Z;NkhxMfgOMDpycZPRbBhe+%bi%GoI zz#ARmY5A^nDA4^T7e{1aOroxnr!~xWSBC~m$ijUXA&ZI~lu6}3gO z%I$#B16NBaw>NTit?jG*(uHz9oVwQB>@jpF-^`C)?8OX+HT=&%J^l+%&V5}i+Vs>} z!$`g3p7sN8pi$1E^hTPbSf9pmoz^@B+ER$tP=%t8vW=4AWxW#_7l}UwZ;Xkp$``%M zzBfgv`Em2u7pW-H+A8y051n6&5~9q};iJ%pghno05VRD``@}q}EJv{Duz0Z$*{)c9 zjM?%W6Ia~)r0R0I%=^@64I!&J?bn0?$cgC^r@}BdfhPr!c7Hw9-AjX(Na&Ar?@ogy)UxtEh+8j;d_kvo zdBA~DRn#PZPHcz23s%3z6z8~l(wY^CJCy&DuZnq`+S@6i`pQb_EIe;w78ZZ?RjNCe zLZ@d*Ci$9x-raGO2N`hQ!K3~~YP?}1oFd*@w+2=q;4qR!pVIVv?_&3Us*^X^zMZgq z*52RD{}WD+;|xt|(Hx!y<^KBa4BRl|)7VFXhqKM7_=V(z$=cL}qeDstx}zCgQ)(c^ zh(NW5uH>2Dr#eu;J9Af#fnk`7(l1^=?TO{Sj6jCJ(UYvrllurl7_!XI`R+s0EF-Q#9 zkvq-%Q7n@00yuv!wCVA#E1#aY#?Z*6;ti7=6Q@7-@fUb^C#)W0VNQY?iDSVSH7-a2W=#Uw(1*NBQ=>_&eHhq7{ zn6fA7KDpzlowm6x>M`NXy9(QqqID4lVqw*&H&}7@VD|nNQTi|izRzx&zWi(BQ0n>S zlOM@LYZ3KbYNNhqI zAxgPLNUg+>#~$~YkS24$3}#9EBs&2l@Bh!W+@B@g_m_WQ7Ja`V-*3qGhwJuTWzx%K2tA59v@0jx)bG~EFcg*>YGT%|=I~smR!|!PL9Sy&u;deCrj)vdS@V_G( J?)iQEUjUedYA^r* literal 0 HcmV?d00001 diff --git a/src/base_cmd.cc b/src/base_cmd.cc index e4a0369b6..d4218d226 100644 --- a/src/base_cmd.cc +++ b/src/base_cmd.cc @@ -29,10 +29,19 
@@ bool BaseCmd::CheckArg(size_t num) const { std::vector BaseCmd::CurrentKey(PClient* client) const { return std::vector{client->Key()}; } void BaseCmd::Execute(PClient* client) { + auto dbIndex = client->GetCurrentDB(); + if (!isExclusive()) { + PSTORE.GetBackend(dbIndex)->LockShared(); + } + if (!DoInitial(client)) { return; } DoCmd(client); + + if (!isExclusive()) { + PSTORE.GetBackend(dbIndex)->UnLockShared(); + } } std::string BaseCmd::ToBinlog(uint32_t exec_time, uint32_t term_id, uint64_t logic_id, uint32_t filenum, diff --git a/src/base_cmd.h b/src/base_cmd.h index 5fb96e0d9..cf774b11e 100644 --- a/src/base_cmd.h +++ b/src/base_cmd.h @@ -16,6 +16,7 @@ #include #include "client.h" +#include "store.h" namespace pikiwidb { @@ -25,6 +26,14 @@ namespace pikiwidb { const std::string kCmdNameDel = "del"; const std::string kCmdNameExists = "exists"; const std::string kCmdNamePExpire = "pexpire"; +const std::string kCmdNameExpireat = "expireat"; +const std::string kCmdNamePExpireat = "pexpireat"; +const std::string kCmdNamePersist = "persist"; +const std::string kCmdNameKeys = "keys"; + +// raft cmd +const std::string kCmdNameRaftCluster = "raft.cluster"; +const std::string kCmdNameRaftNode = "raft.node"; // raft cmd const std::string kCmdNameRaftCluster = "raft.cluster"; @@ -97,16 +106,31 @@ const std::string kCmdNameSCard = "scard"; const std::string kCmdNameSMove = "smove"; const std::string kCmdNameSRandMember = "srandmember"; const std::string kCmdNameSPop = "spop"; +const std::string kCmdNameSMembers = "smembers"; +const std::string kCmdNameSDiff = "sdiff"; +const std::string kCmdNameSDiffstore = "sdiffstore"; // list cmd const std::string kCmdNameLPush = "lpush"; +const std::string kCmdNameLPushx = "lpushx"; const std::string kCmdNameRPush = "rpush"; +const std::string kCmdNameRPushx = "rpushx"; +const std::string kCmdNameLPop = "lpop"; const std::string kCmdNameRPop = "rpop"; const std::string kCmdNameLRem = "lrem"; const std::string kCmdNameLRange = 
"lrange"; const std::string kCmdNameLTrim = "ltrim"; const std::string kCmdNameLSet = "lset"; const std::string kCmdNameLInsert = "linsert"; +const std::string kCmdNameLIndex = "lindex"; +const std::string kCmdNameLLen = "llen"; + +// zset cmd +const std::string kCmdNameZAdd = "zadd"; +const std::string kCmdNameZRevrange = "zrevrange"; +const std::string kCmdNameZRangebyscore = "zrangebyscore"; +const std::string kCmdNameZRevRangeByScore = "zrevrangebyscore"; +const std::string kCmdNameZCard = "zcard"; enum CmdFlags { kCmdFlagsWrite = (1 << 0), // May modify the dataset @@ -124,7 +148,8 @@ enum CmdFlags { kCmdFlagsProtected = (1 << 12), // Don't accept in scripts kCmdFlagsModuleNoCluster = (1 << 13), // No cluster mode support kCmdFlagsNoMulti = (1 << 14), // Cannot be pipelined - kCmdFlagsRaft = (1 << 15), // raft + kCmdFlagsExclusive = (1 << 15), // May change Storage pointer, like pika's kCmdFlagsSuspend + kCmdFlagsRaft = (1 << 16), // raft }; enum AclCategory { @@ -261,6 +286,8 @@ class BaseCmd : public std::enable_shared_from_this { uint32_t GetCmdId() const; + bool isExclusive() { return static_cast(flag_ & kCmdFlagsExclusive); } + protected: // Execute a specific command virtual void DoCmd(PClient* client) = 0; diff --git a/src/cmd_hash.cc b/src/cmd_hash.cc index 32996edc7..7b3358e6f 100644 --- a/src/cmd_hash.cc +++ b/src/cmd_hash.cc @@ -37,7 +37,7 @@ void HSetCmd::DoCmd(PClient* client) { auto value = client->argv_[i + 1]; int32_t temp = 0; // TODO(century): current bw doesn't support multiple fvs, fix it when necessary - s = PSTORE.GetBackend(client->GetCurrentDB())->HSet(client->Key(), field, value, &temp); + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HSet(client->Key(), field, value, &temp); if (s.ok()) { ret += temp; } else { @@ -61,7 +61,7 @@ bool HGetCmd::DoInitial(PClient* client) { void HGetCmd::DoCmd(PClient* client) { PString value; auto field = client->argv_[2]; - storage::Status s = 
PSTORE.GetBackend(client->GetCurrentDB())->HGet(client->Key(), field, &value); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HGet(client->Key(), field, &value); if (s.ok()) { client->AppendString(value); } else if (s.IsNotFound()) { @@ -82,7 +82,7 @@ bool HDelCmd::DoInitial(PClient* client) { void HDelCmd::DoCmd(PClient* client) { int32_t res{}; std::vector fields(client->argv_.begin() + 2, client->argv_.end()); - auto s = PSTORE.GetBackend(client->GetCurrentDB())->HDel(client->Key(), fields, &res); + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HDel(client->Key(), fields, &res); if (!s.ok() && !s.IsNotFound()) { client->SetRes(CmdRes::kErrOther, s.ToString()); return; @@ -104,7 +104,7 @@ bool HMSetCmd::DoInitial(PClient* client) { } void HMSetCmd::DoCmd(PClient* client) { - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->HMSet(client->Key(), client->Fvs()); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HMSet(client->Key(), client->Fvs()); if (s.ok()) { client->SetRes(CmdRes::kOK); } else { @@ -125,7 +125,7 @@ bool HMGetCmd::DoInitial(PClient* client) { void HMGetCmd::DoCmd(PClient* client) { std::vector vss; - auto s = PSTORE.GetBackend(client->GetCurrentDB())->HMGet(client->Key(), client->Fields(), &vss); + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HMGet(client->Key(), client->Fields(), &vss); if (s.ok() || s.IsNotFound()) { client->AppendArrayLenUint64(vss.size()); for (size_t i = 0; i < vss.size(); ++i) { @@ -160,6 +160,7 @@ void HGetAllCmd::DoCmd(PClient* client) { do { fvs.clear(); s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() ->HScan(client->Key(), cursor, "*", PIKIWIDB_SCAN_STEP_LENGTH, &fvs, &next_cursor); if (!s.ok()) { raw.clear(); @@ -199,7 +200,7 @@ bool HKeysCmd::DoInitial(PClient* client) { void HKeysCmd::DoCmd(PClient* client) { std::vector fields; - auto s = 
PSTORE.GetBackend(client->GetCurrentDB())->HKeys(client->Key(), &fields); + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HKeys(client->Key(), &fields); if (s.ok() || s.IsNotFound()) { client->AppendArrayLenUint64(fields.size()); for (const auto& field : fields) { @@ -223,7 +224,7 @@ bool HLenCmd::DoInitial(PClient* client) { void HLenCmd::DoCmd(PClient* client) { int32_t len = 0; - auto s = PSTORE.GetBackend(client->GetCurrentDB())->HLen(client->Key(), &len); + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HLen(client->Key(), &len); if (s.ok() || s.IsNotFound()) { client->AppendInteger(len); } else { @@ -241,7 +242,7 @@ bool HStrLenCmd::DoInitial(PClient* client) { void HStrLenCmd::DoCmd(PClient* client) { int32_t len = 0; - auto s = PSTORE.GetBackend(client->GetCurrentDB())->HStrlen(client->Key(), client->argv_[2], &len); + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HStrlen(client->Key(), client->argv_[2], &len); if (s.ok() || s.IsNotFound()) { client->AppendInteger(len); } else { @@ -288,8 +289,9 @@ void HScanCmd::DoCmd(PClient* client) { // execute command std::vector fvs; int64_t next_cursor{}; - auto status = - PSTORE.GetBackend(client->GetCurrentDB())->HScan(client->Key(), cursor, pattern, count, &fvs, &next_cursor); + auto status = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->HScan(client->Key(), cursor, pattern, count, &fvs, &next_cursor); if (!status.ok() && !status.IsNotFound()) { client->SetRes(CmdRes::kErrOther, status.ToString()); return; @@ -315,7 +317,7 @@ bool HValsCmd::DoInitial(PClient* client) { void HValsCmd::DoCmd(PClient* client) { std::vector valueVec; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->HVals(client->Key(), &valueVec); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HVals(client->Key(), &valueVec); if (s.ok() || s.IsNotFound()) { client->AppendStringVector(valueVec); } else { @@ -344,6 +346,7 @@ void 
HIncrbyFloatCmd::DoCmd(PClient* client) { } std::string newValue; storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() ->HIncrbyfloat(client->Key(), client->argv_[2], client->argv_[3], &newValue); if (s.ok() || s.IsNotFound()) { client->AppendString(newValue); @@ -363,7 +366,9 @@ bool HSetNXCmd::DoInitial(PClient* client) { void HSetNXCmd::DoCmd(PClient* client) { int32_t temp = 0; storage::Status s; - s = PSTORE.GetBackend(client->GetCurrentDB())->HSetnx(client->Key(), client->argv_[2], client->argv_[3], &temp); + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->HSetnx(client->Key(), client->argv_[2], client->argv_[3], &temp); if (s.ok()) { client->AppendInteger(temp); } else { @@ -389,7 +394,7 @@ void HIncrbyCmd::DoCmd(PClient* client) { int64_t temp = 0; storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->HIncrby(client->Key(), client->argv_[2], int_by, &temp); + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HIncrby(client->Key(), client->argv_[2], int_by, &temp); if (s.ok() || s.IsNotFound()) { client->AppendInteger(temp); } else { @@ -431,7 +436,7 @@ void HRandFieldCmd::DoCmd(PClient* client) { // execute command std::vector res; - auto s = PSTORE.GetBackend(client->GetCurrentDB())->HRandField(client->Key(), count, with_values, &res); + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->HRandField(client->Key(), count, with_values, &res); if (s.IsNotFound()) { client->AppendString(""); return; diff --git a/src/cmd_keys.cc b/src/cmd_keys.cc index 673b7636e..8499e247e 100644 --- a/src/cmd_keys.cc +++ b/src/cmd_keys.cc @@ -23,7 +23,7 @@ bool DelCmd::DoInitial(PClient* client) { } void DelCmd::DoCmd(PClient* client) { - int64_t count = PSTORE.GetBackend(client->GetCurrentDB())->Del(client->Keys()); + int64_t count = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Del(client->Keys()); if (count >= 0) { client->AppendInteger(count); } else { @@ -41,7 +41,7 @@ bool 
ExistsCmd::DoInitial(PClient* client) { } void ExistsCmd::DoCmd(PClient* client) { - int64_t count = PSTORE.GetBackend(client->GetCurrentDB())->Exists(client->Keys()); + int64_t count = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Exists(client->Keys()); if (count >= 0) { client->AppendInteger(count); // if (PSTORE.ExistsKey(client->Key())) { @@ -68,11 +68,102 @@ void PExpireCmd::DoCmd(PClient* client) { client->SetRes(CmdRes ::kInvalidInt); return; } - auto res = PSTORE.GetBackend(client->GetCurrentDB())->Expire(client->Key(), msec / 1000); + auto res = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Expire(client->Key(), msec / 1000); if (res != -1) { client->AppendInteger(res); } else { - client->SetRes(CmdRes::kErrOther, "exists internal error"); + client->SetRes(CmdRes::kErrOther, "pexpire internal error"); + } +} + +ExpireatCmd::ExpireatCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryKeyspace) {} + +bool ExpireatCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ExpireatCmd::DoCmd(PClient* client) { + int64_t time_stamp = 0; + if (pstd::String2int(client->argv_[2], &time_stamp) == 0) { + client->SetRes(CmdRes ::kInvalidInt); + return; + } + auto res = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Expireat(client->Key(), time_stamp); + if (res != -1) { + client->AppendInteger(res); + } else { + client->SetRes(CmdRes::kErrOther, "expireat internal error"); + } +} + +PExpireatCmd::PExpireatCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryKeyspace) {} + +bool PExpireatCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +// PExpireatCmd actually invoke Expireat +void PExpireatCmd::DoCmd(PClient* client) { + int64_t time_stamp_ms = 0; + if (pstd::String2int(client->argv_[2], &time_stamp_ms) == 0) { + 
client->SetRes(CmdRes ::kInvalidInt); + return; + } + auto res = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Expireat(client->Key(), time_stamp_ms / 1000); + if (res != -1) { + client->AppendInteger(res); + } else { + client->SetRes(CmdRes::kErrOther, "pexpireat internal error"); + } +} + +PersistCmd::PersistCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryKeyspace) {} + +bool PersistCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void PersistCmd::DoCmd(PClient* client) { + std::map type_status; + auto res = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Persist(client->Key(), &type_status); + if (res != -1) { + client->AppendInteger(res); + } else { + std::string cnt; + for (auto const& s : type_status) { + cnt.append(storage::DataTypeToString[s.first]); + cnt.append(" - "); + cnt.append(s.second.ToString()); + cnt.append(";"); + } + client->SetRes(CmdRes::kErrOther, cnt); + } +} + +KeysCmd::KeysCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryKeyspace) {} + +bool KeysCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void KeysCmd::DoCmd(PClient* client) { + std::vector keys; + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Keys(storage::DataType::kAll, client->Key(), &keys); + if (s.ok()) { + client->AppendArrayLen(keys.size()); + for (auto k : keys) { + client->AppendString(k); + } + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); } } diff --git a/src/cmd_keys.h b/src/cmd_keys.h index ac079e101..c78453cee 100644 --- a/src/cmd_keys.h +++ b/src/cmd_keys.h @@ -43,4 +43,48 @@ class PExpireCmd : public BaseCmd { private: void DoCmd(PClient* client) override; }; + +class ExpireatCmd : public BaseCmd { + public: + ExpireatCmd(const std::string& name, int16_t arity); + + protected: + bool 
DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class PExpireatCmd : public BaseCmd { + public: + PExpireatCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class PersistCmd : public BaseCmd { + public: + PersistCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class KeysCmd : public BaseCmd { + public: + KeysCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; } // namespace pikiwidb diff --git a/src/cmd_kv.cc b/src/cmd_kv.cc index cedc899fd..3f222fae6 100644 --- a/src/cmd_kv.cc +++ b/src/cmd_kv.cc @@ -24,7 +24,7 @@ bool GetCmd::DoInitial(PClient* client) { void GetCmd::DoCmd(PClient* client) { PString value; uint64_t ttl = -1; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetWithTTL(client->Key(), &value, &ttl); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetWithTTL(client->Key(), &value, &ttl); if (s.ok()) { client->AppendString(value); } else if (s.IsNotFound()) { @@ -43,7 +43,7 @@ bool SetCmd::DoInitial(PClient* client) { } void SetCmd::DoCmd(PClient* client) { - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Set(client->Key(), client->argv_[2]); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Set(client->Key(), client->argv_[2]); if (s.ok()) { client->SetRes(CmdRes::kOK); } else { @@ -61,7 +61,8 @@ bool AppendCmd::DoInitial(PClient* client) { void AppendCmd::DoCmd(PClient* client) { int32_t new_len = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Append(client->Key(), client->argv_[2], &new_len); + storage::Status s = + 
PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Append(client->Key(), client->argv_[2], &new_len); if (s.ok() || s.IsNotFound()) { client->AppendInteger(new_len); } else { @@ -79,7 +80,8 @@ bool GetSetCmd::DoInitial(PClient* client) { void GetSetCmd::DoCmd(PClient* client) { std::string old_value; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetSet(client->Key(), client->argv_[2], &old_value); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetSet(client->Key(), client->argv_[2], &old_value); if (s.ok()) { if (old_value.empty()) { client->AppendContent("$-1"); @@ -104,7 +106,8 @@ bool MGetCmd::DoInitial(PClient* client) { void MGetCmd::DoCmd(PClient* client) { std::vector db_value_status_array; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->MGet(client->Keys(), &db_value_status_array); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->MGet(client->Keys(), &db_value_status_array); if (s.ok()) { client->AppendArrayLen(db_value_status_array.size()); for (const auto& vs : db_value_status_array) { @@ -142,7 +145,7 @@ void MSetCmd::DoCmd(PClient* client) { for (size_t index = 1; index != client->argv_.size(); index += 2) { kvs.push_back({client->argv_[index], client->argv_[index + 1]}); } - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->MSet(kvs); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->MSet(kvs); if (s.ok()) { client->SetRes(CmdRes::kOK); } else { @@ -167,7 +170,7 @@ void BitCountCmd::DoCmd(PClient* client) { storage::Status s; int32_t count = 0; if (client->argv_.size() == 2) { - s = PSTORE.GetBackend(client->GetCurrentDB())->BitCount(client->Key(), 0, 0, &count, false); + s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->BitCount(client->Key(), 0, 0, &count, false); } else { int64_t start_offset = 0; int64_t end_offset = 0; @@ -177,7 +180,9 @@ void BitCountCmd::DoCmd(PClient* client) { return; 
} - s = PSTORE.GetBackend(client->GetCurrentDB())->BitCount(client->Key(), start_offset, end_offset, &count, true); + s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->BitCount(client->Key(), start_offset, end_offset, &count, true); } if (s.ok() || s.IsNotFound()) { @@ -197,7 +202,7 @@ bool DecrCmd::DoInitial(pikiwidb::PClient* client) { void DecrCmd::DoCmd(pikiwidb::PClient* client) { int64_t ret = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Decrby(client->Key(), 1, &ret); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Decrby(client->Key(), 1, &ret); if (s.ok()) { client->AppendContent(":" + std::to_string(ret)); } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { @@ -219,7 +224,7 @@ bool IncrCmd::DoInitial(pikiwidb::PClient* client) { void IncrCmd::DoCmd(pikiwidb::PClient* client) { int64_t ret = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Incrby(client->Key(), 1, &ret); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Incrby(client->Key(), 1, &ret); if (s.ok()) { client->AppendContent(":" + std::to_string(ret)); } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { @@ -280,8 +285,9 @@ void BitOpCmd::DoCmd(PClient* client) { } else { PString value; int64_t result_length = 0; - storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->BitOp(op, client->argv_[2], keys, value, &result_length); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->BitOp(op, client->argv_[2], keys, value, &result_length); if (s.ok()) { client->AppendInteger(result_length); } else { @@ -300,7 +306,7 @@ bool StrlenCmd::DoInitial(PClient* client) { void StrlenCmd::DoCmd(PClient* client) { int32_t len = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Strlen(client->Key(), &len); + storage::Status s = 
PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Strlen(client->Key(), &len); if (s.ok() || s.IsNotFound()) { client->AppendInteger(len); } else { @@ -324,7 +330,8 @@ bool SetExCmd::DoInitial(PClient* client) { void SetExCmd::DoCmd(PClient* client) { int64_t sec = 0; pstd::String2int(client->argv_[2], &sec); - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Setex(client->Key(), client->argv_[3], sec); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Setex(client->Key(), client->argv_[3], sec); if (s.ok()) { client->SetRes(CmdRes::kOK); } else { @@ -349,6 +356,7 @@ void PSetExCmd::DoCmd(PClient* client) { int64_t msec = 0; pstd::String2int(client->argv_[2], &msec); storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() ->Setex(client->Key(), client->argv_[3], static_cast(msec / 1000)); if (s.ok()) { client->SetRes(CmdRes::kOK); @@ -374,7 +382,7 @@ void IncrbyCmd::DoCmd(PClient* client) { int64_t ret = 0; int64_t by = 0; pstd::String2int(client->argv_[2].data(), client->argv_[2].size(), &by); - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Incrby(client->Key(), by, &ret); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Incrby(client->Key(), by, &ret); if (s.ok()) { client->AppendContent(":" + std::to_string(ret)); } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { @@ -406,7 +414,7 @@ void DecrbyCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kInvalidInt); return; } - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Decrby(client->Key(), by, &ret); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Decrby(client->Key(), by, &ret); if (s.ok()) { client->AppendContent(":" + std::to_string(ret)); } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { @@ -433,7 +441,8 @@ bool IncrbyFloatCmd::DoInitial(PClient* client) { void 
IncrbyFloatCmd::DoCmd(PClient* client) { PString ret; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Incrbyfloat(client->Key(), client->argv_[2], &ret); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Incrbyfloat(client->Key(), client->argv_[2], &ret); if (s.ok()) { client->AppendStringLen(ret.size()); client->AppendContent(ret); @@ -456,7 +465,8 @@ bool SetNXCmd::DoInitial(PClient* client) { void SetNXCmd::DoCmd(PClient* client) { int32_t success = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->Setnx(client->Key(), client->argv_[2], &success); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Setnx(client->Key(), client->argv_[2], &success); if (s.ok()) { client->AppendInteger(success); } else { @@ -479,7 +489,7 @@ void GetBitCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kInvalidInt); return; } - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetBit(client->Key(), offset, &bit_val); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetBit(client->Key(), offset, &bit_val); if (s.ok()) { client->AppendInteger(bit_val); } else { @@ -510,7 +520,7 @@ void GetRangeCmd::DoCmd(PClient* client) { int64_t end = 0; pstd::String2int(client->argv_[2].data(), client->argv_[2].size(), &start); pstd::String2int(client->argv_[3].data(), client->argv_[3].size(), &end); - auto s = PSTORE.GetBackend(client->GetCurrentDB())->Getrange(client->Key(), start, end, &ret); + auto s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Getrange(client->Key(), start, end, &ret); if (!s.ok()) { if (s.IsNotFound()) { client->AppendString(""); @@ -551,8 +561,9 @@ void SetBitCmd::DoCmd(PClient* client) { PString value; int32_t bit_val = 0; - storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->SetBit(client->Key(), offset, static_cast(on), &bit_val); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + 
->GetStorage() + ->SetBit(client->Key(), offset, static_cast(on), &bit_val); if (s.ok()) { client->AppendInteger(static_cast(bit_val)); } else { @@ -578,11 +589,11 @@ void SetRangeCmd::DoCmd(PClient* client) { int32_t ret = 0; storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->Setrange(client->Key(), offset, client->argv_[3], &ret); + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->Setrange(client->Key(), offset, client->argv_[3], &ret); if (!s.ok()) { client->SetRes(CmdRes::kErrOther, "setrange cmd error"); return; } client->AppendInteger(static_cast(ret)); } -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/cmd_list.cc b/src/cmd_list.cc index 2a8b3db10..bbdf8dc1e 100644 --- a/src/cmd_list.cc +++ b/src/cmd_list.cc @@ -21,7 +21,8 @@ bool LPushCmd::DoInitial(PClient* client) { void LPushCmd::DoCmd(PClient* client) { std::vector list_values(client->argv_.begin() + 2, client->argv_.end()); uint64_t reply_num = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->LPush(client->Key(), list_values, &reply_num); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LPush(client->Key(), list_values, &reply_num); if (s.ok()) { client->AppendInteger(reply_num); } else { @@ -29,6 +30,26 @@ void LPushCmd::DoCmd(PClient* client) { } } +LPushxCmd::LPushxCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryList) {} + +bool LPushxCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void LPushxCmd::DoCmd(PClient* client) { + std::vector list_values(client->argv_.begin() + 2, client->argv_.end()); + uint64_t reply_num = 0; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LPushx(client->Key(), list_values, &reply_num); + if (s.ok() || s.IsNotFound()) { + client->AppendInteger(reply_num); + } else { + client->SetRes(CmdRes::kErrOther, 
s.ToString()); + } +} + RPushCmd::RPushCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryList) {} @@ -40,7 +61,8 @@ bool RPushCmd::DoInitial(PClient* client) { void RPushCmd::DoCmd(PClient* client) { std::vector list_values(client->argv_.begin() + 2, client->argv_.end()); uint64_t reply_num = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->RPush(client->Key(), list_values, &reply_num); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->RPush(client->Key(), list_values, &reply_num); if (s.ok()) { client->AppendInteger(reply_num); } else { @@ -48,6 +70,46 @@ void RPushCmd::DoCmd(PClient* client) { } } +RPushxCmd::RPushxCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryList) {} + +bool RPushxCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void RPushxCmd::DoCmd(PClient* client) { + std::vector list_values(client->argv_.begin() + 2, client->argv_.end()); + uint64_t reply_num = 0; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->RPushx(client->Key(), list_values, &reply_num); + if (s.ok() || s.IsNotFound()) { + client->AppendInteger(reply_num); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +LPopCmd::LPopCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryList) {} + +bool LPopCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void LPopCmd::DoCmd(PClient* client) { + std::vector elements; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LPop(client->Key(), 1, &elements); + if (s.ok()) { + client->AppendString(elements[0]); + } else if (s.IsNotFound()) { + client->AppendStringLen(-1); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + 
RPopCmd::RPopCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategoryList) {} @@ -58,7 +120,7 @@ bool RPopCmd::DoInitial(PClient* client) { void RPopCmd::DoCmd(PClient* client) { std::vector elements; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->RPop(client->Key(), 1, &elements); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->RPop(client->Key(), 1, &elements); if (s.ok()) { client->AppendString(elements[0]); } else if (s.IsNotFound()) { @@ -84,7 +146,8 @@ void LRangeCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kInvalidInt); return; } - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->LRange(client->Key(), start_index, end_index, &ret); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LRange(client->Key(), start_index, end_index, &ret); if (!s.ok() && !s.IsNotFound()) { client->SetRes(CmdRes::kSyntaxErr, "lrange cmd error"); return; @@ -110,7 +173,7 @@ void LRemCmd::DoCmd(PClient* client) { uint64_t reply_num = 0; storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->LRem(client->Key(), freq_, client->argv_[3], &reply_num); + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LRem(client->Key(), freq_, client->argv_[3], &reply_num); if (s.ok() || s.IsNotFound()) { client->AppendInteger(reply_num); } else { @@ -134,7 +197,8 @@ void LTrimCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kInvalidInt); return; } - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->LTrim(client->Key(), start_index, end_index); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LTrim(client->Key(), start_index, end_index); if (s.ok() || s.IsNotFound()) { client->SetRes(CmdRes::kOK); } else { @@ -161,7 +225,8 @@ void LSetCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kErrOther, "lset cmd error"); // this will not happend in normal case return; } - 
storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->LSet(client->Key(), val, client->argv_[3]); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LSet(client->Key(), val, client->argv_[3]); if (s.ok()) { client->SetRes(CmdRes::kOK); } else if (s.IsNotFound()) { @@ -195,6 +260,7 @@ void LInsertCmd::DoCmd(PClient* client) { before_or_after = storage::After; } storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() ->LInsert(client->Key(), before_or_after, client->argv_[3], client->argv_[4], &ret); if (!s.ok() && s.IsNotFound()) { client->SetRes(CmdRes::kSyntaxErr, "linsert cmd error"); // just a safeguard @@ -202,4 +268,49 @@ void LInsertCmd::DoCmd(PClient* client) { } client->AppendInteger(ret); } + +LIndexCmd::LIndexCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryList) {} + +bool LIndexCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void LIndexCmd::DoCmd(PClient* client) { + int64_t freq_ = 0; + std::string count = client->argv_[2]; + if (pstd::String2int(count, &freq_) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + + std::string value; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LIndex(client->Key(), freq_, &value); + if (s.ok()) { + client->AppendString(value); + } else if (s.IsNotFound()) { + client->AppendStringLen(-1); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +LLenCmd::LLenCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryList) {} + +bool LLenCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void LLenCmd::DoCmd(PClient* client) { + uint64_t llen = 0; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->LLen(client->Key(), &llen); + if (s.ok() || s.IsNotFound()) { + 
client->AppendInteger(static_cast(llen)); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} } // namespace pikiwidb diff --git a/src/cmd_list.h b/src/cmd_list.h index 55f3678f4..ebdfd29bb 100644 --- a/src/cmd_list.h +++ b/src/cmd_list.h @@ -96,4 +96,58 @@ class LInsertCmd : public BaseCmd { void DoCmd(PClient* client) override; }; +class LPushxCmd : public BaseCmd { + public: + LPushxCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class RPushxCmd : public BaseCmd { + public: + RPushxCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class LPopCmd : public BaseCmd { + public: + LPopCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class LIndexCmd : public BaseCmd { + public: + LIndexCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; + +class LLenCmd : public BaseCmd { + public: + LLenCmd(const std::string& name, int16_t arity); + + protected: + bool DoInitial(PClient* client) override; + + private: + void DoCmd(PClient* client) override; +}; } // namespace pikiwidb \ No newline at end of file diff --git a/src/cmd_set.cc b/src/cmd_set.cc index e4a0e366b..7cf56ca06 100644 --- a/src/cmd_set.cc +++ b/src/cmd_set.cc @@ -22,7 +22,7 @@ bool SIsMemberCmd::DoInitial(PClient* client) { } void SIsMemberCmd::DoCmd(PClient* client) { int32_t reply_Num = 0; // only change to 1 if ismember . 
key not exist it is 0 - PSTORE.GetBackend(client->GetCurrentDB())->SIsmember(client->Key(), client->argv_[2], &reply_Num); + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SIsmember(client->Key(), client->argv_[2], &reply_Num); client->AppendInteger(reply_Num); } @@ -39,7 +39,7 @@ bool SAddCmd::DoInitial(PClient* client) { void SAddCmd::DoCmd(PClient* client) { const std::vector members(client->argv_.begin() + 2, client->argv_.end()); int32_t ret = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SAdd(client->Key(), members, &ret); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SAdd(client->Key(), members, &ret); if (s.ok()) { client->AppendInteger(ret); } else { @@ -60,8 +60,9 @@ void SUnionStoreCmd::DoCmd(PClient* client) { std::vector keys(client->Keys().begin() + 1, client->Keys().end()); std::vector value_to_dest; int32_t ret = 0; - storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->SUnionstore(client->Keys().at(0), keys, value_to_dest, &ret); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->SUnionstore(client->Keys().at(0), keys, value_to_dest, &ret); if (!s.ok()) { client->SetRes(CmdRes::kSyntaxErr, "sunionstore cmd error"); } @@ -79,7 +80,7 @@ bool SInterCmd::DoInitial(PClient* client) { void SInterCmd::DoCmd(PClient* client) { std::vector res_vt; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SInter(client->Keys(), &res_vt); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SInter(client->Keys(), &res_vt); if (!s.ok()) { client->SetRes(CmdRes::kErrOther, "sinter cmd error"); return; @@ -98,7 +99,8 @@ bool SRemCmd::DoInitial(PClient* client) { void SRemCmd::DoCmd(PClient* client) { std::vector to_delete_members(client->argv_.begin() + 2, client->argv_.end()); int32_t reply_num = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SRem(client->Key(), to_delete_members, &reply_num); + 
storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SRem(client->Key(), to_delete_members, &reply_num); if (!s.ok()) { client->SetRes(CmdRes::kErrOther, "srem cmd error"); } @@ -116,7 +118,7 @@ bool SUnionCmd::DoInitial(PClient* client) { void SUnionCmd::DoCmd(PClient* client) { std::vector res_vt; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SUnion(client->Keys(), &res_vt); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SUnion(client->Keys(), &res_vt); if (!s.ok()) { client->SetRes(CmdRes::kErrOther, "sunion cmd error"); } @@ -136,8 +138,9 @@ void SInterStoreCmd::DoCmd(PClient* client) { int32_t reply_num = 0; std::vector inter_keys(client->argv_.begin() + 2, client->argv_.end()); - storage::Status s = - PSTORE.GetBackend(client->GetCurrentDB())->SInterstore(client->Key(), inter_keys, value_to_dest, &reply_num); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->SInterstore(client->Key(), inter_keys, value_to_dest, &reply_num); if (!s.ok()) { client->SetRes(CmdRes::kSyntaxErr, "sinterstore cmd error"); return; @@ -154,7 +157,7 @@ bool SCardCmd::DoInitial(PClient* client) { } void SCardCmd::DoCmd(PClient* client) { int32_t reply_Num = 0; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SCard(client->Key(), &reply_Num); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SCard(client->Key(), &reply_Num); if (!s.ok()) { client->SetRes(CmdRes::kSyntaxErr, "scard cmd error"); return; @@ -170,6 +173,7 @@ bool SMoveCmd::DoInitial(PClient* client) { return true; } void SMoveCmd::DoCmd(PClient* client) { int32_t reply_num = 0; storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() ->SMove(client->argv_[1], client->argv_[2], client->argv_[3], &reply_num); if (!s.ok()) { client->SetRes(CmdRes::kErrOther, "smove cmd error"); @@ -198,7 +202,8 @@ bool SRandMemberCmd::DoInitial(PClient* client) { void 
SRandMemberCmd::DoCmd(PClient* client) { std::vector vec_ret; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SRandmember(client->Key(), this->num_rand, &vec_ret); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SRandmember(client->Key(), this->num_rand, &vec_ret); if (!s.ok()) { client->SetRes(CmdRes::kSyntaxErr, "srandmember cmd error"); return; @@ -224,7 +229,8 @@ void SPopCmd::DoCmd(PClient* client) { if ((client->argv_.size()) == 2) { int64_t cnt = 1; std::vector delete_member; - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SPop(client->Key(), &delete_member, cnt); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SPop(client->Key(), &delete_member, cnt); if (!s.ok()) { client->SetRes(CmdRes::kSyntaxErr, "spop cmd error"); return; @@ -238,7 +244,8 @@ void SPopCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kInvalidInt); return; } - storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->SPop(client->Key(), &delete_members, cnt); + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SPop(client->Key(), &delete_members, cnt); if (!s.ok()) { client->SetRes(CmdRes::kSyntaxErr, "spop cmd error"); return; @@ -250,4 +257,63 @@ void SPopCmd::DoCmd(PClient* client) { return; } } + +SMembersCmd::SMembersCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySet) {} + +bool SMembersCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void SMembersCmd::DoCmd(PClient* client) { + std::vector delete_members; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SMembers(client->Key(), &delete_members); + if (!s.ok()) { + client->SetRes(CmdRes::kSyntaxErr, "smembers cmd error"); + return; + } + client->AppendStringVector(delete_members); +} + +SDiffCmd::SDiffCmd(const std::string& name, int16_t arity) + : 
BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySet) {} + +bool SDiffCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void SDiffCmd::DoCmd(PClient* client) { + std::vector diff_members; + std::vector diff_keys(client->argv_.begin() + 1, client->argv_.end()); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->SDiff(diff_keys, &diff_members); + if (!s.ok()) { + client->SetRes(CmdRes::kSyntaxErr, "sdiff cmd error"); + return; + } + client->AppendStringVector(diff_members); +} + +SDiffstoreCmd::SDiffstoreCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySet) {} + +bool SDiffstoreCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void SDiffstoreCmd::DoCmd(PClient* client) { + std::vector value_to_dest; + int32_t reply_num = 0; + std::vector diffstore_keys(client->argv_.begin() + 2, client->argv_.end()); + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->SDiffstore(client->Key(), diffstore_keys, value_to_dest, &reply_num); + if (!s.ok()) { + client->SetRes(CmdRes::kSyntaxErr, "sdiffstore cmd error"); + return; + } + client->AppendInteger(reply_num); +} } // namespace pikiwidb diff --git a/src/cmd_set.h b/src/cmd_set.h index 9a96fda70..78f00e395 100644 --- a/src/cmd_set.h +++ b/src/cmd_set.h @@ -132,4 +132,37 @@ class SPopCmd : public BaseCmd { void DoCmd(PClient *client) override; }; +class SMembersCmd : public BaseCmd { + public: + SMembersCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class SDiffCmd : public BaseCmd { + public: + SDiffCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class SDiffstoreCmd : public 
BaseCmd { + public: + SDiffstoreCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + } // namespace pikiwidb diff --git a/src/cmd_table_manager.cc b/src/cmd_table_manager.cc index 159ba3da4..2ae39a0a2 100644 --- a/src/cmd_table_manager.cc +++ b/src/cmd_table_manager.cc @@ -15,6 +15,7 @@ #include "cmd_set.h" #include "cmd_raft.h" #include "cmd_table_manager.h" +#include "cmd_zset.h" namespace pikiwidb { @@ -54,6 +55,11 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(Del, -2); ADD_COMMAND(Exists, -2); ADD_COMMAND(PExpire, 3); + ADD_COMMAND(Expireat, 3); + ADD_COMMAND(PExpireat, 3); + ADD_COMMAND(Persist, 2); + ADD_COMMAND(Keys, 2); + // kv ADD_COMMAND(Get, 2); ADD_COMMAND(Set, -3); @@ -106,6 +112,9 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(SMove, 4); ADD_COMMAND(SRandMember, -2); // Added the count argument since Redis 3.2.0 ADD_COMMAND(SPop, -2); + ADD_COMMAND(SMembers, 2); + ADD_COMMAND(SDiff, -2); + ADD_COMMAND(SDiffstore, -3); // list ADD_COMMAND(LPush, -3); @@ -116,6 +125,18 @@ void CmdTableManager::InitCmdTable() { ADD_COMMAND(LTrim, 4); ADD_COMMAND(LSet, 4); ADD_COMMAND(LInsert, 5); + ADD_COMMAND(LPushx, -3); + ADD_COMMAND(RPushx, -3); + ADD_COMMAND(LPop, 2); + ADD_COMMAND(LIndex, 3); + ADD_COMMAND(LLen, 2); + + // zset + ADD_COMMAND(ZAdd, -4); + ADD_COMMAND(ZRevrange, -4); + ADD_COMMAND(ZRangebyscore, -4); + ADD_COMMAND(ZRevRangeByScore, -4); + ADD_COMMAND(ZCard, 2); } std::pair CmdTableManager::GetCommand(const std::string& cmdName, PClient* client) { diff --git a/src/cmd_zset.cc b/src/cmd_zset.cc new file mode 100644 index 000000000..30127b77b --- /dev/null +++ b/src/cmd_zset.cc @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#include "cmd_zset.h" + +#include + +#include "pstd/pstd_string.h" +#include "store.h" + +namespace pikiwidb { + +static void FitLimit(int64_t& count, int64_t& offset, const int64_t size) { + count = count >= 0 ? count : size; + offset = (offset >= 0 && offset < size) ? offset : size; + count = (offset + count < size) ? count : size - offset; +} + +int32_t DoScoreStrRange(std::string begin_score, std::string end_score, bool* left_close, bool* right_close, + double* min_score, double* max_score) { + if (!begin_score.empty() && begin_score.at(0) == '(') { + *left_close = false; + begin_score.erase(begin_score.begin()); + } + if (begin_score == "-inf") { + *min_score = storage::ZSET_SCORE_MIN; + } else if (begin_score == "inf" || begin_score == "+inf") { + *min_score = storage::ZSET_SCORE_MAX; + } else if (pstd::String2d(begin_score.data(), begin_score.size(), min_score) == 0) { + return -1; + } + + if (!end_score.empty() && end_score.at(0) == '(') { + *right_close = false; + end_score.erase(end_score.begin()); + } + if (end_score == "+inf" || end_score == "inf") { + *max_score = storage::ZSET_SCORE_MAX; + } else if (end_score == "-inf") { + *max_score = storage::ZSET_SCORE_MIN; + } else if (pstd::String2d(end_score.data(), end_score.size(), max_score) == 0) { + return -1; + } + return 0; +} + +ZAddCmd::ZAddCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} + +bool ZAddCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZAddCmd::DoCmd(PClient* client) { + size_t argc = client->argv_.size(); + if (argc % 2 == 1) { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + score_members_.clear(); + double score = 0.0; + size_t index = 2; + for (; index < argc; index += 2) { + if (pstd::String2d(client->argv_[index].data(), 
client->argv_[index].size(), &score) == 0) { + client->SetRes(CmdRes::kInvalidFloat); + return; + } + score_members_.push_back({score, client->argv_[index + 1]}); + } + client->SetKey(client->argv_[1]); + int32_t count = 0; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZAdd(client->Key(), score_members_, &count); + if (s.ok()) { + client->AppendInteger(count); + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ZRevrangeCmd::ZRevrangeCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} + +bool ZRevrangeCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRevrangeCmd::DoCmd(PClient* client) { + std::string key; + int64_t start = 0; + int64_t stop = -1; + bool is_ws = false; + if (client->argv_.size() == 5 && (strcasecmp(client->argv_[4].data(), "withscores") == 0)) { + is_ws = true; + } else if (client->argv_.size() != 4) { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + if (pstd::String2int(client->argv_[2].data(), client->argv_[2].size(), &start) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + if (pstd::String2int(client->argv_[3].data(), client->argv_[3].size(), &stop) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + std::vector score_members; + storage::Status s = + PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRevrange(client->Key(), static_cast(start), static_cast(stop), &score_members); + if (s.ok() || s.IsNotFound()) { + if (is_ws) { + char buf[32]; + int64_t len; + client->AppendArrayLenUint64(score_members.size() * 2); + for (const auto& sm : score_members) { + client->AppendStringLenUint64(sm.member.size()); + client->AppendContent(sm.member); + len = pstd::D2string(buf, sizeof(buf), sm.score); + client->AppendStringLen(len); + client->AppendContent(buf); + } + } else { + client->AppendArrayLenUint64(score_members.size()); + 
for (const auto& sm : score_members) { + client->AppendStringLenUint64(sm.member.size()); + client->AppendContent(sm.member); + } + } + } else { + client->SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +ZRangebyscoreCmd::ZRangebyscoreCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} + +bool ZRangebyscoreCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRangebyscoreCmd::DoCmd(PClient* client) { + double min_score = 0, max_score = 0; + bool left_close = true, right_close = true, with_scores = false; + int64_t offset = 0, count = -1; + int32_t ret = DoScoreStrRange(client->argv_[2], client->argv_[3], &left_close, &right_close, &min_score, &max_score); + if (ret == -1) { + client->SetRes(CmdRes::kErrOther, "min or max is not a float"); + return; + } + size_t argc = client->argv_.size(); + if (argc >= 5) { + size_t index = 4; + while (index < argc) { + if (strcasecmp(client->argv_[index].data(), "withscores") == 0) { + with_scores = true; + } else if (strcasecmp(client->argv_[index].data(), "limit") == 0) { + if (index + 3 > argc) { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + if (pstd::String2int(client->argv_[index].data(), client->argv_[index].size(), &offset) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + index++; + if (pstd::String2int(client->argv_[index].data(), client->argv_[index].size(), &count) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + } else { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } + } + + if (min_score == storage::ZSET_SCORE_MAX || max_score == storage::ZSET_SCORE_MIN) { + client->AppendContent("*0"); + return; + } + std::vector score_members; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRangebyscore(client->Key(), min_score, max_score, left_close, right_close, &score_members); + if (!s.ok() && 
!s.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + FitLimit(count, offset, static_cast(score_members.size())); + size_t start = offset; + size_t end = offset + count; + if (with_scores) { + char buf[32]; + int64_t len = 0; + client->AppendArrayLen(count * 2); + for (; start < end; start++) { + client->AppendStringLenUint64(score_members[start].member.size()); + client->AppendContent(score_members[start].member); + len = pstd::D2string(buf, sizeof(buf), score_members[start].score); + client->AppendStringLen(len); + client->AppendContent(buf); + } + } else { + client->AppendArrayLen(count); + for (; start < end; start++) { + client->AppendStringLenUint64(score_members[start].member.size()); + client->AppendContent(score_members[start].member); + } + } +} + +ZCardCmd::ZCardCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategorySortedSet) {} + +bool ZCardCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZCardCmd::DoCmd(PClient* client) { + int32_t reply_Num = 0; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->ZCard(client->Key(), &reply_Num); + if (!s.ok()) { + client->SetRes(CmdRes::kSyntaxErr, "ZCard cmd error"); + return; + } + client->AppendInteger(reply_Num); +} + +ZRevRangeByScoreCmd::ZRevRangeByScoreCmd(const std::string& name, int16_t arity) + : BaseCmd(name, arity, kCmdFlagsWrite, kAclCategoryWrite | kAclCategorySortedSet) {} + +bool ZRevRangeByScoreCmd::DoInitial(PClient* client) { + client->SetKey(client->argv_[1]); + return true; +} + +void ZRevRangeByScoreCmd::DoCmd(PClient* client) { + double min_score = 0; + double max_score = 0; + bool right_close = true; + bool left_close = true; + bool with_scores = false; + int64_t offset = 0, count = -1; + int32_t ret = DoScoreStrRange(client->argv_[3], client->argv_[2], &left_close, &right_close, &min_score, &max_score); + if (ret == -1) { + 
client->SetRes(CmdRes::kErrOther, "min or max is not a float"); + return; + } + size_t argc = client->argv_.size(); + if (argc >= 5) { + size_t index = 4; + while (index < argc) { + if (strcasecmp(client->argv_[index].data(), "withscores") == 0) { + with_scores = true; + } else if (strcasecmp(client->argv_[index].data(), "limit") == 0) { + if (index + 3 > argc) { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + if (pstd::String2int(client->argv_[index].data(), client->argv_[index].size(), &offset) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + index++; + if (pstd::String2int(client->argv_[index].data(), client->argv_[index].size(), &count) == 0) { + client->SetRes(CmdRes::kInvalidInt); + return; + } + } else { + client->SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } + } + + if (min_score == storage::ZSET_SCORE_MAX || max_score == storage::ZSET_SCORE_MIN) { + client->AppendContent("*0"); + return; + } + std::vector score_members; + storage::Status s = PSTORE.GetBackend(client->GetCurrentDB()) + ->GetStorage() + ->ZRevrangebyscore(client->Key(), min_score, max_score, left_close, right_close, count, + offset, &score_members); + if (!s.ok() && !s.IsNotFound()) { + client->SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + FitLimit(count, offset, static_cast(score_members.size())); + size_t start = offset; + size_t end = offset + count; + if (with_scores) { + char buf[32]; + int64_t len = 0; + client->AppendArrayLen(count * 2); + for (; start < end; start++) { + client->AppendStringLenUint64(score_members[start].member.size()); + client->AppendContent(score_members[start].member); + len = pstd::D2string(buf, sizeof(buf), score_members[start].score); + client->AppendStringLen(len); + client->AppendContent(buf); + } + } else { + client->AppendArrayLen(count); + for (; start < end; start++) { + client->AppendStringLenUint64(score_members[start].member.size()); + client->AppendContent(score_members[start].member); + } + } +} + 
+} // namespace pikiwidb \ No newline at end of file diff --git a/src/cmd_zset.h b/src/cmd_zset.h new file mode 100644 index 000000000..ddef7b956 --- /dev/null +++ b/src/cmd_zset.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#pragma once +#include "base_cmd.h" + +namespace pikiwidb { + +class ZAddCmd : public BaseCmd { + public: + ZAddCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + std::string key_; + std::vector score_members_; + void DoCmd(PClient *client) override; +}; + +class ZRevrangeCmd : public BaseCmd { + public: + ZRevrangeCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRangebyscoreCmd : public BaseCmd { + public: + ZRangebyscoreCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZRevRangeByScoreCmd : public BaseCmd { + public: + ZRevRangeByScoreCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +class ZCardCmd : public BaseCmd { + public: + ZCardCmd(const std::string &name, int16_t arity); + + protected: + bool DoInitial(PClient *client) override; + + private: + void DoCmd(PClient *client) override; +}; + +} // namespace pikiwidb \ No newline at end of file diff --git a/src/db.cpp b/src/db.cpp new file mode 100644 index 000000000..ba0c4ba09 --- /dev/null +++ b/src/db.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. 
+ * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#include "db.h" +#include "config.h" + +extern pikiwidb::PConfig g_config; + +namespace pikiwidb { + +DB::DB(int db_id, const std::string &db_path) : db_id_(db_id), db_path_(db_path + std::to_string(db_id) + '/') { + storage::StorageOptions storage_options; + storage_options.options.create_if_missing = true; + storage_options.db_instance_num = g_config.db_instance_num; + storage_options.db_id = db_id; + + // options for CF + storage_options.options.ttl = g_config.rocksdb_ttl_second; + storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; + storage_ = std::make_unique(); + if (auto s = storage_->Open(storage_options, db_path_); !s.ok()) { + ERROR("Storage open failed! {}", s.ToString()); + abort(); + } + opened_ = true; + INFO("Open DB{} success!", db_id); +} +} // namespace pikiwidb diff --git a/src/db.h b/src/db.h new file mode 100644 index 000000000..cdb0081a8 --- /dev/null +++ b/src/db.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#ifndef PIKIWIDB_DB_H +#define PIKIWIDB_DB_H + +#include + +#include "log.h" +#include "pstd/noncopyable.h" +#include "storage/storage.h" + +namespace pikiwidb { +class DB { + public: + DB(int db_id, const std::string& db_path); + std::unique_ptr& GetStorage() { return storage_; } + + void Lock() { storage_mutex_.lock(); } + + void UnLock() { storage_mutex_.unlock(); } + + void LockShared() { storage_mutex_.lock_shared(); } + + void UnLockShared() { storage_mutex_.unlock_shared(); } + + private: + const int db_id_; + const std::string db_path_; + + /** + * If you want to change the pointer that points to storage, + * you must first acquire a mutex lock. + * If you only want to access the pointer, + * you just need to obtain a shared lock. + */ + std::shared_mutex storage_mutex_; + std::unique_ptr storage_; + bool opened_ = false; + + /** + * If you want to change the status below,you must first acquire + * a mutex lock. + * If you only want to access the status below, + * you just need to obtain a shared lock. 
+ */ + std::shared_mutex checkpoint_mutex_; + bool checkpoint_in_process_ = false; + int64_t last_checkpoint_time_ = -1; + bool last_checkpoint_success_ = false; +}; +} // namespace pikiwidb + +#endif // PIKIWIDB_DB_H diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 479167697..7cd346e92 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -59,6 +59,7 @@ struct StorageOptions { size_t small_compaction_threshold = 5000; size_t small_compaction_duration_threshold = 10000; size_t db_instance_num = 3; // default = 3 + int db_id; Status ResetOptions(const OptionType& option_type, const std::unordered_map& options_map); }; @@ -123,6 +124,7 @@ enum BeforeOrAfter { Before, After }; enum DataType { kAll, kStrings, kHashes, kSets, kLists, kZSets }; +const std::string DataTypeToString[] = {"all", "string", "hash", "set", "list", "zset"}; const char DataTypeTag[] = {'a', 'k', 'h', 's', 'l', 'z'}; enum class OptionType { @@ -1001,7 +1003,7 @@ class Storage { // return -1 operation exception errors happen in database // return 0 if key does not exist // return >=1 if the timueout was set - int32_t Expireat(const Slice& key, uint64_t timestamp, std::map* type_status); + int32_t Expireat(const Slice& key, uint64_t timestamp); // Remove the existing timeout on key, turning the key from volatile (a key // with an expire set) to persistent (a key that will never expire as no @@ -1097,7 +1099,8 @@ class Storage { // For scan keys in data base std::atomic scan_keynum_exit_ = false; - int32_t db_instance_num_; + size_t db_instance_num_ = 3; + int db_id_ = 0; }; } // namespace storage diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 872542f70..b46601c8b 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -82,15 +82,19 @@ static std::string AppendSubDirectory(const std::string& db_path, int index) { Status Storage::Open(const StorageOptions& 
storage_options, const std::string& db_path) { mkpath(db_path.c_str(), 0755); db_instance_num_ = storage_options.db_instance_num; - for (int index = 0; index < db_instance_num_; index++) { + for (size_t index = 0; index < db_instance_num_; index++) { insts_.emplace_back(std::make_unique(this, index)); Status s = insts_.back()->Open(storage_options, AppendSubDirectory(db_path, index)); if (!s.ok()) { - ERROR("open db failed", s.ToString()); + ERROR("open RocksDB{} failed {}", index, s.ToString()); + return Status::IOError(); } + INFO("open RocksDB{} success!", index); } slot_indexer_ = std::make_unique(db_instance_num_); + db_id_ = storage_options.db_id; + is_opened_.store(true); return Status::OK(); } @@ -1508,7 +1512,7 @@ Status Storage::Scanx(const DataType& data_type, const std::string& start_key, c return Status::OK(); } -int32_t Storage::Expireat(const Slice& key, uint64_t timestamp, std::map* type_status) { +int32_t Storage::Expireat(const Slice& key, uint64_t timestamp) { Status s; int32_t count = 0; bool is_corruption = false; @@ -1519,7 +1523,6 @@ int32_t Storage::Expireat(const Slice& key, uint64_t timestamp, std::mapHashesExpireat(key, timestamp); @@ -1527,7 +1530,6 @@ int32_t Storage::Expireat(const Slice& key, uint64_t timestamp, std::mapSetsExpireat(key, timestamp); @@ -1535,7 +1537,6 @@ int32_t Storage::Expireat(const Slice& key, uint64_t timestamp, std::mapListsExpireat(key, timestamp); @@ -1543,7 +1544,6 @@ int32_t Storage::Expireat(const Slice& key, uint64_t timestamp, std::mapZsetsExpireat(key, timestamp); @@ -1551,7 +1551,6 @@ int32_t Storage::Expireat(const Slice& key, uint64_t timestamp, std::map -#include #include + #include "config.h" +#include "db.h" #include "log.h" -#include "multi.h" +#include "store.h" + namespace pikiwidb { PStore& PStore::Instance() { @@ -24,33 +24,15 @@ void PStore::Init(int dbNum) { return; } - backends_.resize(dbNum); + backends_.reserve(dbNum); if (g_config.backend == kBackEndRocksDB) { - for (size_t i = 0; i < 
dbNum; ++i) { - std::unique_ptr db = std::make_unique(); - storage::StorageOptions storage_options; - storage_options.options.create_if_missing = true; - storage_options.db_instance_num = g_config.db_instance_num; - - // options for CF - storage_options.options.ttl = g_config.rocksdb_ttl_second; - storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; - - PString dbpath = g_config.dbpath + std::to_string(i) + '/'; - - storage::Status s = db->Open(storage_options, dbpath.data()); - if (!s.ok()) { - assert(false); - } else { - INFO("Open RocksDB {} success", dbpath); - } - - backends_[i] = std::move(db); + for (int i = 0; i < dbNum; i++) { + auto db = std::make_unique(i, g_config.dbpath); + backends_.push_back(std::move(db)); } } else { - // ERROR: unsupport backend - return; + ERROR("unsupport backend!"); } } diff --git a/src/store.h b/src/store.h index e3e7b1639..63c32fde6 100644 --- a/src/store.h +++ b/src/store.h @@ -10,6 +10,7 @@ #define GLOG_NO_ABBREVIATED_SEVERITIES #include "common.h" +#include "db.h" #include "storage/storage.h" #include @@ -31,12 +32,20 @@ class PStore { void Init(int dbNum); - std::unique_ptr& GetBackend(int32_t index) { return backends_[index]; }; + std::unique_ptr& GetBackend(int32_t index) { return backends_[index]; }; + + std::shared_mutex& SharedMutex() { return dbs_mutex_; } private: PStore() = default; - std::vector> backends_; + /** + * If you want to access all the DBs at the same time, + * then you must hold the lock. + * For example: you want to execute flushall or bgsave. 
+ */ + std::shared_mutex dbs_mutex_; + std::vector> backends_; }; #define PSTORE PStore::Instance() diff --git a/tests/key_test.go b/tests/key_test.go index e3d5841fb..a0e5d874c 100644 --- a/tests/key_test.go +++ b/tests/key_test.go @@ -151,4 +151,93 @@ var _ = Describe("Keyspace", Ordered, func() { Expect(client.Do(ctx, "pexpire", DefaultKey, "err").Err()).To(MatchError("ERR value is not an integer or out of range")) }) + + It("should expireat", func() { + Expect(client.Set(ctx, DefaultKey, DefaultValue, 0).Val()).To(Equal(OK)) + Expect(client.ExpireAt(ctx, DefaultKey, time.Now().Add(time.Second*-1)).Val()).To(Equal(true)) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(0))) + + }) + + It("should expireat", func() { + Expect(client.Set(ctx, DefaultKey, DefaultValue, 0).Val()).To(Equal(OK)) + Expect(client.ExpireAt(ctx, DefaultKey, time.Now().Add(time.Second*3)).Val()).To(Equal(true)) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(1))) + + time.Sleep(4 * time.Second) + + Expect(client.Get(ctx, DefaultKey).Err()).To(MatchError(redis.Nil)) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(0))) + }) + + It("should pexpirat", func() { + Expect(client.Set(ctx, DefaultKey, DefaultValue, 0).Val()).To(Equal(OK)) + Expect(client.PExpireAt(ctx, DefaultKey, time.Now().Add(time.Second*-1)).Val()).To(Equal(true)) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(0))) + + }) + + It("should pexpirat", func() { + Expect(client.Set(ctx, DefaultKey, DefaultValue, 0).Val()).To(Equal(OK)) + Expect(client.PExpireAt(ctx, DefaultKey, time.Now().Add(time.Second*3)).Val()).To(Equal(true)) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(1))) + + time.Sleep(4 * time.Second) + + Expect(client.Get(ctx, DefaultKey).Err()).To(MatchError(redis.Nil)) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(0))) + + }) + + It("persist", func() { + // return 0 if key does not exist + Expect(client.Persist(ctx, 
DefaultKey).Val()).To(Equal(false)) + + // return 0 if key does not have an associated timeout + Expect(client.Set(ctx, DefaultKey, DefaultValue, 0).Val()).To(Equal(OK)) + Expect(client.Persist(ctx, DefaultKey).Val()).To(Equal(false)) + + // return 1 if the timueout was set + Expect(client.PExpireAt(ctx, DefaultKey, time.Now().Add(time.Second*3)).Val()).To(Equal(true)) + Expect(client.Persist(ctx, DefaultKey).Val()).To(Equal(true)) + time.Sleep(5 * time.Second) + Expect(client.Exists(ctx, DefaultKey).Val()).To(Equal(int64(1))) + + // multi data type + Expect(client.LPush(ctx, DefaultKey, "l").Err()).NotTo(HaveOccurred()) + Expect(client.HSet(ctx, DefaultKey, "h", "h").Err()).NotTo(HaveOccurred()) + Expect(client.SAdd(ctx, DefaultKey, "s").Err()).NotTo(HaveOccurred()) + Expect(client.ZAdd(ctx, DefaultKey, redis.Z{Score: 1, Member: "z"}).Err()).NotTo(HaveOccurred()) + Expect(client.Set(ctx, DefaultKey, DefaultValue, 0).Val()).To(Equal(OK)) + Expect(client.PExpireAt(ctx, DefaultKey, time.Now().Add(time.Second*1000)).Err()).NotTo(HaveOccurred()) + Expect(client.Persist(ctx, DefaultKey).Err()).NotTo(HaveOccurred()) + + // del keys + Expect(client.PExpireAt(ctx, DefaultKey, time.Now().Add(time.Second*1)).Err()).NotTo(HaveOccurred()) + time.Sleep(2 * time.Second) + }) + + It("keys", func() { + // empty + Expect(client.Keys(ctx, "*").Val()).To(Equal([]string{})) + Expect(client.Keys(ctx, "dummy").Val()).To(Equal([]string{})) + Expect(client.Keys(ctx, "dummy*").Val()).To(Equal([]string{})) + + Expect(client.Set(ctx, "a1", "v1", 0).Val()).To(Equal(OK)) + Expect(client.Set(ctx, "k1", "v1", 0).Val()).To(Equal(OK)) + Expect(client.SAdd(ctx, "k2", "v2").Val()).To(Equal(int64(1))) + Expect(client.HSet(ctx, "k3", "k3", "v3").Val()).To(Equal(int64(1))) + Expect(client.LPush(ctx, "k4", "v4").Val()).To(Equal(int64(1))) + Expect(client.ZAdd(ctx, "k5", redis.Z{Score: 1, Member: "v5"}).Val()).To(Equal(int64(1))) + + // all + Expect(client.Keys(ctx, "*").Val()).To(Equal([]string{"a1", 
"k1", "k3", "k4", "k5", "k2"})) + + // pattern + Expect(client.Keys(ctx, "k*").Val()).To(Equal([]string{"k1", "k3", "k4", "k5", "k2"})) + Expect(client.Keys(ctx, "k1").Val()).To(Equal([]string{"k1"})) + + // del keys + Expect(client.Del(ctx, "a1", "k1", "k2", "k3", "k4", "k5").Err()).NotTo(HaveOccurred()) + }) }) diff --git a/tests/list_test.go b/tests/list_test.go index 53c36781c..4c1a5a807 100644 --- a/tests/list_test.go +++ b/tests/list_test.go @@ -86,6 +86,22 @@ var _ = Describe("List", Ordered, func() { del := client.Del(ctx, DefaultKey) Expect(del.Err()).NotTo(HaveOccurred()) }) + + It("Cmd LPUSHX", func() { + Expect(client.LPushX(ctx, DefaultKey, s2s["key_1"]).Val()).To(Equal(int64(0))) + Expect(client.LRange(ctx, DefaultKey, 0, -1).Val()).To(Equal([]string{})) + + Expect(client.LPush(ctx, DefaultKey, s2s["key_2"]).Val()).To(Equal(int64(1))) + Expect(client.LPushX(ctx, DefaultKey, s2s["key_3"]).Val()).To(Equal(int64(2))) + Expect(client.LRange(ctx, DefaultKey, 0, -1).Val()).To(Equal([]string{s2s["key_3"], s2s["key_2"]})) + + Expect(client.LPushX(ctx, DefaultKey, s2s["key_4"], s2s["key_5"]).Val()).To(Equal(int64(4))) + Expect(client.LRange(ctx, DefaultKey, 0, -1).Val()).To(Equal([]string{s2s["key_5"], s2s["key_4"], s2s["key_3"], s2s["key_2"]})) + + del := client.Del(ctx, DefaultKey) + Expect(del.Err()).NotTo(HaveOccurred()) + }) + It("Cmd RPUSH", func() { log.Println("Cmd RPUSH Begin") Expect(client.RPush(ctx, DefaultKey, s2s["key_1"]).Val()).To(Equal(int64(1))) @@ -97,6 +113,44 @@ var _ = Describe("List", Ordered, func() { Expect(del.Err()).NotTo(HaveOccurred()) }) + It("Cmd RPUSHX", func() { + Expect(client.RPushX(ctx, DefaultKey, s2s["key_1"]).Val()).To(Equal(int64(0))) + Expect(client.LRange(ctx, DefaultKey, 0, -1).Val()).To(Equal([]string{})) + + Expect(client.RPush(ctx, DefaultKey, s2s["key_2"]).Val()).To(Equal(int64(1))) + Expect(client.RPushX(ctx, DefaultKey, s2s["key_3"]).Val()).To(Equal(int64(2))) + Expect(client.LRange(ctx, DefaultKey, 0, 
-1).Val()).To(Equal([]string{s2s["key_2"], s2s["key_3"]})) + + Expect(client.RPushX(ctx, DefaultKey, s2s["key_4"], s2s["key_5"]).Val()).To(Equal(int64(4))) + Expect(client.LRange(ctx, DefaultKey, 0, -1).Val()).To(Equal([]string{s2s["key_2"], s2s["key_3"], s2s["key_4"], s2s["key_5"]})) + + del := client.Del(ctx, DefaultKey) + Expect(del.Err()).NotTo(HaveOccurred()) + }) + + It("Cmd LPop", func() { + rPush := client.RPush(ctx, DefaultKey, s2s["key_1"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + rPush = client.RPush(ctx, DefaultKey, s2s["key_2"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + rPush = client.RPush(ctx, DefaultKey, s2s["key_3"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + + lPop := client.LPop(ctx, DefaultKey) + Expect(lPop.Err()).NotTo(HaveOccurred()) + Expect(lPop.Val()).To(Equal(s2s["key_1"])) + + lRange := client.LRange(ctx, DefaultKey, 0, -1) + Expect(lRange.Err()).NotTo(HaveOccurred()) + Expect(lRange.Val()).To(Equal([]string{s2s["key_2"], s2s["key_3"]})) + + err := client.Do(ctx, "LPOP", DefaultKey, 1, 2).Err() + Expect(err).To(MatchError(ContainSubstring("ERR wrong number of arguments for 'lpop' command"))) + + del := client.Del(ctx, DefaultKey) + Expect(del.Err()).NotTo(HaveOccurred()) + }) + It("should RPop", func() { rPush := client.RPush(ctx, DefaultKey, s2s["key_1"]) Expect(rPush.Err()).NotTo(HaveOccurred()) @@ -207,4 +261,56 @@ var _ = Describe("List", Ordered, func() { del := client.Del(ctx, DefaultKey) Expect(del.Err()).NotTo(HaveOccurred()) }) + + It("SHOULD LIndex", func() { + rPush := client.RPush(ctx, DefaultKey, s2s["key_1"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + rPush = client.RPush(ctx, DefaultKey, s2s["key_2"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + rPush = client.RPush(ctx, DefaultKey, s2s["key_3"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + + lIndex := client.LIndex(ctx, DefaultKey, 0) + Expect(lIndex.Err()).NotTo(HaveOccurred()) + Expect(lIndex.Val()).To(Equal(s2s["key_1"])) + + lIndex = 
client.LIndex(ctx, DefaultKey, 1) + Expect(lIndex.Err()).NotTo(HaveOccurred()) + Expect(lIndex.Val()).To(Equal(s2s["key_2"])) + + lIndex = client.LIndex(ctx, DefaultKey, -1) + Expect(lIndex.Err()).NotTo(HaveOccurred()) + Expect(lIndex.Val()).To(Equal(s2s["key_3"])) + + lIndex = client.LIndex(ctx, DefaultKey, 4) + Expect(lIndex.Err()).To(Equal(redis.Nil)) + Expect(lIndex.Val()).To(Equal("")) + + err := client.Do(ctx, "lindex", DefaultKey, 1, 2).Err() + Expect(err).To(MatchError(ContainSubstring("ERR wrong number of arguments for 'lindex' command"))) + + del := client.Del(ctx, DefaultKey) + Expect(del.Err()).NotTo(HaveOccurred()) + }) + + It("SHOULD LLen", func() { + lLen := client.LLen(ctx, DefaultKey) + Expect(lLen.Err()).NotTo(HaveOccurred()) + Expect(lLen.Val()).To(Equal(int64(0))) + + rPush := client.RPush(ctx, DefaultKey, s2s["key_1"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + rPush = client.RPush(ctx, DefaultKey, s2s["key_2"]) + Expect(rPush.Err()).NotTo(HaveOccurred()) + + lLen = client.LLen(ctx, DefaultKey) + Expect(lLen.Err()).NotTo(HaveOccurred()) + Expect(lLen.Val()).To(Equal(int64(2))) + + err := client.Do(ctx, "llen", DefaultKey, 1).Err() + Expect(err).To(MatchError(ContainSubstring("ERR wrong number of arguments for 'llen' command"))) + + del := client.Del(ctx, DefaultKey) + Expect(del.Err()).NotTo(HaveOccurred()) + }) }) diff --git a/tests/set_test.go b/tests/set_test.go index d148fb02a..0944eaf20 100644 --- a/tests/set_test.go +++ b/tests/set_test.go @@ -313,4 +313,64 @@ var _ = Describe("Set", Ordered, func() { Expect(err).NotTo(HaveOccurred()) Expect(members).To(HaveLen(2)) }) + + It("should SMembers", func() { + sAdd := client.SAdd(ctx, "setSMembers", "Hello") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSMembers", "World") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + + sMembers := client.SMembers(ctx, "setSMembers") + Expect(sMembers.Err()).NotTo(HaveOccurred()) + 
Expect(sMembers.Val()).To(ConsistOf([]string{"Hello", "World"})) + }) + + It("should SDiff", func() { + sAdd := client.SAdd(ctx, "setSDiff1", "a") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiff1", "b") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiff1", "c") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + + sAdd = client.SAdd(ctx, "setSDiff2", "c") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiff2", "d") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiff2", "e") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + + sDiff := client.SDiff(ctx, "setSDiff1", "setSDiff2") + Expect(sDiff.Err()).NotTo(HaveOccurred()) + Expect(sDiff.Val()).To(ConsistOf([]string{"a", "b"})) + + sDiff = client.SDiff(ctx, "nonexistent_setSDiff1", "nonexistent_setSDiff2") + Expect(sDiff.Err()).NotTo(HaveOccurred()) + Expect(sDiff.Val()).To(HaveLen(0)) + }) + + It("should SDiffstore", func() { + sAdd := client.SAdd(ctx, "setSDiffstore1", "a") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiffstore1", "b") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiffstore1", "c") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + + sAdd = client.SAdd(ctx, "setSDiffstore2", "c") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiffstore2", "d") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + sAdd = client.SAdd(ctx, "setSDiffstore2", "e") + Expect(sAdd.Err()).NotTo(HaveOccurred()) + + sDiffStore := client.SDiffStore(ctx, "setKey", "setSDiffstore1", "setSDiffstore2") + Expect(sDiffStore.Err()).NotTo(HaveOccurred()) + Expect(sDiffStore.Val()).To(Equal(int64(2))) + + sMembers := client.SMembers(ctx, "setKey") + Expect(sMembers.Err()).NotTo(HaveOccurred()) + Expect(sMembers.Val()).To(ConsistOf([]string{"a", "b"})) + }) + }) diff --git a/tests/string_test.go b/tests/string_test.go index e722891b6..51a25117b 100644 --- 
a/tests/string_test.go +++ b/tests/string_test.go @@ -195,15 +195,15 @@ var _ = Describe("String", Ordered, func() { }) It("should GetSet", func() { - incr := client.Incr(ctx, "testKeyGS") + incr := client.Incr(ctx, DefaultKey) Expect(incr.Err()).NotTo(HaveOccurred()) Expect(incr.Val()).To(Equal(int64(1))) - getSet := client.GetSet(ctx, "testKeyGS", "0") + getSet := client.GetSet(ctx, DefaultKey, "0") Expect(getSet.Err()).NotTo(HaveOccurred()) Expect(getSet.Val()).To(Equal("1")) - get := client.Get(ctx, "testKeyGS") + get := client.Get(ctx, DefaultKey) Expect(get.Err()).NotTo(HaveOccurred()) Expect(get.Val()).To(Equal("0")) }) diff --git a/tests/zset_test.go b/tests/zset_test.go index 23faf0190..5cdf309f6 100644 --- a/tests/zset_test.go +++ b/tests/zset_test.go @@ -53,7 +53,7 @@ var _ = Describe("Zset", Ordered, func() { BeforeEach(func() { client = s.NewClient() Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred()) - time.Sleep(1 * time.Second) + time.Sleep(1 * time.Second) }) // nodes that run after the spec's subject(It). 
@@ -70,4 +70,149 @@ var _ = Describe("Zset", Ordered, func() { log.Println("Cmd ZADD Begin") Expect(client.ZAdd(ctx, "myset", redis.Z{Score: 1, Member: "one"}).Val()).NotTo(Equal("FooBar")) }) + + It("should ZAdd", func() { + Expect(client.Del(ctx, "zset").Err()).NotTo(HaveOccurred()) + added, err := client.ZAdd(ctx, "zset", redis.Z{ + Score: 1, + Member: "one", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(int64(1))) + + added, err = client.ZAdd(ctx, "zset", redis.Z{ + Score: 1, + Member: "uno", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(int64(1))) + + added, err = client.ZAdd(ctx, "zset", redis.Z{ + Score: 2, + Member: "two", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(int64(1))) + + added, err = client.ZAdd(ctx, "zset", redis.Z{ + Score: 3, + Member: "two", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(added).To(Equal(int64(0))) + + // vals, err := client.ZRangeWithScores(ctx, "zset", 0, -1).Result() + // Expect(err).NotTo(HaveOccurred()) + // Expect(vals).To(Equal([]redis.Z{{ + // Score: 1, + // Member: "one", + // }, { + // Score: 1, + // Member: "uno", + // }, { + // Score: 3, + // Member: "two", + // }})) + }) + + It("should ZRangeByScore", func() { + Expect(client.Del(ctx, "zset").Err()).NotTo(HaveOccurred()) + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() + Expect(err).NotTo(HaveOccurred()) + + zRangeByScore := client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + }) + Expect(zRangeByScore.Err()).NotTo(HaveOccurred()) + Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two", "three"})) + + zRangeByScore = client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{ + Min: "1", + Max: "2", 
+ }) + Expect(zRangeByScore.Err()).NotTo(HaveOccurred()) + Expect(zRangeByScore.Val()).To(Equal([]string{"one", "two"})) + + zRangeByScore = client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{ + Min: "(1", + Max: "2", + }) + Expect(zRangeByScore.Err()).NotTo(HaveOccurred()) + Expect(zRangeByScore.Val()).To(Equal([]string{"two"})) + + zRangeByScore = client.ZRangeByScore(ctx, "zset", &redis.ZRangeBy{ + Min: "(1", + Max: "(2", + }) + Expect(zRangeByScore.Err()).NotTo(HaveOccurred()) + Expect(zRangeByScore.Val()).To(Equal([]string{})) + }) + + It("should ZRevRange", func() { + Expect(client.Del(ctx, "zset").Err()).NotTo(HaveOccurred()) + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() + Expect(err).NotTo(HaveOccurred()) + + zRevRange := client.ZRevRange(ctx, "zset", 0, -1) + Expect(zRevRange.Err()).NotTo(HaveOccurred()) + Expect(zRevRange.Val()).To(Equal([]string{"three", "two", "one"})) + + zRevRange = client.ZRevRange(ctx, "zset", 2, 3) + Expect(zRevRange.Err()).NotTo(HaveOccurred()) + Expect(zRevRange.Val()).To(Equal([]string{"one"})) + + zRevRange = client.ZRevRange(ctx, "zset", -2, -1) + Expect(zRevRange.Err()).NotTo(HaveOccurred()) + Expect(zRevRange.Val()).To(Equal([]string{"two", "one"})) + }) + + It("should ZRevRangeByScore", func() { + err := client.ZAdd(ctx, "zset", redis.Z{Score: 1, Member: "one"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 2, Member: "two"}).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zset", redis.Z{Score: 3, Member: "three"}).Err() + Expect(err).NotTo(HaveOccurred()) + + vals, err := client.ZRevRangeByScore( + ctx, "zset", &redis.ZRangeBy{Max: "+inf", Min: "-inf"}).Result() + Expect(err).NotTo(HaveOccurred()) + 
Expect(vals).To(Equal([]string{"three", "two", "one"})) + + vals, err = client.ZRevRangeByScore( + ctx, "zset", &redis.ZRangeBy{Max: "2", Min: "(1"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]string{"two"})) + + vals, err = client.ZRevRangeByScore( + ctx, "zset", &redis.ZRangeBy{Max: "(2", Min: "(1"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(vals).To(Equal([]string{})) + }) + + It("should ZCard", func() { + err := client.ZAdd(ctx, "zsetZCard", redis.Z{ + Score: 1, + Member: "one", + }).Err() + Expect(err).NotTo(HaveOccurred()) + err = client.ZAdd(ctx, "zsetZCard", redis.Z{ + Score: 2, + Member: "two", + }).Err() + Expect(err).NotTo(HaveOccurred()) + + card, err := client.ZCard(ctx, "zsetZCard").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(card).To(Equal(int64(2))) + }) }) From fbf365fe67ea061cda1398e749ed5f8d07a81876 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Mon, 25 Mar 2024 11:39:06 +0800 Subject: [PATCH 11/33] feat: Check point (#226) PSTORE checkpoint interface --- src/checkpoint_manager.cpp | 49 ++++++++++++++++++++++ src/checkpoint_manager.h | 51 +++++++++++++++++++++++ src/cmd_kv.cc | 1 + src/db.cpp | 24 ++++++++++- src/db.h | 30 ++++++++------ src/pikiwidb.cc | 2 +- src/storage/include/storage/storage.h | 6 +++ src/storage/src/storage.cc | 59 +++++++++++++++++++++++++++ src/storage/src/storage_murmur3.h | 2 +- src/store.cc | 43 +++++++++++++++++-- src/store.h | 37 ++++++++++++++++- 11 files changed, 283 insertions(+), 21 deletions(-) create mode 100644 src/checkpoint_manager.cpp create mode 100644 src/checkpoint_manager.h diff --git a/src/checkpoint_manager.cpp b/src/checkpoint_manager.cpp new file mode 100644 index 000000000..9938499c4 --- /dev/null +++ b/src/checkpoint_manager.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. 
+ * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#include "checkpoint_manager.h" +#include "db.h" +#include "log.h" +#include "pstd/env.h" + +namespace pikiwidb { + +class DB; + +void CheckpointManager::Init(int instNum, DB* db) { + checkpoint_num_ = instNum; + checkpoint_infoes_.resize(checkpoint_num_); + res_.reserve(checkpoint_num_); + db_ = db; +} + +void CheckpointManager::CreateCheckpoint(const std::string& path) { + res_.clear(); + + if (!pstd::FileExists(path)) { + if (0 != pstd::CreatePath(path)) { + WARN("Create Dir {} fail!", path); + return; + } + INFO("Create Dir {} success!", path); + } + + std::lock_guard Lock(shared_mutex_); + for (int i = 0; i < checkpoint_num_; ++i) { + checkpoint_infoes_[i].checkpoint_in_process = true; + auto res = std::async(std::launch::async, &DB::DoBgSave, db_, std::ref(checkpoint_infoes_[i]), path, i); + res_.push_back(std::move(res)); + } +} + +void CheckpointManager::WaitForCheckpointDone() { + for (auto& r : res_) { + r.get(); + } +} + +} // namespace pikiwidb \ No newline at end of file diff --git a/src/checkpoint_manager.h b/src/checkpoint_manager.h new file mode 100644 index 000000000..ec7ebd3e2 --- /dev/null +++ b/src/checkpoint_manager.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ +#ifndef PIKIWIDB_CHECKPOINT_MANAGER_H +#define PIKIWIDB_CHECKPOINT_MANAGER_H + +#include +#include +#include +#include +#include + +#include "rocksdb/db.h" +#include "rocksdb/utilities/checkpoint.h" + +namespace pikiwidb { + +class DB; + +struct CheckpointInfo { + bool checkpoint_in_process = false; +}; + +class CheckpointManager { + public: + CheckpointManager() = default; + ~CheckpointManager() = default; + ; + + void Init(int instNum, DB* db); + + void CreateCheckpoint(const std::string& path); + + void WaitForCheckpointDone(); + + bool CheckpointInProcess(); + + private: + int checkpoint_num_; + std::vector> res_; + DB* db_ = nullptr; + + std::shared_mutex shared_mutex_; + std::vector checkpoint_infoes_; +}; +} // namespace pikiwidb + +#endif // PIKIWIDB_CHECKPOINT_MANAGER_H diff --git a/src/cmd_kv.cc b/src/cmd_kv.cc index 3f222fae6..b978be23a 100644 --- a/src/cmd_kv.cc +++ b/src/cmd_kv.cc @@ -13,6 +13,7 @@ namespace pikiwidb { +using pikiwidb::TasksVector; GetCmd::GetCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryString) {} diff --git a/src/db.cpp b/src/db.cpp index ba0c4ba09..6b7e4e6a9 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -12,16 +12,20 @@ extern pikiwidb::PConfig g_config; namespace pikiwidb { -DB::DB(int db_id, const std::string &db_path) : db_id_(db_id), db_path_(db_path + std::to_string(db_id) + '/') { +struct CheckPointInfo; + +DB::DB(int db_index, const std::string& db_path) + : db_index_(db_index), db_path_(db_path + std::to_string(db_index_) + '/') { storage::StorageOptions storage_options; storage_options.options.create_if_missing = true; storage_options.db_instance_num = g_config.db_instance_num; - storage_options.db_id = db_id; + storage_options.db_id = db_index_; // options for CF storage_options.options.ttl = g_config.rocksdb_ttl_second; storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; storage_ = std::make_unique(); + if (auto 
s = storage_->Open(storage_options, db_path_); !s.ok()) { ERROR("Storage open failed! {}", s.ToString()); abort(); @@ -29,4 +33,20 @@ DB::DB(int db_id, const std::string &db_path) : db_id_(db_id), db_path_(db_path opened_ = true; INFO("Open DB{} success!", db_id); } + +void DB::DoBgSave(CheckpointInfo& checkpoint_info, const std::string& path, int i) { + // 1) always hold storage's sharedLock + std::shared_lock sharedLock(storage_mutex_); + + // 2)Create the storage's checkpoint 。 + auto status = storage_->CreateCheckpoint(path, i); + + // 3) write the status + checkpoint_info.checkpoint_in_process = false; +} + +void DB::CreateCheckpoint(const std::string& path) { checkpoint_manager_->CreateCheckpoint(path); } + +void DB::WaitForCheckpointDone() { checkpoint_manager_->WaitForCheckpointDone(); } + } // namespace pikiwidb diff --git a/src/db.h b/src/db.h index cdb0081a8..1f5e28962 100644 --- a/src/db.h +++ b/src/db.h @@ -10,14 +10,16 @@ #include +#include "checkpoint_manager.h" #include "log.h" #include "pstd/noncopyable.h" #include "storage/storage.h" - namespace pikiwidb { + class DB { public: - DB(int db_id, const std::string& db_path); + DB(int db_index, const std::string& db_path); + std::unique_ptr& GetStorage() { return storage_; } void Lock() { storage_mutex_.lock(); } @@ -28,9 +30,19 @@ class DB { void UnLockShared() { storage_mutex_.unlock_shared(); } + void CreateCheckpoint(const std::string& path); + + [[maybe_unused]] void DoBgSave(CheckpointInfo&, const std::string&, int i); + + void WaitForCheckpointDone(); + + int GetDbIndex() { return db_index_; } + private: - const int db_id_; + const int db_index_; const std::string db_path_; + const std::string dump_parent_path_; + const std::string dump_path_; /** * If you want to change the pointer that points to storage, @@ -42,16 +54,8 @@ class DB { std::unique_ptr storage_; bool opened_ = false; - /** - * If you want to change the status below,you must first acquire - * a mutex lock. 
- * If you only want to access the status below, - * you just need to obtain a shared lock. - */ - std::shared_mutex checkpoint_mutex_; - bool checkpoint_in_process_ = false; - int64_t last_checkpoint_time_ = -1; - bool last_checkpoint_success_ = false; + std::unique_ptr checkpoint_manager_; + }; } // namespace pikiwidb diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index 369cfa333..33c141344 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -214,7 +214,7 @@ bool PikiwiDB::Init() { worker_threads_.SetWorkerNum(static_cast(g_config.worker_threads_num)); slave_threads_.SetWorkerNum(static_cast(g_config.slave_threads_num)); - PSTORE.Init(g_config.databases); + PSTORE.Init(); // Only if there is no backend, load rdb if (g_config.backend == pikiwidb::kBackEndNone) { diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 7cd346e92..c92cf59d7 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -22,6 +22,7 @@ #include "rocksdb/status.h" #include "rocksdb/table.h" +#include "pstd/env.h" #include "pstd/pstd_mutex.h" #include "storage/slot_indexer.h" @@ -39,10 +40,13 @@ inline const std::string PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS = "rocksdb.back inline constexpr size_t BATCH_DELETE_LIMIT = 100; inline constexpr size_t COMPACT_THRESHOLD_COUNT = 2000; +inline constexpr uint64_t kNoFlush = std::numeric_limits::max(); + using Options = rocksdb::Options; using BlockBasedTableOptions = rocksdb::BlockBasedTableOptions; using Status = rocksdb::Status; using Slice = rocksdb::Slice; +using Env = rocksdb::Env; class Redis; enum class OptionType; @@ -166,6 +170,8 @@ class Storage { Status Open(const StorageOptions& storage_options, const std::string& db_path); + Status CreateCheckpoint(const std::string& dump_path, int index); + Status LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key); Status StoreCursorStartKey(const DataType& dtype, int64_t cursor, 
char type, const std::string& next_key); diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index b46601c8b..02e4312b5 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -6,6 +6,8 @@ #include #include +#include "rocksdb/utilities/checkpoint.h" + #include "config.h" #include "pstd/log.h" #include "pstd/pikiwidb_slot.h" @@ -99,6 +101,63 @@ Status Storage::Open(const StorageOptions& storage_options, const std::string& d return Status::OK(); } +Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { + INFO("DB{}'s RocksDB {} begin to generate a checkpoint!", db_id_, i); + auto source_dir = AppendSubDirectory(dump_path, db_id_); + if (!pstd::FileExists(source_dir)) { + if (0 != pstd::CreatePath(source_dir)) { + WARN("Create Dir {} fail!", source_dir); + return Status::IOError("CreatePath() fail! dir_name : {} ", source_dir); + } + INFO("Create Dir {} success!", source_dir); + } + + source_dir = AppendSubDirectory(source_dir, i); + + auto tmp_dir = source_dir + ".tmp"; + // 1) Make sure the temporary directory does not exist + if (!pstd::DeleteDirIfExist(tmp_dir)) { + WARN("DB{}'s RocksDB {} delete dir fail!", db_id_, i); + return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", tmp_dir); + } + + // 2) Create checkpoint of this RocksDB + rocksdb::Checkpoint* checkpoint = nullptr; + auto db = insts_[i]->GetDB(); + rocksdb::Status s = rocksdb::Checkpoint::Create(db, &checkpoint); + if (!s.ok()) { + WARN("DB{}'s RocksDB {} create checkpoint object failed!. Error: ", db_id_, i, s.ToString()); + return s; + } + + // 3) Create a checkpoint + std::unique_ptr checkpoint_guard(checkpoint); + s = checkpoint->CreateCheckpoint(tmp_dir, kNoFlush, nullptr); + if (!s.ok()) { + WARN("DB{}'s RocksDB {} create checkpoint failed!. 
Error: {}", db_id_, i, s.ToString()); + return s; + } + + // 4) Make sure the source directory does not exist + if (!pstd::DeleteDirIfExist(source_dir)) { + WARN("DB{}'s RocksDB {} delete dir {} fail!", db_id_, i, source_dir); + return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", source_dir); + } + + // 5) Rename the temporary directory to source directory + if (auto status = pstd::RenameFile(tmp_dir, source_dir); status != 0) { + WARN("DB{}'s RocksDB {} rename temporary directory {} to source directory {} fail!", db_id_, i, tmp_dir, + source_dir); + if (!pstd::DeleteDirIfExist(tmp_dir)) { + WARN("DB{}'s RocksDB {} fail to delete the rename failed directory {} ", db_id_, i, tmp_dir); + } + return Status::IOError("Rename dir {} fail!", tmp_dir); + } + + INFO("DB{}'s RocksDB {} create checkpoint {} success!", db_id_, i, source_dir); + return Status::OK(); +} + Status Storage::LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key) { std::string index_key = DataTypeTag[dtype] + std::to_string(cursor); std::string index_value; diff --git a/src/storage/src/storage_murmur3.h b/src/storage/src/storage_murmur3.h index 958c5dbf1..f899a86cd 100644 --- a/src/storage/src/storage_murmur3.h +++ b/src/storage/src/storage_murmur3.h @@ -148,4 +148,4 @@ extern } // namespace storage -#endif +#endif \ No newline at end of file diff --git a/src/store.cc b/src/store.cc index bd2b6c684..1d0170586 100644 --- a/src/store.cc +++ b/src/store.cc @@ -19,15 +19,18 @@ PStore& PStore::Instance() { return store; } -void PStore::Init(int dbNum) { +void PStore::Init() { if (g_config.backend == kBackEndNone) { return; } - backends_.reserve(dbNum); + backends_.reserve(dbNum_); + dbNum_ = g_config.databases; + backends_.reserve(dbNum_); if (g_config.backend == kBackEndRocksDB) { - for (int i = 0; i < dbNum; i++) { + + for (int i = 0; i < dbNum_; i++) { auto db = std::make_unique(i, g_config.dbpath); backends_.push_back(std::move(db)); } @@ -36,4 
+39,38 @@ void PStore::Init(int dbNum) { } } +void PStore::DoSomeThingSpecificDB(const TasksVector tasks) { + std::for_each(tasks.begin(), tasks.end(), [this](const auto& task) { + switch (task.type) { + case kCheckpoint: + if (task.db < 0 || task.db >= dbNum_) { + WARN("The database index is out of range."); + return; + } + auto& db = backends_[task.db]; + if (auto s = task.args.find(kCheckpointPath); s == task.args.end()) { + WARN("The critical parameter 'path' is missing in the checkpoint."); + return; + } + auto path = task.args.find(kCheckpointPath)->second; + trimSlash(path); + db->CreateCheckpoint(path); + break; + }; + }); +} + +void PStore::WaitForCheckpointDone() { + for (auto& db : backends_) { + db->WaitForCheckpointDone(); + } +} + +void PStore::trimSlash(std::string& dirName) { + while (dirName.back() == '/') { + dirName.pop_back(); + + } +} + } // namespace pikiwidb diff --git a/src/store.h b/src/store.h index 63c32fde6..597b01c7f 100644 --- a/src/store.h +++ b/src/store.h @@ -16,28 +16,63 @@ #include #include #include +#include #include #include +#include "checkpoint_manager.h" +#include "common.h" +#include "db.h" +#include "storage/storage.h" #include "braft/raft.h" namespace pikiwidb { +enum TaskType { + kCheckpoint, +}; + +enum TaskArg { + kCheckpointPath, +}; + +struct TaskContext { + TaskType type; + int db; + std::map args; + TaskContext(TaskType t) : type(t) {} + TaskContext(TaskType t, int d) : type(t), db(d) {} + TaskContext(TaskType t, int d, const std::map& a) : type(t), db(d), args(a) {} +}; + +using TasksVector = std::vector; + +class CheckpointManager; + class PStore { public: + friend class CheckpointManager; static PStore& Instance(); PStore(const PStore&) = delete; void operator=(const PStore&) = delete; - void Init(int dbNum); + void Init(); std::unique_ptr& GetBackend(int32_t index) { return backends_[index]; }; + void DoSomeThingSpecificDB(const TasksVector task); + + void WaitForCheckpointDone(); + + std::shared_mutex& 
SharedMutex() { return dbs_mutex_; } private: PStore() = default; + void trimSlash(std::string& dirName); + + int dbNum_ = 0; /** * If you want to access all the DBs at the same time, From 3dcec989ddb8677a4c5eddb37b6e494b97ac3904 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Mon, 25 Mar 2024 12:49:11 +0800 Subject: [PATCH 12/33] fix_rebase_bug (#233) --- src/base_cmd.h | 4 ---- src/db.cpp | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/base_cmd.h b/src/base_cmd.h index cf774b11e..f5f9a0cc8 100644 --- a/src/base_cmd.h +++ b/src/base_cmd.h @@ -35,10 +35,6 @@ const std::string kCmdNameKeys = "keys"; const std::string kCmdNameRaftCluster = "raft.cluster"; const std::string kCmdNameRaftNode = "raft.node"; -// raft cmd -const std::string kCmdNameRaftCluster = "raft.cluster"; -const std::string kCmdNameRaftNode = "raft.node"; - // string cmd const std::string kCmdNameSet = "set"; const std::string kCmdNameGet = "get"; diff --git a/src/db.cpp b/src/db.cpp index 6b7e4e6a9..3224f9610 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -31,7 +31,7 @@ DB::DB(int db_index, const std::string& db_path) abort(); } opened_ = true; - INFO("Open DB{} success!", db_id); + INFO("Open DB{} success!", db_index_); } void DB::DoBgSave(CheckpointInfo& checkpoint_info, const std::string& path, int i) { From 2374c2f12165bd781eee7356a19cc6b5c836f8f5 Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:06:43 +0800 Subject: [PATCH 13/33] feat: hset/hdel wrote by braft and binlog (#213) 'hset & hdel' wrote by braft --- CMakeLists.txt | 3 + cmake/protobuf.cmake | 1 + pikiwidb.conf | 3 + src/CMakeLists.txt | 2 +- src/client.cc | 20 +++- src/client.h | 2 +- src/config.cc | 2 + src/config.h | 4 +- src/db.cpp | 9 +- src/db.h | 2 - src/pikiwidb.cc | 13 +-- src/pikiwidb.h | 4 +- src/praft/CMakeLists.txt | 30 +++-- src/praft/binlog.proto | 
22 ++++ src/praft/praft.cc | 82 ++++++++++---- src/praft/praft.h | 45 ++++---- src/praft/praft_service.h | 19 ++++ src/replication.h | 4 +- src/storage/CMakeLists.txt | 15 ++- src/storage/include/storage/storage.h | 14 ++- src/storage/src/batch.h | 102 +++++++++++++++++ src/storage/src/redis.cc | 2 + src/storage/src/redis.h | 8 ++ src/storage/src/redis_hashes.cc | 27 ++--- src/storage/src/storage.cc | 29 ++++- tests/consistency_test.go | 152 ++++++++++++++++++++++++++ 26 files changed, 521 insertions(+), 95 deletions(-) create mode 100644 src/praft/binlog.proto create mode 100644 src/praft/praft_service.h create mode 100644 src/storage/src/batch.h create mode 100644 tests/consistency_test.go diff --git a/CMakeLists.txt b/CMakeLists.txt index 506a07586..b214fb258 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -146,6 +146,9 @@ INCLUDE(braft) ENABLE_TESTING() +SET(PROTO_OUTPUT_DIR "${CMAKE_BINARY_DIR}/generated_pb") +FILE(MAKE_DIRECTORY "${PROTO_OUTPUT_DIR}") + ADD_SUBDIRECTORY(src/pstd) ADD_SUBDIRECTORY(src/net) ADD_SUBDIRECTORY(src/praft) diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake index 9a8539bb9..2754a7fc5 100644 --- a/cmake/protobuf.cmake +++ b/cmake/protobuf.cmake @@ -132,6 +132,7 @@ FUNCTION(build_protobuf TARGET_NAME) UPDATE_COMMAND "" DEPENDS zlib URL "https://github.com/protocolbuffers/protobuf/archive/v3.18.0.tar.gz" + URL_HASH SHA256=14e8042b5da37652c92ef6a2759e7d2979d295f60afd7767825e3de68c856c54 CONFIGURE_COMMAND mv ../config.sh . 
COMMAND sh config.sh CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR} diff --git a/pikiwidb.conf b/pikiwidb.conf index 3affebcc5..55fe6d3d2 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -352,3 +352,6 @@ db-instance-num 5 rocksdb-ttl-second 604800 # default 86400 * 3 rocksdb-periodic-second 259200; + +############################### RAFT ############################### +use-raft yes diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 508680ac2..8b2e97d7f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -39,6 +39,6 @@ ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Darwin") "-Wl,-U,__Z13GetStackTracePPvii") ENDIF() -TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; pstd braft brpc ssl crypto zlib protobuf leveldb gflags z praft "${LIB}") +TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; pstd braft brpc ssl crypto zlib protobuf leveldb gflags z praft praft_pb "${LIB}") SET_TARGET_PROPERTIES(pikiwidb PROPERTIES LINKER_LANGUAGE CXX) \ No newline at end of file diff --git a/src/client.cc b/src/client.cc index 60b7b6d04..3cf827975 100644 --- a/src/client.cc +++ b/src/client.cc @@ -5,17 +5,19 @@ * of patent rights can be found in the PATENTS file in the same directory. 
*/ +#include "client.h" + #include #include -#include "client.h" +#include "fmt/core.h" +#include "praft/praft.h" +#include "pstd/log.h" +#include "pstd/pstd_string.h" + +#include "base_cmd.h" #include "config.h" -#include "log.h" #include "pikiwidb.h" -#include "pstd_string.h" -#include "slow_log.h" -#include "store.h" -#include "praft.h" namespace pikiwidb { @@ -406,6 +408,12 @@ void PClient::executeCommand() { return; } + // if user send write command to a node which is not leader, he should get the info of leader + if (cmdPtr->HasFlag(kCmdFlagsWrite) && PRAFT.IsInitialized() && !PRAFT.IsLeader()) { + SetRes(CmdRes::kErrOther, fmt::format("MOVED {}", PRAFT.GetLeaderAddress())); + return; + } + // execute a specific command cmdPtr->Execute(this); } diff --git a/src/client.h b/src/client.h index 16e470495..dc95d8c27 100644 --- a/src/client.h +++ b/src/client.h @@ -13,10 +13,10 @@ #include #include "common.h" +#include "net/tcp_connection.h" #include "proto_parser.h" #include "replication.h" #include "storage/storage.h" -#include "tcp_connection.h" namespace pikiwidb { diff --git a/src/config.cc b/src/config.cc index 592df73c5..40680d918 100644 --- a/src/config.cc +++ b/src/config.cc @@ -186,6 +186,8 @@ bool LoadPikiwiDBConfig(const char* cfgFile, PConfig& cfg) { cfg.rocksdb_ttl_second = parser.GetData("rocksdb-ttl-second"); cfg.rocksdb_periodic_second = parser.GetData("rocksdb-periodic-second"); + cfg.use_raft = (parser.GetData("use-raft") == "yes"); + return cfg.CheckArgs(); } diff --git a/src/config.h b/src/config.h index 1c5728523..705b624ce 100644 --- a/src/config.h +++ b/src/config.h @@ -8,7 +8,6 @@ #pragma once #include -#include #include #include "common.h" @@ -88,6 +87,9 @@ struct PConfig { int db_instance_num; uint64_t rocksdb_ttl_second; uint64_t rocksdb_periodic_second; + + bool use_raft{true}; + PConfig(); bool CheckArgs() const; diff --git a/src/db.cpp b/src/db.cpp index 3224f9610..d0b79e508 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -7,6 +7,8 @@ 
#include "db.h" #include "config.h" +#include "praft/praft.h" +#include "pstd/log.h" extern pikiwidb::PConfig g_config; @@ -24,6 +26,11 @@ DB::DB(int db_index, const std::string& db_path) // options for CF storage_options.options.ttl = g_config.rocksdb_ttl_second; storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; + if (g_config.use_raft) { + storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { + r.AppendLog(log, std::move(promise)); + }; + } storage_ = std::make_unique(); if (auto s = storage_->Open(storage_options, db_path_); !s.ok()) { @@ -48,5 +55,5 @@ void DB::DoBgSave(CheckpointInfo& checkpoint_info, const std::string& path, int void DB::CreateCheckpoint(const std::string& path) { checkpoint_manager_->CreateCheckpoint(path); } void DB::WaitForCheckpointDone() { checkpoint_manager_->WaitForCheckpointDone(); } - + } // namespace pikiwidb diff --git a/src/db.h b/src/db.h index 1f5e28962..461886f9f 100644 --- a/src/db.h +++ b/src/db.h @@ -11,8 +11,6 @@ #include #include "checkpoint_manager.h" -#include "log.h" -#include "pstd/noncopyable.h" #include "storage/storage.h" namespace pikiwidb { diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index 33c141344..be89bf02a 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -14,19 +14,17 @@ #include #include -#include "log.h" -#include "rocksdb/db.h" +#include "praft/praft.h" +#include "pstd/pstd_util.h" +#include "pstd/log.h" #include "client.h" -#include "store.h" - #include "config.h" -#include "slow_log.h" - #include "helper.h" #include "pikiwidb.h" #include "pikiwidb_logo.h" -#include "pstd_util.h" +#include "slow_log.h" +#include "store.h" std::unique_ptr g_pikiwidb; @@ -293,7 +291,6 @@ static void closeStd() { } int main(int ac, char* av[]) { - [[maybe_unused]] rocksdb::DB* db; g_pikiwidb = std::make_unique(); if (!g_pikiwidb->ParseArgs(ac - 1, av + 1)) { Usage(); diff --git a/src/pikiwidb.h b/src/pikiwidb.h index 389096e24..9ade9d867 
100644 --- a/src/pikiwidb.h +++ b/src/pikiwidb.h @@ -7,10 +7,8 @@ #include "cmd_table_manager.h" #include "common.h" -#include "event_loop.h" #include "io_thread_pool.h" -#include "tcp_connection.h" -#include "praft/praft.h" +#include "net/tcp_connection.h" #define kPIKIWIDB_VERSION "4.0.0" diff --git a/src/praft/CMakeLists.txt b/src/praft/CMakeLists.txt index c2b059b89..a929fc655 100644 --- a/src/praft/CMakeLists.txt +++ b/src/praft/CMakeLists.txt @@ -2,30 +2,38 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. +ADD_CUSTOM_COMMAND( + OUTPUT "${PROTO_OUTPUT_DIR}/binlog.pb.cc" + DEPENDS extern_protobuf + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + ARGS -I ${CMAKE_CURRENT_SOURCE_DIR} + --cpp_out ${PROTO_OUTPUT_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/binlog.proto +) +ADD_LIBRARY(binlog_pb STATIC "${PROTO_OUTPUT_DIR}/binlog.pb.cc") -FILE(GLOB PRAFT_PROTO "${CMAKE_CURRENT_SOURCE_DIR}/*.proto") -EXECUTE_PROCESS( - COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out=${CMAKE_CURRENT_SOURCE_DIR} -I=${CMAKE_CURRENT_SOURCE_DIR} ${PRAFT_PROTO} +ADD_CUSTOM_COMMAND( + OUTPUT "${PROTO_OUTPUT_DIR}/praft.pb.cc" + DEPENDS extern_protobuf + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + ARGS -I ${CMAKE_CURRENT_SOURCE_DIR} + --cpp_out ${PROTO_OUTPUT_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/praft.proto ) +ADD_LIBRARY(praft_pb STATIC "${PROTO_OUTPUT_DIR}/praft.pb.cc") FILE(GLOB PRAFT_SRC "${CMAKE_CURRENT_SOURCE_DIR}/*.cc" - "${CMAKE_CURRENT_SOURCE_DIR}/*.h" ) SET(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) ADD_LIBRARY(praft ${PRAFT_SRC}) TARGET_INCLUDE_DIRECTORIES(praft PRIVATE ${PROJECT_SOURCE_DIR}/src - PRIVATE ${PROJECT_SOURCE_DIR}/src/pstd - PRIVATE ${PROJECT_SOURCE_DIR}/src/net - PRIVATE ${PROJECT_SOURCE_DIR}/src/storage/include - PRIVATE ${rocksdb_SOURCE_DIR}/ PRIVATE ${rocksdb_SOURCE_DIR}/include PRIVATE ${BRAFT_INCLUDE_DIR} 
PRIVATE ${BRPC_INCLUDE_DIR} - PRIVATE ${GFLAGS_INCLUDE_PATH} - PRIVATE ${PROJECT_SOURCE_DIR}/src/praft + PRIVATE ${PROTO_OUTPUT_DIR} ) IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") @@ -34,4 +42,4 @@ ENDIF() TARGET_LINK_LIBRARIES(praft net; dl; fmt; storage; pstd braft brpc ssl crypto zlib protobuf leveldb gflags rocksdb z ${PRAFT_LIB}) -SET_TARGET_PROPERTIES(praft PROPERTIES LINKER_LANGUAGE CXX) \ No newline at end of file +SET_TARGET_PROPERTIES(praft PROPERTIES LINKER_LANGUAGE CXX) diff --git a/src/praft/binlog.proto b/src/praft/binlog.proto new file mode 100644 index 000000000..8f1dc3c99 --- /dev/null +++ b/src/praft/binlog.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; +package pikiwidb; +option optimize_for = LITE_RUNTIME; + +enum OperateType { + kNoOperate = 0; + kPut = 1; + kDelete = 2; +} + +message BinlogEntry { + uint32 cf_idx = 1; + OperateType op_type = 2; + bytes key = 3; + optional bytes value = 4; +} + +message Binlog { + uint32 db_id = 1; + uint32 slot_idx = 2; + repeated BinlogEntry entries = 3; +} diff --git a/src/praft/praft.cc b/src/praft/praft.cc index d68480c70..da97b9fa1 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -11,16 +11,19 @@ #include "praft.h" #include -#include -#include +#include "braft/util.h" +#include "brpc/server.h" + +#include "pstd/log.h" +#include "pstd/pstd_string.h" + +#include "binlog.pb.h" #include "client.h" #include "config.h" -#include "event_loop.h" -#include "log.h" #include "pikiwidb.h" -#include "praft.pb.h" -#include "pstd_string.h" +#include "praft_service.h" +#include "store.h" #define ERROR_LOG_AND_STATUS(msg) \ ({ \ @@ -30,16 +33,6 @@ namespace pikiwidb { -class DummyServiceImpl : public DummyService { - public: - explicit DummyServiceImpl(PRaft* praft) : praft_(praft) {} - void DummyMethod(::google::protobuf::RpcController* controller, const ::pikiwidb::DummyRequest* request, - ::pikiwidb::DummyResponse* response, ::google::protobuf::Closure* done) {} - - private: - PRaft* praft_; -}; - PRaft& 
PRaft::Instance() { static PRaft store; return store; @@ -51,9 +44,9 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { } server_ = std::make_unique(); - DummyServiceImpl service(&PRAFT); auto port = g_config.port + pikiwidb::g_config.raft_port_offset; // Add your service into RPC server + DummyServiceImpl service(&PRAFT); if (server_->AddService(&service, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { return ERROR_LOG_AND_STATUS("Failed to add service"); } @@ -139,6 +132,17 @@ std::string PRaft::GetLeaderId() const { return node_->leader_id().to_string(); } +std::string PRaft::GetLeaderAddress() const { + if (!node_) { + ERROR("Node is not initialized"); + return "Failed to get leader id"; + } + auto id = node_->leader_id(); + id.addr.port -= g_config.raft_port_offset; + auto addr = butil::endpoint2str(id.addr); + return addr.c_str(); +} + std::string PRaft::GetNodeId() const { if (!node_) { ERROR("Node is not initialized"); @@ -339,17 +343,51 @@ void PRaft::Join() { } } -void PRaft::Apply(braft::Task& task) { - if (node_) { - node_->apply(task); +void PRaft::AppendLog(const Binlog& log, std::promise&& promise) { + assert(node_); + assert(node_->is_leader()); + butil::IOBuf data; + butil::IOBufAsZeroCopyOutputStream wrapper(&data); + auto done = new PRaftWriteDoneClosure(std::move(promise)); + if (!log.SerializeToZeroCopyStream(&wrapper)) { + done->SetStatus(rocksdb::Status::Incomplete("Failed to serialize binlog")); + done->Run(); + return; } + DEBUG("append binlog: {}", log.ShortDebugString()); + braft::Task task; + task.data = &data; + task.done = done; + node_->apply(task); } -// @braft::StateMachine void PRaft::on_apply(braft::Iterator& iter) { // A batch of tasks are committed, which must be processed through - // |iter| for (; iter.valid(); iter.next()) { + auto done = iter.done(); + brpc::ClosureGuard done_guard(done); + + Binlog log; + butil::IOBufAsZeroCopyInputStream wrapper(iter.data()); + bool success = 
log.ParseFromZeroCopyStream(&wrapper); + DEBUG("apply binlog: {}", log.ShortDebugString()); + + if (!success) { + static constexpr std::string_view kMsg = "Failed to parse from protobuf when on_apply"; + ERROR(kMsg); + if (done) { // in leader + dynamic_cast(done)->SetStatus(rocksdb::Status::Incomplete(kMsg)); + } + braft::run_closure_in_bthread(done_guard.release()); + return; + } + + auto s = PSTORE.GetBackend(log.db_id())->GetStorage()->OnBinlogWrite(log); + if (done) { // in leader + dynamic_cast(done)->SetStatus(s); + } + // _applied_index = iter.index(); // consider to maintain a member applied_idx + braft::run_closure_in_bthread(done_guard.release()); } } diff --git a/src/praft/praft.h b/src/praft/praft.h index 8d8b1976a..d1217b597 100644 --- a/src/praft/praft.h +++ b/src/praft/praft.h @@ -7,22 +7,10 @@ #pragma once -#include -#include -#include -#include -#include +#include -#include "braft/configuration.h" #include "braft/raft.h" -#include "braft/util.h" -#include "brpc/controller.h" -#include "brpc/server.h" -#include "butil/status.h" - -#include "client.h" -#include "event_loop.h" -#include "tcp_connection.h" +#include "rocksdb/status.h" namespace pikiwidb { @@ -30,6 +18,10 @@ namespace pikiwidb { #define PRAFT PRaft::Instance() +class PClient; +class EventLoop; +class Binlog; + class JoinCmdContext { friend class PRaft; @@ -73,10 +65,24 @@ class JoinCmdContext { int port_ = 0; }; -class PRaft : public braft::StateMachine { +class PRaftWriteDoneClosure : public braft::Closure { public: - PRaft() : server_(nullptr), node_(nullptr) {} + explicit PRaftWriteDoneClosure(std::promise&& promise) : promise_(std::move(promise)) {} + + void Run() override { + promise_.set_value(result_); + delete this; + } + void SetStatus(rocksdb::Status status) { result_ = std::move(status); } + private: + std::promise promise_; + rocksdb::Status result_{rocksdb::Status::Aborted("Unknown error")}; +}; + +class PRaft : public braft::StateMachine { + public: + PRaft() = default; 
~PRaft() override = default; static PRaft& Instance(); @@ -91,7 +97,7 @@ class PRaft : public braft::StateMachine { void ShutDown(); void Join(); - void Apply(braft::Task& task); + void AppendLog(const Binlog& log, std::promise&& promise); //===--------------------------------------------------------------------===// // ClusterJoin command @@ -104,6 +110,7 @@ class PRaft : public braft::StateMachine { bool IsLeader() const; std::string GetLeaderId() const; + std::string GetLeaderAddress() const; std::string GetNodeId() const; std::string GetGroupId() const; braft::NodeStatus GetNodeStatus() const; @@ -126,8 +133,8 @@ class PRaft : public braft::StateMachine { void on_start_following(const ::braft::LeaderChangeContext& ctx) override; private: - std::unique_ptr server_; // brpc - std::unique_ptr node_; + std::unique_ptr server_{nullptr}; // brpc + std::unique_ptr node_{nullptr}; braft::NodeOptions node_options_; // options for raft node std::string raw_addr_; // ip:port of this node diff --git a/src/praft/praft_service.h b/src/praft/praft_service.h new file mode 100644 index 000000000..8efc5f51a --- /dev/null +++ b/src/praft/praft_service.h @@ -0,0 +1,19 @@ +#pragma once + +#include "praft.pb.h" + +namespace pikiwidb { + +class PRaft; + +class DummyServiceImpl : public DummyService { + public: + explicit DummyServiceImpl(PRaft* praft) : praft_(praft) {} + void DummyMethod(::google::protobuf::RpcController* controller, const ::pikiwidb::DummyRequest* request, + ::pikiwidb::DummyResponse* response, ::google::protobuf::Closure* done) override {} + + private: + PRaft* praft_; +}; + +} // namespace pikiwidb \ No newline at end of file diff --git a/src/replication.h b/src/replication.h index 142656788..11d8807f8 100644 --- a/src/replication.h +++ b/src/replication.h @@ -12,9 +12,9 @@ #include #include "common.h" -#include "memory_file.h" +#include "net/unbounded_buffer.h" #include "net/util.h" -#include "unbounded_buffer.h" +#include "pstd/memory_file.h" namespace pikiwidb 
{ diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index babd7f5d2..b34adb24f 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -19,8 +19,21 @@ TARGET_INCLUDE_DIRECTORIES(storage PRIVATE ${rocksdb_SOURCE_DIR}/include PRIVATE ${BRAFT_INCLUDE_DIR} PRIVATE ${BRPC_INCLUDE_DIR} + PRIVATE ${PROTO_OUTPUT_DIR} ) -TARGET_LINK_LIBRARIES (storage pstd braft brpc ssl crypto zlib protobuf leveldb gflags rocksdb) +TARGET_LINK_LIBRARIES (storage + pstd + braft + brpc + ssl + crypto + zlib + leveldb + gflags + rocksdb + binlog_pb + protobuf +) SET_TARGET_PROPERTIES(storage PROPERTIES LINKER_LANGUAGE CXX) diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index c92cf59d7..843b8d00b 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -7,7 +7,10 @@ #define INCLUDE_STORAGE_STORAGE_H_ #include -#include +#include +#include +#include +#include #include #include #include @@ -26,7 +29,9 @@ #include "pstd/pstd_mutex.h" #include "storage/slot_indexer.h" -#include "braft/raft.h" +namespace pikiwidb { +class Binlog; +} namespace storage { @@ -54,6 +59,8 @@ enum class OptionType; template class LRUCache; +using AppendLogFunction = std::function&&)>; + struct StorageOptions { rocksdb::Options options; rocksdb::BlockBasedTableOptions table_options; @@ -64,6 +71,8 @@ struct StorageOptions { size_t small_compaction_duration_threshold = 10000; size_t db_instance_num = 3; // default = 3 int db_id; + AppendLogFunction append_log_function; + uint32_t raft_timeout_s = std::numeric_limits::max(); Status ResetOptions(const OptionType& option_type, const std::unordered_map& options_map); }; @@ -1086,6 +1095,7 @@ class Storage { Status SetOptions(const OptionType& option_type, const std::unordered_map& options); void GetRocksDBInfo(std::string& info); + Status OnBinlogWrite(const pikiwidb::Binlog& log); private: std::vector> insts_; diff --git a/src/storage/src/batch.h 
b/src/storage/src/batch.h new file mode 100644 index 000000000..1e8992822 --- /dev/null +++ b/src/storage/src/batch.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#pragma once + +#include +#include +#include +#include + +#include "rocksdb/db.h" + +#include "binlog.pb.h" +#include "src/redis.h" +#include "storage/storage.h" +#include "storage/storage_define.h" + +namespace storage { + +class Batch { + public: + virtual ~Batch() = default; + + virtual void Put(ColumnFamilyIndex cf_idx, const Slice& key, const Slice& val) = 0; + virtual void Delete(ColumnFamilyIndex cf_idx, const Slice& key) = 0; + virtual auto Commit() -> Status = 0; + + static auto CreateBatch(Redis* redis) -> std::unique_ptr; +}; + +class RocksBatch : public Batch { + public: + RocksBatch(rocksdb::DB* db, const rocksdb::WriteOptions& options, + const std::vector& handles) + : db_(db), options_(options), handles_(handles) {} + + void Put(ColumnFamilyIndex cf_idx, const Slice& key, const Slice& val) override { + batch_.Put(handles_[cf_idx], key, val); + } + void Delete(ColumnFamilyIndex cf_idx, const Slice& key) override { batch_.Delete(handles_[cf_idx], key); } + auto Commit() -> Status override { return db_->Write(options_, &batch_); } + + private: + rocksdb::WriteBatch batch_; + rocksdb::DB* db_; + const rocksdb::WriteOptions& options_; + const std::vector& handles_; +}; + +class BinlogBatch : public Batch { + public: + BinlogBatch(AppendLogFunction func, int32_t index, uint32_t seconds = 10) + : func_(std::move(func)), seconds_(seconds) { + binlog_.set_db_id(0); + binlog_.set_slot_idx(index); + } + + void Put(ColumnFamilyIndex cf_idx, const Slice& key, const Slice& value) override { + auto entry = binlog_.add_entries(); 
+ entry->set_cf_idx(cf_idx); + entry->set_op_type(pikiwidb::OperateType::kPut); + entry->set_key(key.ToString()); + entry->set_value(value.ToString()); + } + + void Delete(ColumnFamilyIndex cf_idx, const Slice& key) override { + auto entry = binlog_.add_entries(); + entry->set_cf_idx(cf_idx); + entry->set_op_type(pikiwidb::OperateType::kDelete); + entry->set_key(key.ToString()); + } + + Status Commit() override { + // FIXME(longfar): We should make sure that in non-RAFT mode, the code doesn't run here + std::promise promise; + auto future = promise.get_future(); + func_(binlog_, std::move(promise)); + auto status = future.wait_for(std::chrono::seconds(seconds_)); + if (status == std::future_status::timeout) { + return Status::Incomplete("Wait for write timeout"); + } + return future.get(); + } + + private: + AppendLogFunction func_; + pikiwidb::Binlog binlog_; + uint32_t seconds_ = 10; +}; + +inline auto Batch::CreateBatch(Redis* redis) -> std::unique_ptr { + if (redis->GetAppendLogFunction()) { + return std::make_unique(redis->GetAppendLogFunction(), redis->GetIndex(), redis->GetRaftTimeout()); + } + return std::make_unique(redis->GetDB(), redis->GetWriteOptions(), redis->GetColumnFamilyHandles()); +} + +} // namespace storage diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 5203e3bb3..3840db071 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -52,6 +52,8 @@ Redis::~Redis() { } Status Redis::Open(const StorageOptions& storage_options, const std::string& db_path) { + append_log_function_ = storage_options.append_log_function; + raft_timeout_s_ = storage_options.raft_timeout_s; statistics_store_->SetCapacity(storage_options.statistics_max_size); small_compaction_threshold_ = storage_options.small_compaction_threshold; diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index e5439042a..cf008b7e2 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -210,6 +210,10 @@ class Redis { Status 
SetSmallCompactionThreshold(uint64_t small_compaction_threshold); Status SetSmallCompactionDurationThreshold(uint64_t small_compaction_duration_threshold); void GetRocksDBInfo(std::string& info, const char* prefix); + auto GetWriteOptions() const -> const rocksdb::WriteOptions& { return default_write_options_; } + auto GetColumnFamilyHandles() const -> const std::vector& { return handles_; } + auto GetRaftTimeout() const -> uint32_t { return raft_timeout_s_; } + auto GetAppendLogFunction() const -> const AppendLogFunction& { return append_log_function_; } // Sets Commands Status SAdd(const Slice& key, const std::vector& members, int32_t* ret); @@ -354,6 +358,10 @@ class Redis { std::atomic_uint64_t small_compaction_duration_threshold_; std::unique_ptr> statistics_store_; + // For raft + uint32_t raft_timeout_s_ = 10; + AppendLogFunction append_log_function_; + Status UpdateSpecificKeyStatistics(const DataType& dtype, const std::string& key, uint64_t count); Status UpdateSpecificKeyDuration(const DataType& dtype, const std::string& key, uint64_t duration); Status AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t count, uint64_t duration); diff --git a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index 9abf51e87..e479e0c26 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -12,6 +12,7 @@ #include +#include "batch.h" #include "pstd/log.h" #include "src/base_data_key_format.h" #include "src/base_data_value_format.h" @@ -118,7 +119,7 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int } } - rocksdb::WriteBatch batch; + auto batch = Batch::CreateBatch(this); rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; @@ -145,7 +146,7 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int if (s.ok()) { del_cnt++; statistic++; - batch.Delete(handles_[kHashesDataCF], hashes_data_key.Encode()); + batch->Delete(kHashesDataCF, 
hashes_data_key.Encode()); } else if (s.IsNotFound()) { continue; } else { @@ -157,7 +158,7 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kHashesMetaCF, base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { *ret = 0; @@ -165,7 +166,7 @@ Status Redis::HDel(const Slice& key, const std::vector& fields, int } else { return s; } - s = db_->Write(default_write_options_, &batch); + s = batch->Commit(); UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); return s; } @@ -616,7 +617,7 @@ Status Redis::HMSet(const Slice& key, const std::vector& fvs) { } Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res) { - rocksdb::WriteBatch batch; + auto batch = Batch::CreateBatch(this); ScopeRecordLock l(lock_mgr_, key); uint64_t version = 0; @@ -631,10 +632,10 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { version = parsed_hashes_meta_value.InitialMetaValue(); parsed_hashes_meta_value.SetCount(1); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kHashesMetaCF, base_meta_key.Encode(), meta_value); HashesDataKey data_key(key, version, field); BaseDataValue internal_value(value); - batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); + batch->Put(kHashesDataCF, data_key.Encode(), internal_value.Encode()); *res = 1; } else { version = parsed_hashes_meta_value.Version(); @@ -647,7 +648,7 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int return Status::OK(); } else { BaseDataValue internal_value(value); - batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); 
+ batch->Put(kHashesDataCF, hashes_data_key.Encode(), internal_value.Encode()); statistic++; } } else if (s.IsNotFound()) { @@ -656,8 +657,8 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int } parsed_hashes_meta_value.ModifyCount(1); BaseDataValue internal_value(value); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); - batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + batch->Put(kHashesMetaCF, base_meta_key.Encode(), meta_value); + batch->Put(kHashesDataCF, hashes_data_key.Encode(), internal_value.Encode()); *res = 1; } else { return s; @@ -667,15 +668,15 @@ Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int EncodeFixed32(meta_value_buf, 1); HashesMetaValue meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = meta_value.UpdateVersion(); - batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value.Encode()); + batch->Put(kHashesMetaCF, base_meta_key.Encode(), meta_value.Encode()); HashesDataKey data_key(key, version, field); BaseDataValue internal_value(value); - batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); + batch->Put(kHashesDataCF, data_key.Encode(), internal_value.Encode()); *res = 1; } else { return s; } - s = db_->Write(default_write_options_, &batch); + s = batch->Commit(); UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); return s; } diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 02e4312b5..2ef595054 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -4,13 +4,14 @@ // of patent rights can be found in the PATENTS file in the same directory. 
#include +#include #include -#include "rocksdb/utilities/checkpoint.h" - +#include "binlog.pb.h" #include "config.h" #include "pstd/log.h" #include "pstd/pikiwidb_slot.h" +#include "rocksdb/utilities/checkpoint.h" #include "scope_snapshot.h" #include "src/lru_cache.h" #include "src/mutex_impl.h" @@ -2258,4 +2259,28 @@ void Storage::DisableWal(const bool is_wal_disable) { } } +Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log) { + auto& inst = insts_[log.slot_idx()]; + + rocksdb::WriteBatch batch; + for (const auto& entry : log.entries()) { + switch (entry.op_type()) { + case pikiwidb::OperateType::kPut: { + assert(entry.has_value()); + batch.Put(inst->GetColumnFamilyHandles()[entry.cf_idx()], entry.key(), entry.value()); + } break; + case pikiwidb::OperateType::kDelete: { + assert(!entry.has_value()); + batch.Delete(inst->GetColumnFamilyHandles()[entry.cf_idx()], entry.key()); + } break; + default: + static constexpr std::string_view msg = "Unknown operate type in binlog"; + ERROR(msg); + return Status::Incomplete(msg); + } + } + + return inst->GetDB()->Write(inst->GetWriteOptions(), &batch); +} + } // namespace storage diff --git a/tests/consistency_test.go b/tests/consistency_test.go new file mode 100644 index 000000000..d01072426 --- /dev/null +++ b/tests/consistency_test.go @@ -0,0 +1,152 @@ +package pikiwidb_test + +import ( + "context" + "log" + "strconv" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/redis/go-redis/v9" + + "github.com/OpenAtomFoundation/pikiwidb/tests/util" +) + +var _ = Describe("Consistency", Ordered, func() { + var ( + ctx = context.TODO() + servers []*util.Server + followers []*redis.Client + leader *redis.Client + ) + + const ( + testKey = "consistency-test" + ) + + BeforeAll(func() { + for i := 0; i < 3; i++ { + config := util.GetConfPath(false, int64(i)) + s := util.StartServer(config, map[string]string{"port": strconv.Itoa(12000 + (i+1)*111)}, true) + Expect(s).NotTo(BeNil()) + servers = append(servers, s) + + if i == 0 { + leader = s.NewClient() + Expect(leader).NotTo(BeNil()) + Expect(leader.FlushDB(ctx).Err()).NotTo(HaveOccurred()) + } else { + c := s.NewClient() + Expect(c).NotTo(BeNil()) + Expect(c.FlushDB(ctx).Err()).NotTo(HaveOccurred()) + followers = append(followers, c) + } + } + + res, err := leader.Do(ctx, "RAFT.CLUSTER", "INIT").Result() + Expect(err).NotTo(HaveOccurred()) + msg, ok := res.(string) + Expect(ok).To(BeTrue()) + Expect(msg).To(Equal("OK")) + err = leader.Close() + Expect(err).NotTo(HaveOccurred()) + leader = nil + + for _, f := range followers { + res, err := f.Do(ctx, "RAFT.CLUSTER", "JOIN", "127.0.0.1:12111").Result() + Expect(err).NotTo(HaveOccurred()) + msg, ok := res.(string) + Expect(ok).To(BeTrue()) + Expect(msg).To(Equal("OK")) + err = f.Close() + Expect(err).NotTo(HaveOccurred()) + } + followers = nil + }) + + AfterAll(func() { + for _, s := range servers { + err := s.Close() + if err != nil { + log.Println("Close Server fail.", err.Error()) + return + } + } + }) + + BeforeEach(func() { + for i, s := range servers { + if i == 0 { + leader = s.NewClient() + Expect(leader).NotTo(BeNil()) + Expect(leader.FlushDB(ctx).Err()).NotTo(HaveOccurred()) + } else { + c := s.NewClient() + Expect(c).NotTo(BeNil()) + Expect(c.FlushDB(ctx).Err().Error()).To(Equal("ERR MOVED 127.0.0.1:12111")) + followers = append(followers, c) + } + } + }) + + AfterEach(func() { + err := 
leader.Close() + Expect(err).NotTo(HaveOccurred()) + leader = nil + + for _, f := range followers { + err = f.Close() + Expect(err).NotTo(HaveOccurred()) + } + followers = nil + }) + + It("SimpleWriteConsistencyTest", func() { + set, err := leader.HSet(ctx, testKey, map[string]string{ + "fa": "va", + "fb": "vb", + "fc": "vc", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(set).To(Equal(int64(3))) + + getall, err := leader.HGetAll(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(getall).To(Equal(map[string]string{ + "fa": "va", + "fb": "vb", + "fc": "vc", + })) + time.Sleep(10000 * time.Millisecond) + for _, f := range followers { + getall, err := f.HGetAll(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(getall).To(Equal(map[string]string{ + "fa": "va", + "fb": "vb", + "fc": "vc", + })) + } + + del, err := leader.HDel(ctx, testKey, "fb").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(del).To(Equal(int64(1))) + + getall, err = leader.HGetAll(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(getall).To(Equal(map[string]string{ + "fa": "va", + "fc": "vc", + })) + time.Sleep(100 * time.Millisecond) + for _, f := range followers { + getall, err := f.HGetAll(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(getall).To(Equal(map[string]string{ + "fa": "va", + "fc": "vc", + })) + } + }) +}) From 7f80585efa77bb7287dd93cb1b72e7095a81e7eb Mon Sep 17 00:00:00 2001 From: panlei-coder <62509266+panlei-coder@users.noreply.github.com> Date: Thu, 11 Apr 2024 22:17:58 +0800 Subject: [PATCH 14/33] fix: fix some code style and formatting issues with checkpoint pr (#242) * fix: fix some code style and formatting issues with checkpoint pr * fix: resolve code conflicts * fix: fix format * fix: remove unnecessary forward references --- ...oint_manager.cpp => checkpoint_manager.cc} | 8 ++----- src/checkpoint_manager.h | 22 ++++--------------- src/{db.cpp => db.cc} | 15 ++++++------- src/db.h | 17 
+++++++------- src/store.cc | 16 +++++++------- src/store.h | 15 +------------ 6 files changed, 31 insertions(+), 62 deletions(-) rename src/{checkpoint_manager.cpp => checkpoint_manager.cc} (85%) rename src/{db.cpp => db.cc} (88%) diff --git a/src/checkpoint_manager.cpp b/src/checkpoint_manager.cc similarity index 85% rename from src/checkpoint_manager.cpp rename to src/checkpoint_manager.cc index 9938499c4..a29211e6c 100644 --- a/src/checkpoint_manager.cpp +++ b/src/checkpoint_manager.cc @@ -12,11 +12,8 @@ namespace pikiwidb { -class DB; - void CheckpointManager::Init(int instNum, DB* db) { checkpoint_num_ = instNum; - checkpoint_infoes_.resize(checkpoint_num_); res_.reserve(checkpoint_num_); db_ = db; } @@ -34,8 +31,7 @@ void CheckpointManager::CreateCheckpoint(const std::string& path) { std::lock_guard Lock(shared_mutex_); for (int i = 0; i < checkpoint_num_; ++i) { - checkpoint_infoes_[i].checkpoint_in_process = true; - auto res = std::async(std::launch::async, &DB::DoBgSave, db_, std::ref(checkpoint_infoes_[i]), path, i); + auto res = std::async(std::launch::async, &DB::DoBgSave, db_, path, i); res_.push_back(std::move(res)); } } @@ -46,4 +42,4 @@ void CheckpointManager::WaitForCheckpointDone() { } } -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/checkpoint_manager.h b/src/checkpoint_manager.h index ec7ebd3e2..065027424 100644 --- a/src/checkpoint_manager.h +++ b/src/checkpoint_manager.h @@ -4,31 +4,21 @@ * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. 
*/ -#ifndef PIKIWIDB_CHECKPOINT_MANAGER_H -#define PIKIWIDB_CHECKPOINT_MANAGER_H + +#pragma once #include #include -#include -#include #include -#include "rocksdb/db.h" -#include "rocksdb/utilities/checkpoint.h" - namespace pikiwidb { class DB; -struct CheckpointInfo { - bool checkpoint_in_process = false; -}; - class CheckpointManager { public: CheckpointManager() = default; ~CheckpointManager() = default; - ; void Init(int instNum, DB* db); @@ -36,16 +26,12 @@ class CheckpointManager { void WaitForCheckpointDone(); - bool CheckpointInProcess(); - private: - int checkpoint_num_; + int checkpoint_num_ = 0; std::vector> res_; DB* db_ = nullptr; std::shared_mutex shared_mutex_; - std::vector checkpoint_infoes_; }; -} // namespace pikiwidb -#endif // PIKIWIDB_CHECKPOINT_MANAGER_H +} // namespace pikiwidb diff --git a/src/db.cpp b/src/db.cc similarity index 88% rename from src/db.cpp rename to src/db.cc index d0b79e508..3a1cadd50 100644 --- a/src/db.cpp +++ b/src/db.cc @@ -6,16 +6,14 @@ */ #include "db.h" +#include "checkpoint_manager.h" #include "config.h" -#include "praft/praft.h" -#include "pstd/log.h" +#include "log.h" extern pikiwidb::PConfig g_config; namespace pikiwidb { -struct CheckPointInfo; - DB::DB(int db_index, const std::string& db_path) : db_index_(db_index), db_path_(db_path + std::to_string(db_index_) + '/') { storage::StorageOptions storage_options; @@ -37,19 +35,20 @@ DB::DB(int db_index, const std::string& db_path) ERROR("Storage open failed! 
{}", s.ToString()); abort(); } + + checkpoint_manager_ = std::make_unique(); + checkpoint_manager_->Init(g_config.db_instance_num, this); + opened_ = true; INFO("Open DB{} success!", db_index_); } -void DB::DoBgSave(CheckpointInfo& checkpoint_info, const std::string& path, int i) { +void DB::DoBgSave(const std::string& path, int i) { // 1) always hold storage's sharedLock std::shared_lock sharedLock(storage_mutex_); // 2)Create the storage's checkpoint 。 auto status = storage_->CreateCheckpoint(path, i); - - // 3) write the status - checkpoint_info.checkpoint_in_process = false; } void DB::CreateCheckpoint(const std::string& path) { checkpoint_manager_->CreateCheckpoint(path); } diff --git a/src/db.h b/src/db.h index 461886f9f..7b455f2ac 100644 --- a/src/db.h +++ b/src/db.h @@ -5,15 +5,18 @@ * of patent rights can be found in the PATENTS file in the same directory. */ -#ifndef PIKIWIDB_DB_H -#define PIKIWIDB_DB_H +#pragma once +#include +#include #include -#include "checkpoint_manager.h" #include "storage/storage.h" + namespace pikiwidb { +class CheckpointManager; + class DB { public: DB(int db_index, const std::string& db_path); @@ -30,14 +33,14 @@ class DB { void CreateCheckpoint(const std::string& path); - [[maybe_unused]] void DoBgSave(CheckpointInfo&, const std::string&, int i); + [[maybe_unused]] void DoBgSave(const std::string&, int i); void WaitForCheckpointDone(); int GetDbIndex() { return db_index_; } private: - const int db_index_; + const int db_index_ = 0; const std::string db_path_; const std::string dump_parent_path_; const std::string dump_path_; @@ -53,8 +56,6 @@ class DB { bool opened_ = false; std::unique_ptr checkpoint_manager_; - }; -} // namespace pikiwidb -#endif // PIKIWIDB_DB_H +} // namespace pikiwidb diff --git a/src/store.cc b/src/store.cc index 1d0170586..ca739e5b0 100644 --- a/src/store.cc +++ b/src/store.cc @@ -7,8 +7,8 @@ #include +#include "checkpoint_manager.h" #include "config.h" -#include "db.h" #include "log.h" #include 
"store.h" @@ -24,12 +24,9 @@ void PStore::Init() { return; } - backends_.reserve(dbNum_); - dbNum_ = g_config.databases; backends_.reserve(dbNum_); if (g_config.backend == kBackEndRocksDB) { - for (int i = 0; i < dbNum_; i++) { auto db = std::make_unique(i, g_config.dbpath); backends_.push_back(std::move(db)); @@ -39,10 +36,10 @@ void PStore::Init() { } } -void PStore::DoSomeThingSpecificDB(const TasksVector tasks) { +void PStore::DoSomeThingSpecificDB(const TasksVector& tasks) { std::for_each(tasks.begin(), tasks.end(), [this](const auto& task) { switch (task.type) { - case kCheckpoint: + case kCheckpoint: { if (task.db < 0 || task.db >= dbNum_) { WARN("The database index is out of range."); return; @@ -56,7 +53,11 @@ void PStore::DoSomeThingSpecificDB(const TasksVector tasks) { trimSlash(path); db->CreateCheckpoint(path); break; - }; + } + + default: + break; + } }); } @@ -69,7 +70,6 @@ void PStore::WaitForCheckpointDone() { void PStore::trimSlash(std::string& dirName) { while (dirName.back() == '/') { dirName.pop_back(); - } } diff --git a/src/store.h b/src/store.h index 597b01c7f..7cc26331f 100644 --- a/src/store.h +++ b/src/store.h @@ -9,22 +9,12 @@ #define GLOG_NO_ABBREVIATED_SEVERITIES -#include "common.h" -#include "db.h" -#include "storage/storage.h" - #include #include -#include -#include #include #include -#include "checkpoint_manager.h" -#include "common.h" #include "db.h" -#include "storage/storage.h" -#include "braft/raft.h" namespace pikiwidb { @@ -47,8 +37,6 @@ struct TaskContext { using TasksVector = std::vector; -class CheckpointManager; - class PStore { public: friend class CheckpointManager; @@ -61,11 +49,10 @@ class PStore { std::unique_ptr& GetBackend(int32_t index) { return backends_[index]; }; - void DoSomeThingSpecificDB(const TasksVector task); + void DoSomeThingSpecificDB(const TasksVector& task); void WaitForCheckpointDone(); - std::shared_mutex& SharedMutex() { return dbs_mutex_; } private: From df6bc2d588d67bfd623f5005f9407f746cc6d1dd 
Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Fri, 12 Apr 2024 22:28:08 +0800 Subject: [PATCH 15/33] fix: include praft.h in db.cc (#271) --- src/db.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/db.cc b/src/db.cc index 3a1cadd50..122d84b4c 100644 --- a/src/db.cc +++ b/src/db.cc @@ -6,9 +6,12 @@ */ #include "db.h" + +#include "praft/praft.h" +#include "pstd/log.h" + #include "checkpoint_manager.h" #include "config.h" -#include "log.h" extern pikiwidb::PConfig g_config; From 540f937dc82c86db912bae1132883e7d4b63a7e4 Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Fri, 12 Apr 2024 23:05:07 +0800 Subject: [PATCH 16/33] feat: maintain the logidx-seqno in SST files (#246) * feat: move log_index codes * feat: init log index and update when write successfully * refactor: use deque instead of list to save logidx-seqno --- CMakeLists.txt | 18 ++ src/CMakeLists.txt | 17 -- src/db.h | 4 - src/praft/praft.cc | 4 +- src/pstd/thread_pool.h | 12 +- src/storage/CMakeLists.txt | 2 + src/storage/include/storage/storage.h | 7 +- src/storage/include/storage/storage_define.h | 1 + src/storage/src/log_index.cc | 127 ++++++++ src/storage/src/log_index.h | 167 +++++++++++ src/storage/src/redis.cc | 32 +- src/storage/src/redis.h | 14 + src/storage/src/storage.cc | 27 +- src/storage/tests/CMakeLists.txt | 30 ++ src/storage/tests/log_index_collector_test.cc | 176 +++++++++++ src/storage/tests/log_index_test.cc | 273 ++++++++++++++++++ 16 files changed, 874 insertions(+), 37 deletions(-) create mode 100644 src/storage/src/log_index.cc create mode 100644 src/storage/src/log_index.h create mode 100644 src/storage/tests/CMakeLists.txt create mode 100644 src/storage/tests/log_index_collector_test.cc create mode 100644 src/storage/tests/log_index_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index b214fb258..cbbe3485f 100644 --- a/CMakeLists.txt +++ 
b/CMakeLists.txt @@ -128,6 +128,24 @@ SET(EXTERNAL_PROJECT_LOG_ARGS LOG_TEST 1 LOG_INSTALL 0) +IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + SET(LIB rt) +ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + SET(LIB + pthread + "-framework CoreFoundation" + "-framework CoreGraphics" + "-framework CoreData" + "-framework CoreText" + "-framework Security" + "-framework Foundation" + "-Wl,-U,_MallocExtension_ReleaseFreeMemory" + "-Wl,-U,_ProfilerStart" + "-Wl,-U,_ProfilerStop" + "-Wl,-U,__Z13GetStackTracePPvii") +ENDIF() +SET(LIB ${LIB} CACHE INTERNAL "libs which should be linked for executable target") + INCLUDE(FetchContent) INCLUDE(gflags) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 8b2e97d7f..76b5345ae 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -22,23 +22,6 @@ TARGET_INCLUDE_DIRECTORIES(pikiwidb ) -IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") - SET(LIB rt) -ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "Darwin") - SET(LIB - pthread - "-framework CoreFoundation" - "-framework CoreGraphics" - "-framework CoreData" - "-framework CoreText" - "-framework Security" - "-framework Foundation" - "-Wl,-U,_MallocExtension_ReleaseFreeMemory" - "-Wl,-U,_ProfilerStart" - "-Wl,-U,_ProfilerStop" - "-Wl,-U,__Z13GetStackTracePPvii") -ENDIF() - TARGET_LINK_LIBRARIES(pikiwidb net; dl; fmt; storage; rocksdb; pstd braft brpc ssl crypto zlib protobuf leveldb gflags z praft praft_pb "${LIB}") SET_TARGET_PROPERTIES(pikiwidb PROPERTIES LINKER_LANGUAGE CXX) \ No newline at end of file diff --git a/src/db.h b/src/db.h index 7b455f2ac..11dc3a207 100644 --- a/src/db.h +++ b/src/db.h @@ -7,10 +7,6 @@ #pragma once -#include -#include -#include - #include "storage/storage.h" namespace pikiwidb { diff --git a/src/praft/praft.cc b/src/praft/praft.cc index da97b9fa1..fdaeda8f9 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -370,7 +370,7 @@ void PRaft::on_apply(braft::Iterator& iter) { Binlog log; butil::IOBufAsZeroCopyInputStream wrapper(iter.data()); bool success = 
log.ParseFromZeroCopyStream(&wrapper); - DEBUG("apply binlog: {}", log.ShortDebugString()); + DEBUG("apply binlog{}: {}", iter.index(), log.ShortDebugString()); if (!success) { static constexpr std::string_view kMsg = "Failed to parse from protobuf when on_apply"; @@ -382,7 +382,7 @@ void PRaft::on_apply(braft::Iterator& iter) { return; } - auto s = PSTORE.GetBackend(log.db_id())->GetStorage()->OnBinlogWrite(log); + auto s = PSTORE.GetBackend(log.db_id())->GetStorage()->OnBinlogWrite(log, iter.index()); if (done) { // in leader dynamic_cast(done)->SetStatus(s); } diff --git a/src/pstd/thread_pool.h b/src/pstd/thread_pool.h index 4f1bbd931..92ca86ad6 100644 --- a/src/pstd/thread_pool.h +++ b/src/pstd/thread_pool.h @@ -27,7 +27,7 @@ class ThreadPool final { void operator=(const ThreadPool&) = delete; template - auto ExecuteTask(F&& f, Args&&... args) -> std::future::type>; + auto ExecuteTask(F&& f, Args&&... args) -> std::future>; void JoinAll(); void SetMaxIdleThread(unsigned int m); @@ -48,17 +48,17 @@ class ThreadPool final { std::condition_variable cond_; unsigned waiters_; bool shutdown_; - std::deque > tasks_; + std::deque> tasks_; static const int kMaxThreads = 256; }; template -auto ThreadPool::ExecuteTask(F&& f, Args&&... args) -> std::future::type> { - using resultType = typename std::invoke_result::type; +auto ThreadPool::ExecuteTask(F&& f, Args&&... args) -> std::future> { + using resultType = std::invoke_result_t; auto task = - std::make_shared >(std::bind(std::forward(f), std::forward(args)...)); + std::make_shared>(std::bind(std::forward(f), std::forward(args)...)); { std::unique_lock guard(mutex_); @@ -77,4 +77,4 @@ auto ThreadPool::ExecuteTask(F&& f, Args&&... 
args) -> std::futureget_future(); } -} // namespace pstd \ No newline at end of file +} // namespace pstd diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index b34adb24f..f734fefb0 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -37,3 +37,5 @@ TARGET_LINK_LIBRARIES (storage ) SET_TARGET_PROPERTIES(storage PROPERTIES LINKER_LANGUAGE CXX) + +ADD_SUBDIRECTORY(tests) diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 843b8d00b..bec856b58 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -52,6 +52,7 @@ using BlockBasedTableOptions = rocksdb::BlockBasedTableOptions; using Status = rocksdb::Status; using Slice = rocksdb::Slice; using Env = rocksdb::Env; +using LogIndex = int64_t; class Redis; enum class OptionType; @@ -70,8 +71,8 @@ struct StorageOptions { size_t small_compaction_threshold = 5000; size_t small_compaction_duration_threshold = 10000; size_t db_instance_num = 3; // default = 3 - int db_id; - AppendLogFunction append_log_function; + int db_id = 0; + AppendLogFunction append_log_function = nullptr; uint32_t raft_timeout_s = std::numeric_limits::max(); Status ResetOptions(const OptionType& option_type, const std::unordered_map& options_map); }; @@ -1095,7 +1096,7 @@ class Storage { Status SetOptions(const OptionType& option_type, const std::unordered_map& options); void GetRocksDBInfo(std::string& info); - Status OnBinlogWrite(const pikiwidb::Binlog& log); + Status OnBinlogWrite(const pikiwidb::Binlog& log, LogIndex log_idx); private: std::vector> insts_; diff --git a/src/storage/include/storage/storage_define.h b/src/storage/include/storage/storage_define.h index 053177c17..4b27c860c 100644 --- a/src/storage/include/storage/storage_define.h +++ b/src/storage/include/storage/storage_define.h @@ -42,6 +42,7 @@ enum ColumnFamilyIndex { kZsetsMetaCF = 7, kZsetsDataCF = 8, kZsetsScoreCF = 9, + kColumnFamilyNum = 10, }; const 
static char kNeedTransformCharacter = '\u0000'; diff --git a/src/storage/src/log_index.cc b/src/storage/src/log_index.cc new file mode 100644 index 000000000..3d4458f56 --- /dev/null +++ b/src/storage/src/log_index.cc @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#include "log_index.h" + +#include +#include +#include +#include + +#include "redis.h" + +namespace storage { + +rocksdb::Status storage::LogIndexOfColumnFamilies::Init(Redis *db) { + for (int i = 0; i < cf_.size(); i++) { + rocksdb::TablePropertiesCollection collection; + auto s = db->GetDB()->GetPropertiesOfAllTables(db->GetColumnFamilyHandles()[i], &collection); + if (!s.ok()) { + return s; + } + auto res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(collection); + if (res.has_value()) { + cf_[i].applied_log_index.store(res->GetAppliedLogIndex()); + cf_[i].flushed_log_index.store(res->GetAppliedLogIndex()); + } + } + return Status::OK(); +} + +LogIndex LogIndexOfColumnFamilies::GetSmallestLogIndex(std::function &&f) const { + auto smallest_log_index = std::numeric_limits::max(); + for (const auto &it : cf_) { + smallest_log_index = std::min(f(it), smallest_log_index); + } + return smallest_log_index; +} + +std::optional storage::LogIndexTablePropertiesCollector::ReadStatsFromTableProps( + const std::shared_ptr &table_props) { + const auto &user_properties = table_props->user_collected_properties; + const auto it = user_properties.find(kPropertyName.data()); + if (it == user_properties.end()) { + return std::nullopt; + } + std::string s = it->second; + LogIndex applied_log_index; + SequenceNumber largest_seqno; + auto res = sscanf(s.c_str(), "%" PRIi64 "/%" PRIu64 "", &applied_log_index, 
&largest_seqno); + assert(res == 2); + + return LogIndexAndSequencePair(applied_log_index, largest_seqno); +} + +LogIndex LogIndexAndSequenceCollector::FindAppliedLogIndex(SequenceNumber seqno) const { + if (seqno == 0) { // the seqno will be 0 when executing compaction + return 0; + } + std::shared_lock gd(mutex_); + if (list_.empty() || seqno < list_.front().GetSequenceNumber()) { + return 0; + } + if (seqno >= list_.back().GetSequenceNumber()) { + return list_.back().GetAppliedLogIndex(); + } + + auto it = std::lower_bound( + list_.begin(), list_.end(), seqno, + [](const LogIndexAndSequencePair &p, SequenceNumber tar) { return p.GetSequenceNumber() <= tar; }); + if (it->GetSequenceNumber() > seqno) { + --it; + } + assert(it->GetSequenceNumber() <= seqno); + return it->GetAppliedLogIndex(); +} + +void LogIndexAndSequenceCollector::Update(LogIndex smallest_applied_log_index, SequenceNumber smallest_flush_seqno) { + /* + If step length > 1, log index is sampled and sacrifice precision to save memory usage. + It means that extra applied log may be applied again on start stage. + */ + if ((smallest_applied_log_index & step_length_mask_) == 0) { + std::lock_guard gd(mutex_); + list_.emplace_back(smallest_applied_log_index, smallest_flush_seqno); + } +} + +// TODO(longfar): find the iterator which should be deleted and erase from begin to the iterator +void LogIndexAndSequenceCollector::Purge(LogIndex smallest_applied_log_index) { + /* + * The reason that we use smallest applied log index of all column families instead of smallest flushed log index is + * that the log index corresponding to the largest sequence number in the next flush must be greater than or equal to + * the smallest applied log index at this moment. + * So we just need to make sure that there is an element in the queue which is less than or equal to the smallest + * applied log index to ensure that we can find a correct log index while doing next flush. 
+ */ + std::lock_guard gd(mutex_); + if (list_.size() < 2) { + return; + } + auto second = std::next(list_.begin()); + while (list_.size() >= 2 && second->GetAppliedLogIndex() <= smallest_applied_log_index) { + list_.pop_front(); + ++second; + } +} + +auto LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection( + const rocksdb::TablePropertiesCollection &collection) -> std::optional { + LogIndex max_flushed_log_index{-1}; + rocksdb::SequenceNumber seqno{}; + for (const auto &[_, props] : collection) { + auto res = LogIndexTablePropertiesCollector::ReadStatsFromTableProps(props); + if (res.has_value() && res->GetAppliedLogIndex() > max_flushed_log_index) { + max_flushed_log_index = res->GetAppliedLogIndex(); + seqno = res->GetSequenceNumber(); + } + } + return max_flushed_log_index == -1 ? std::nullopt + : std::make_optional(max_flushed_log_index, seqno); +} + +} // namespace storage diff --git a/src/storage/src/log_index.h b/src/storage/src/log_index.h new file mode 100644 index 000000000..44d486338 --- /dev/null +++ b/src/storage/src/log_index.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "fmt/core.h" +#include "rocksdb/db.h" +#include "rocksdb/listener.h" +#include "rocksdb/table_properties.h" +#include "rocksdb/types.h" +#include "storage/storage_define.h" + +namespace storage { + +using LogIndex = int64_t; +using rocksdb::SequenceNumber; +class Redis; + +class LogIndexAndSequencePair { + public: + LogIndexAndSequencePair(LogIndex applied_log_index, SequenceNumber seqno) + : applied_log_index_(applied_log_index), seqno_(seqno) {} + + void SetAppliedLogIndex(LogIndex applied_log_index) { applied_log_index_ = applied_log_index; } + void SetSequenceNumber(SequenceNumber seqno) { seqno_ = seqno; } + + LogIndex GetAppliedLogIndex() const { return applied_log_index_; } + SequenceNumber GetSequenceNumber() const { return seqno_; } + + private: + LogIndex applied_log_index_ = 0; + SequenceNumber seqno_ = 0; +}; + +class LogIndexOfColumnFamilies { + struct LogIndexPair { + std::atomic applied_log_index = 0; // newest record in memtable. + std::atomic flushed_log_index = 0; // newest record in sst file. 
+ }; + + public: + // Read the largest log index of each column family from all sst files + rocksdb::Status Init(Redis *db); + + LogIndex GetSmallestAppliedLogIndex() const { + return GetSmallestLogIndex([](const LogIndexPair &p) { return p.applied_log_index.load(); }); + } + // LogIndex GetSmallestFlushedLogIndex() const { + // return GetSmallestLogIndex([](const LogIndexPair &p) { return p.flushed_log_index.load(); }); + // } + void SetFlushedLogIndex(size_t cf_id, LogIndex log_index) { + cf_[cf_id].flushed_log_index = std::max(cf_[cf_id].flushed_log_index.load(), log_index); + } + + bool IsApplied(size_t cf_id, LogIndex cur_log_index) const { + return cur_log_index < cf_[cf_id].applied_log_index.load(); + } + void Update(size_t cf_id, LogIndex cur_log_index) { cf_[cf_id].applied_log_index.store(cur_log_index); } + + private: + LogIndex GetSmallestLogIndex(std::function &&f) const; + std::array cf_; +}; + +class LogIndexAndSequenceCollector { + public: + explicit LogIndexAndSequenceCollector(uint8_t step_length_bit = 0) { step_length_mask_ = (1 << step_length_bit) - 1; } + + // find the index of log which contain seqno or before it + LogIndex FindAppliedLogIndex(SequenceNumber seqno) const; + + // if there's a new pair, add it to list; otherwise, do nothing + void Update(LogIndex smallest_applied_log_index, SequenceNumber smallest_flush_seqno); + + // purge out dated log index after memtable flushed. 
+ void Purge(LogIndex smallest_applied_log_index); + + private: + uint64_t step_length_mask_ = 0; + mutable std::shared_mutex mutex_; + std::deque list_; +}; + +class LogIndexTablePropertiesCollector : public rocksdb::TablePropertiesCollector { + public: + static constexpr std::string_view kPropertyName = "LargestLogIndex/LargestSequenceNumber"; + + explicit LogIndexTablePropertiesCollector(const LogIndexAndSequenceCollector &collector) : collector_(collector) {} + + rocksdb::Status AddUserKey(const rocksdb::Slice &key, const rocksdb::Slice &value, rocksdb::EntryType type, + SequenceNumber seq, uint64_t file_size) override { + largest_seqno_ = std::max(largest_seqno_, seq); + return rocksdb::Status::OK(); + } + rocksdb::Status Finish(rocksdb::UserCollectedProperties *properties) override { + properties->insert(Materialize()); + return rocksdb::Status::OK(); + } + const char *Name() const override { return "LogIndexTablePropertiesCollector"; } + rocksdb::UserCollectedProperties GetReadableProperties() const override { + return rocksdb::UserCollectedProperties{Materialize()}; + } + + static std::optional ReadStatsFromTableProps( + const std::shared_ptr &table_props); + + static auto GetLargestLogIndexFromTableCollection(const rocksdb::TablePropertiesCollection &collection) + -> std::optional; + + private: + std::pair Materialize() const { + if (-1 == cache_) { + cache_ = collector_.FindAppliedLogIndex(largest_seqno_); + } + return std::make_pair(static_cast(kPropertyName), fmt::format("{}/{}", cache_, largest_seqno_)); + } + + private: + const LogIndexAndSequenceCollector &collector_; + SequenceNumber largest_seqno_ = 0; + mutable LogIndex cache_{-1}; +}; + +class LogIndexTablePropertiesCollectorFactory : public rocksdb::TablePropertiesCollectorFactory { + public: + explicit LogIndexTablePropertiesCollectorFactory(const LogIndexAndSequenceCollector &collector) + : collector_(collector) {} + ~LogIndexTablePropertiesCollectorFactory() override = default; + + 
rocksdb::TablePropertiesCollector *CreateTablePropertiesCollector( + [[maybe_unused]] rocksdb::TablePropertiesCollectorFactory::Context context) override { + return new LogIndexTablePropertiesCollector(collector_); + } + const char *Name() const override { return "LogIndexTablePropertiesCollectorFactory"; } + + private: + const LogIndexAndSequenceCollector &collector_; +}; + +class LogIndexAndSequenceCollectorPurger : public rocksdb::EventListener { + public: + explicit LogIndexAndSequenceCollectorPurger(LogIndexAndSequenceCollector *collector, LogIndexOfColumnFamilies *cf) + : collector_(collector), cf_(cf) {} + + void OnFlushCompleted(rocksdb::DB *db, const rocksdb::FlushJobInfo &flush_job_info) override { + cf_->SetFlushedLogIndex(flush_job_info.cf_id, collector_->FindAppliedLogIndex(flush_job_info.largest_seqno)); + auto log_idx = cf_->GetSmallestAppliedLogIndex(); + collector_->Purge(log_idx); + } + + private: + LogIndexAndSequenceCollector *collector_ = nullptr; + LogIndexOfColumnFamilies *cf_ = nullptr; +}; + +} // namespace storage diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 3840db071..10febd50a 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -7,13 +7,16 @@ #include "rocksdb/env.h" -#include "config.h" #include "src/base_filter.h" #include "src/lists_filter.h" #include "src/redis.h" #include "src/strings_filter.h" #include "src/zsets_filter.h" +#define ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(type) \ + type##_cf_ops.table_properties_collector_factories.push_back( \ + std::make_shared(log_index_collector_)); + namespace storage { const rocksdb::Comparator* ListsDataKeyComparator() { static ListsDataKeyComparatorImpl ldkc; @@ -138,6 +141,26 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops)); 
zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops)); + if (append_log_function_) { + // Add log index table property collector factory to each column family + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(string); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_score); + + // Add a listener on flush to purge log index collector + db_ops.listeners.push_back( + std::make_shared(&log_index_collector_, &log_index_of_all_cfs_)); + + // TODO(longfar): Add snapshot caller + } + std::vector column_families; column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); // hash CF @@ -153,7 +176,12 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops); column_families.emplace_back("zset_data_cf", zset_data_cf_ops); column_families.emplace_back("zset_score_cf", zset_score_cf_ops); - return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); + + auto s = rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); + if (!s.ok()) { + return s; + } + return log_index_of_all_cfs_.Init(this); } Status Redis::GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index cf008b7e2..314f5f8fc 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -14,6 +14,7 @@ #include "rocksdb/slice.h" #include "rocksdb/status.h" +#include "log_index.h" #include "pstd/env.h" #include "pstd/log.h" #include 
"src/custom_comparator.h" @@ -103,6 +104,12 @@ class Redis { const ColumnFamilyType& type = kMetaAndData); virtual Status GetProperty(const std::string& property, uint64_t* out); + bool IsApplied(size_t cf_idx, LogIndex logidx) const { return log_index_of_all_cfs_.IsApplied(cf_idx, logidx); } + void UpdateAppliedLogIndexOfColumnFamily(size_t cf_idx, LogIndex logidx) { + log_index_of_all_cfs_.Update(cf_idx, logidx); + } + bool IsRestarting() const { return is_starting_; } + void StartingPhaseEnd() { is_starting_ = false; } Status ScanKeyNum(std::vector* key_info); Status ScanStringsKeyNum(KeyInfo* key_info); @@ -299,6 +306,10 @@ class Redis { void ScanZsets(); void ScanSets(); + void UpdateLogIndex(LogIndex applied_log_index, SequenceNumber seqno) { + log_index_collector_.Update(applied_log_index, seqno); + } + TypeIterator* CreateIterator(const DataType& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) { return CreateIterator(DataTypeTag[type], pattern, lower_bound, upper_bound); @@ -361,6 +372,9 @@ class Redis { // For raft uint32_t raft_timeout_s_ = 10; AppendLogFunction append_log_function_; + LogIndexAndSequenceCollector log_index_collector_; + LogIndexOfColumnFamilies log_index_of_all_cfs_; + bool is_starting_{true}; Status UpdateSpecificKeyStatistics(const DataType& dtype, const std::string& key, uint64_t count); Status UpdateSpecificKeyDuration(const DataType& dtype, const std::string& key, uint64_t duration); diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 2ef595054..cccf48a47 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -2259,11 +2259,20 @@ void Storage::DisableWal(const bool is_wal_disable) { } } -Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log) { +Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log, LogIndex log_idx) { auto& inst = insts_[log.slot_idx()]; rocksdb::WriteBatch batch; + bool is_finished_start = true; for (const auto& entry : 
log.entries()) { + if (inst->IsRestarting() && inst->IsApplied(entry.cf_idx(), log_idx)) [[unlikely]] { + // If the starting phase is over, the log must not have been applied + // If the starting phase is not over and the log has been applied, skip it. + WARN("Log {} has been applied", log_idx); + is_finished_start = false; + continue; + } + switch (entry.op_type()) { case pikiwidb::OperateType::kPut: { assert(entry.has_value()); @@ -2278,9 +2287,21 @@ Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log) { ERROR(msg); return Status::Incomplete(msg); } - } - return inst->GetDB()->Write(inst->GetWriteOptions(), &batch); + inst->UpdateAppliedLogIndexOfColumnFamily(entry.cf_idx(), log_idx); + } + if (inst->IsRestarting() && is_finished_start) [[unlikely]] { + INFO("Redis {} finished start phase", inst->GetIndex()); + inst->StartingPhaseEnd(); + } + auto first_seqno = inst->GetDB()->GetLatestSequenceNumber() + 1; + auto s = inst->GetDB()->Write(inst->GetWriteOptions(), &batch); + if (!s.ok()) { + // TODO(longfar): What we should do if the write operation failed ? 💥 + return s; + } + inst->UpdateLogIndex(log_idx, first_seqno); + return s; } } // namespace storage diff --git a/src/storage/tests/CMakeLists.txt b/src/storage/tests/CMakeLists.txt new file mode 100644 index 000000000..2c8f258f5 --- /dev/null +++ b/src/storage/tests/CMakeLists.txt @@ -0,0 +1,30 @@ +# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. 
+ +INCLUDE(GoogleTest) + +FILE(GLOB_RECURSE TEST_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/*test.cc") + +FOREACH (TEST_SOURCE ${TEST_SOURCES}) + GET_FILENAME_COMPONENT(TEST_FILENAME ${TEST_SOURCE} NAME) + STRING(REPLACE ".cc" "" TEST_NAME ${TEST_FILENAME}) + + ADD_EXECUTABLE(${TEST_NAME} ${TEST_SOURCE}) + + TARGET_INCLUDE_DIRECTORIES(${TEST_NAME} + PUBLIC storage + PRIVATE ${rocksdb_SOURCE_DIR} + PRIVATE ${rocksdb_SOURCE_DIR}/include + PRIVATE ${BRAFT_INCLUDE_DIR} + PRIVATE ${BRPC_INCLUDE_DIR} + ) + TARGET_LINK_LIBRARIES(${TEST_NAME} + PUBLIC storage + PRIVATE gtest + PRIVATE gtest_main + PRIVATE fmt + ${LIB} + ) +ENDFOREACH() diff --git a/src/storage/tests/log_index_collector_test.cc b/src/storage/tests/log_index_collector_test.cc new file mode 100644 index 000000000..6646b5447 --- /dev/null +++ b/src/storage/tests/log_index_collector_test.cc @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#include +#include +#include +#include +#include + +#include "fmt/core.h" +#include "gtest/gtest.h" + +#include "src/log_index.h" + +using namespace storage; // NOLINT + +template +class NumberCreator { + public: + explicit NumberCreator(T start = 0) : next_num_(start) {} + auto Next() -> T { return next_num_.fetch_add(STEP); } + + private: + std::atomic next_num_; +}; +using SequenceNumberCreator = NumberCreator; +using LogIndexCreator = NumberCreator; + +TEST(LogIndexAndSequenceCollectorTest, OneStepTest) { // NOLINT + LogIndexAndSequenceCollector collector; + SequenceNumberCreator seqno_creator(100); + LogIndexCreator logidx_creator(4); + for (int i = 0; i < 100; i++) { + collector.Update(logidx_creator.Next(), seqno_creator.Next()); + } + + // the target seqno is smaller than the smallest seqno in the list, should return 0 + for (rocksdb::SequenceNumber seq = 0; seq < 100; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 0); + } + // the target seqno is in the list' range, should return the correct idx + for (rocksdb::SequenceNumber seq = 100; seq < 300; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), (seq - 100) / 2 + 4); + } + // the target seqno is larger than the largest seqno in the list, should return the largest idx + for (rocksdb::SequenceNumber seq = 300; seq < 400; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 103); + } + + // if smallest flushed log index is 44 whose seqno is 180,181 + collector.Purge(44); + for (rocksdb::SequenceNumber seq = 0; seq < 180; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 0); + } + for (rocksdb::SequenceNumber seq = 180; seq < 300; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), (seq - 100) / 2 + 4); + } + for (rocksdb::SequenceNumber seq = 300; seq < 400; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 103); + } + collector.Purge(46); // should remove log44 and log55 + for (rocksdb::SequenceNumber seq = 0; seq < 184; seq++) { + 
EXPECT_EQ(collector.FindAppliedLogIndex(seq), 0); + } + for (rocksdb::SequenceNumber seq = 184; seq < 300; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), (seq - 100) / 2 + 4); + } + for (rocksdb::SequenceNumber seq = 300; seq < 400; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 103); + } +} + +TEST(LogIndexAndSequenceCollectorTest, MutiStepTest) { // NOLINT + SequenceNumberCreator seqno_creator(100); + LogIndexCreator logidx_creator(4); + LogIndexAndSequenceCollector collector(2); // update only when log index is multiple of 4 + for (int i = 0; i < 100; i++) { + collector.Update(logidx_creator.Next(), seqno_creator.Next()); + } + + // the target seqno is smaller than the smallest seqno in the list, should return 0 + for (rocksdb::SequenceNumber seq = 0; seq < 100; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 0); + } + // the target seqno is in the list' range, should return the correct idx + for (rocksdb::SequenceNumber seq = 100; seq < 300; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), (seq - 100) / 8 * 4 + 4); + } + // the target seqno is larger than the largest seqno in the list, should return the largest idx + for (rocksdb::SequenceNumber seq = 300; seq < 400; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 100); + } + + // if smallest flushed log index is 44 whose seqno is 180,181 + collector.Purge(44); + for (rocksdb::SequenceNumber seq = 0; seq < 180; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 0); + } + for (rocksdb::SequenceNumber seq = 180; seq < 300; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), (seq - 100) / 8 * 4 + 4); + } + for (rocksdb::SequenceNumber seq = 300; seq < 400; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 100); + } + collector.Purge(45); // should do nothing + for (rocksdb::SequenceNumber seq = 0; seq < 180; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 0); + } + for (rocksdb::SequenceNumber seq = 180; seq < 300; seq++) { + 
EXPECT_EQ(collector.FindAppliedLogIndex(seq), (seq - 100) / 8 * 4 + 4); + } + for (rocksdb::SequenceNumber seq = 300; seq < 400; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 100); + } + collector.Purge(49); // should remove the log44 + for (rocksdb::SequenceNumber seq = 0; seq < 188; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 0); + } + for (rocksdb::SequenceNumber seq = 188; seq < 300; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), (seq - 100) / 8 * 4 + 4); + } + for (rocksdb::SequenceNumber seq = 300; seq < 400; seq++) { + EXPECT_EQ(collector.FindAppliedLogIndex(seq), 100); + } +} + +struct TimerGuard { + TimerGuard(std::string_view name = "Test") : name_(name), start_(std::chrono::system_clock::now()) {} + ~TimerGuard() { + auto end = std::chrono::system_clock::now(); + auto duration = std::chrono::duration_cast(end - start_); + fmt::println("{} cost {}ms", name_, duration.count()); + } + + std::string_view name_; + std::chrono::time_point start_; +}; + +TEST(LogIndexAndSequenceCollectorTest, FindBenchmark) { + LogIndexAndSequenceCollector collector; + SequenceNumberCreator seq_creator(1); + LogIndexCreator log_creator(4); + size_t size = 0; + { + for (; size < 100; size++) { + collector.Update(log_creator.Next(), seq_creator.Next()); + } + // There are 100 pair in the collector: 1:4, 3:5, 5:6, 7:7, 9:8,..., 199:103 + constexpr int kFindTimes = 100; + TimerGuard timer("100 size test"); + for (int i = 0; i < kFindTimes; i++) { + for (int n = 1; n <= 200; n++) { + auto res = collector.FindAppliedLogIndex(n); + ASSERT_EQ(res, (n - 1) / 2 + 4); + } + } + } + { + for (; size < 1000; size++) { + collector.Update(log_creator.Next(), seq_creator.Next()); + } + // There are 1000 pair in the collector: 1:4, 3:5, 5:6, 7:7, 9:8,..., 1999:1003 + constexpr int kFindTimes = 100; + TimerGuard timer("1000 size test"); + for (int i = 0; i < kFindTimes; i++) { + for (int n = 1; n <= 2000; n++) { + auto res = collector.FindAppliedLogIndex(n); + 
ASSERT_EQ(res, (n - 1) / 2 + 4); + } + } + } +} diff --git a/src/storage/tests/log_index_test.cc b/src/storage/tests/log_index_test.cc new file mode 100644 index 000000000..4b39cb8ee --- /dev/null +++ b/src/storage/tests/log_index_test.cc @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#include +#include +#include +#include +#include +#include + +#include "fmt/core.h" +#include "gtest/gtest.h" +#include "rocksdb/db.h" +#include "rocksdb/listener.h" +#include "rocksdb/metadata.h" +#include "rocksdb/options.h" + +#include "pstd/log.h" +#include "pstd/thread_pool.h" +#include "src/log_index.h" +#include "src/redis.h" +#include "storage/storage.h" +#include "storage/util.h" + +using namespace storage; // NOLINT + +class LogIniter { + public: + LogIniter() { + logger::Init("./log_index_test.log"); + spdlog::set_level(spdlog::level::info); + } +}; +static LogIniter initer; + +TEST(TablePropertyTest, SimpleTest) { + constexpr const char* kDbPath = "./log_index_test_db"; + rocksdb::Options options; + options.create_if_missing = true; + LogIndexAndSequenceCollector collector; + options.table_properties_collector_factories.push_back( + std::make_shared(collector)); + rocksdb::DB* db{nullptr}; + auto s = rocksdb::DB::Open(options, kDbPath, &db); + EXPECT_TRUE(s.ok()); + + std::string key = "table-property-test"; + s = db->Put(rocksdb::WriteOptions(), key, key); + EXPECT_TRUE(s.ok()); + std::string res; + s = db->Get(rocksdb::ReadOptions(), key, &res); + EXPECT_TRUE(s.ok()); + EXPECT_EQ(key, res); + collector.Update(233333, db->GetLatestSequenceNumber()); + db->Flush(rocksdb::FlushOptions()); + + rocksdb::TablePropertiesCollection properties; + s = db->GetPropertiesOfAllTables(&properties); + 
EXPECT_TRUE(s.ok()); + EXPECT_TRUE(properties.size() == 1); + for (auto& [name, prop] : properties) { + const auto& collector = prop->user_collected_properties; + auto it = collector.find(static_cast(LogIndexTablePropertiesCollector::kPropertyName)); + EXPECT_NE(it, collector.cend()); + EXPECT_EQ(it->second, "233333/" + std::to_string(db->GetLatestSequenceNumber())); + } + + db->Close(); + DeleteFiles(kDbPath); +} + +class LogQueue : public pstd::noncopyable { + public: + using WriteCallback = std::function; + + explicit LogQueue(WriteCallback&& cb) : write_cb_(std::move(cb)) { consumer_.SetMaxIdleThread(1); } + + void AppendLog(const pikiwidb::Binlog& log, std::promise&& promise) { + auto task = [&] { + auto idx = next_log_idx_.fetch_add(1); + auto s = write_cb_(log, idx); + promise.set_value(s); + }; + consumer_.ExecuteTask(std::move(task)); + } + + private: + WriteCallback write_cb_ = nullptr; + pstd::ThreadPool consumer_; + std::atomic next_log_idx_{1}; +}; + +class LogIndexTest : public ::testing::Test { + public: + LogIndexTest() + : log_queue_([this](const pikiwidb::Binlog& log, LogIndex log_idx) { return db_.OnBinlogWrite(log, log_idx); }) { + options_.options.create_if_missing = true; + options_.db_instance_num = 1; + options_.raft_timeout_s = 10000; + options_.append_log_function = [this](const pikiwidb::Binlog& log, std::promise&& promise) { + log_queue_.AppendLog(log, std::move(promise)); + }; + } + ~LogIndexTest() override { DeleteFiles(db_path_.c_str()); } + + void SetUp() override { + if (access(db_path_.c_str(), F_OK) == 0) { + std::filesystem::remove_all(db_path_.c_str()); + } + mkdir(db_path_.c_str(), 0755); + auto s = db_.Open(options_, db_path_); + ASSERT_TRUE(s.ok()); + } + + std::string db_path_{"./test_db/log_index_test"}; + StorageOptions options_; + Storage db_; + uint32_t test_times_ = 100; + std::string key_ = "log-index-test"; + std::string field_prefix_ = "field"; + std::string value_prefix_ = "value"; + rocksdb::WriteOptions 
write_options_; + rocksdb::ReadOptions read_options_; + LogQueue log_queue_; + + auto CreateRandomKey(int i, size_t length) -> std::string { + auto res = CreateRandomFieldValue(i, length); + res.append(key_); + return res; + } + static auto CreateRandomFieldValue(int i, size_t length) -> std::string { + std::mt19937 gen(i); + std::string str(length, 0); + for (int i = 0; i < length; i++) { + str[i] = chars[gen() % (sizeof(chars) / sizeof(char))]; + } + return str; + } + constexpr static char chars[] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', + 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', + 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}; +}; + +TEST_F(LogIndexTest, DoNothing) {} + +TEST_F(LogIndexTest, SimpleTest) { // NOLINT + auto& redis = db_.GetDBInstance(key_); + auto add_kvs = [&](int start, int end) { + for (int i = start; i < end; i++) { + auto key = CreateRandomKey(i, 256); + auto fv = CreateRandomFieldValue(i, 512); + int32_t res{}; + auto s = redis->HSet(key, fv, fv, &res); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, res); + + std::string get_res; + s = redis->HGet(key, fv, &get_res); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(fv, get_res); + } + }; + auto flushdb = [&]() { + auto s = redis->GetDB()->Flush(rocksdb::FlushOptions(), redis->GetColumnFamilyHandles()[kHashesMetaCF]); + ASSERT_TRUE(s.ok()); + s = redis->GetDB()->Flush(rocksdb::FlushOptions(), redis->GetColumnFamilyHandles()[kHashesDataCF]); + ASSERT_TRUE(s.ok()); + }; + + // one key test + { + add_kvs(0, 1); + flushdb(); + + rocksdb::TablePropertiesCollection properties; + auto s = redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesMetaCF], &properties); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(properties.size() == 1); + auto res = 
LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(properties); + EXPECT_TRUE(res.has_value()); + assert(res.has_value()); + EXPECT_EQ(res->GetAppliedLogIndex(), 1); + EXPECT_EQ(res->GetSequenceNumber(), 1); + + properties.clear(); + s = redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesDataCF], &properties); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(properties.size() == 1); + res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(properties); + EXPECT_TRUE(res.has_value()); + assert(res.has_value()); + EXPECT_EQ(res->GetAppliedLogIndex(), 1); + EXPECT_EQ(res->GetSequenceNumber(), 2); + } + + // more keys + { + add_kvs(1, 10000); + flushdb(); + + rocksdb::TablePropertiesCollection properties; + auto s = redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesMetaCF], &properties); + ASSERT_TRUE(s.ok()); + auto res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(properties); + EXPECT_TRUE(res.has_value()); + assert(res.has_value()); + EXPECT_EQ(res->GetAppliedLogIndex(), 10000); + EXPECT_EQ(res->GetSequenceNumber(), 19999); + + properties.clear(); + s = redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesDataCF], &properties); + ASSERT_TRUE(s.ok()); + res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(properties); + EXPECT_TRUE(res.has_value()); + assert(res.has_value()); + EXPECT_EQ(res->GetAppliedLogIndex(), 10000); + EXPECT_EQ(res->GetSequenceNumber(), 20000); + } + + // more flush + { + for (int i = 1; i < 20; i++) { + fmt::println("==================i={} start==========================", i); + auto start = i * 10000; + auto end = start + 10000; + + add_kvs(start, end); + flushdb(); + // sleep(1); + + { + rocksdb::TablePropertiesCollection properties; + auto s = redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesMetaCF], &properties); + s = 
redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesDataCF], &properties); + std::vector metas; + redis->GetDB()->GetLiveFilesMetaData(&metas); + for (const auto& meta : metas) { + auto file = meta.directory + meta.name; + if (!properties.contains(file)) { + fmt::println("{}: L{}, {}, not contains", file, meta.level, meta.column_family_name); + continue; + } + auto res = LogIndexTablePropertiesCollector::ReadStatsFromTableProps(properties.at(file)); + assert(res.has_value()); + fmt::println("{}: L{}, {}, logidx={}", file, meta.level, meta.column_family_name, res->GetAppliedLogIndex()); + } + } + + rocksdb::TablePropertiesCollection properties; + auto s = redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesMetaCF], &properties); + ASSERT_TRUE(s.ok()); + auto res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(properties); + EXPECT_TRUE(res.has_value()); + assert(res.has_value()); + EXPECT_EQ(res->GetAppliedLogIndex(), end); + EXPECT_EQ(res->GetSequenceNumber(), end * 2 - 1); + + properties.clear(); + s = redis->GetDB()->GetPropertiesOfAllTables(redis->GetColumnFamilyHandles()[kHashesDataCF], &properties); + ASSERT_TRUE(s.ok()); + res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(properties); + EXPECT_TRUE(res.has_value()); + assert(res.has_value()); + EXPECT_EQ(res->GetAppliedLogIndex(), end); + EXPECT_EQ(res->GetSequenceNumber(), end * 2); + } + } +} From 74c584bf78f924772efbd90ccf6b01b30f9f1596 Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:56:52 +0800 Subject: [PATCH 17/33] ci: import braft actions (#274) * ci: add a actions for import-braft branch temporarily --- .github/workflows/import_braft.yml | 58 ++++++++++++++++++++++++++++++ cmake/braft.cmake | 4 +-- src/cmd_admin.cc | 16 ++++----- src/cmd_admin.h | 12 +++---- src/cmd_raft.h | 2 +- src/cmd_table_manager.cc | 7 ++-- 
src/pikiwidb.cc | 2 +- src/storage/src/log_index.h | 1 + 8 files changed, 80 insertions(+), 22 deletions(-) create mode 100644 .github/workflows/import_braft.yml diff --git a/.github/workflows/import_braft.yml b/.github/workflows/import_braft.yml new file mode 100644 index 000000000..c0c01f3fe --- /dev/null +++ b/.github/workflows/import_braft.yml @@ -0,0 +1,58 @@ +name: Import BRaft Actions (Temporary) + +on: + push: + pull_request: + branches: [ "import-braft" ] + +jobs: + check_format: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Build + run: bash ci/build.sh + + - name: Check Format + working-directory: ${{ github.workspace }}/build + run: make check-format + + build_on_macos: + runs-on: macos-latest + needs: check_format + + steps: + - uses: actions/checkout@v4 + + - name: Build + env: + CPLUS_INCLUDE_PATH: /usr/local/opt/openssl/include + run: | + sh build.sh + + - name: Run Go E2E Tests + working-directory: ${{ github.workspace }}/build + run: | + cd ../tests + go mod tidy + go test ./pikiwidb_suite_test.go ./consistency_test.go -v + + build_on_ubuntu: + runs-on: ubuntu-latest + needs: check_format + + steps: + - uses: actions/checkout@v4 + + - name: Build + run: | + bash build.sh + + - name: Run Go E2E Tests + working-directory: ${{ github.workspace }}/build + run: | + cd ../tests + go mod tidy + go test ./pikiwidb_suite_test.go ./consistency_test.go -v diff --git a/cmake/braft.cmake b/cmake/braft.cmake index 87aff68a1..43ea4a350 100644 --- a/cmake/braft.cmake +++ b/cmake/braft.cmake @@ -16,8 +16,8 @@ ExternalProject_Add( extern_braft ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS brpc - URL "https://github.com/baidu/braft/archive/v1.1.2.tar.gz" - URL_HASH SHA256=bb3705f61874f8488e616ae38464efdec1a20610ddd6cd82468adc814488f14e + URL "https://github.com/pikiwidb/braft/archive/refs/heads/stable.zip" + URL_HASH SHA256=e73831f9768ac57d07f01ed81a11c8368e259c25315a960c29a6422f31f42fd1 PREFIX ${BRAFT_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS 
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index adc463a94..b43575f60 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -6,9 +6,8 @@ */ #include "cmd_admin.h" -#include "store.h" -#include "braft/raft.h" -#include "praft.h" + +#include "praft/praft.h" namespace pikiwidb { @@ -80,7 +79,7 @@ void SelectCmd::DoCmd(PClient* client) { client->SetRes(CmdRes::kOK); } -InfoCmd::InfoCmd(const std::string& name, int16_t arity) +InfoCmd::InfoCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsAdmin | kCmdFlagsReadonly, kAclCategoryAdmin) {} bool InfoCmd::DoInitial(PClient* client) { return true; } @@ -127,7 +126,7 @@ void InfoCmd::DoCmd(PClient* client) { message += "raft_state:up\r\n"; } else { message += "raft_state:down\r\n"; - } + } message += "raft_role:" + std::string(braft::state2str(node_status.state)) + "\r\n"; // message += "raft_is_voting:" + node_status.is_voting + "\r\n"; message += "raft_leader_id:" + node_status.leader_id.to_string() + "\r\n"; @@ -141,9 +140,10 @@ void InfoCmd::DoCmd(PClient* client) { if (!status.ok()) { return client->SetRes(CmdRes::kErrOther, status.error_str()); } - + for (int i = 0; i < peers.size(); i++) { - message += "raft_node" + std::to_string(i) + ":addr=" + butil::ip2str(peers[i].addr.ip).c_str() + ",port=" + std::to_string(peers[i].addr.port) + "\r\n"; + message += "raft_node" + std::to_string(i) + ":addr=" + butil::ip2str(peers[i].addr.ip).c_str() + + ",port=" + std::to_string(peers[i].addr.port) + "\r\n"; } } @@ -153,4 +153,4 @@ void InfoCmd::DoCmd(PClient* client) { } } -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/cmd_admin.h b/src/cmd_admin.h index 476660e01..8ef4d6e58 100644 --- a/src/cmd_admin.h +++ b/src/cmd_admin.h @@ -85,14 +85,14 @@ class SelectCmd : public BaseCmd { }; class InfoCmd : public BaseCmd { - public: - InfoCmd(const std::string& name, int16_t arity); + public: + InfoCmd(const 
std::string& name, int16_t arity); - protected: - bool DoInitial(PClient* client) override; + protected: + bool DoInitial(PClient* client) override; - private: - void DoCmd(PClient* client) override; + private: + void DoCmd(PClient* client) override; }; } // namespace pikiwidb diff --git a/src/cmd_raft.h b/src/cmd_raft.h index a5e8f924d..534c90576 100644 --- a/src/cmd_raft.h +++ b/src/cmd_raft.h @@ -25,7 +25,7 @@ namespace pikiwidb { * : * : * - * RAFT.NODE REMOVE [id] + * RAFT.NODE REMOVE [id] * Remove an existing node from the cluster. * Reply: * -NOCLUSTER || diff --git a/src/cmd_table_manager.cc b/src/cmd_table_manager.cc index 2ae39a0a2..edc375596 100644 --- a/src/cmd_table_manager.cc +++ b/src/cmd_table_manager.cc @@ -5,16 +5,15 @@ * of patent rights can be found in the PATENTS file in the same directory. */ -#include +#include "cmd_table_manager.h" #include "cmd_admin.h" #include "cmd_hash.h" #include "cmd_keys.h" #include "cmd_kv.h" #include "cmd_list.h" -#include "cmd_set.h" #include "cmd_raft.h" -#include "cmd_table_manager.h" +#include "cmd_set.h" #include "cmd_zset.h" namespace pikiwidb { @@ -46,7 +45,7 @@ void CmdTableManager::InitCmdTable() { // info ADD_COMMAND(Info, -1); - + // raft ADD_COMMAND(RaftCluster, -1); ADD_COMMAND(RaftNode, -2); diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index be89bf02a..0b6558182 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -15,8 +15,8 @@ #include #include "praft/praft.h" -#include "pstd/pstd_util.h" #include "pstd/log.h" +#include "pstd/pstd_util.h" #include "client.h" #include "config.h" diff --git a/src/storage/src/log_index.h b/src/storage/src/log_index.h index 44d486338..a2bfc9d40 100644 --- a/src/storage/src/log_index.h +++ b/src/storage/src/log_index.h @@ -7,6 +7,7 @@ #pragma once +#include #include #include #include From 273a2d9d48e73767b7f4e55d39e45839dfaa49be Mon Sep 17 00:00:00 2001 From: panlei-coder <62509266+panlei-coder@users.noreply.github.com> Date: Fri, 19 Apr 2024 10:35:56 +0800 Subject: 
[PATCH 18/33] feat: Support raft.node remove cmd (#221) * fix: Modify the string matching bug --------- Co-authored-by: longfar --- src/client.cc | 38 ++-- src/client.h | 3 +- src/cmd_admin.cc | 106 ++++++----- src/cmd_admin.h | 3 + src/cmd_raft.cc | 132 +++++++++----- src/cmd_raft.h | 4 +- src/pikiwidb.cc | 1 + src/praft/praft.cc | 374 ++++++++++++++++++++++++++++++-------- src/praft/praft.h | 90 +++++---- src/praft/praft_service.h | 9 +- src/replication.cc | 21 +-- src/replication.h | 6 + tests/consistency_test.go | 32 +++- 13 files changed, 580 insertions(+), 239 deletions(-) diff --git a/src/client.cc b/src/client.cc index 3cf827975..5e7530a4b 100644 --- a/src/client.cc +++ b/src/client.cc @@ -198,7 +198,6 @@ static int ProcessMaster(const char* start, const char* end) { // discard all requests before sync; // or continue serve with old data? TODO return static_cast(end - start); - case kPReplStateWaitAuth: if (end - start >= 5) { if (strncasecmp(start, "+OK\r\n", 5) == 0) { @@ -268,20 +267,20 @@ int PClient::handlePacket(const char* start, int bytes) { const char* ptr = start; if (isPeerMaster()) { - // check slave state - auto recved = ProcessMaster(start, end); - if (recved != -1) { - return recved; - } - } - - if (isJoinCmdTarget()) { - // Proccees the packet at one turn. - auto [len, is_disconnect] = PRAFT.ProcessClusterJoinCmdResponse(this, start, bytes); - if (is_disconnect) { - conn->ActiveClose(); + if (isClusterCmdTarget()) { + // Proccees the packet at one turn. + int len = PRAFT.ProcessClusterCmdResponse(this, start, bytes); // @todo + if (len > 0) { + return len; + } + } else { + // Proccees the packet at one turn. 
+ // check slave state + auto recved = ProcessMaster(start, end); + if (recved != -1) { + return recved; + } } - return len; } auto parseRet = parser_.ParseRequest(ptr, end); @@ -458,9 +457,10 @@ void PClient::OnConnect() { if (g_config.masterauth.empty()) { SetAuth(); } - } else if (isJoinCmdTarget()) { - SetName("ClusterJoinCmdConnection"); - PRAFT.SendNodeInfoRequest(this); + + if (isClusterCmdTarget()) { + PRAFT.SendNodeRequest(this); + } } else { if (g_config.password.empty()) { SetAuth(); @@ -533,8 +533,8 @@ bool PClient::isPeerMaster() const { return repl_addr.GetIP() == PeerIP() && repl_addr.GetPort() == PeerPort(); } -bool PClient::isJoinCmdTarget() const { - return PRAFT.GetJoinCtx().GetPeerIp() == PeerIP() && PRAFT.GetJoinCtx().GetPort() == PeerPort(); +bool PClient::isClusterCmdTarget() const { + return PRAFT.GetClusterCmdCtx().GetPeerIp() == PeerIP() && PRAFT.GetClusterCmdCtx().GetPort() == PeerPort(); } int PClient::uniqueID() const { diff --git a/src/client.h b/src/client.h index dc95d8c27..a890697af 100644 --- a/src/client.h +++ b/src/client.h @@ -196,6 +196,7 @@ class PClient : public std::enable_shared_from_this, public CmdRes { void SetAuth() { auth_ = true; } bool GetAuth() const { return auth_; } void RewriteCmd(std::vector& params) { parser_.SetParams(params); } + void Reexecutecommand() { this->executeCommand(); } // All parameters of this command (including the command itself) // e.g:["set","key","value"] @@ -210,7 +211,7 @@ class PClient : public std::enable_shared_from_this, public CmdRes { bool isPeerMaster() const; int uniqueID() const; - bool isJoinCmdTarget() const; + bool isClusterCmdTarget() const; // TcpConnection's life is undetermined, so use weak ptr for safety. 
std::weak_ptr tcp_connection_; diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index b43575f60..1057bd3fc 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -7,6 +7,9 @@ #include "cmd_admin.h" +#include "braft/raft.h" +#include "rocksdb/version.h" + #include "praft/praft.h" namespace pikiwidb { @@ -84,6 +87,22 @@ InfoCmd::InfoCmd(const std::string& name, int16_t arity) bool InfoCmd::DoInitial(PClient* client) { return true; } +// @todo The info raft command is only supported for the time being +void InfoCmd::DoCmd(PClient* client) { + if (client->argv_.size() <= 1) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + } + + auto cmd = client->argv_[1]; + if (!strcasecmp(cmd.c_str(), "RAFT")) { + InfoRaft(client); + } else if (!strcasecmp(cmd.c_str(), "data")) { + InfoData(client); + } else { + client->SetRes(CmdRes::kErrOther, "the cmd is not supported"); + } +} + /* * INFO raft * Querying Node Information. @@ -98,59 +117,60 @@ bool InfoCmd::DoInitial(PClient* client) { return true; } raft_num_voting_nodes:2 raft_node1:id=1733428433,state=connected,voting=yes,addr=localhost,port=5001,last_conn_secs=5,conn_errors=0,conn_oks=1 */ -// @todo The info raft command is only supported for the time being -void InfoCmd::DoCmd(PClient* client) { - if (client->argv_.size() <= 1) { +void InfoCmd::InfoRaft(PClient* client) { + if (client->argv_.size() != 2) { return client->SetRes(CmdRes::kWrongNum, client->CmdName()); } - auto cmd = client->argv_[1]; - if (!strcasecmp(cmd.c_str(), "RAFT")) { - if (client->argv_.size() != 2) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); - } + if (!PRAFT.IsInitialized()) { + return client->SetRes(CmdRes::kErrOther, "Don't already cluster member"); + } - if (!PRAFT.IsInitialized()) { - return client->SetRes(CmdRes::kErrOther, "don't already cluster member"); - } + auto node_status = PRAFT.GetNodeStatus(); + if (node_status.state == braft::State::STATE_END) { + return client->SetRes(CmdRes::kErrOther, "Node is 
not initialized"); + } - auto node_status = PRAFT.GetNodeStatus(); - if (node_status.state == braft::State::STATE_END) { - return client->SetRes(CmdRes::kErrOther, "Node is not initialized"); + std::string message; + message += "raft_group_id:" + PRAFT.GetGroupID() + "\r\n"; + message += "raft_node_id:" + PRAFT.GetNodeID() + "\r\n"; + message += "raft_peer_id:" + PRAFT.GetPeerID() + "\r\n"; + if (braft::is_active_state(node_status.state)) { + message += "raft_state:up\r\n"; + } else { + message += "raft_state:down\r\n"; + } + message += "raft_role:" + std::string(braft::state2str(node_status.state)) + "\r\n"; + message += "raft_leader_id:" + node_status.leader_id.to_string() + "\r\n"; + message += "raft_current_term:" + std::to_string(node_status.term) + "\r\n"; + + if (PRAFT.IsLeader()) { + std::vector peers; + auto status = PRAFT.GetListPeers(&peers); + if (!status.ok()) { + return client->SetRes(CmdRes::kErrOther, status.error_str()); } - std::string message(""); - message += "raft_group_id:" + PRAFT.GetGroupId() + "\r\n"; - message += "raft_node_id:" + PRAFT.GetNodeId() + "\r\n"; - if (braft::is_active_state(node_status.state)) { - message += "raft_state:up\r\n"; - } else { - message += "raft_state:down\r\n"; - } - message += "raft_role:" + std::string(braft::state2str(node_status.state)) + "\r\n"; - // message += "raft_is_voting:" + node_status.is_voting + "\r\n"; - message += "raft_leader_id:" + node_status.leader_id.to_string() + "\r\n"; - message += "raft_current_term:" + std::to_string(node_status.term) + "\r\n"; - // message += "raft_num_nodes:" + std::to_string(node_status.num_nodes) + "\r\n"; - // message += "raft_num_voting_nodes:" + std::to_string(node_status.num_voting_nodes) + "\r\n"; - - if (PRAFT.IsLeader()) { - std::vector peers; - auto status = PRAFT.GetListPeers(&peers); - if (!status.ok()) { - return client->SetRes(CmdRes::kErrOther, status.error_str()); - } - - for (int i = 0; i < peers.size(); i++) { - message += "raft_node" + 
std::to_string(i) + ":addr=" + butil::ip2str(peers[i].addr.ip).c_str() + - ",port=" + std::to_string(peers[i].addr.port) + "\r\n"; - } + for (int i = 0; i < peers.size(); i++) { + message += "raft_node" + std::to_string(i) + ":addr=" + butil::ip2str(peers[i].addr.ip).c_str() + + ",port=" + std::to_string(peers[i].addr.port) + "\r\n"; } + } - client->AppendString(message); - } else { - client->SetRes(CmdRes::kErrOther, "ERR the cmd is not supported"); + client->AppendString(message); +} + +void InfoCmd::InfoData(PClient* client) { + if (client->argv_.size() != 2) { + return client->SetRes(CmdRes::kWrongNum, client->CmdName()); } + + std::string message; + message += DATABASES_NUM + std::string(":") + std::to_string(pikiwidb::g_config.databases) + "\r\n"; + message += ROCKSDB_NUM + std::string(":") + std::to_string(pikiwidb::g_config.db_instance_num) + "\r\n"; + message += ROCKSDB_VERSION + std::string(":") + ROCKSDB_NAMESPACE::GetRocksVersionAsString() + "\r\n"; + + client->AppendString(message); } } // namespace pikiwidb diff --git a/src/cmd_admin.h b/src/cmd_admin.h index 8ef4d6e58..270ccbb9a 100644 --- a/src/cmd_admin.h +++ b/src/cmd_admin.h @@ -93,6 +93,9 @@ class InfoCmd : public BaseCmd { private: void DoCmd(PClient* client) override; + + void InfoRaft(PClient* client); + void InfoData(PClient* client); }; } // namespace pikiwidb diff --git a/src/cmd_raft.cc b/src/cmd_raft.cc index 27f349fa9..ebbef035e 100644 --- a/src/cmd_raft.cc +++ b/src/cmd_raft.cc @@ -5,45 +5,56 @@ * of patent rights can be found in the PATENTS file in the same directory. 
*/ +#include "cmd_raft.h" + #include #include #include +#include "praft/praft.h" +#include "pstd/log.h" +#include "pstd/pstd_string.h" + #include "client.h" -#include "cmd_raft.h" -#include "event_loop.h" -#include "log.h" +#include "config.h" #include "pikiwidb.h" -#include "praft.h" -#include "pstd_string.h" +#include "replication.h" namespace pikiwidb { RaftNodeCmd::RaftNodeCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsRaft, kAclCategoryRaft) {} -bool RaftNodeCmd::DoInitial(PClient* client) { return true; } - -void RaftNodeCmd::DoCmd(PClient* client) { - // Check whether it is a leader. If it is not a leader, return the leader information - if (!PRAFT.IsLeader()) { - return client->SetRes(CmdRes::kWrongLeader, PRAFT.GetLeaderId()); +bool RaftNodeCmd::DoInitial(PClient* client) { + auto cmd = client->argv_[1]; + pstd::StringToUpper(cmd); + if (cmd != kAddCmd && cmd != kRemoveCmd) { + client->SetRes(CmdRes::kErrOther, "RAFT.NODE supports ADD / REMOVE only"); + return false; } + return true; +} +void RaftNodeCmd::DoCmd(PClient* client) { auto cmd = client->argv_[1]; pstd::StringToUpper(cmd); - if (!strcasecmp(cmd.c_str(), "ADD")) { + if (cmd == kAddCmd) { DoCmdAdd(client); - } else if (!strcasecmp(cmd.c_str(), "REMOVE")) { - DoCmdRemove(client); } else { - client->SetRes(CmdRes::kErrOther, "RAFT.NODE supports ADD / REMOVE only"); + DoCmdRemove(client); } } void RaftNodeCmd::DoCmdAdd(PClient* client) { + // Check whether it is a leader. If it is not a leader, return the leader information + if (!PRAFT.IsLeader()) { + client->SetRes(CmdRes::kWrongLeader, PRAFT.GetLeaderID()); + return; + } + if (client->argv_.size() != 4) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + client->SetRes(CmdRes::kWrongNum, client->CmdName()); + return; } // RedisRaft has nodeid, but in Braft, NodeId is IP:Port. 
@@ -57,11 +68,45 @@ void RaftNodeCmd::DoCmdAdd(PClient* client) { } void RaftNodeCmd::DoCmdRemove(PClient* client) { + // If the node has been initialized, it needs to close the previous initialization and rejoin the other group + if (!PRAFT.IsInitialized()) { + client->SetRes(CmdRes::kErrOther, "Don't already cluster member"); + return; + } + if (client->argv_.size() != 3) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); + client->SetRes(CmdRes::kWrongNum, client->CmdName()); + return; + } + + // Check whether it is a leader. If it is not a leader, send remove request to leader + if (!PRAFT.IsLeader()) { + // Get the leader information + braft::PeerId leader_peer_id(PRAFT.GetLeaderID()); + // @todo There will be an unreasonable address, need to consider how to deal with it + if (leader_peer_id.is_empty()) { + client->SetRes(CmdRes::kErrOther, + "The leader address of the cluster is incorrect, try again or delete the node from another node"); + return; + } + + // Connect target + std::string peer_ip = butil::ip2str(leader_peer_id.addr.ip).c_str(); + auto port = leader_peer_id.addr.port - pikiwidb::g_config.raft_port_offset; + auto peer_id = client->argv_[2]; + auto ret = + PRAFT.GetClusterCmdCtx().Set(ClusterCmdType::kRemove, client, std::move(peer_ip), port, std::move(peer_id)); + if (!ret) { // other clients have removed + return client->SetRes(CmdRes::kErrOther, "Other clients have removed"); + } + PRAFT.GetClusterCmdCtx().ConnectTargetNode(); + INFO("Sent remove request to leader successfully"); + + // Not reply any message here, we will reply after the connection is established. + client->Clear(); + return; } - // (KKorpse)TODO: Redirect to leader if not leader. 
auto s = PRAFT.RemovePeer(client->argv_[2]); if (s.ok()) { client->SetRes(CmdRes::kOK); @@ -73,26 +118,32 @@ void RaftNodeCmd::DoCmdRemove(PClient* client) { RaftClusterCmd::RaftClusterCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsRaft, kAclCategoryRaft) {} -bool RaftClusterCmd::DoInitial(PClient* client) { return true; } +bool RaftClusterCmd::DoInitial(PClient* client) { + auto cmd = client->argv_[1]; + pstd::StringToUpper(cmd); + if (cmd != kInitCmd && cmd != kJoinCmd) { + client->SetRes(CmdRes::kErrOther, "RAFT.CLUSTER supports INIT/JOIN only"); + return false; + } + return true; +} void RaftClusterCmd::DoCmd(PClient* client) { // parse arguments if (client->argv_.size() < 2) { return client->SetRes(CmdRes::kWrongNum, client->CmdName()); } - auto cmd = client->argv_[1]; if (PRAFT.IsInitialized()) { return client->SetRes(CmdRes::kErrOther, "Already cluster member"); } + auto cmd = client->argv_[1]; pstd::StringToUpper(cmd); if (cmd == kInitCmd) { DoCmdInit(client); - } else if (cmd == kJoinCmd) { - DoCmdJoin(client); } else { - client->SetRes(CmdRes::kErrOther, "RAFT.CLUSTER supports INIT/JOIN only"); + DoCmdJoin(client); } } @@ -104,12 +155,12 @@ void RaftClusterCmd::DoCmdInit(PClient* client) { std::string cluster_id; if (client->argv_.size() == 3) { cluster_id = client->argv_[2]; - if (cluster_id.size() != RAFT_DBID_LEN) { + if (cluster_id.size() != RAFT_GROUPID_LEN) { return client->SetRes(CmdRes::kInvalidParameter, - "Cluster id must be " + std::to_string(RAFT_DBID_LEN) + " characters"); + "Cluster id must be " + std::to_string(RAFT_GROUPID_LEN) + " characters"); } } else { - cluster_id = pstd::RandomHexChars(RAFT_DBID_LEN); + cluster_id = pstd::RandomHexChars(RAFT_GROUPID_LEN); } auto s = PRAFT.Init(cluster_id, false); if (!s.ok()) { @@ -130,6 +181,13 @@ static inline std::optional> GetIpAndPortFromEnd } void RaftClusterCmd::DoCmdJoin(PClient* client) { + // If the node has been initialized, it needs to close the previous 
initialization and rejoin the other group + if (PRAFT.IsInitialized()) { + return client->SetRes(CmdRes::kErrOther, + "A node that has been added to a cluster must be removed \ + from the old cluster before it can be added to the new cluster"); + } + if (client->argv_.size() < 3) { return client->SetRes(CmdRes::kWrongNum, client->CmdName()); } @@ -144,31 +202,21 @@ void RaftClusterCmd::DoCmdJoin(PClient* client) { return client->SetRes(CmdRes::kErrOther, fmt::format("Invalid ip::port: {}", addr)); } - auto on_new_conn = [](TcpConnection* obj) { - if (g_pikiwidb) { - g_pikiwidb->OnNewConnection(obj); - } - }; - auto on_fail = [&](EventLoop* loop, const char* peer_ip, int port) { - PRAFT.OnJoinCmdConnectionFailed(loop, peer_ip, port); - }; - - auto loop = EventLoop::Self(); auto ip_port = GetIpAndPortFromEndPoint(addr); if (!ip_port.has_value()) { return client->SetRes(CmdRes::kErrOther, fmt::format("Invalid ip::port: {}", addr)); } auto& [peer_ip, port] = *ip_port; - // FIXME: The client here is not smart pointer, may cause undefined behavior. - // should use shared_ptr in DoCmd() rather than raw pointer. - auto ret = PRAFT.GetJoinCtx().Set(client, peer_ip, port); + + // Connect target + auto ret = PRAFT.GetClusterCmdCtx().Set(ClusterCmdType::kJoin, client, std::move(peer_ip), port); if (!ret) { // other clients have joined return client->SetRes(CmdRes::kErrOther, "Other clients have joined"); } - loop->Connect(peer_ip.c_str(), port, on_new_conn, on_fail); + PRAFT.GetClusterCmdCtx().ConnectTargetNode(); INFO("Sent join request to leader successfully"); + // Not reply any message here, we will reply after the connection is established. 
client->Clear(); } - -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/cmd_raft.h b/src/cmd_raft.h index 534c90576..b9df47e2c 100644 --- a/src/cmd_raft.h +++ b/src/cmd_raft.h @@ -23,7 +23,7 @@ namespace pikiwidb { * -MOVED : || * *2 * : - * : + * : * * RAFT.NODE REMOVE [id] * Remove an existing node from the cluster. @@ -54,7 +54,7 @@ class RaftNodeCmd : public BaseCmd { * Initializes a new Raft cluster. * is an optional 32 character string, if set, cluster will use it for the id * Reply: - * +OK [dbid] + * +OK [group_id] * * RAFT.CLUSTER JOIN [addr:port] * Join an existing cluster. diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index 0b6558182..76516df5c 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -256,6 +256,7 @@ void PikiwiDB::Run() { void PikiwiDB::Stop() { pikiwidb::PRAFT.ShutDown(); pikiwidb::PRAFT.Join(); + pikiwidb::PRAFT.Clear(); slave_threads_.Exit(); worker_threads_.Exit(); } diff --git a/src/praft/praft.cc b/src/praft/praft.cc index fdaeda8f9..4eb1e385e 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -1,13 +1,10 @@ /* - * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. 
*/ -// -// praft.cc - #include "praft.h" #include @@ -19,10 +16,10 @@ #include "pstd/pstd_string.h" #include "binlog.pb.h" -#include "client.h" #include "config.h" #include "pikiwidb.h" #include "praft_service.h" +#include "replication.h" #include "store.h" #define ERROR_LOG_AND_STATUS(msg) \ @@ -33,6 +30,52 @@ namespace pikiwidb { +bool ClusterCmdContext::Set(ClusterCmdType cluster_cmd_type, PClient* client, std::string&& peer_ip, int port, + std::string&& peer_id) { + std::unique_lock lck(mtx_); + if (client_ != nullptr) { + return false; + } + assert(client); + cluster_cmd_type_ = cluster_cmd_type; + client_ = client; + peer_ip_ = std::move(peer_ip); + port_ = port; + peer_id_ = std::move(peer_id); + return true; +} + +void ClusterCmdContext::Clear() { + std::unique_lock lck(mtx_); + cluster_cmd_type_ = ClusterCmdType::kNone; + client_ = nullptr; + peer_ip_.clear(); + port_ = 0; + peer_id_.clear(); +} + +bool ClusterCmdContext::IsEmpty() { + std::unique_lock lck(mtx_); + return client_ == nullptr; +} + +void ClusterCmdContext::ConnectTargetNode() { + auto ip = PREPL.GetMasterAddr().GetIP(); + auto port = PREPL.GetMasterAddr().GetPort(); + if (ip == peer_ip_ && port == port_ && PREPL.GetMasterState() == kPReplStateConnected) { + PRAFT.SendNodeRequest(PREPL.GetMaster()); + return; + } + + // reconnect + auto fail_cb = [&](EventLoop*, const char* peer_ip, int port) { + PRAFT.OnClusterCmdConnectionFailed(EventLoop::Self(), peer_ip, port); + }; + PREPL.SetFailCallback(fail_cb); + PREPL.SetMasterState(kPReplStateNone); + PREPL.SetMasterAddr(peer_ip_.c_str(), port_); +} + PRaft& PRaft::Instance() { static PRaft store; return store; @@ -48,6 +91,7 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { // Add your service into RPC server DummyServiceImpl service(&PRAFT); if (server_->AddService(&service, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + server_.reset(); return ERROR_LOG_AND_STATUS("Failed to add service"); } // raft can share the same 
RPC server. Notice the second parameter, because @@ -55,6 +99,7 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { // address of this server is impossible to get before the server starts. You // have to specify the address of the server. if (braft::add_service(server_.get(), port) != 0) { + server_.reset(); return ERROR_LOG_AND_STATUS("Failed to add raft service"); } @@ -64,18 +109,20 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { // Notice the default options of server is used here. Check out details from // the doc of brpc if you would like change some options; if (server_->Start(port, nullptr) != 0) { + server_.reset(); return ERROR_LOG_AND_STATUS("Failed to start server"); } // It's ok to start PRaft; - assert(group_id.size() == RAFT_DBID_LEN); - this->dbid_ = group_id; + assert(group_id.size() == RAFT_GROUPID_LEN); + this->group_id_ = group_id; // FIXME: g_config.ip is default to 127.0.0.0, which may not work in cluster. 
raw_addr_ = g_config.ip + ":" + std::to_string(port); butil::ip_t ip; auto ret = butil::str2ip(g_config.ip.c_str(), &ip); if (ret != 0) { + server_.reset(); return ERROR_LOG_AND_STATUS("Failed to convert str_ip to butil::ip_t"); } butil::EndPoint addr(ip, port); @@ -95,6 +142,7 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { initial_conf = raw_addr_ + ":0,"; } if (node_options_.initial_conf.parse_from(initial_conf) != 0) { + server_.reset(); return ERROR_LOG_AND_STATUS("Failed to parse configuration"); } @@ -107,8 +155,9 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { node_options_.raft_meta_uri = prefix + "/raft_meta"; node_options_.snapshot_uri = prefix + "/snapshot"; // node_options_.disable_cli = FLAGS_disable_cli; - node_ = std::make_unique("pikiwidb", braft::PeerId(addr)); // group_id + node_ = std::make_unique(group_id, braft::PeerId(addr)); if (node_->init(node_options_) != 0) { + server_.reset(); node_.reset(); return ERROR_LOG_AND_STATUS("Failed to init raft node"); } @@ -124,7 +173,7 @@ bool PRaft::IsLeader() const { return node_->is_leader(); } -std::string PRaft::GetLeaderId() const { +std::string PRaft::GetLeaderID() const { if (!node_) { ERROR("Node is not initialized"); return "Failed to get leader id"; @@ -143,7 +192,7 @@ std::string PRaft::GetLeaderAddress() const { return addr.c_str(); } -std::string PRaft::GetNodeId() const { +std::string PRaft::GetNodeID() const { if (!node_) { ERROR("Node is not initialized"); return "Failed to get node id"; @@ -151,12 +200,24 @@ std::string PRaft::GetNodeId() const { return node_->node_id().to_string(); } -std::string PRaft::GetGroupId() const { +std::string PRaft::GetPeerID() const { + if (!node_) { + ERROR("Node is not initialized"); + return "Failed to get node id"; + } + + auto node_id = node_->node_id().to_string(); + auto pos = node_id.find(':'); + auto peer_id = node_id.substr(pos + 1, node_id.size()); + return peer_id; +} + +std::string 
PRaft::GetGroupID() const { if (!node_) { ERROR("Node is not initialized"); return "Failed to get cluster id"; } - return dbid_; + return group_id_; } braft::NodeStatus PRaft::GetNodeStatus() const { @@ -177,14 +238,30 @@ butil::Status PRaft::GetListPeers(std::vector* peers) { return node_->list_peers(peers); } +void PRaft::SendNodeRequest(PClient* client) { + assert(client); + + auto cluster_cmd_type = cluster_cmd_ctx_.GetClusterCmdType(); + switch (cluster_cmd_type) { + case ClusterCmdType::kJoin: + SendNodeInfoRequest(client, "DATA"); + break; + case ClusterCmdType::kRemove: + SendNodeRemoveRequest(client); + break; + default: + client->SetRes(CmdRes::kErrOther, "the command sent to the leader is incorrect"); + break; + } +} + // Gets the cluster id, which is used to initialize node -void PRaft::SendNodeInfoRequest(PClient* client) { +void PRaft::SendNodeInfoRequest(PClient* client, const std::string& info_type) { assert(client); - UnboundedBuffer req; - req.PushData("INFO raft", 9); - req.PushData("\r\n", 2); - client->SendPacket(req); + const std::string cmd_str = "INFO " + info_type + "\r\n"; + client->SendPacket(cmd_str); + client->Clear(); } void PRaft::SendNodeAddRequest(PClient* client) { @@ -201,80 +278,205 @@ void PRaft::SendNodeAddRequest(PClient* client) { req.PushData(raw_addr.data(), raw_addr.size()); req.PushData("\r\n", 2); client->SendPacket(req); + client->Clear(); +} + +void PRaft::SendNodeRemoveRequest(PClient* client) { + assert(client); + + UnboundedBuffer req; + req.PushData("RAFT.NODE REMOVE ", 17); + req.PushData(cluster_cmd_ctx_.GetPeerID().c_str(), cluster_cmd_ctx_.GetPeerID().size()); + req.PushData("\r\n", 2); + client->SendPacket(req); + client->Clear(); +} + +int PRaft::ProcessClusterCmdResponse(PClient* client, const char* start, int len) { + auto cluster_cmd_type = cluster_cmd_ctx_.GetClusterCmdType(); + int ret = 0; + switch (cluster_cmd_type) { + case ClusterCmdType::kJoin: + ret = PRAFT.ProcessClusterJoinCmdResponse(client, 
start, len); + break; + case ClusterCmdType::kRemove: + ret = PRAFT.ProcessClusterRemoveCmdResponse(client, start, len); + break; + default: + client->SetRes(CmdRes::kErrOther, "RAFT.CLUSTER response supports JOIN/REMOVE only"); + break; + } + + return ret; +} + +void PRaft::CheckRocksDBConfiguration(PClient* client, PClient* join_client, const std::string& reply) { + int databases_num = 0; + int rocksdb_num = 0; + std::string rockdb_version; + std::string line; + std::istringstream iss(reply); + + while (std::getline(iss, line)) { + std::string::size_type pos = line.find(':'); + if (pos != std::string::npos) { + std::string key = line.substr(0, pos); + std::string value = line.substr(pos + 1); + + if (key == DATABASES_NUM && pstd::String2int(value, &databases_num) == 0) { + join_client->SetRes(CmdRes::kErrOther, "Config of databases_num invalid"); + join_client->SendPacket(join_client->Message()); + join_client->Clear(); + // If the join fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + } else if (key == ROCKSDB_NUM && pstd::String2int(value, &rocksdb_num) == 0) { + join_client->SetRes(CmdRes::kErrOther, "Config of rocksdb_num invalid"); + join_client->SendPacket(join_client->Message()); + join_client->Clear(); + // If the join fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + } else if (key == ROCKSDB_VERSION) { + rockdb_version = pstd::StringTrimRight(value, "\r"); + } + } + } + + int current_databases_num = pikiwidb::g_config.databases; + int current_rocksdb_num = pikiwidb::g_config.db_instance_num; + std::string current_rocksdb_version = ROCKSDB_NAMESPACE::GetRocksVersionAsString(); + if (current_databases_num != databases_num || current_rocksdb_num != rocksdb_num || + current_rocksdb_version != rockdb_version) { + join_client->SetRes(CmdRes::kErrOther, "Config of databases_num, rocksdb_num or rocksdb_version mismatch"); + 
join_client->SendPacket(join_client->Message()); + join_client->Clear(); + // If the join fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + } else { + SendNodeInfoRequest(client, "RAFT"); + } +} + +void PRaft::LeaderRedirection(PClient* join_client, const std::string& reply) { + // Resolve the ip address of the leader + pstd::StringTrimLeft(reply, WRONG_LEADER); + pstd::StringTrim(reply); + braft::PeerId peerId; + peerId.parse(reply); + auto peer_ip = std::string(butil::ip2str(peerId.addr.ip).c_str()); + auto port = peerId.addr.port; + + // Reset the target of the connection + cluster_cmd_ctx_.Clear(); + auto ret = PRAFT.GetClusterCmdCtx().Set(ClusterCmdType::kJoin, join_client, std::move(peer_ip), port); + if (!ret) { // other clients have joined + join_client->SetRes(CmdRes::kErrOther, "Other clients have joined"); + join_client->SendPacket(join_client->Message()); + join_client->Clear(); + return; + } + PRAFT.GetClusterCmdCtx().ConnectTargetNode(); + + // Not reply any message here, we will reply after the connection is established. 
+ join_client->Clear(); } -std::tuple PRaft::ProcessClusterJoinCmdResponse(PClient* client, const char* start, int len) { +void PRaft::InitializeNodeBeforeAdd(PClient* client, PClient* join_client, const std::string& reply) { + std::string prefix = RAFT_GROUP_ID; + std::string::size_type prefix_length = prefix.length(); + std::string::size_type group_id_start = reply.find(prefix); + group_id_start += prefix_length; // locate the start location of "raft_group_id" + std::string::size_type group_id_end = reply.find("\r\n", group_id_start); + if (group_id_end != std::string::npos) { + std::string raft_group_id = reply.substr(group_id_start, group_id_end - group_id_start); + // initialize the slave node + auto s = PRAFT.Init(raft_group_id, true); + if (!s.ok()) { + join_client->SetRes(CmdRes::kErrOther, s.error_str()); + join_client->SendPacket(join_client->Message()); + join_client->Clear(); + // If the join fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + return; + } + + PRAFT.SendNodeAddRequest(client); + } else { + ERROR("Joined Raft cluster fail, because of invalid raft_group_id"); + join_client->SetRes(CmdRes::kErrOther, "Invalid raft_group_id"); + join_client->SendPacket(join_client->Message()); + join_client->Clear(); + // If the join fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + } +} + +int PRaft::ProcessClusterJoinCmdResponse(PClient* client, const char* start, int len) { assert(start); - auto join_client = join_ctx_.GetClient(); + auto join_client = cluster_cmd_ctx_.GetClient(); if (!join_client) { WARN("No client when processing cluster join cmd response."); - return std::make_tuple(0, true); + return 0; } - bool is_disconnect = true; std::string reply(start, len); - if (reply.find("+OK") != std::string::npos) { - INFO("Joined Raft cluster, node id: {}, dbid: {}", PRAFT.GetNodeId(), PRAFT.dbid_); + if (reply.find(OK) != std::string::npos) { + 
INFO("Joined Raft cluster, node id: {}, group_id: {}", PRAFT.GetNodeID(), PRAFT.group_id_); join_client->SetRes(CmdRes::kOK); join_client->SendPacket(join_client->Message()); - is_disconnect = false; - } else if (reply.find("-ERR wrong leader") != std::string::npos) { - // Resolve the ip address of the leader - pstd::StringTrimLeft(reply, "-ERR wrong leader"); - pstd::StringTrim(reply); - braft::PeerId peerId; - peerId.parse(reply); - - // Establish a connection with the leader and send the add request - auto on_new_conn = [](TcpConnection* obj) { - if (g_pikiwidb) { - g_pikiwidb->OnNewConnection(obj); - } - }; - auto fail_cb = [&](EventLoop* loop, const char* peer_ip, int port) { - PRAFT.OnJoinCmdConnectionFailed(loop, peer_ip, port); - }; - - auto loop = EventLoop::Self(); - auto peer_ip = std::string(butil::ip2str(peerId.addr.ip).c_str()); - auto port = peerId.addr.port; - // FIXME: The client here is not smart pointer, may cause undefined behavior. - // should use shared_ptr in DoCmd() rather than raw pointer. - PRAFT.GetJoinCtx().Set(join_client, peer_ip, port); - loop->Connect(peer_ip.c_str(), port, on_new_conn, fail_cb); - - // Not reply any message here, we will reply after the connection is established. 
join_client->Clear(); - } else if (reply.find("raft_group_id") != std::string::npos) { - std::string prefix = "raft_group_id:"; - std::string::size_type prefix_length = prefix.length(); - std::string::size_type group_id_start = reply.find(prefix); - group_id_start += prefix_length; // 定位到raft_group_id的起始位置 - std::string::size_type group_id_end = reply.find("\r\n", group_id_start); - if (group_id_end != std::string::npos) { - std::string raft_group_id = reply.substr(group_id_start, group_id_end - group_id_start); - // initialize the slave node - auto s = PRAFT.Init(raft_group_id, true); - if (!s.ok()) { - join_client->SetRes(CmdRes::kErrOther, s.error_str()); - join_client->SendPacket(join_client->Message()); - return std::make_tuple(len, is_disconnect); - } - - PRAFT.SendNodeAddRequest(client); - is_disconnect = false; - } else { - ERROR("Joined Raft cluster fail, because of invalid raft_group_id"); - join_client->SetRes(CmdRes::kErrOther, "Invalid raft_group_id"); - join_client->SendPacket(join_client->Message()); - } + // If the join fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + } else if (reply.find(DATABASES_NUM) != std::string::npos) { + CheckRocksDBConfiguration(client, join_client, reply); + } else if (reply.find(WRONG_LEADER) != std::string::npos) { + LeaderRedirection(join_client, reply); + } else if (reply.find(RAFT_GROUP_ID) != std::string::npos) { + InitializeNodeBeforeAdd(client, join_client, reply); } else { - ERROR("Joined Raft cluster fail, str: {}", start); - join_client->SetRes(CmdRes::kErrOther, std::string(start, len)); + ERROR("Joined Raft cluster fail, str: {}", reply); + join_client->SetRes(CmdRes::kErrOther, reply); join_client->SendPacket(join_client->Message()); + join_client->Clear(); + // If the join fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + } + + return len; +} + +int PRaft::ProcessClusterRemoveCmdResponse(PClient* client, 
const char* start, int len) { + assert(start); + auto remove_client = cluster_cmd_ctx_.GetClient(); + if (!remove_client) { + WARN("No client when processing cluster remove cmd response."); + return 0; + } + + std::string reply(start, len); + if (reply.find(OK) != std::string::npos) { + INFO("Removed Raft cluster, node id: {}, group_id: {}", PRAFT.GetNodeID(), PRAFT.group_id_); + ShutDown(); + Join(); + Clear(); + + remove_client->SetRes(CmdRes::kOK); + remove_client->SendPacket(remove_client->Message()); + remove_client->Clear(); + } else if (reply.find(NOT_LEADER) != std::string::npos) { + auto remove_client = cluster_cmd_ctx_.GetClient(); + remove_client->Clear(); + remove_client->Reexecutecommand(); + } else { + ERROR("Removed Raft cluster fail, str: {}", reply); + remove_client->SetRes(CmdRes::kErrOther, reply); + remove_client->SendPacket(remove_client->Message()); + remove_client->Clear(); } - return std::make_tuple(len, is_disconnect); + // If the remove fails, clear clusterContext and set it again by using the join command + cluster_cmd_ctx_.Clear(); + + return len; } butil::Status PRaft::AddPeer(const std::string& peer) { @@ -312,13 +514,17 @@ butil::Status PRaft::RemovePeer(const std::string& peer) { return {0, "OK"}; } -void PRaft::OnJoinCmdConnectionFailed([[maybe_unused]] EventLoop* loop, const char* peer_ip, int port) { - auto cli = join_ctx_.GetClient(); +void PRaft::OnClusterCmdConnectionFailed([[maybe_unused]] EventLoop* loop, const char* peer_ip, int port) { + auto cli = cluster_cmd_ctx_.GetClient(); if (cli) { - cli->SetRes(CmdRes::kErrOther, "ERR failed to connect to cluster for join, please check logs " + + cli->SetRes(CmdRes::kErrOther, "Failed to connect to cluster for join or remove, please check logs " + std::string(peer_ip) + ":" + std::to_string(port)); cli->SendPacket(cli->Message()); + cli->Clear(); } + cluster_cmd_ctx_.Clear(); + + PREPL.GetMasterAddr().Clear(); } // Shut this node and server down. 
@@ -361,6 +567,16 @@ void PRaft::AppendLog(const Binlog& log, std::promise&& promise node_->apply(task); } +void PRaft::Clear() { + if (node_) { + node_.reset(); + } + + if (server_) { + server_.reset(); + } +} + void PRaft::on_apply(braft::Iterator& iter) { // A batch of tasks are committed, which must be processed through for (; iter.valid(); iter.next()) { diff --git a/src/praft/praft.h b/src/praft/praft.h index d1217b597..05fbded9a 100644 --- a/src/praft/praft.h +++ b/src/praft/praft.h @@ -10,59 +10,64 @@ #include #include "braft/raft.h" +#include "brpc/server.h" #include "rocksdb/status.h" +#include "client.h" + namespace pikiwidb { -#define RAFT_DBID_LEN 32 +#define RAFT_GROUPID_LEN 32 + +#define OK "+OK" +#define DATABASES_NUM "databases_num" +#define ROCKSDB_NUM "rocksdb_num" +#define ROCKSDB_VERSION "rocksdb_version" +#define WRONG_LEADER "-ERR wrong leader" +#define RAFT_GROUP_ID "raft_group_id:" +#define NOT_LEADER "Not leader" #define PRAFT PRaft::Instance() -class PClient; class EventLoop; class Binlog; -class JoinCmdContext { +enum ClusterCmdType { + kNone, + kJoin, + kRemove, +}; + +class ClusterCmdContext { friend class PRaft; public: - JoinCmdContext() = default; - ~JoinCmdContext() = default; - - bool Set(PClient* client, const std::string& peer_ip, int port) { - std::unique_lock lck(mtx_); - if (client_ != nullptr) { - return false; - } - assert(client); - client_ = client; - peer_ip_ = peer_ip; - port_ = port; - return true; - } + ClusterCmdContext() = default; + ~ClusterCmdContext() = default; - void Clear() { - std::unique_lock lck(mtx_); - client_ = nullptr; - peer_ip_.clear(); - port_ = 0; - } + bool Set(ClusterCmdType cluster_cmd_type, PClient* client, std::string&& peer_ip, int port, + std::string&& peer_id = ""); + + void Clear(); // @todo the function seems useless - bool IsEmpty() { - std::unique_lock lck(mtx_); - return client_ == nullptr; - } + bool IsEmpty(); + ClusterCmdType GetClusterCmdType() { return cluster_cmd_type_; } PClient* 
GetClient() { return client_; } const std::string& GetPeerIp() { return peer_ip_; } int GetPort() { return port_; } + const std::string& GetPeerID() { return peer_id_; } + + void ConnectTargetNode(); private: + ClusterCmdType cluster_cmd_type_ = ClusterCmdType::kNone; std::mutex mtx_; PClient* client_ = nullptr; std::string peer_ip_; int port_ = 0; + std::string peer_id_; }; class PRaftWriteDoneClosure : public braft::Closure { @@ -98,21 +103,32 @@ class PRaft : public braft::StateMachine { void ShutDown(); void Join(); void AppendLog(const Binlog& log, std::promise&& promise); + void Clear(); //===--------------------------------------------------------------------===// - // ClusterJoin command + // Cluster command //===--------------------------------------------------------------------===// - JoinCmdContext& GetJoinCtx() { return join_ctx_; } - void SendNodeInfoRequest(PClient* client); + ClusterCmdContext& GetClusterCmdCtx() { return cluster_cmd_ctx_; } + void SendNodeRequest(PClient* client); + void SendNodeInfoRequest(PClient* client, const std::string& info_type); void SendNodeAddRequest(PClient* client); - std::tuple ProcessClusterJoinCmdResponse(PClient* client, const char* start, int len); - void OnJoinCmdConnectionFailed(EventLoop*, const char* peer_ip, int port); + void SendNodeRemoveRequest(PClient* client); + + int ProcessClusterCmdResponse(PClient* client, const char* start, int len); + void CheckRocksDBConfiguration(PClient* client, PClient* join_client, const std::string& reply); + void LeaderRedirection(PClient* join_client, const std::string& reply); + void InitializeNodeBeforeAdd(PClient* client, PClient* join_client, const std::string& reply); + int ProcessClusterJoinCmdResponse(PClient* client, const char* start, int len); + int ProcessClusterRemoveCmdResponse(PClient* client, const char* start, int len); + + void OnClusterCmdConnectionFailed(EventLoop*, const char* peer_ip, int port); bool IsLeader() const; - std::string GetLeaderId() const; 
std::string GetLeaderAddress() const; - std::string GetNodeId() const; - std::string GetGroupId() const; + std::string GetLeaderID() const; + std::string GetNodeID() const; + std::string GetPeerID() const; + std::string GetGroupID() const; braft::NodeStatus GetNodeStatus() const; butil::Status GetListPeers(std::vector* peers); @@ -138,8 +154,8 @@ class PRaft : public braft::StateMachine { braft::NodeOptions node_options_; // options for raft node std::string raw_addr_; // ip:port of this node - JoinCmdContext join_ctx_; // context for cluster join command - std::string dbid_; // dbid of group, + ClusterCmdContext cluster_cmd_ctx_; // context for cluster join/remove command + std::string group_id_; // group id }; } // namespace pikiwidb diff --git a/src/praft/praft_service.h b/src/praft/praft_service.h index 8efc5f51a..e0a44d6a5 100644 --- a/src/praft/praft_service.h +++ b/src/praft/praft_service.h @@ -1,3 +1,10 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + #pragma once #include "praft.pb.h" @@ -16,4 +23,4 @@ class DummyServiceImpl : public DummyService { PRaft* praft_; }; -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/replication.cc b/src/replication.cc index 8dcdac7ad..3a7a0c534 100644 --- a/src/replication.cc +++ b/src/replication.cc @@ -191,6 +191,7 @@ void PReplication::Cron() { g_pikiwidb->OnNewConnection(obj); } }; + auto fail_cb = [&](EventLoop*, const char* peer_ip, int port) { WARN("OnCallback: Connect master {}:{} failed", peer_ip, port); @@ -198,6 +199,11 @@ void PReplication::Cron() { if (!masterInfo_.downSince) { masterInfo_.downSince = ::time(nullptr); } + + if (on_fail_) { + on_fail_(EventLoop::Self(), peer_ip, port); + on_fail_ = nullptr; + } }; auto loop = EventLoop::Self(); @@ -207,20 +213,7 @@ void PReplication::Cron() { } break; case kPReplStateConnected: - if (!g_config.masterauth.empty()) { - if (auto master = master_.lock()) { - UnboundedBuffer req; - req.PushData("auth ", 5); - req.PushData(g_config.masterauth.data(), g_config.masterauth.size()); - req.PushData("\r\n", 2); - master->SendPacket(req); - INFO("send auth with password {}", g_config.masterauth); - - masterInfo_.state = kPReplStateWaitAuth; - break; - } - } - // fall through to next case. 
+ break; case kPReplStateWaitAuth: { auto master = master_.lock(); diff --git a/src/replication.h b/src/replication.h index 11d8807f8..8201b610b 100644 --- a/src/replication.h +++ b/src/replication.h @@ -12,6 +12,7 @@ #include #include "common.h" +#include "net/tcp_connection.h" #include "net/unbounded_buffer.h" #include "net/util.h" #include "pstd/memory_file.h" @@ -126,12 +127,14 @@ class PReplication { void SendToSlaves(const std::vector& params); // slave side + void SetFailCallback(TcpConnectionFailCallback cb) { on_fail_ = std::move(cb); } void SaveTmpRdb(const char* data, std::size_t& len); void SetMaster(const std::shared_ptr& cli); void SetMasterState(PReplState s); void SetMasterAddr(const char* ip, unsigned short port); void SetRdbSize(std::size_t s); PReplState GetMasterState() const; + PClient* GetMaster() const { return master_.lock().get(); } SocketAddr GetMasterAddr() const; std::size_t GetRdbSize() const; @@ -151,6 +154,9 @@ class PReplication { PMasterInfo masterInfo_; std::weak_ptr master_; pstd::OutputMemoryFile rdb_; + + // Callback function that failed to connect to the master node + TcpConnectionFailCallback on_fail_ = nullptr; }; } // namespace pikiwidb diff --git a/tests/consistency_test.go b/tests/consistency_test.go index d01072426..9dd15c98e 100644 --- a/tests/consistency_test.go +++ b/tests/consistency_test.go @@ -1,9 +1,11 @@ package pikiwidb_test import ( + "bufio" "context" "log" "strconv" + "strings" "time" . 
"github.com/onsi/ginkgo/v2" @@ -139,7 +141,7 @@ var _ = Describe("Consistency", Ordered, func() { "fa": "va", "fc": "vc", })) - time.Sleep(100 * time.Millisecond) + time.Sleep(10000 * time.Millisecond) for _, f := range followers { getall, err := f.HGetAll(ctx, testKey).Result() Expect(err).NotTo(HaveOccurred()) @@ -149,4 +151,32 @@ var _ = Describe("Consistency", Ordered, func() { })) } }) + + It("ThreeNodesClusterConstructionTest", func() { + for _, follower := range followers { + info, err := follower.Do(ctx, "info", "raft").Result() + Expect(err).NotTo(HaveOccurred()) + info_str := info.(string) + scanner := bufio.NewScanner(strings.NewReader(info_str)) + var peer_id string + var is_member bool + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "raft_peer_id") { + parts := strings.Split(line, ":") + if len(parts) >= 2 { + peer_id = parts[1] + is_member = true + break + } + } + } + + if is_member { + ret, err := follower.Do(ctx, "raft.node", "remove", peer_id).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(ret).To(Equal(OK)) + } + } + }) }) From 34114954c34bfdded78119db0c353bacaa23442e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Mon, 22 Apr 2024 17:59:02 +0800 Subject: [PATCH 19/33] feat: Snapshot save & load (#238) snapshot save & load --- pikiwidb.conf | 4 +- save_load.sh | 19 +++++ src/checkpoint_manager.cc | 45 ----------- src/checkpoint_manager.h | 37 --------- src/client.h | 3 +- src/cmd_admin.cc | 2 +- src/cmd_raft.cc | 17 +++- src/cmd_raft.h | 4 +- src/cmd_table_manager.cc | 2 + src/db.cc | 100 +++++++++++++++++++---- src/db.h | 23 +++--- src/pikiwidb.cc | 3 +- src/praft/praft.cc | 56 ++++++++++++- src/praft/praft.h | 10 +++ src/pstd/pstd_string.cc | 6 ++ src/pstd/pstd_string.h | 2 + src/storage/include/storage/storage.h | 3 + src/storage/src/redis.cc | 5 ++ src/storage/src/storage.cc | 109 +++++++++++++++++++++----- src/store.cc | 52 
++++++------ src/store.h | 36 +++------ 21 files changed, 345 insertions(+), 193 deletions(-) create mode 100755 save_load.sh delete mode 100644 src/checkpoint_manager.cc delete mode 100644 src/checkpoint_manager.h diff --git a/pikiwidb.conf b/pikiwidb.conf index 55fe6d3d2..b375faeb0 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -38,7 +38,7 @@ logfile stdout # Set the number of databases. The default database is DB 0, you can select # a different one on a per-connection basis using SELECT where # dbid is a number between 0 and 'databases'-1 -databases 3 +databases 1 ################################ SNAPSHOTTING ################################# # @@ -347,7 +347,7 @@ backendpath dump # the frequency of dump to backend per second backendhz 10 # the rocksdb number per db -db-instance-num 5 +db-instance-num 3 # default 86400 * 7 rocksdb-ttl-second 604800 # default 86400 * 3 diff --git a/save_load.sh b/save_load.sh new file mode 100755 index 000000000..b2b6fd836 --- /dev/null +++ b/save_load.sh @@ -0,0 +1,19 @@ +#!/bin/bash +killall -9 pikiwidb +mkdir leader follower1 follower2 + +cd leader && ulimit -n 99999 && rm -fr * && ../bin/pikiwidb ../pikiwidb.conf --port 7777 & + +cd follower1 && ulimit -n 99999 && rm -fr * && ../bin/pikiwidb ../pikiwidb.conf --port 8888 & +sleep 10 +redis-cli -p 7777 raft.cluster init +redis-benchmark -p 7777 -c 5 -n 10000 -r 10000000 -d 1024 -t hset + + +redis-cli -p 7777 raft.node DOSNAPSHOT +redis-cli -p 7777 raft.node DOSNAPSHOT + +redis-cli -p 8888 raft.cluster join 127.0.0.1:7777 + + + diff --git a/src/checkpoint_manager.cc b/src/checkpoint_manager.cc deleted file mode 100644 index a29211e6c..000000000 --- a/src/checkpoint_manager.cc +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
An additional grant - * of patent rights can be found in the PATENTS file in the same directory. - */ - -#include "checkpoint_manager.h" -#include "db.h" -#include "log.h" -#include "pstd/env.h" - -namespace pikiwidb { - -void CheckpointManager::Init(int instNum, DB* db) { - checkpoint_num_ = instNum; - res_.reserve(checkpoint_num_); - db_ = db; -} - -void CheckpointManager::CreateCheckpoint(const std::string& path) { - res_.clear(); - - if (!pstd::FileExists(path)) { - if (0 != pstd::CreatePath(path)) { - WARN("Create Dir {} fail!", path); - return; - } - INFO("Create Dir {} success!", path); - } - - std::lock_guard Lock(shared_mutex_); - for (int i = 0; i < checkpoint_num_; ++i) { - auto res = std::async(std::launch::async, &DB::DoBgSave, db_, path, i); - res_.push_back(std::move(res)); - } -} - -void CheckpointManager::WaitForCheckpointDone() { - for (auto& r : res_) { - r.get(); - } -} - -} // namespace pikiwidb diff --git a/src/checkpoint_manager.h b/src/checkpoint_manager.h deleted file mode 100644 index 065027424..000000000 --- a/src/checkpoint_manager.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. 
- */ - -#pragma once - -#include -#include -#include - -namespace pikiwidb { - -class DB; - -class CheckpointManager { - public: - CheckpointManager() = default; - ~CheckpointManager() = default; - - void Init(int instNum, DB* db); - - void CreateCheckpoint(const std::string& path); - - void WaitForCheckpointDone(); - - private: - int checkpoint_num_ = 0; - std::vector> res_; - DB* db_ = nullptr; - - std::shared_mutex shared_mutex_; -}; - -} // namespace pikiwidb diff --git a/src/client.h b/src/client.h index a890697af..f983b3ef2 100644 --- a/src/client.h +++ b/src/client.h @@ -102,7 +102,6 @@ enum ClientFlag { kClientFlagMaster = (1 << 3), }; -class DB; struct PSlaveInfo; class PClient : public std::enable_shared_from_this, public CmdRes { @@ -251,4 +250,4 @@ class PClient : public std::enable_shared_from_this, public CmdRes { static thread_local PClient* s_current; }; -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index 1057bd3fc..d65278728 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -6,11 +6,11 @@ */ #include "cmd_admin.h" - #include "braft/raft.h" #include "rocksdb/version.h" #include "praft/praft.h" +#include "store.h" namespace pikiwidb { diff --git a/src/cmd_raft.cc b/src/cmd_raft.cc index ebbef035e..43f9c6af2 100644 --- a/src/cmd_raft.cc +++ b/src/cmd_raft.cc @@ -11,6 +11,7 @@ #include #include +#include "net/event_loop.h" #include "praft/praft.h" #include "pstd/log.h" #include "pstd/pstd_string.h" @@ -28,8 +29,9 @@ RaftNodeCmd::RaftNodeCmd(const std::string& name, int16_t arity) bool RaftNodeCmd::DoInitial(PClient* client) { auto cmd = client->argv_[1]; pstd::StringToUpper(cmd); - if (cmd != kAddCmd && cmd != kRemoveCmd) { - client->SetRes(CmdRes::kErrOther, "RAFT.NODE supports ADD / REMOVE only"); + + if (cmd != kAddCmd && cmd != kRemoveCmd && cmd != kDoSnapshot) { + client->SetRes(CmdRes::kErrOther, "RAFT.NODE supports ADD / REMOVE / DOSNAPSHOT only"); return 
false; } return true; @@ -40,8 +42,10 @@ void RaftNodeCmd::DoCmd(PClient* client) { pstd::StringToUpper(cmd); if (cmd == kAddCmd) { DoCmdAdd(client); - } else { + } else if (cmd == kRemoveCmd) { DoCmdRemove(client); + } else { + DoCmdSnapshot(client); } } @@ -115,6 +119,13 @@ void RaftNodeCmd::DoCmdRemove(PClient* client) { } } +void RaftNodeCmd::DoCmdSnapshot(PClient* client) { + auto s = PRAFT.DoSnapshot(); + if (s.ok()) { + client->SetRes(CmdRes::kOK); + } +} + RaftClusterCmd::RaftClusterCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsRaft, kAclCategoryRaft) {} diff --git a/src/cmd_raft.h b/src/cmd_raft.h index b9df47e2c..6a4c1f869 100644 --- a/src/cmd_raft.h +++ b/src/cmd_raft.h @@ -45,9 +45,11 @@ class RaftNodeCmd : public BaseCmd { void DoCmd(PClient *client) override; void DoCmdAdd(PClient *client); void DoCmdRemove(PClient *client); + void DoCmdSnapshot(PClient *client); static constexpr std::string_view kAddCmd = "ADD"; static constexpr std::string_view kRemoveCmd = "REMOVE"; + static constexpr std::string_view kDoSnapshot = "DOSNAPSHOT"; }; /* RAFT.CLUSTER INIT @@ -78,4 +80,4 @@ class RaftClusterCmd : public BaseCmd { static constexpr std::string_view kJoinCmd = "JOIN"; }; -} // namespace pikiwidb \ No newline at end of file +} // namespace pikiwidb diff --git a/src/cmd_table_manager.cc b/src/cmd_table_manager.cc index edc375596..19bced6dc 100644 --- a/src/cmd_table_manager.cc +++ b/src/cmd_table_manager.cc @@ -7,6 +7,8 @@ #include "cmd_table_manager.h" +#include + #include "cmd_admin.h" #include "cmd_hash.h" #include "cmd_keys.h" diff --git a/src/db.cc b/src/db.cc index 122d84b4c..4c002e146 100644 --- a/src/db.cc +++ b/src/db.cc @@ -7,21 +7,19 @@ #include "db.h" +#include "config.h" #include "praft/praft.h" #include "pstd/log.h" -#include "checkpoint_manager.h" -#include "config.h" - extern pikiwidb::PConfig g_config; namespace pikiwidb { -DB::DB(int db_index, const std::string& db_path) - : db_index_(db_index), 
db_path_(db_path + std::to_string(db_index_) + '/') { +DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) + : db_index_(db_index), db_path_(db_path + std::to_string(db_index_) + '/'), rocksdb_inst_num_(rocksdb_inst_num) { storage::StorageOptions storage_options; storage_options.options.create_if_missing = true; - storage_options.db_instance_num = g_config.db_instance_num; + storage_options.db_instance_num = rocksdb_inst_num_; storage_options.db_id = db_index_; // options for CF @@ -38,24 +36,94 @@ DB::DB(int db_index, const std::string& db_path) ERROR("Storage open failed! {}", s.ToString()); abort(); } - - checkpoint_manager_ = std::make_unique(); - checkpoint_manager_->Init(g_config.db_instance_num, this); - - opened_ = true; + opened_.store(true); INFO("Open DB{} success!", db_index_); } -void DB::DoBgSave(const std::string& path, int i) { - // 1) always hold storage's sharedLock +void DB::DoCheckpoint(const std::string& path, int i) { + // 1) always hold the storage's shared lock std::shared_lock sharedLock(storage_mutex_); - // 2)Create the storage's checkpoint 。 + // 2)Create the checkpoint of rocksdb i. auto status = storage_->CreateCheckpoint(path, i); } -void DB::CreateCheckpoint(const std::string& path) { checkpoint_manager_->CreateCheckpoint(path); } +void DB::LoadCheckpoint(const std::string& path, const std::string& db_path, int i) { + // 1) Already holding the storage's exclusion lock -void DB::WaitForCheckpointDone() { checkpoint_manager_->WaitForCheckpointDone(); } + // 2) Load the checkpoint of rocksdb i. 
+ auto status = storage_->LoadCheckpoint(path, db_path, i); +} + +void DB::CreateCheckpoint(const std::string& path, bool sync) { + auto tmp_path = path + '/' + std::to_string(db_index_); + if (0 != pstd::CreatePath(tmp_path)) { + WARN("Create dir {} fail !", tmp_path); + return; + } + std::vector> result; + result.reserve(rocksdb_inst_num_); + for (int i = 0; i < rocksdb_inst_num_; ++i) { + // In a new thread, create a checkpoint for the specified rocksdb i + // In DB::DoBgSave, a read lock is always held to protect the Storage + // corresponding to this rocksdb i. + auto res = std::async(std::launch::async, &DB::DoCheckpoint, this, path, i); + result.push_back(std::move(res)); + } + if (sync) { + for (auto& r : result) { + r.get(); + } + } +} + +void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { + opened_.store(false); + auto checkpoint_path = path + '/' + std::to_string(db_index_); + if (0 != pstd::IsDir(path)) { + WARN("Checkpoint dir {} does not exist!", checkpoint_path); + return; + } + if (0 != pstd::IsDir(db_path_)) { + if (0 != pstd::CreateDir(db_path_)) { + WARN("Create dir {} fail !", db_path_); + return; + } + } + + std::lock_guard lock(storage_mutex_); + std::vector> result; + result.reserve(rocksdb_inst_num_); + for (int i = 0; i < rocksdb_inst_num_; ++i) { + // In a new thread, Load a checkpoint for the specified rocksdb i + auto res = std::async(std::launch::async, &DB::LoadCheckpoint, this, checkpoint_path, db_path_, i); + result.push_back(std::move(res)); + } + for (auto& r : result) { + r.get(); + } + + storage::StorageOptions storage_options; + storage_options.options.create_if_missing = true; + storage_options.db_instance_num = rocksdb_inst_num_; + storage_options.db_id = db_index_; + + // options for CF + storage_options.options.ttl = g_config.rocksdb_ttl_second; + storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; + if (g_config.use_raft) { + storage_options.append_log_function = [&r = 
PRAFT](const Binlog& log, std::promise&& promise) { + r.AppendLog(log, std::move(promise)); + }; + } + storage_ = std::make_unique(); + + if (auto s = storage_->Open(storage_options, db_path_); !s.ok()) { + ERROR("Storage open failed! {}", s.ToString()); + abort(); + } + opened_.store(true); + INFO("DB{} load a checkpoint from {} success!", db_index_, path); +} } // namespace pikiwidb diff --git a/src/db.h b/src/db.h index 11dc3a207..a127e4f16 100644 --- a/src/db.h +++ b/src/db.h @@ -7,15 +7,14 @@ #pragma once +#include #include "storage/storage.h" namespace pikiwidb { -class CheckpointManager; - class DB { public: - DB(int db_index, const std::string& db_path); + DB(int db_index, const std::string& db_path, int rocksdb_inst_num); std::unique_ptr& GetStorage() { return storage_; } @@ -27,20 +26,20 @@ class DB { void UnLockShared() { storage_mutex_.unlock_shared(); } - void CreateCheckpoint(const std::string& path); - - [[maybe_unused]] void DoBgSave(const std::string&, int i); + void CreateCheckpoint(const std::string& path, bool sync); - void WaitForCheckpointDone(); + void LoadDBFromCheckpoint(const std::string& path, bool sync = false); int GetDbIndex() { return db_index_; } + private: + void DoCheckpoint(const std::string&, int i); + void LoadCheckpoint(const std::string&, const std::string& db_path, int i); + private: const int db_index_ = 0; const std::string db_path_; - const std::string dump_parent_path_; - const std::string dump_path_; - + int rocksdb_inst_num_ = 0; /** * If you want to change the pointer that points to storage, * you must first acquire a mutex lock. 
@@ -49,9 +48,7 @@ class DB { */ std::shared_mutex storage_mutex_; std::unique_ptr storage_; - bool opened_ = false; - - std::unique_ptr checkpoint_manager_; + std::atomic_bool opened_ = false; }; } // namespace pikiwidb diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index 76516df5c..0c9aab727 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -8,6 +8,8 @@ // // PikiwiDB.cc +#include "pikiwidb.h" + #include #include #include @@ -21,7 +23,6 @@ #include "client.h" #include "config.h" #include "helper.h" -#include "pikiwidb.h" #include "pikiwidb_logo.h" #include "slow_log.h" #include "store.h" diff --git a/src/praft/praft.cc b/src/praft/praft.cc index 4eb1e385e..ecb4b002c 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -9,6 +9,7 @@ #include +#include "braft/snapshot.h" #include "braft/util.h" #include "brpc/server.h" @@ -18,6 +19,8 @@ #include "binlog.pb.h" #include "config.h" #include "pikiwidb.h" +#include "praft.h" + #include "praft_service.h" #include "replication.h" #include "store.h" @@ -112,7 +115,6 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { server_.reset(); return ERROR_LOG_AND_STATUS("Failed to start server"); } - // It's ok to start PRaft; assert(group_id.size() == RAFT_GROUPID_LEN); this->group_id_ = group_id; @@ -514,6 +516,16 @@ butil::Status PRaft::RemovePeer(const std::string& peer) { return {0, "OK"}; } +butil::Status PRaft::DoSnapshot() { + if (!node_) { + return ERROR_LOG_AND_STATUS("Node is not initialized"); + } + braft::SynchronizedClosure done; + node_->snapshot(&done); + done.wait(); + return done.status(); +} + void PRaft::OnClusterCmdConnectionFailed([[maybe_unused]] EventLoop* loop, const char* peer_ip, int port) { auto cli = cluster_cmd_ctx_.GetClient(); if (cli) { @@ -567,6 +579,26 @@ void PRaft::AppendLog(const Binlog& log, std::promise&& promise node_->apply(task); } +int PRaft::AddAllFiles(const std::filesystem::path& dir, braft::SnapshotWriter* writer, const std::string& path) { + 
assert(writer); + for (const auto& entry : std::filesystem::directory_iterator(dir)) { + if (entry.is_directory()) { + if (entry.path() != "." && entry.path() != "..") { + DEBUG("dir_path = {}", entry.path().string()); + AddAllFiles(entry.path(), writer, path); + } + } else { + DEBUG("file_path = {}", std::filesystem::relative(entry.path(), path).string()); + if (writer->add_file(std::filesystem::relative(entry.path(), path)) != 0) { + ERROR("add file {} to snapshot fail!", entry.path().string()); + return -1; + } + } + } + return 0; +} + +// @braft::StateMachine void PRaft::Clear() { if (node_) { node_.reset(); @@ -607,9 +639,27 @@ void PRaft::on_apply(braft::Iterator& iter) { } } -void PRaft::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) {} +void PRaft::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) { + assert(writer); + brpc::ClosureGuard done_guard(done); + auto path = writer->get_path(); + INFO("Saving snapshot to {}", path); + TasksVector tasks(1, {TaskType::kCheckpoint, db_id_, {{TaskArg::kCheckpointPath, path}}, true}); + PSTORE.HandleTaskSpecificDB(tasks); + if (auto res = AddAllFiles(path, writer, path); res != 0) { + done->status().set_error(EIO, "Fail to add file to writer"); + } +} -int PRaft::on_snapshot_load(braft::SnapshotReader* reader) { return 0; } +int PRaft::on_snapshot_load(braft::SnapshotReader* reader) { + CHECK(!IsLeader()) << "Leader is not supposed to load snapshot"; + assert(reader); + auto reader_path = reader->get_path(); // xx/snapshot_0000001 + auto path = g_config.dbpath + std::to_string(db_id_); // db/db_id + TasksVector tasks(1, {TaskType::kLoadDBFromCheckpoint, db_id_, {{TaskArg::kCheckpointPath, reader_path}}, true}); + PSTORE.HandleTaskSpecificDB(tasks); + return 0; +} void PRaft::on_leader_start(int64_t term) { WARN("Node {} start to be leader, term={}", node_->node_id().to_string(), term); diff --git a/src/praft/praft.h b/src/praft/praft.h index 05fbded9a..c2ff888be 100644 --- 
a/src/praft/praft.h +++ b/src/praft/praft.h @@ -7,7 +7,12 @@ #pragma once +#include #include +#include +#include +#include +#include #include "braft/raft.h" #include "brpc/server.h" @@ -99,6 +104,7 @@ class PRaft : public braft::StateMachine { butil::Status AddPeer(const std::string& peer); butil::Status RemovePeer(const std::string& peer); butil::Status RaftRecvEntry(); + butil::Status DoSnapshot(); void ShutDown(); void Join(); @@ -148,6 +154,9 @@ class PRaft : public braft::StateMachine { void on_stop_following(const ::braft::LeaderChangeContext& ctx) override; void on_start_following(const ::braft::LeaderChangeContext& ctx) override; + private: + static int AddAllFiles(const std::filesystem::path& dir, braft::SnapshotWriter* writer, const std::string& path); + private: std::unique_ptr server_{nullptr}; // brpc std::unique_ptr node_{nullptr}; @@ -156,6 +165,7 @@ class PRaft : public braft::StateMachine { ClusterCmdContext cluster_cmd_ctx_; // context for cluster join/remove command std::string group_id_; // group id + int db_id_ = 0; // db_id }; } // namespace pikiwidb diff --git a/src/pstd/pstd_string.cc b/src/pstd/pstd_string.cc index c10e99255..973656a2b 100755 --- a/src/pstd/pstd_string.cc +++ b/src/pstd/pstd_string.cc @@ -619,4 +619,10 @@ bool StringHasSpaces(const std::string& str) { return std::count_if(str.begin(), str.end(), [](unsigned char c) { return std::isspace(c); }); } +void TrimSlash(std::string& dirName) { + while (dirName.back() == '/') { + dirName.pop_back(); + } +} + } // namespace pstd \ No newline at end of file diff --git a/src/pstd/pstd_string.h b/src/pstd/pstd_string.h index ed8411bb4..d6ffd828a 100755 --- a/src/pstd/pstd_string.h +++ b/src/pstd/pstd_string.h @@ -93,4 +93,6 @@ std::string RandomStringWithNumber(size_t len); bool StringHasSpaces(const std::string& str); +void TrimSlash(std::string& dirName); + } // namespace pstd diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 
bec856b58..aad21e337 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -46,6 +46,7 @@ inline constexpr size_t BATCH_DELETE_LIMIT = 100; inline constexpr size_t COMPACT_THRESHOLD_COUNT = 2000; inline constexpr uint64_t kNoFlush = std::numeric_limits::max(); +inline constexpr uint64_t kFlush = 0; using Options = rocksdb::Options; using BlockBasedTableOptions = rocksdb::BlockBasedTableOptions; @@ -182,6 +183,8 @@ class Storage { Status CreateCheckpoint(const std::string& dump_path, int index); + Status LoadCheckpoint(const std::string& dump_path, const std::string& db_path, int index); + Status LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key); Status StoreCursorStartKey(const DataType& dtype, int64_t cursor, char type, const std::string& next_key); diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 10febd50a..ed8ee7d3c 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -51,7 +51,12 @@ Redis::~Redis() { for (auto handle : tmp_handles) { delete handle; } + // delete env_; delete db_; + + if (default_compact_range_options_.canceled) { + delete default_compact_range_options_.canceled; + } } Status Redis::Open(const StorageOptions& storage_options, const std::string& db_path) { diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index cccf48a47..fe7a3c943 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -4,6 +4,7 @@ // of patent rights can be found in the PATENTS file in the same directory. 
#include +#include #include #include @@ -11,6 +12,7 @@ #include "config.h" #include "pstd/log.h" #include "pstd/pikiwidb_slot.h" +#include "pstd/pstd_string.h" #include "rocksdb/utilities/checkpoint.h" #include "scope_snapshot.h" #include "src/lru_cache.h" @@ -23,6 +25,9 @@ #include "storage/storage.h" #include "storage/util.h" +#define PRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" +#define SST_FILE_EXTENSION ".sst" + namespace storage { extern std::string BitOpOperate(BitOpType op, const std::vector& src_values, int64_t max_len); class Redis; @@ -64,13 +69,14 @@ Storage::Storage() { } Storage::~Storage() { - bg_tasks_should_exit_ = true; + bg_tasks_should_exit_.store(true); bg_tasks_cond_var_.notify_one(); - - if (is_opened_) { - for (auto& inst : insts_) { - inst.reset(); + if (is_opened_.load()) { + int ret = 0; + if (ret = pthread_join(bg_tasks_thread_id_, nullptr); ret != 0) { + ERROR("pthread_join failed with bgtask thread error : {}", ret); } + insts_.clear(); } } @@ -82,6 +88,42 @@ static std::string AppendSubDirectory(const std::string& db_path, int index) { } } +static int RecursiveLinkAndCopy(const std::filesystem::path& source, const std::filesystem::path& destination) { + if (std::filesystem::is_regular_file(source)) { + if (source.filename() == PRAFT_SNAPSHOT_META_FILE) { + return 0; + } else if (source.extension() == SST_FILE_EXTENSION) { + // Create a hard link + if (::link(source.c_str(), destination.c_str()) != 0) { + WARN("hard link file {} fail", source.string()); + return -1; + } + DEBUG("hard link success! source_file = {} , destination_file = {}", source.string(), destination.string()); + } else { + // Copy the file + if (!std::filesystem::copy_file(source, destination, std::filesystem::copy_options::overwrite_existing)) { + WARN("copy file {} fail", source.string()); + return -1; + } + DEBUG("copy success! 
source_file = {} , destination_file = {}", source.string(), destination.string()); + } + } else { + if (!pstd::FileExists(destination)) { + if (pstd::CreateDir(destination) != 0) { + WARN("create dir {} fail", destination.string()); + return -1; + } + } + + for (const auto& entry : std::filesystem::directory_iterator(source)) { + if (RecursiveLinkAndCopy(entry.path(), destination / entry.path().filename()) != 0) { + return -1; + } + } + } + return 0; +} + Status Storage::Open(const StorageOptions& storage_options, const std::string& db_path) { mkpath(db_path.c_str(), 0755); db_instance_num_ = storage_options.db_instance_num; @@ -105,20 +147,12 @@ Status Storage::Open(const StorageOptions& storage_options, const std::string& d Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { INFO("DB{}'s RocksDB {} begin to generate a checkpoint!", db_id_, i); auto source_dir = AppendSubDirectory(dump_path, db_id_); - if (!pstd::FileExists(source_dir)) { - if (0 != pstd::CreatePath(source_dir)) { - WARN("Create Dir {} fail!", source_dir); - return Status::IOError("CreatePath() fail! dir_name : {} ", source_dir); - } - INFO("Create Dir {} success!", source_dir); - } - source_dir = AppendSubDirectory(source_dir, i); auto tmp_dir = source_dir + ".tmp"; // 1) Make sure the temporary directory does not exist if (!pstd::DeleteDirIfExist(tmp_dir)) { - WARN("DB{}'s RocksDB {} delete dir fail!", db_id_, i); + WARN("DB{}'s RocksDB {} delete directory fail!", db_id_, i); return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", tmp_dir); } @@ -133,7 +167,7 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { // 3) Create a checkpoint std::unique_ptr checkpoint_guard(checkpoint); - s = checkpoint->CreateCheckpoint(tmp_dir, kNoFlush, nullptr); + s = checkpoint->CreateCheckpoint(tmp_dir, kFlush, nullptr); if (!s.ok()) { WARN("DB{}'s RocksDB {} create checkpoint failed!. 
Error: {}", db_id_, i, s.ToString()); return s; @@ -141,7 +175,7 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { // 4) Make sure the source directory does not exist if (!pstd::DeleteDirIfExist(source_dir)) { - WARN("DB{}'s RocksDB {} delete dir {} fail!", db_id_, i, source_dir); + WARN("DB{}'s RocksDB {} delete directory {} fail!", db_id_, i, source_dir); return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", source_dir); } @@ -152,13 +186,48 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { if (!pstd::DeleteDirIfExist(tmp_dir)) { WARN("DB{}'s RocksDB {} fail to delete the rename failed directory {} ", db_id_, i, tmp_dir); } - return Status::IOError("Rename dir {} fail!", tmp_dir); + return Status::IOError("Rename directory {} fail!", tmp_dir); } INFO("DB{}'s RocksDB {} create checkpoint {} success!", db_id_, i, source_dir); return Status::OK(); } +Status Storage::LoadCheckpoint(const std::string& dump_path, const std::string& db_path, int i) { + auto rocksdb_checkpoint_path = AppendSubDirectory(dump_path, i); + INFO("DB{}'s RocksDB {} begin to load a checkpoint from {}", db_id_, i, rocksdb_checkpoint_path); + auto rocksdb_path = AppendSubDirectory(db_path, i); // ./db/db_id/i + auto tmp_rocksdb_path = rocksdb_path + ".tmp"; // ./db/db_id/i.tmp + insts_[i].reset(); + + // 1) Rename the original db to db.tmp, and only perform the maximum possible recovery of data + // when loading the checkpoint fails. + if (auto status = pstd::RenameFile(rocksdb_path, tmp_rocksdb_path); status != 0) { + WARN("DB{}'s RocksDB {} rename db directory {} to temporary directory {} fail!", db_id_, i, db_path, + tmp_rocksdb_path); + return Status::IOError("Rename directory {} fail!", db_path); + } + + // 2) Create a db directory to save the checkpoint. 
+ if (0 != pstd::CreatePath(rocksdb_path)) { + pstd::RenameFile(tmp_rocksdb_path, rocksdb_path); + WARN("DB{}'s RocksDB {} load a checkpoint from {} fail!", db_id_, i, rocksdb_checkpoint_path); + return Status::IOError("Create directory {} fail!", rocksdb_path); + } + if (RecursiveLinkAndCopy(rocksdb_checkpoint_path, rocksdb_path) != 0) { + pstd::DeleteDir(rocksdb_path); + pstd::RenameFile(tmp_rocksdb_path, rocksdb_path); + WARN("DB{}'s RocksDB {} load a checkpoint from {} fail!", db_id_, i, rocksdb_checkpoint_path); + return Status::IOError("recursive link and copy directory {} fail!", rocksdb_path); + } + + // 3) Destroy the db.tmp directory. + if (auto s = rocksdb::DestroyDB(tmp_rocksdb_path, rocksdb::Options()); !s.ok()) { + WARN("Failure to destroy the old DB, path = {}", tmp_rocksdb_path); + } + return Status::OK(); +} + Status Storage::LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key) { std::string index_key = DataTypeTag[dtype] + std::to_string(cursor); std::string index_value; @@ -1983,9 +2052,9 @@ Status Storage::AddBGTask(const BGTask& bg_task) { Status Storage::RunBGTask() { BGTask task; - while (!bg_tasks_should_exit_) { + while (!bg_tasks_should_exit_.load()) { std::unique_lock lock(bg_tasks_mutex_); - bg_tasks_cond_var_.wait(lock, [this]() { return !bg_tasks_queue_.empty() || bg_tasks_should_exit_; }); + bg_tasks_cond_var_.wait(lock, [this]() { return !bg_tasks_queue_.empty() || bg_tasks_should_exit_.load(); }); if (!bg_tasks_queue_.empty()) { task = bg_tasks_queue_.front(); @@ -1993,7 +2062,7 @@ Status Storage::RunBGTask() { } lock.unlock(); - if (bg_tasks_should_exit_) { + if (bg_tasks_should_exit_.load()) { return Status::Incomplete("bgtask return with bg_tasks_should_exit true"); } diff --git a/src/store.cc b/src/store.cc index ca739e5b0..e3c9e3560 100644 --- a/src/store.cc +++ b/src/store.cc @@ -7,9 +7,9 @@ #include -#include "checkpoint_manager.h" #include "config.h" #include "log.h" +#include 
"pstd/pstd_string.h" #include "store.h" namespace pikiwidb { @@ -24,11 +24,11 @@ void PStore::Init() { return; } - dbNum_ = g_config.databases; - backends_.reserve(dbNum_); + db_number_ = g_config.databases; + backends_.reserve(db_number_); if (g_config.backend == kBackEndRocksDB) { - for (int i = 0; i < dbNum_; i++) { - auto db = std::make_unique(i, g_config.dbpath); + for (int i = 0; i < db_number_; i++) { + auto db = std::make_unique(i, g_config.dbpath, g_config.db_instance_num); backends_.push_back(std::move(db)); } } else { @@ -36,41 +36,41 @@ void PStore::Init() { } } -void PStore::DoSomeThingSpecificDB(const TasksVector& tasks) { +void PStore::HandleTaskSpecificDB(const TasksVector& tasks) { std::for_each(tasks.begin(), tasks.end(), [this](const auto& task) { + if (task.db < 0 || task.db >= db_number_) { + WARN("The database index is out of range."); + return; + } + auto& db = backends_.at(task.db); switch (task.type) { case kCheckpoint: { - if (task.db < 0 || task.db >= dbNum_) { - WARN("The database index is out of range."); + if (auto s = task.args.find(kCheckpointPath); s == task.args.end()) { + WARN("The critical parameter 'path' is missing for do a checkpoint."); return; } - auto& db = backends_[task.db]; + auto path = task.args.find(kCheckpointPath)->second; + pstd::TrimSlash(path); + db->CreateCheckpoint(path, task.sync); + break; + } + case kLoadDBFromCheckpoint: { if (auto s = task.args.find(kCheckpointPath); s == task.args.end()) { - WARN("The critical parameter 'path' is missing in the checkpoint."); + WARN("The critical parameter 'path' is missing for load a checkpoint."); return; } auto path = task.args.find(kCheckpointPath)->second; - trimSlash(path); - db->CreateCheckpoint(path); + pstd::TrimSlash(path); + db->LoadDBFromCheckpoint(path, task.sync); + break; + } + case kEmpty: { + WARN("A empty task was passed in, not doing anything."); break; } - default: break; } }); } - -void PStore::WaitForCheckpointDone() { - for (auto& db : backends_) { 
- db->WaitForCheckpointDone(); - } -} - -void PStore::trimSlash(std::string& dirName) { - while (dirName.back() == '/') { - dirName.pop_back(); - } -} - } // namespace pikiwidb diff --git a/src/store.h b/src/store.h index 7cc26331f..4bf15c5f3 100644 --- a/src/store.h +++ b/src/store.h @@ -10,7 +10,6 @@ #define GLOG_NO_ABBREVIATED_SEVERITIES #include -#include #include #include @@ -18,28 +17,28 @@ namespace pikiwidb { -enum TaskType { - kCheckpoint, -}; +enum TaskType { kCheckpoint = 0, kLoadDBFromCheckpoint, kEmpty }; enum TaskArg { - kCheckpointPath, + kCheckpointPath = 0, }; struct TaskContext { - TaskType type; - int db; + TaskType type = kEmpty; + int db = -1; std::map args; - TaskContext(TaskType t) : type(t) {} - TaskContext(TaskType t, int d) : type(t), db(d) {} - TaskContext(TaskType t, int d, const std::map& a) : type(t), db(d), args(a) {} + bool sync = false; + TaskContext() = delete; + TaskContext(TaskType t, bool s = false) : type(t), sync(s) {} + TaskContext(TaskType t, int d, bool s = false) : type(t), db(d), sync(s) {} + TaskContext(TaskType t, int d, const std::map& a, bool s = false) + : type(t), db(d), args(a), sync(s) {} }; using TasksVector = std::vector; class PStore { public: - friend class CheckpointManager; static PStore& Instance(); PStore(const PStore&) = delete; @@ -49,24 +48,15 @@ class PStore { std::unique_ptr& GetBackend(int32_t index) { return backends_[index]; }; - void DoSomeThingSpecificDB(const TasksVector& task); - - void WaitForCheckpointDone(); + void HandleTaskSpecificDB(const TasksVector& task); - std::shared_mutex& SharedMutex() { return dbs_mutex_; } + int GetDBNumber() const { return db_number_; } private: PStore() = default; - void trimSlash(std::string& dirName); - int dbNum_ = 0; + int db_number_ = 0; - /** - * If you want to access all the DBs at the same time, - * then you must hold the lock. - * For example: you want to execute flushall or bgsave. 
- */ - std::shared_mutex dbs_mutex_; std::vector> backends_; }; From a6022bff0adbbf34b71f1772acf8a4d0259ad436 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Wed, 24 Apr 2024 15:20:42 +0800 Subject: [PATCH 20/33] fix branch import_braft ci (#291) --- .github/workflows/import_braft.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/import_braft.yml b/.github/workflows/import_braft.yml index c0c01f3fe..82e754f8d 100644 --- a/.github/workflows/import_braft.yml +++ b/.github/workflows/import_braft.yml @@ -28,8 +28,10 @@ jobs: - name: Build env: - CPLUS_INCLUDE_PATH: /usr/local/opt/openssl/include + CPLUS_INCLUDE_PATH: /opt/homebrew/include run: | + brew install autoconf + brew install go sh build.sh - name: Run Go E2E Tests From 1875c0b0a61ff0662d9ad785a467ae4377124563 Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:14:19 +0800 Subject: [PATCH 21/33] feat: more raft commands (#285) * fix: find openssl * refactor: remove unused codes --- src/storage/src/batch.h | 16 ++- src/storage/src/redis_lists.cc | 26 ++-- src/storage/src/redis_sets.cc | 25 ++-- src/storage/src/redis_strings.cc | 7 +- src/storage/src/redis_zsets.cc | 19 +-- tests/consistency_test.go | 220 ++++++++++++++++++++++++------- tests/util/pikiwidb.go | 5 +- 7 files changed, 231 insertions(+), 87 deletions(-) diff --git a/src/storage/src/batch.h b/src/storage/src/batch.h index 1e8992822..5bfc130f7 100644 --- a/src/storage/src/batch.h +++ b/src/storage/src/batch.h @@ -27,9 +27,13 @@ class Batch { virtual void Put(ColumnFamilyIndex cf_idx, const Slice& key, const Slice& val) = 0; virtual void Delete(ColumnFamilyIndex cf_idx, const Slice& key) = 0; - virtual auto Commit() -> Status = 0; + virtual Status Commit() = 0; + int32_t Count() const { return cnt_; } static auto CreateBatch(Redis* redis) -> std::unique_ptr; + + 
protected: + uint32_t cnt_ = 0; }; class RocksBatch : public Batch { @@ -40,9 +44,13 @@ class RocksBatch : public Batch { void Put(ColumnFamilyIndex cf_idx, const Slice& key, const Slice& val) override { batch_.Put(handles_[cf_idx], key, val); + cnt_++; + } + void Delete(ColumnFamilyIndex cf_idx, const Slice& key) override { + batch_.Delete(handles_[cf_idx], key); + cnt_++; } - void Delete(ColumnFamilyIndex cf_idx, const Slice& key) override { batch_.Delete(handles_[cf_idx], key); } - auto Commit() -> Status override { return db_->Write(options_, &batch_); } + Status Commit() override { return db_->Write(options_, &batch_); } private: rocksdb::WriteBatch batch_; @@ -65,6 +73,7 @@ class BinlogBatch : public Batch { entry->set_op_type(pikiwidb::OperateType::kPut); entry->set_key(key.ToString()); entry->set_value(value.ToString()); + cnt_++; } void Delete(ColumnFamilyIndex cf_idx, const Slice& key) override { @@ -72,6 +81,7 @@ class BinlogBatch : public Batch { entry->set_cf_idx(cf_idx); entry->set_op_type(pikiwidb::OperateType::kDelete); entry->set_key(key.ToString()); + cnt_++; } Status Commit() override { diff --git a/src/storage/src/redis_lists.cc b/src/storage/src/redis_lists.cc index 2ec1da18e..2a641da5e 100644 --- a/src/storage/src/redis_lists.cc +++ b/src/storage/src/redis_lists.cc @@ -8,6 +8,7 @@ #include #include "pstd/log.h" #include "src/base_data_value_format.h" +#include "src/batch.h" #include "src/lists_filter.h" #include "src/redis.h" #include "src/scope_record_lock.h" @@ -263,7 +264,7 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector* el uint32_t statistic = 0; elements->clear(); - rocksdb::WriteBatch batch; + auto batch = Batch::CreateBatch(this); ScopeRecordLock l(lock_mgr_, key); std::string meta_value; @@ -288,20 +289,17 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector* el statistic++; ParsedBaseDataValue parsed_base_data_value(iter->value()); elements->push_back(parsed_base_data_value.UserValue().ToString()); - 
batch.Delete(handles_[kListsDataCF], iter->key()); + batch->Delete(kListsDataCF, iter->key()); parsed_lists_meta_value.ModifyCount(-1); parsed_lists_meta_value.ModifyLeftIndex(-1); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kListsMetaCF, base_meta_key.Encode(), meta_value); delete iter; } } - if (batch.Count() != 0U) { - s = db_->Write(default_write_options_, &batch); - if (s.ok()) { - batch.Clear(); - } + if (batch->Count() != 0U) { + s = batch->Commit(); UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); } return s; @@ -309,7 +307,7 @@ Status Redis::LPop(const Slice& key, int64_t count, std::vector* el Status Redis::LPush(const Slice& key, const std::vector& values, uint64_t* ret) { *ret = 0; - rocksdb::WriteBatch batch; + auto batch = Batch::CreateBatch(this); ScopeRecordLock l(lock_mgr_, key); uint64_t index = 0; @@ -331,9 +329,9 @@ Status Redis::LPush(const Slice& key, const std::vector& values, ui parsed_lists_meta_value.ModifyCount(1); ListsDataKey lists_data_key(key, version, index); BaseDataValue i_val(value); - batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + batch->Put(kListsDataCF, lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kListsMetaCF, base_meta_key.Encode(), meta_value); *ret = parsed_lists_meta_value.Count(); } else if (s.IsNotFound()) { char str[8]; @@ -345,14 +343,14 @@ Status Redis::LPush(const Slice& key, const std::vector& values, ui lists_meta_value.ModifyLeftIndex(1); ListsDataKey lists_data_key(key, version, index); BaseDataValue i_val(value); - batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + batch->Put(kListsDataCF, lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); + batch->Put(kListsMetaCF, base_meta_key.Encode(), lists_meta_value.Encode()); *ret = 
lists_meta_value.RightIndex() - lists_meta_value.LeftIndex() - 1; } else { return s; } - return db_->Write(default_write_options_, &batch); + return batch->Commit(); } Status Redis::LPushx(const Slice& key, const std::vector& values, uint64_t* len) { diff --git a/src/storage/src/redis_sets.cc b/src/storage/src/redis_sets.cc index a3bccd1d2..c7541fcb6 100644 --- a/src/storage/src/redis_sets.cc +++ b/src/storage/src/redis_sets.cc @@ -3,6 +3,7 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. +#include "src/batch.h" #include "src/redis.h" #include @@ -116,7 +117,7 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me } } - rocksdb::WriteBatch batch; + auto batch = Batch::CreateBatch(this); ScopeRecordLock l(lock_mgr_, key); uint64_t version = 0; std::string meta_value; @@ -131,11 +132,11 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.SetCount(static_cast(filtered_members.size())); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kSetsMetaCF, base_meta_key.Encode(), meta_value); for (const auto& member : filtered_members) { SetsMemberKey sets_member_key(key, version, member); BaseDataValue iter_value(Slice{}); - batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); + batch->Put(kSetsDataCF, sets_member_key.Encode(), iter_value.Encode()); } *ret = static_cast(filtered_members.size()); } else { @@ -149,7 +150,7 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me } else if (s.IsNotFound()) { cnt++; BaseDataValue iter_value(Slice{}); - batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); + batch->Put(kSetsDataCF, sets_member_key.Encode(), iter_value.Encode()); } else { return s; } @@ -162,7 +163,7 @@ rocksdb::Status Redis::SAdd(const Slice& 
key, const std::vector& me return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(cnt); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kSetsMetaCF, base_meta_key.Encode(), meta_value); } } } else if (s.IsNotFound()) { @@ -170,17 +171,17 @@ rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& me EncodeFixed32(str, filtered_members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), sets_meta_value.Encode()); + batch->Put(kSetsMetaCF, base_meta_key.Encode(), sets_meta_value.Encode()); for (const auto& member : filtered_members) { SetsMemberKey sets_member_key(key, version, member); BaseDataValue i_val(Slice{}); - batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); + batch->Put(kSetsDataCF, sets_member_key.Encode(), i_val.Encode()); } *ret = static_cast(filtered_members.size()); } else { return s; } - return db_->Write(default_write_options_, &batch); + return batch->Commit(); } rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret) { @@ -939,7 +940,7 @@ rocksdb::Status Redis::SRandmember(const Slice& key, int32_t count, std::vector< rocksdb::Status Redis::SRem(const Slice& key, const std::vector& members, int32_t* ret) { *ret = 0; - rocksdb::WriteBatch batch; + auto batch = Batch::CreateBatch(this); ScopeRecordLock l(lock_mgr_, key); uint64_t version = 0; @@ -964,7 +965,7 @@ rocksdb::Status Redis::SRem(const Slice& key, const std::vector& me if (s.ok()) { cnt++; statistic++; - batch.Delete(handles_[kSetsDataCF], sets_member_key.Encode()); + batch->Delete(kSetsDataCF, sets_member_key.Encode()); } else if (s.IsNotFound()) { } else { return s; @@ -975,7 +976,7 @@ rocksdb::Status Redis::SRem(const Slice& key, const std::vector& me return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(-cnt); - 
batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kSetsMetaCF, base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { *ret = 0; @@ -983,7 +984,7 @@ rocksdb::Status Redis::SRem(const Slice& key, const std::vector& me } else { return s; } - s = db_->Write(default_write_options_, &batch); + s = batch->Commit(); UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); return s; } diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index 51d6872c6..24a677921 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -3,17 +3,18 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include #include #include #include "pstd/log.h" #include "src/base_key_format.h" +#include "src/batch.h" #include "src/redis.h" #include "src/scope_record_lock.h" #include "src/scope_snapshot.h" #include "src/strings_filter.h" +#include "storage/storage_define.h" #include "storage/util.h" namespace storage { @@ -630,10 +631,12 @@ Status Redis::MSetnx(const std::vector& kvs, int32_t* ret) { Status Redis::Set(const Slice& key, const Slice& value) { StringsValue strings_value(value); + auto batch = Batch::CreateBatch(this); ScopeRecordLock l(lock_mgr_, key); BaseKey base_key(key); - return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + batch->Put(kStringsCF, base_key.Encode(), strings_value.Encode()); + return batch->Commit(); } Status Redis::Setxx(const Slice& key, const Slice& value, int32_t* ret, const uint64_t ttl) { diff --git a/src/storage/src/redis_zsets.cc b/src/storage/src/redis_zsets.cc index ea4864ae1..3532b4cca 100644 --- a/src/storage/src/redis_zsets.cc +++ b/src/storage/src/redis_zsets.cc @@ -14,6 +14,7 @@ #include "pstd/log.h" #include "src/base_data_value_format.h" #include "src/base_key_format.h" +#include "src/batch.h" 
#include "src/redis.h" #include "src/scope_record_lock.h" #include "src/scope_snapshot.h" @@ -216,7 +217,7 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe char score_buf[8]; uint64_t version = 0; std::string meta_value; - rocksdb::WriteBatch batch; + auto batch = Batch::CreateBatch(this); ScopeRecordLock l(lock_mgr_, key); BaseMetaKey base_meta_key(key); @@ -250,7 +251,7 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe continue; } else { ZSetsScoreKey zsets_score_key(key, version, old_score, sm.member); - batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode()); + batch->Delete(kZsetsScoreCF, zsets_score_key.Encode()); // delete old zsets_score_key and overwirte zsets_member_key // but in different column_families so we accumulative 1 statistic++; @@ -263,11 +264,11 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe const void* ptr_score = reinterpret_cast(&sm.score); EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); - batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); + batch->Put(kZsetsDataCF, zsets_member_key.Encode(), zsets_member_i_val.Encode()); ZSetsScoreKey zsets_score_key(key, version, sm.score, sm.member); BaseDataValue zsets_score_i_val(Slice{}); - batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); + batch->Put(kZsetsScoreCF, zsets_score_key.Encode(), zsets_score_i_val.Encode()); if (not_found) { cnt++; } @@ -276,30 +277,30 @@ Status Redis::ZAdd(const Slice& key, const std::vector& score_membe return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(cnt); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + batch->Put(kZsetsMetaCF, base_meta_key.Encode(), meta_value); *ret = cnt; } else if (s.IsNotFound()) { char buf[4]; EncodeFixed32(buf, filtered_score_members.size()); 
ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); + batch->Put(kZsetsMetaCF, base_meta_key.Encode(), zsets_meta_value.Encode()); for (const auto& sm : filtered_score_members) { ZSetsMemberKey zsets_member_key(key, version, sm.member); const void* ptr_score = reinterpret_cast(&sm.score); EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); - batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); + batch->Put(kZsetsDataCF, zsets_member_key.Encode(), zsets_member_i_val.Encode()); ZSetsScoreKey zsets_score_key(key, version, sm.score, sm.member); BaseDataValue zsets_score_i_val(Slice{}); - batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); + batch->Put(kZsetsScoreCF, zsets_score_key.Encode(), zsets_score_i_val.Encode()); } *ret = static_cast(filtered_score_members.size()); } else { return s; } - s = db_->Write(default_write_options_, &batch); + s = batch->Commit(); UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } diff --git a/tests/consistency_test.go b/tests/consistency_test.go index 9dd15c98e..b79995f94 100644 --- a/tests/consistency_test.go +++ b/tests/consistency_test.go @@ -15,16 +15,15 @@ import ( "github.com/OpenAtomFoundation/pikiwidb/tests/util" ) +var ( + followers []*redis.Client + leader *redis.Client +) + var _ = Describe("Consistency", Ordered, func() { var ( - ctx = context.TODO() - servers []*util.Server - followers []*redis.Client - leader *redis.Client - ) - - const ( - testKey = "consistency-test" + ctx = context.TODO() + servers []*util.Server ) BeforeAll(func() { @@ -104,51 +103,156 @@ var _ = Describe("Consistency", Ordered, func() { followers = nil }) - It("SimpleWriteConsistencyTest", func() { - set, err := leader.HSet(ctx, 
testKey, map[string]string{ + It("HSet & HDel Consistency Test", func() { + const testKey = "HashConsistencyTest" + testValue := map[string]string{ "fa": "va", "fb": "vb", "fc": "vc", - }).Result() - Expect(err).NotTo(HaveOccurred()) - Expect(set).To(Equal(int64(3))) + } + { + // hset write on leader + set, err := leader.HSet(ctx, testKey, testValue).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(set).To(Equal(int64(3))) - getall, err := leader.HGetAll(ctx, testKey).Result() - Expect(err).NotTo(HaveOccurred()) - Expect(getall).To(Equal(map[string]string{ - "fa": "va", - "fb": "vb", - "fc": "vc", - })) - time.Sleep(10000 * time.Millisecond) - for _, f := range followers { - getall, err := f.HGetAll(ctx, testKey).Result() + // read check + readChecker(func(*redis.Client) { + getall, err := leader.HGetAll(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(getall).To(Equal(testValue)) + }) + } + + { + // hdel write on leader + del, err := leader.HDel(ctx, testKey, "fb").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(del).To(Equal(int64(1))) + + // read check + readChecker(func(*redis.Client) { + getall, err := leader.HGetAll(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(getall).To(Equal(map[string]string{ + "fa": "va", + "fc": "vc", + })) + }) + } + }) + + It("SAdd & SRem Consistency Test", func() { + const testKey = "SetsConsistencyTestKey" + testValues := []string{"sa", "sb", "sc", "sd"} + + { + // sadd write on leader + sadd, err := leader.SAdd(ctx, testKey, testValues).Result() Expect(err).NotTo(HaveOccurred()) - Expect(getall).To(Equal(map[string]string{ - "fa": "va", - "fb": "vb", - "fc": "vc", - })) + Expect(sadd).To(Equal(int64(len(testValues)))) + + // read check + readChecker(func(*redis.Client) { + smembers, err := leader.SMembers(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(smembers).To(Equal(testValues)) + }) } - del, err := leader.HDel(ctx, testKey, "fb").Result() - 
Expect(err).NotTo(HaveOccurred()) - Expect(del).To(Equal(int64(1))) + { + // srem write on leader + srem, err := leader.SRem(ctx, testKey, []string{"sb", "sd"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(srem).To(Equal(int64(2))) - getall, err = leader.HGetAll(ctx, testKey).Result() - Expect(err).NotTo(HaveOccurred()) - Expect(getall).To(Equal(map[string]string{ - "fa": "va", - "fc": "vc", - })) - time.Sleep(10000 * time.Millisecond) - for _, f := range followers { - getall, err := f.HGetAll(ctx, testKey).Result() + // read check + readChecker(func(*redis.Client) { + smembers, err := leader.SMembers(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(smembers).To(Equal([]string{"sa", "sc"})) + }) + } + }) + + It("LPush & LPop Consistency Test", func() { + const testKey = "ListsConsistencyTestKey" + testValues := []string{"la", "lb", "lc", "ld"} + + { + // lpush write on leader + lpush, err := leader.LPush(ctx, testKey, testValues).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(lpush).To(Equal(int64(len(testValues)))) + + // read check + readChecker(func(*redis.Client) { + lrange, err := leader.LRange(ctx, testKey, 0, 10).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(lrange).To(Equal(reverse(testValues))) + }) + } + + { + // lpop write on leader + lpop, err := leader.LPop(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(lpop).To(Equal("ld")) + lpop, err = leader.LPop(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(lpop).To(Equal("lc")) + + // read check + readChecker(func(*redis.Client) { + lrange, err := leader.LRange(ctx, testKey, 0, 10).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(lrange).To(Equal([]string{"lb", "la"})) + }) + } + }) + + It("ZAdd Consistency Test", func() { + const testKey = "ZSetsConsistencyTestKey" + testData := []redis.Z{ + {Score: 4, Member: "z4"}, + {Score: 8, Member: "z8"}, + {Score: 5, Member: "z5"}, + } + expectData := []redis.Z{ + {Score: 8, Member: 
"z8"}, + {Score: 5, Member: "z5"}, + {Score: 4, Member: "z4"}, + } + { + // zadd write on leader + zadd, err := leader.ZAdd(ctx, testKey, testData...).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(zadd).To(Equal(int64(len(testData)))) + + // read check + readChecker(func(*redis.Client) { + zrange, err := leader.ZRevRangeWithScores(ctx, testKey, 0, -1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(zrange).To(Equal(expectData)) + }) + } + }) + + It("Set Consistency Test", func() { + const testKey = "StringsConsistencyTestKey" + const testValue = "StringsConsistencyTestKey" + { + // set write on leader + set, err := leader.Set(ctx, testKey, testValue, 0).Result() Expect(err).NotTo(HaveOccurred()) - Expect(getall).To(Equal(map[string]string{ - "fa": "va", - "fc": "vc", - })) + Expect(set).To(Equal("OK")) + + // read check + readChecker(func(*redis.Client) { + get, err := leader.Get(ctx, testKey).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(get).To(Equal(testValue)) + }) } }) @@ -179,4 +283,32 @@ var _ = Describe("Consistency", Ordered, func() { } } }) + }) + +func readChecker(check func(*redis.Client)) { + // read on leader + check(leader) + time.Sleep(10000 * time.Millisecond) + + // read on followers + followerChecker(followers, check) +} + +func followerChecker(fs []*redis.Client, check func(*redis.Client)) { + for _, f := range fs { + check(f) + } +} + +func reverse(src []string) []string { + a := make([]string, len(src)) + copy(a, src) + + for i := len(a)/2 - 1; i >= 0; i-- { + opp := len(a) - 1 - i + a[i], a[opp] = a[opp], a[i] + } + + return a +} diff --git a/tests/util/pikiwidb.go b/tests/util/pikiwidb.go index 3d53aa48e..b598f1f80 100644 --- a/tests/util/pikiwidb.go +++ b/tests/util/pikiwidb.go @@ -62,9 +62,8 @@ func GetConfPath(copy bool, t int64) string { func checkCondition(c *redis.Client) bool { ctx := context.TODO() - //TODO(dingxiaoshuai) use Cmd PING - r, e := c.Set(ctx, "key", "value", 0).Result() - return r == "OK" && e == 
nil + _, err := c.Get(ctx, "pikiwidb-go-test-check-key").Result() + return err == nil || err.Error() == "redis: nil" } type Server struct { From 9ecefda6b0072c013230dd1378070e1c3d20f886 Mon Sep 17 00:00:00 2001 From: panlei-coder <62509266+panlei-coder@users.noreply.github.com> Date: Wed, 24 Apr 2024 21:14:43 +0800 Subject: [PATCH 22/33] feat: overload the PosixFileSystemAdaptor interface to generate real snapshot when follower installs snapshot (#279) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: overload the PosixFileSystemAdaptor interface to generate real snapshot when follower installs snapshot --------- Co-authored-by: dingxiaoshuai123 <2486016589@qq.com> Co-authored-by: Xin.Zh Co-authored-by: 丁小帅 <56024577+dingxiaoshuai123@users.noreply.github.com> --- save_load.sh | 17 +++---- src/cmd_admin.cc | 1 + src/cmd_raft.cc | 5 +- src/praft/CMakeLists.txt | 3 +- src/praft/praft.cc | 60 ++++++++--------------- src/praft/praft.h | 8 ++- src/praft/psnapshot.cc | 102 +++++++++++++++++++++++++++++++++++++++ src/praft/psnapshot.h | 36 ++++++++++++++ src/store.cc | 5 +- 9 files changed, 179 insertions(+), 58 deletions(-) create mode 100644 src/praft/psnapshot.cc create mode 100644 src/praft/psnapshot.h diff --git a/save_load.sh b/save_load.sh index b2b6fd836..1bcad5b74 100755 --- a/save_load.sh +++ b/save_load.sh @@ -1,19 +1,18 @@ #!/bin/bash killall -9 pikiwidb -mkdir leader follower1 follower2 +mkdir leader follower1 cd leader && ulimit -n 99999 && rm -fr * && ../bin/pikiwidb ../pikiwidb.conf --port 7777 & - cd follower1 && ulimit -n 99999 && rm -fr * && ../bin/pikiwidb ../pikiwidb.conf --port 8888 & -sleep 10 -redis-cli -p 7777 raft.cluster init -redis-benchmark -p 7777 -c 5 -n 10000 -r 10000000 -d 1024 -t hset - +sleep 5 -redis-cli -p 7777 raft.node DOSNAPSHOT -redis-cli -p 7777 raft.node DOSNAPSHOT +redis-cli -p 7777 raft.cluster init -redis-cli -p 8888 raft.cluster join 127.0.0.1:7777 +redis-benchmark -p 7777 -c 5 
-n 10000 -r 10000 -d 1024 -t hset +redis-cli -p 7777 raft.node dosnapshot +redis-cli -p 7777 raft.node dosnapshot +sleep 10 +redis-cli -p 8888 raft.cluster join 127.0.0.1:7777 diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index d65278728..f78bb96f6 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -6,6 +6,7 @@ */ #include "cmd_admin.h" + #include "braft/raft.h" #include "rocksdb/version.h" diff --git a/src/cmd_raft.cc b/src/cmd_raft.cc index 43f9c6af2..e8e06c447 100644 --- a/src/cmd_raft.cc +++ b/src/cmd_raft.cc @@ -44,8 +44,10 @@ void RaftNodeCmd::DoCmd(PClient* client) { DoCmdAdd(client); } else if (cmd == kRemoveCmd) { DoCmdRemove(client); - } else { + } else if (cmd == kDoSnapshot) { DoCmdSnapshot(client); + } else { + client->SetRes(CmdRes::kErrOther, "RAFT.NODE supports ADD / REMOVE / DOSNAPSHOT only"); } } @@ -230,4 +232,5 @@ void RaftClusterCmd::DoCmdJoin(PClient* client) { // Not reply any message here, we will reply after the connection is established. client->Clear(); } + } // namespace pikiwidb diff --git a/src/praft/CMakeLists.txt b/src/praft/CMakeLists.txt index a929fc655..45cf62f8c 100644 --- a/src/praft/CMakeLists.txt +++ b/src/praft/CMakeLists.txt @@ -1,7 +1,8 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +# Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. 
+ ADD_CUSTOM_COMMAND( OUTPUT "${PROTO_OUTPUT_DIR}/binlog.pb.cc" DEPENDS extern_protobuf diff --git a/src/praft/praft.cc b/src/praft/praft.cc index ecb4b002c..0851acfe5 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -19,12 +19,12 @@ #include "binlog.pb.h" #include "config.h" #include "pikiwidb.h" -#include "praft.h" - -#include "praft_service.h" #include "replication.h" #include "store.h" +#include "praft_service.h" +#include "psnapshot.h" + #define ERROR_LOG_AND_STATUS(msg) \ ({ \ ERROR(msg); \ @@ -130,15 +130,13 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { butil::EndPoint addr(ip, port); // Default init in one node. - /* - initial_conf takes effect only when the replication group is started from an empty node. - The Configuration is restored from the snapshot and log files when the data in the replication group is not empty. - initial_conf is used only to create replication groups. - The first node adds itself to initial_conf and then calls add_peer to add other nodes. - Set initial_conf to empty for other nodes. - You can also start empty nodes simultaneously by setting the same inital_conf(ip:port of multiple nodes) for multiple - nodes. - */ + // initial_conf takes effect only when the replication group is started from an empty node. + // The Configuration is restored from the snapshot and log files when the data in the replication group is not empty. + // initial_conf is used only to create replication groups. + // The first node adds itself to initial_conf and then calls add_peer to add other nodes. + // Set initial_conf to empty for other nodes. + // You can also start empty nodes simultaneously by setting the same inital_conf(ip:port of multiple nodes) for + // multiple nodes. 
std::string initial_conf; if (!initial_conf_is_null) { initial_conf = raw_addr_ + ":0,"; @@ -151,13 +149,16 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { // node_options_.election_timeout_ms = FLAGS_election_timeout_ms; node_options_.fsm = this; node_options_.node_owns_fsm = false; - // node_options_.snapshot_interval_s = FLAGS_snapshot_interval; + node_options_.snapshot_interval_s = 0; std::string prefix = "local://" + g_config.dbpath + "_praft"; node_options_.log_uri = prefix + "/log"; node_options_.raft_meta_uri = prefix + "/raft_meta"; node_options_.snapshot_uri = prefix + "/snapshot"; // node_options_.disable_cli = FLAGS_disable_cli; - node_ = std::make_unique(group_id, braft::PeerId(addr)); + snapshot_adaptor_ = new PPosixFileSystemAdaptor(); + node_options_.snapshot_file_system_adaptor = &snapshot_adaptor_; + + node_ = std::make_unique("pikiwidb", braft::PeerId(addr)); // group_id if (node_->init(node_options_) != 0) { server_.reset(); node_.reset(); @@ -516,11 +517,14 @@ butil::Status PRaft::RemovePeer(const std::string& peer) { return {0, "OK"}; } -butil::Status PRaft::DoSnapshot() { +butil::Status PRaft::DoSnapshot(int64_t self_snapshot_index, bool is_sync) { if (!node_) { return ERROR_LOG_AND_STATUS("Node is not initialized"); } braft::SynchronizedClosure done; + // TODO(panlei) Increase the self_log_index parameter + // TODO(panlei) Use the is_sync parameter to determine whether + // to use synchronous waiting. node_->snapshot(&done); done.wait(); return done.status(); @@ -579,25 +583,6 @@ void PRaft::AppendLog(const Binlog& log, std::promise&& promise node_->apply(task); } -int PRaft::AddAllFiles(const std::filesystem::path& dir, braft::SnapshotWriter* writer, const std::string& path) { - assert(writer); - for (const auto& entry : std::filesystem::directory_iterator(dir)) { - if (entry.is_directory()) { - if (entry.path() != "." 
&& entry.path() != "..") { - DEBUG("dir_path = {}", entry.path().string()); - AddAllFiles(entry.path(), writer, path); - } - } else { - DEBUG("file_path = {}", std::filesystem::relative(entry.path(), path).string()); - if (writer->add_file(std::filesystem::relative(entry.path(), path)) != 0) { - ERROR("add file {} to snapshot fail!", entry.path().string()); - return -1; - } - } - } - return 0; -} - // @braft::StateMachine void PRaft::Clear() { if (node_) { @@ -642,13 +627,6 @@ void PRaft::on_apply(braft::Iterator& iter) { void PRaft::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done) { assert(writer); brpc::ClosureGuard done_guard(done); - auto path = writer->get_path(); - INFO("Saving snapshot to {}", path); - TasksVector tasks(1, {TaskType::kCheckpoint, db_id_, {{TaskArg::kCheckpointPath, path}}, true}); - PSTORE.HandleTaskSpecificDB(tasks); - if (auto res = AddAllFiles(path, writer, path); res != 0) { - done->status().set_error(EIO, "Fail to add file to writer"); - } } int PRaft::on_snapshot_load(braft::SnapshotReader* reader) { diff --git a/src/praft/praft.h b/src/praft/praft.h index c2ff888be..edb82288c 100644 --- a/src/praft/praft.h +++ b/src/praft/praft.h @@ -14,6 +14,7 @@ #include #include +#include "braft/file_system_adaptor.h" #include "braft/raft.h" #include "brpc/server.h" #include "rocksdb/status.h" @@ -103,8 +104,7 @@ class PRaft : public braft::StateMachine { butil::Status Init(std::string& group_id, bool initial_conf_is_null); butil::Status AddPeer(const std::string& peer); butil::Status RemovePeer(const std::string& peer); - butil::Status RaftRecvEntry(); - butil::Status DoSnapshot(); + butil::Status DoSnapshot(int64_t self_snapshot_index = 0, bool is_sync = true); void ShutDown(); void Join(); @@ -154,15 +154,13 @@ class PRaft : public braft::StateMachine { void on_stop_following(const ::braft::LeaderChangeContext& ctx) override; void on_start_following(const ::braft::LeaderChangeContext& ctx) override; - private: - static int 
AddAllFiles(const std::filesystem::path& dir, braft::SnapshotWriter* writer, const std::string& path); - private: std::unique_ptr server_{nullptr}; // brpc std::unique_ptr node_{nullptr}; braft::NodeOptions node_options_; // options for raft node std::string raw_addr_; // ip:port of this node + scoped_refptr snapshot_adaptor_ = nullptr; ClusterCmdContext cluster_cmd_ctx_; // context for cluster join/remove command std::string group_id_; // group id int db_id_ = 0; // db_id diff --git a/src/praft/psnapshot.cc b/src/praft/psnapshot.cc new file mode 100644 index 000000000..4cfc36d55 --- /dev/null +++ b/src/praft/psnapshot.cc @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +// +// psnapshot.cc + +#include "psnapshot.h" + +#include "braft/local_file_meta.pb.h" +#include "butil/files/file_path.h" + +#include "pstd/log.h" + +#include "config.h" +#include "store.h" + +namespace pikiwidb { + +extern PConfig g_config; + +braft::FileAdaptor* PPosixFileSystemAdaptor::open(const std::string& path, int oflag, + const ::google::protobuf::Message* file_meta, butil::File::Error* e) { + if ((oflag & IS_RDONLY) == 0) { // This is a read operation + bool snapshots_exists = false; + std::string snapshot_path; + + // parse snapshot path + butil::FilePath parse_snapshot_path(path); + std::vector components; + parse_snapshot_path.GetComponents(&components); + for (auto component : components) { + snapshot_path += component + "/"; + if (component.find("snapshot_") != std::string::npos) { + break; + } + } + // check whether snapshots have been created + std::lock_guard guard(mutex_); + if (!snapshot_path.empty()) { + for (const auto& entry : std::filesystem::directory_iterator(snapshot_path)) { + std::string filename = 
entry.path().filename().string(); + if (entry.is_regular_file() || entry.is_directory()) { + if (filename != "." && filename != ".." && filename.find(PRAFT_SNAPSHOT_META_FILE) == std::string::npos) { + // If the path directory contains files other than raft_snapshot_meta, snapshots have been generated + snapshots_exists = true; + break; + } + } + } + } + + // Snapshot generation + if (!snapshots_exists) { + braft::LocalSnapshotMetaTable snapshot_meta_memtable; + std::string meta_path = snapshot_path + "/" PRAFT_SNAPSHOT_META_FILE; + INFO("start to generate snapshot in path {}", snapshot_path); + braft::FileSystemAdaptor* fs = braft::default_file_system(); + assert(fs); + snapshot_meta_memtable.load_from_file(fs, meta_path); + + TasksVector tasks(1, {TaskType::kCheckpoint, 0, {{TaskArg::kCheckpointPath, snapshot_path}}, true}); + PSTORE.HandleTaskSpecificDB(tasks); + AddAllFiles(snapshot_path, &snapshot_meta_memtable, snapshot_path); + + auto rc = snapshot_meta_memtable.save_to_file(fs, meta_path); + if (rc == 0) { + INFO("Succeed to save snapshot in path {}", snapshot_path); + } else { + ERROR("Fail to save snapshot in path {}", snapshot_path); + } + INFO("generate snapshot completed in path {}", snapshot_path); + } + } + + return braft::PosixFileSystemAdaptor::open(path, oflag, file_meta, e); +} + +void PPosixFileSystemAdaptor::AddAllFiles(const std::filesystem::path& dir, + braft::LocalSnapshotMetaTable* snapshot_meta_memtable, + const std::string& path) { + assert(snapshot_meta_memtable); + for (const auto& entry : std::filesystem::directory_iterator(dir)) { + if (entry.is_directory()) { + if (entry.path() != "." 
&& entry.path() != "..") { + INFO("dir_path = {}", entry.path().string()); + AddAllFiles(entry.path(), snapshot_meta_memtable, path); + } + } else { + INFO("file_path = {}", std::filesystem::relative(entry.path(), path).string()); + braft::LocalFileMeta meta; + if (snapshot_meta_memtable->add_file(std::filesystem::relative(entry.path(), path), meta) != 0) { + WARN("Failed to add file"); + } + } + } +} + +} // namespace pikiwidb diff --git a/src/praft/psnapshot.h b/src/praft/psnapshot.h new file mode 100644 index 000000000..3b544d53d --- /dev/null +++ b/src/praft/psnapshot.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#pragma once + +#include + +#include "braft/file_system_adaptor.h" +#include "braft/macros.h" +#include "braft/snapshot.h" + +#define PRAFT_SNAPSHOT_META_FILE "__raft_snapshot_meta" +#define PRAFT_SNAPSHOT_PATH "snapshot/snapshot_" +#define IS_RDONLY 0x01 + +namespace pikiwidb { + +class PPosixFileSystemAdaptor : public braft::PosixFileSystemAdaptor { + public: + PPosixFileSystemAdaptor() {} + ~PPosixFileSystemAdaptor() {} + + braft::FileAdaptor* open(const std::string& path, int oflag, const ::google::protobuf::Message* file_meta, + butil::File::Error* e) override; + void AddAllFiles(const std::filesystem::path& dir, braft::LocalSnapshotMetaTable* snapshot_meta_memtable, + const std::string& path); + + private: + braft::raft_mutex_t mutex_; +}; + +} // namespace pikiwidb diff --git a/src/store.cc b/src/store.cc index e3c9e3560..6fa247872 100644 --- a/src/store.cc +++ b/src/store.cc @@ -5,10 +5,13 @@ * of patent rights can be found in the PATENTS file in the same directory. 
*/ +#include "store.h" + +#include #include #include "config.h" -#include "log.h" +#include "pstd/log.h" #include "pstd/pstd_string.h" #include "store.h" From 24decdf299c944c214b443b4e3fa3714cab6468e Mon Sep 17 00:00:00 2001 From: Changyuan Ning <77976092+longfar-ncy@users.noreply.github.com> Date: Sat, 27 Apr 2024 19:05:20 +0800 Subject: [PATCH 23/33] fix: wrong client in go test (#294) --- tests/consistency_test.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/consistency_test.go b/tests/consistency_test.go index b79995f94..ae0884cc0 100644 --- a/tests/consistency_test.go +++ b/tests/consistency_test.go @@ -117,8 +117,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(set).To(Equal(int64(3))) // read check - readChecker(func(*redis.Client) { - getall, err := leader.HGetAll(ctx, testKey).Result() + readChecker(func(c *redis.Client) { + getall, err := c.HGetAll(ctx, testKey).Result() Expect(err).NotTo(HaveOccurred()) Expect(getall).To(Equal(testValue)) }) @@ -131,8 +131,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(del).To(Equal(int64(1))) // read check - readChecker(func(*redis.Client) { - getall, err := leader.HGetAll(ctx, testKey).Result() + readChecker(func(c *redis.Client) { + getall, err := c.HGetAll(ctx, testKey).Result() Expect(err).NotTo(HaveOccurred()) Expect(getall).To(Equal(map[string]string{ "fa": "va", @@ -153,8 +153,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(sadd).To(Equal(int64(len(testValues)))) // read check - readChecker(func(*redis.Client) { - smembers, err := leader.SMembers(ctx, testKey).Result() + readChecker(func(c *redis.Client) { + smembers, err := c.SMembers(ctx, testKey).Result() Expect(err).NotTo(HaveOccurred()) Expect(smembers).To(Equal(testValues)) }) @@ -167,8 +167,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(srem).To(Equal(int64(2))) // read check - readChecker(func(*redis.Client) { - smembers, err := 
leader.SMembers(ctx, testKey).Result() + readChecker(func(c *redis.Client) { + smembers, err := c.SMembers(ctx, testKey).Result() Expect(err).NotTo(HaveOccurred()) Expect(smembers).To(Equal([]string{"sa", "sc"})) }) @@ -186,8 +186,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(lpush).To(Equal(int64(len(testValues)))) // read check - readChecker(func(*redis.Client) { - lrange, err := leader.LRange(ctx, testKey, 0, 10).Result() + readChecker(func(c *redis.Client) { + lrange, err := c.LRange(ctx, testKey, 0, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(lrange).To(Equal(reverse(testValues))) }) @@ -203,8 +203,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(lpop).To(Equal("lc")) // read check - readChecker(func(*redis.Client) { - lrange, err := leader.LRange(ctx, testKey, 0, 10).Result() + readChecker(func(c *redis.Client) { + lrange, err := c.LRange(ctx, testKey, 0, 10).Result() Expect(err).NotTo(HaveOccurred()) Expect(lrange).To(Equal([]string{"lb", "la"})) }) @@ -230,8 +230,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(zadd).To(Equal(int64(len(testData)))) // read check - readChecker(func(*redis.Client) { - zrange, err := leader.ZRevRangeWithScores(ctx, testKey, 0, -1).Result() + readChecker(func(c *redis.Client) { + zrange, err := c.ZRevRangeWithScores(ctx, testKey, 0, -1).Result() Expect(err).NotTo(HaveOccurred()) Expect(zrange).To(Equal(expectData)) }) @@ -248,8 +248,8 @@ var _ = Describe("Consistency", Ordered, func() { Expect(set).To(Equal("OK")) // read check - readChecker(func(*redis.Client) { - get, err := leader.Get(ctx, testKey).Result() + readChecker(func(c *redis.Client) { + get, err := c.Get(ctx, testKey).Result() Expect(err).NotTo(HaveOccurred()) Expect(get).To(Equal(testValue)) }) From 5ca3f7cb3c93cb1ccd6cf258eb72c803ace60d1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:07:55 +0800 Subject: 
[PATCH 24/33] feat: flush oldest cf (#275) * Revert to using the version with independent atomic variables. --- cmake/braft.cmake | 4 +- cmake/zlib.cmake | 4 +- src/db.cc | 2 + src/praft/praft.cc | 5 +- src/praft/praft.h | 14 +- src/storage/include/storage/storage.h | 7 +- src/storage/src/log_index.cc | 109 ++++- src/storage/src/log_index.h | 128 +++++- src/storage/src/redis.cc | 39 +- src/storage/src/redis.h | 8 +- src/storage/src/storage.cc | 9 +- src/storage/tests/flush_oldest_cf_test.cc | 484 ++++++++++++++++++++++ src/storage/tests/log_index_test.cc | 1 + 13 files changed, 733 insertions(+), 81 deletions(-) create mode 100644 src/storage/tests/flush_oldest_cf_test.cc diff --git a/cmake/braft.cmake b/cmake/braft.cmake index 43ea4a350..288c637fe 100644 --- a/cmake/braft.cmake +++ b/cmake/braft.cmake @@ -16,8 +16,8 @@ ExternalProject_Add( extern_braft ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS brpc - URL "https://github.com/pikiwidb/braft/archive/refs/heads/stable.zip" - URL_HASH SHA256=e73831f9768ac57d07f01ed81a11c8368e259c25315a960c29a6422f31f42fd1 + GIT_REPOSITORY "https://github.com/pikiwidb/braft.git" + GIT_TAG master PREFIX ${BRAFT_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/cmake/zlib.cmake b/cmake/zlib.cmake index 7e5963a1c..b1e300009 100644 --- a/cmake/zlib.cmake +++ b/cmake/zlib.cmake @@ -7,8 +7,8 @@ INCLUDE(ExternalProject) SET(ZLIB_SOURCES_DIR ${THIRD_PARTY_PATH}/zlib) SET(ZLIB_INSTALL_DIR ${THIRD_PARTY_PATH}/install/zlib) -SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE) -SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE) +# SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE) +# SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." 
FORCE) ExternalProject_Add( extern_zlib diff --git a/src/db.cc b/src/db.cc index 4c002e146..39fb0b7d6 100644 --- a/src/db.cc +++ b/src/db.cc @@ -29,6 +29,8 @@ DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { r.AppendLog(log, std::move(promise)); }; + storage_options.do_snapshot_function = + std::bind(&pikiwidb::PRaft::DoSnapshot, &pikiwidb::PRAFT, std::placeholders::_1, std::placeholders::_2); } storage_ = std::make_unique(); diff --git a/src/praft/praft.cc b/src/praft/praft.cc index 0851acfe5..a18dc40c8 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -522,10 +522,7 @@ butil::Status PRaft::DoSnapshot(int64_t self_snapshot_index, bool is_sync) { return ERROR_LOG_AND_STATUS("Node is not initialized"); } braft::SynchronizedClosure done; - // TODO(panlei) Increase the self_log_index parameter - // TODO(panlei) Use the is_sync parameter to determine whether - // to use synchronous waiting. 
- node_->snapshot(&done); + node_->snapshot(&done, self_snapshot_index); done.wait(); return done.status(); } diff --git a/src/praft/praft.h b/src/praft/praft.h index edb82288c..4a353f968 100644 --- a/src/praft/praft.h +++ b/src/praft/praft.h @@ -25,13 +25,13 @@ namespace pikiwidb { #define RAFT_GROUPID_LEN 32 -#define OK "+OK" -#define DATABASES_NUM "databases_num" -#define ROCKSDB_NUM "rocksdb_num" -#define ROCKSDB_VERSION "rocksdb_version" -#define WRONG_LEADER "-ERR wrong leader" -#define RAFT_GROUP_ID "raft_group_id:" -#define NOT_LEADER "Not leader" +constexpr const char* OK = "+OK"; +constexpr const char* DATABASES_NUM = "databases_num"; +constexpr const char* ROCKSDB_NUM = "rocksdb_num"; +constexpr const char* ROCKSDB_VERSION = "rocksdb_version"; +constexpr const char* WRONG_LEADER = "-ERR wrong leader"; +constexpr const char* RAFT_GROUP_ID = "raft_group_id:"; +constexpr const char* NOT_LEADER = "Not leader"; #define PRAFT PRaft::Instance() diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index aad21e337..9562ec927 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -62,9 +62,10 @@ template class LRUCache; using AppendLogFunction = std::function&&)>; +using DoSnapshotFunction = std::function; struct StorageOptions { - rocksdb::Options options; + mutable rocksdb::Options options; rocksdb::BlockBasedTableOptions table_options; size_t block_cache_size = 0; bool share_block_cache = false; @@ -74,7 +75,11 @@ struct StorageOptions { size_t db_instance_num = 3; // default = 3 int db_id = 0; AppendLogFunction append_log_function = nullptr; + DoSnapshotFunction do_snapshot_function = nullptr; + uint32_t raft_timeout_s = std::numeric_limits::max(); + int64_t max_gap = 1000; + uint64_t mem_manager_size = 100000000; Status ResetOptions(const OptionType& option_type, const std::unordered_map& options_map); }; diff --git a/src/storage/src/log_index.cc b/src/storage/src/log_index.cc 
index 3d4458f56..1dede3013 100644 --- a/src/storage/src/log_index.cc +++ b/src/storage/src/log_index.cc @@ -9,8 +9,7 @@ #include #include -#include -#include +#include #include "redis.h" @@ -25,21 +24,54 @@ rocksdb::Status storage::LogIndexOfColumnFamilies::Init(Redis *db) { } auto res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(collection); if (res.has_value()) { - cf_[i].applied_log_index.store(res->GetAppliedLogIndex()); - cf_[i].flushed_log_index.store(res->GetAppliedLogIndex()); + auto log_index = res->GetAppliedLogIndex(); + auto sequence_number = res->GetSequenceNumber(); + cf_[i].applied_index.SetLogIndexSeqnoPair(log_index, sequence_number); + cf_[i].flushed_index.SetLogIndexSeqnoPair(log_index, sequence_number); } } return Status::OK(); } -LogIndex LogIndexOfColumnFamilies::GetSmallestLogIndex(std::function &&f) const { - auto smallest_log_index = std::numeric_limits::max(); - for (const auto &it : cf_) { - smallest_log_index = std::min(f(it), smallest_log_index); +LogIndexOfColumnFamilies::SmallestIndexRes LogIndexOfColumnFamilies::GetSmallestLogIndex(int flush_cf) const { + SmallestIndexRes res; + for (int i = 0; i < cf_.size(); i++) { + if (i != flush_cf && cf_[i].flushed_index >= cf_[i].applied_index) { + continue; + } + auto applied_log_index = cf_[i].applied_index.GetLogIndex(); + auto flushed_log_index = cf_[i].flushed_index.GetLogIndex(); + auto flushed_seqno = cf_[i].flushed_index.GetSequenceNumber(); + if (applied_log_index < res.smallest_applied_log_index) { + res.smallest_applied_log_index = applied_log_index; + res.smallest_applied_log_index_cf = i; + } + if (flushed_log_index < res.smallest_flushed_log_index) { + res.smallest_flushed_log_index = flushed_log_index; + res.smallest_flushed_seqno = flushed_seqno; + res.smallest_flushed_log_index_cf = i; + } } - return smallest_log_index; + return res; } +size_t LogIndexOfColumnFamilies::GetPendingFlushGap() const { + std::set s; + for (int i = 0; i < 
kColumnFamilyNum; i++) { + s.insert(cf_[i].applied_index.GetLogIndex()); + s.insert(cf_[i].flushed_index.GetLogIndex()); + } + assert(!s.empty()); + if (s.size() == 1) { + return false; + } + auto iter_first = s.begin(); + auto iter_last = s.end(); + return *std::prev(iter_last) - *iter_first; +}; + +std::atomic_int64_t LogIndexAndSequenceCollector::max_gap_ = 1000; + std::optional storage::LogIndexTablePropertiesCollector::ReadStatsFromTableProps( const std::shared_ptr &table_props) { const auto &user_properties = table_props->user_collected_properties; @@ -79,10 +111,8 @@ LogIndex LogIndexAndSequenceCollector::FindAppliedLogIndex(SequenceNumber seqno) } void LogIndexAndSequenceCollector::Update(LogIndex smallest_applied_log_index, SequenceNumber smallest_flush_seqno) { - /* - If step length > 1, log index is sampled and sacrifice precision to save memory usage. - It means that extra applied log may be applied again on start stage. - */ + // If step length > 1, log index is sampled and sacrifice precision to save memory usage. + // It means that extra applied log may be applied again on start stage. if ((smallest_applied_log_index & step_length_mask_) == 0) { std::lock_guard gd(mutex_); list_.emplace_back(smallest_applied_log_index, smallest_flush_seqno); @@ -91,13 +121,11 @@ void LogIndexAndSequenceCollector::Update(LogIndex smallest_applied_log_index, S // TODO(longfar): find the iterator which should be deleted and erase from begin to the iterator void LogIndexAndSequenceCollector::Purge(LogIndex smallest_applied_log_index) { - /* - * The reason that we use smallest applied log index of all column families instead of smallest flushed log index is - * that the log index corresponding to the largest sequence number in the next flush must be greater than or equal to - * the smallest applied log index at this moment. 
- * So we just need to make sure that there is an element in the queue which is less than or equal to the smallest - * applied log index to ensure that we can find a correct log index while doing next flush. - */ + // The reason that we use smallest applied log index of all column families instead of smallest flushed log index is + // that the log index corresponding to the largest sequence number in the next flush must be greater than or equal to + // the smallest applied log index at this moment. + // So we just need to make sure that there is an element in the queue which is less than or equal to the smallest + // applied log index to ensure that we can find a correct log index while doing next flush. std::lock_guard gd(mutex_); if (list_.size() < 2) { return; @@ -124,4 +152,43 @@ auto LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection( : std::make_optional(max_flushed_log_index, seqno); } -} // namespace storage +void LogIndexAndSequenceCollectorPurger::OnFlushCompleted(rocksdb::DB *db, + const rocksdb::FlushJobInfo &flush_job_info) { + cf_->SetFlushedLogIndex(flush_job_info.cf_id, collector_->FindAppliedLogIndex(flush_job_info.largest_seqno), + flush_job_info.largest_seqno); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = cf_->GetSmallestLogIndex(flush_job_info.cf_id); + collector_->Purge(smallest_applied_log_index); + + if (smallest_flushed_log_index_cf != -1) { + cf_->SetFlushedLogIndexGlobal(smallest_flushed_log_index, smallest_flushed_seqno); + } + auto count = count_.fetch_add(1); + + if (count % 10 == 0) { + callback_(smallest_flushed_log_index, false); + } + + if (flush_job_info.cf_id == manul_flushing_cf_.load()) { + manul_flushing_cf_.store(-1); + } + + auto flushing_cf = manul_flushing_cf_.load(); + if (flushing_cf != -1 || !collector_->IsFlushPending()) { + return; + } + + assert(flushing_cf == -1); + + if 
(!manul_flushing_cf_.compare_exchange_strong(flushing_cf, smallest_flushed_log_index_cf)) { + return; + } + + assert(manul_flushing_cf_.load() == smallest_flushed_log_index_cf); + rocksdb::FlushOptions flush_option; + flush_option.wait = false; + db->Flush(flush_option, column_families_->at(smallest_flushed_log_index_cf)); +} + +} // namespace storage \ No newline at end of file diff --git a/src/storage/src/log_index.h b/src/storage/src/log_index.h index a2bfc9d40..e7eb31cbc 100644 --- a/src/storage/src/log_index.h +++ b/src/storage/src/log_index.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include "rocksdb/listener.h" #include "rocksdb/table_properties.h" #include "rocksdb/types.h" + #include "storage/storage_define.h" namespace storage { @@ -44,34 +46,101 @@ class LogIndexAndSequencePair { SequenceNumber seqno_ = 0; }; +struct LogIndexSeqnoPair { + std::atomic log_index = 0; + std::atomic seqno = 0; + + LogIndex GetLogIndex() const { return log_index.load(); } + + SequenceNumber GetSequenceNumber() const { return seqno.load(); } + + void SetLogIndexSeqnoPair(LogIndex l, SequenceNumber s) { + log_index.store(l); + seqno.store(s); + } + + LogIndexSeqnoPair() = default; + + bool operator==(const LogIndexSeqnoPair &other) const { return seqno.load() == other.seqno.load(); } + + bool operator<=(const LogIndexSeqnoPair &other) const { return seqno.load() <= other.seqno.load(); } + + bool operator>=(const LogIndexSeqnoPair &other) const { return seqno.load() >= other.seqno.load(); } + + bool operator<(const LogIndexSeqnoPair &other) const { return seqno.load() < other.seqno.load(); } +}; + class LogIndexOfColumnFamilies { struct LogIndexPair { - std::atomic applied_log_index = 0; // newest record in memtable. - std::atomic flushed_log_index = 0; // newest record in sst file. + LogIndexSeqnoPair applied_index; // newest record in memtable. + LogIndexSeqnoPair flushed_index; // newest record in sst file. 
+ }; + + struct SmallestIndexRes { + int smallest_applied_log_index_cf = -1; + LogIndex smallest_applied_log_index = std::numeric_limits::max(); + + int smallest_flushed_log_index_cf = -1; + LogIndex smallest_flushed_log_index = std::numeric_limits::max(); + SequenceNumber smallest_flushed_seqno = std::numeric_limits::max(); }; public: // Read the largest log index of each column family from all sst files rocksdb::Status Init(Redis *db); - LogIndex GetSmallestAppliedLogIndex() const { - return GetSmallestLogIndex([](const LogIndexPair &p) { return p.applied_log_index.load(); }); + SmallestIndexRes GetSmallestLogIndex(int flush_cf) const; + + void SetFlushedLogIndex(size_t cf_id, LogIndex log_index, SequenceNumber seqno) { + cf_[cf_id].flushed_index.log_index.store(std::max(cf_[cf_id].flushed_index.log_index.load(), log_index)); + cf_[cf_id].flushed_index.seqno.store(std::max(cf_[cf_id].flushed_index.seqno.load(), seqno)); } - // LogIndex GetSmallestFlushedLogIndex() const { - // return GetSmallestLogIndex([](const LogIndexPair &p) { return p.flushed_log_index.load(); }); - // } - void SetFlushedLogIndex(size_t cf_id, LogIndex log_index) { - cf_[cf_id].flushed_log_index = std::max(cf_[cf_id].flushed_log_index.load(), log_index); + + void SetFlushedLogIndexGlobal(LogIndex log_index, SequenceNumber seqno) { + SetLastFlushIndex(log_index, seqno); + for (int i = 0; i < kColumnFamilyNum; i++) { + if (cf_[i].flushed_index <= last_flush_index_) { + auto flush_log_index = std::max(cf_[i].flushed_index.GetLogIndex(), last_flush_index_.GetLogIndex()); + auto flush_sequence_number = + std::max(cf_[i].flushed_index.GetSequenceNumber(), last_flush_index_.GetSequenceNumber()); + cf_[i].flushed_index.SetLogIndexSeqnoPair(flush_log_index, flush_sequence_number); + } + } } bool IsApplied(size_t cf_id, LogIndex cur_log_index) const { - return cur_log_index < cf_[cf_id].applied_log_index.load(); + return cur_log_index < cf_[cf_id].applied_index.GetLogIndex(); } - void Update(size_t 
cf_id, LogIndex cur_log_index) { cf_[cf_id].applied_log_index.store(cur_log_index); } + + void Update(size_t cf_id, LogIndex cur_log_index, SequenceNumber cur_seqno) { + if (cf_[cf_id].flushed_index <= last_flush_index_ && cf_[cf_id].flushed_index == cf_[cf_id].applied_index) { + auto flush_log_index = std::max(cf_[cf_id].flushed_index.GetLogIndex(), last_flush_index_.GetLogIndex()); + auto flush_sequence_number = + std::max(cf_[cf_id].flushed_index.GetSequenceNumber(), last_flush_index_.GetSequenceNumber()); + cf_[cf_id].flushed_index.SetLogIndexSeqnoPair(flush_log_index, flush_sequence_number); + } + + cf_[cf_id].applied_index.SetLogIndexSeqnoPair(cur_log_index, cur_seqno); + } + + bool IsPendingFlush() const; + + size_t GetPendingFlushGap() const; + + void SetLastFlushIndex(LogIndex flushed_logindex, SequenceNumber flushed_seqno) { + auto lastest_flush_log_index = std::max(last_flush_index_.GetLogIndex(), flushed_logindex); + auto lastest_flush_sequence_number = std::max(last_flush_index_.GetSequenceNumber(), flushed_seqno); + last_flush_index_.SetLogIndexSeqnoPair(lastest_flush_log_index, lastest_flush_sequence_number); + } + + // for gtest + LogIndexSeqnoPair &GetLastFlushIndex() { return last_flush_index_; } + + LogIndexPair &GetCFStatus(size_t cf) { return cf_[cf]; } private: - LogIndex GetSmallestLogIndex(std::function &&f) const; std::array cf_; + LogIndexSeqnoPair last_flush_index_; }; class LogIndexAndSequenceCollector { @@ -87,6 +156,23 @@ class LogIndexAndSequenceCollector { // purge out dated log index after memtable flushed. void Purge(LogIndex smallest_applied_log_index); + // Is manual flushing required? 
+ bool IsFlushPending() const { return GetSize() >= max_gap_; } + + // for gtest + uint64_t GetSize() const { + std::shared_lock share_lock; + return list_.size(); + } + + std::deque &GetList() { + std::shared_lock share_lock; + return list_; + } + + public: + static std::atomic_int64_t max_gap_; + private: uint64_t step_length_mask_ = 0; mutable std::shared_mutex mutex_; @@ -151,18 +237,20 @@ class LogIndexTablePropertiesCollectorFactory : public rocksdb::TablePropertiesC class LogIndexAndSequenceCollectorPurger : public rocksdb::EventListener { public: - explicit LogIndexAndSequenceCollectorPurger(LogIndexAndSequenceCollector *collector, LogIndexOfColumnFamilies *cf) - : collector_(collector), cf_(cf) {} + explicit LogIndexAndSequenceCollectorPurger(std::vector *column_families, + LogIndexAndSequenceCollector *collector, LogIndexOfColumnFamilies *cf, + std::function callback) + : column_families_(column_families), collector_(collector), cf_(cf), callback_(callback) {} - void OnFlushCompleted(rocksdb::DB *db, const rocksdb::FlushJobInfo &flush_job_info) override { - cf_->SetFlushedLogIndex(flush_job_info.cf_id, collector_->FindAppliedLogIndex(flush_job_info.largest_seqno)); - auto log_idx = cf_->GetSmallestAppliedLogIndex(); - collector_->Purge(log_idx); - } + void OnFlushCompleted(rocksdb::DB *db, const rocksdb::FlushJobInfo &flush_job_info) override; private: + std::vector *column_families_ = nullptr; LogIndexAndSequenceCollector *collector_ = nullptr; LogIndexOfColumnFamilies *cf_ = nullptr; + std::atomic_uint64_t count_ = 0; + std::atomic manul_flushing_cf_ = -1; + std::function callback_; }; -} // namespace storage +} // namespace storage \ No newline at end of file diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index ed8ee7d3c..7f13179e7 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -146,26 +146,6 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ 
zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops)); zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops)); - if (append_log_function_) { - // Add log index table property collector factory to each column family - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(string); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_score); - - // Add a listener on flush to purge log index collector - db_ops.listeners.push_back( - std::make_shared(&log_index_collector_, &log_index_of_all_cfs_)); - - // TODO(longfar): Add snapshot caller - } - std::vector column_families; column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); // hash CF @@ -182,10 +162,29 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ column_families.emplace_back("zset_data_cf", zset_data_cf_ops); column_families.emplace_back("zset_score_cf", zset_score_cf_ops); + if (append_log_function_) { + // Add log index table property collector factory to each column family + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(string); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_score); + + // Add a listener 
on flush to purge log index collector + db_ops.listeners.push_back(std::make_shared( + &handles_, &log_index_collector_, &log_index_of_all_cfs_, storage_options.do_snapshot_function)); + } + auto s = rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); if (!s.ok()) { return s; } + assert(!handles_.empty()); return log_index_of_all_cfs_.Init(this); } diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index 314f5f8fc..b60878c29 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -105,8 +105,8 @@ class Redis { virtual Status GetProperty(const std::string& property, uint64_t* out); bool IsApplied(size_t cf_idx, LogIndex logidx) const { return log_index_of_all_cfs_.IsApplied(cf_idx, logidx); } - void UpdateAppliedLogIndexOfColumnFamily(size_t cf_idx, LogIndex logidx) { - log_index_of_all_cfs_.Update(cf_idx, logidx); + void UpdateAppliedLogIndexOfColumnFamily(size_t cf_idx, LogIndex logidx, SequenceNumber seqno) { + log_index_of_all_cfs_.Update(cf_idx, logidx, seqno); } bool IsRestarting() const { return is_starting_; } void StartingPhaseEnd() { is_starting_ = false; } @@ -344,6 +344,10 @@ class Redis { return nullptr; } + LogIndexOfColumnFamilies& GetLogIndexOfColumnFamilies() { return log_index_of_all_cfs_; } + + LogIndexAndSequenceCollector& GetCollector() { return log_index_collector_; } + private: int32_t index_ = 0; Storage* const storage_; diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index fe7a3c943..e21508315 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -127,6 +127,10 @@ static int RecursiveLinkAndCopy(const std::filesystem::path& source, const std:: Status Storage::Open(const StorageOptions& storage_options, const std::string& db_path) { mkpath(db_path.c_str(), 0755); db_instance_num_ = storage_options.db_instance_num; + // Temporarily set to 100000 + LogIndexAndSequenceCollector::max_gap_.store(storage_options.max_gap); + 
storage_options.options.write_buffer_manager = + std::make_shared(storage_options.mem_manager_size); for (size_t index = 0; index < db_instance_num_; index++) { insts_.emplace_back(std::make_unique(this, index)); Status s = insts_.back()->Open(storage_options, AppendSubDirectory(db_path, index)); @@ -2333,6 +2337,8 @@ Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log, LogIndex log_idx) { rocksdb::WriteBatch batch; bool is_finished_start = true; + // 提前获取 seq, 每次自增, 需要保证该操作串行执行? + auto seqno = inst->GetDB()->GetLatestSequenceNumber(); for (const auto& entry : log.entries()) { if (inst->IsRestarting() && inst->IsApplied(entry.cf_idx(), log_idx)) [[unlikely]] { // If the starting phase is over, the log must not have been applied @@ -2356,8 +2362,7 @@ Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log, LogIndex log_idx) { ERROR(msg); return Status::Incomplete(msg); } - - inst->UpdateAppliedLogIndexOfColumnFamily(entry.cf_idx(), log_idx); + inst->UpdateAppliedLogIndexOfColumnFamily(entry.cf_idx(), log_idx, ++seqno); } if (inst->IsRestarting() && is_finished_start) [[unlikely]] { INFO("Redis {} finished start phase", inst->GetIndex()); diff --git a/src/storage/tests/flush_oldest_cf_test.cc b/src/storage/tests/flush_oldest_cf_test.cc new file mode 100644 index 000000000..2558dfc11 --- /dev/null +++ b/src/storage/tests/flush_oldest_cf_test.cc @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#include "gtest/gtest.h" + +#include +#include +#include +#include +#include +#include + +#include "fmt/core.h" +#include "gtest/gtest.h" +#include "rocksdb/db.h" +#include "rocksdb/listener.h" +#include "rocksdb/metadata.h" +#include "rocksdb/options.h" + +#include "pstd/log.h" +#include "pstd/thread_pool.h" +#include "src/log_index.h" +#include "src/redis.h" +#include "storage/storage.h" +#include "storage/util.h" + +class LogIniter { + public: + LogIniter() { + logger::Init("./flush_oldest_cf_test.log"); + spdlog::set_level(spdlog::level::info); + } +}; + +LogIniter log_initer; + +using LogIndex = int64_t; + +class LogQueue : public pstd::noncopyable { + public: + using WriteCallback = std::function; + + explicit LogQueue(WriteCallback&& cb) : write_cb_(std::move(cb)) { consumer_.SetMaxIdleThread(1); } + + void AppendLog(const pikiwidb::Binlog& log, std::promise&& promise) { + auto task = [&] { + auto idx = next_log_idx_.fetch_add(1); + auto s = write_cb_(log, idx); + promise.set_value(s); + }; + consumer_.ExecuteTask(std::move(task)); + } + + private: + WriteCallback write_cb_ = nullptr; + pstd::ThreadPool consumer_; + std::atomic next_log_idx_{1}; +}; + +class FlushOldestCFTest : public ::testing::Test { + public: + FlushOldestCFTest() + : log_queue_([this](const pikiwidb::Binlog& log, LogIndex log_idx) { return db_.OnBinlogWrite(log, log_idx); }) { + options_.options.create_if_missing = true; + options_.options.max_background_jobs = 10; + options_.db_instance_num = 1; + options_.raft_timeout_s = 9000000; + options_.append_log_function = [this](const pikiwidb::Binlog& log, std::promise&& promise) { + log_queue_.AppendLog(log, std::move(promise)); + }; + options_.do_snapshot_function = [](int64_t log_index, bool sync) {}; + options_.max_gap = 15; + write_options_.disableWAL = true; + } + + ~FlushOldestCFTest() { rocksdb::DestroyDB(db_path_, rocksdb::Options()); } + + void SetUp() override { + if (access(db_path_.c_str(), F_OK) == 0) { + 
std::filesystem::remove_all(db_path_.c_str()); + } + mkdir(db_path_.c_str(), 0755); + auto s = db_.Open(options_, db_path_); + ASSERT_TRUE(s.ok()); + } + + std::string db_path_{"./test_db/flush_oldest_cf_test"}; + storage::StorageOptions options_; + storage::Storage db_; + uint32_t test_times_ = 100; + std::string key_ = "flush-oldest-cf-test"; + std::string key_prefix = "key_"; + std::string field_prefix_ = "field_"; + std::string value_prefix_ = "value_"; + rocksdb::WriteOptions write_options_; + rocksdb::ReadOptions read_options_; + LogQueue log_queue_; +}; + +TEST_F(FlushOldestCFTest, SimpleTest) { + const auto& rocksdb = db_.GetDBInstance(key_); + + auto add_kvs = [&](int start, int end) { + for (int i = start; i < end; i++) { + auto key = key_prefix + std::to_string(i); + auto v = value_prefix_ + std::to_string(i); + auto s = rocksdb->Set(key, v); + ASSERT_TRUE(s.ok()); + } + }; + + auto add_hash = [&](int start, int end) { + for (int i = start; i < end; i++) { + auto key = key_prefix + std::to_string(i); + auto v = value_prefix_ + std::to_string(i); + auto f = field_prefix_ + std::to_string(i); + int32_t res{}; + auto s = rocksdb->HSet(key, v, f, &res); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(res, 1); + } + }; + + auto flush_cf = [&](size_t cf) { + auto s = rocksdb->GetDB()->Flush(rocksdb::FlushOptions(), rocksdb->GetColumnFamilyHandles()[cf]); + ASSERT_TRUE(s.ok()); + }; + + { + // type kv kv + // entry [1:1] -> ... 
[10:10] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + add_kvs(0, 10); + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 0); + ASSERT_EQ(last_flush_index.seqno.load(), 0); + + auto& cf_0_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kStringsCF); + ASSERT_EQ(cf_0_status.flushed_index.log_index, 0); + ASSERT_EQ(cf_0_status.flushed_index.seqno, 0); + ASSERT_EQ(cf_0_status.applied_index.log_index, 10); + ASSERT_EQ(cf_0_status.applied_index.seqno, 10); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = + rocksdb->GetLogIndexOfColumnFamilies().GetSmallestLogIndex(-1); + + ASSERT_EQ(smallest_flushed_log_index, 0); + ASSERT_EQ(smallest_flushed_seqno, 0); + ASSERT_EQ(smallest_applied_log_index, 10); + auto size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(size, 10); + } + + { + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... 
-> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 0 0 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + add_hash(10, 30); + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 0); + ASSERT_EQ(last_flush_index.seqno.load(), 0); + + auto& cf_1_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesMetaCF); + ASSERT_EQ(cf_1_status.flushed_index.log_index, 0); + ASSERT_EQ(cf_1_status.flushed_index.seqno, 0); + ASSERT_EQ(cf_1_status.applied_index.log_index, 30); + ASSERT_EQ(cf_1_status.applied_index.seqno, 49); + + auto& cf_2_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesDataCF); + ASSERT_EQ(cf_2_status.flushed_index.log_index, 0); + ASSERT_EQ(cf_2_status.flushed_index.seqno, 0); + ASSERT_EQ(cf_2_status.applied_index.log_index, 30); + ASSERT_EQ(cf_2_status.applied_index.seqno, 50); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = + rocksdb->GetLogIndexOfColumnFamilies().GetSmallestLogIndex(-1); + + ASSERT_EQ(smallest_flushed_log_index, 0); + ASSERT_EQ(smallest_flushed_seqno, 0); + ASSERT_EQ(smallest_applied_log_index, 10); + + auto size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(size, 30); + + auto is_pending_flush = rocksdb->GetCollector().IsFlushPending(); + ASSERT_TRUE(is_pending_flush); + } + + { + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... 
-> [30:49] + auto cur_par = rocksdb->GetCollector().GetList().begin(); + auto logindex = 1; + auto seq = 1; + for (int i = 1; i <= 10; i++) { + ASSERT_EQ(cur_par->GetAppliedLogIndex(), logindex); + ASSERT_EQ(cur_par->GetSequenceNumber(), seq); + cur_par = std::next(cur_par); + logindex++; + seq++; + } + + for (int i = 11; i <= 30; i++) { + ASSERT_EQ(cur_par->GetAppliedLogIndex(), logindex); + ASSERT_EQ(cur_par->GetSequenceNumber(), seq); + seq += 2; + logindex++; + cur_par = std::next(cur_par); + } + } + + { + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 0 0 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + auto gap = rocksdb->GetLogIndexOfColumnFamilies().GetPendingFlushGap(); + ASSERT_EQ(gap, 30); + flush_cf(1); + sleep(5); // sleep flush complete. + // 1) 根据 cf 1 的 latest SequenceNumber = 49 查到对应的 log index 为 30. 设置 cf 1 的 flushed_log_index 和 + // flushed_sequence_number 为 30 49. + // + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 2) 查找到此时的 smallest_applied_log_index_cf = 0 smallest_applied_log_index = 10 + // smallest_flushed_log_index_cf = 0 + // smallest_flushed_log_index = 0 smallest_flushed_seqno = 0 + // 根据 smallest_applied_log_index = 10 在队列长度 >= 2 的前提下, 持续删除 log_index < 10 的条目. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... 
-> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 3) 根据 smallest_flushed_log_index_cf = 0 smallest_flushed_log_index = 0 smallest_flushed_seqno = 0 + // 设置 last_flush_index 为 0, 0 + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 4) 检测到队列中 logindex 的最大差值超过阈值, 触发 smallest_flushed_log_index_cf flush . 该 case 中对应 cf 为 0. + // 根据 cf 0 的 latest SequenceNumber = 10 查到对应的 log index 为 10. 设置 cf 0 的 flushed_log_index 和 + // flushed_sequence_number 为 10 10. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 5) 查找到此时的 smallest_applied_log_index_cf = 0 smallest_applied_log_index = 10 + // smallest_flushed_log_index_cf = 2 smallest_flushed_log_index = 0 smallest_flushed_seqno = 0 + // 根据 smallest_applied_log_index = 10 在队列长度 >= 2 的前提下, 删除 log_index < 10 的条目, 不变. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 6) 检测到队列中 logindex 的最大差值超过阈值, 触发 smallest_flushed_log_index_cf flush . 该 case 中对应 cf 为 2. 
+ // 根据 cf 2 的 latest SequenceNumber = 50 查到对应的 log index 为 30. 设置 cf 2 的 flushed_log_index 和 + // flushed_sequence_number 为 30 50. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 30 50 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 7) 查找到此时的 smallest_applied_log_index_cf = 2 smallest_applied_log_index = 30 + // smallest_flushed_log_index_cf = 2 smallest_flushed_log_index = 30 smallest_flushed_seqno = 50 + // 根据 smallest_applied_log_index = 30 在队列长度 >= 2 的前提下, 删除 log_index < 50 的条目. + // + // type hash + // entry [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 30 50 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 8) 根据 smallest_flushed_log_index_cf = 2 smallest_flushed_log_index = 30 smallest_flushed_seqno = 50 + // 设置 last_flush_index 为 30, 50. + // + // type hash + // entry [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 30 50 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 30 50 + + // 9) 当设置 last_flush_index 为 30, 50 时, 会同时拉高没有数据的 cf 的 flushed_index, 该 case 为 cf 0, cf 1, + // 将 cf 0 的 flushed_index 从 10 10 提高为 30 50. + // 将 cf 1 的 flushed index 从 30 49 提升到 30 50. + // 其他没有写入的 cf flushed index 从 0 0 提升到 30 50. + // + // type hash + // entry [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 30 50 10 10 + // 1 30 50 30 49 + // 2 30 50 30 50 + // other 30 50 0 0 + // + // last_flush_index log_index sequencenumber + // 30 50 + + // 9) 检测到队列长度未超过阈值, 结束 flush. 
+ auto after_flush_size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(after_flush_size, 1); + + auto& cf_0_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kStringsCF); + ASSERT_EQ(cf_0_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_0_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_0_status.applied_index.log_index, 10); + ASSERT_EQ(cf_0_status.applied_index.seqno, 10); + + auto& cf_1_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesMetaCF); + ASSERT_EQ(cf_1_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_1_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_1_status.applied_index.log_index, 30); + ASSERT_EQ(cf_1_status.applied_index.seqno, 49); + + auto& cf_2_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesDataCF); + ASSERT_EQ(cf_2_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_2_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_2_status.applied_index.log_index, 30); + ASSERT_EQ(cf_2_status.applied_index.seqno, 50); + + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 30); + ASSERT_EQ(last_flush_index.seqno.load(), 50); + + auto& cf_3_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kSetsMetaCF); + ASSERT_EQ(cf_3_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_3_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_3_status.applied_index.log_index, 0); + ASSERT_EQ(cf_3_status.applied_index.seqno, 0); + } + + { + add_kvs(30, 35); + // type hash -> kv -> ... 
-> kv + // entry [30:49] [31:51] [35:55] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 30 50 35 55 + // 1 30 50 30 49 + // 2 30 50 30 50 + // other 30 50 0 0 + // + // last_flush_index log_index sequencenumber + // 30 50 + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 30); + ASSERT_EQ(last_flush_index.seqno.load(), 50); + + auto& cf_0_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kStringsCF); + ASSERT_EQ(cf_0_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_0_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_0_status.applied_index.log_index, 35); + ASSERT_EQ(cf_0_status.applied_index.seqno, 55); + + auto& cf_1_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesMetaCF); + ASSERT_EQ(cf_1_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_1_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_1_status.applied_index.log_index, 30); + ASSERT_EQ(cf_1_status.applied_index.seqno, 49); + + auto& cf_2_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesDataCF); + ASSERT_EQ(cf_2_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_2_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_2_status.applied_index.log_index, 30); + ASSERT_EQ(cf_2_status.applied_index.seqno, 50); + + auto& cf_3_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kSetsMetaCF); + ASSERT_EQ(cf_3_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_3_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_3_status.applied_index.log_index, 0); + ASSERT_EQ(cf_3_status.applied_index.seqno, 0); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = + rocksdb->GetLogIndexOfColumnFamilies().GetSmallestLogIndex(-1); + + // 除了 cf 0 之外, 其余的 cf 都没有未持久化数据, 所以不在我们统计范围之内. 
+ ASSERT_EQ(smallest_applied_log_index_cf, 0); + ASSERT_EQ(smallest_applied_log_index, 35); + + ASSERT_EQ(smallest_flushed_log_index_cf, 0); + ASSERT_EQ(smallest_flushed_log_index, 30); + ASSERT_EQ(smallest_flushed_seqno, 50); + + auto size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(size, 6); + + auto is_pending_flush = rocksdb->GetCollector().IsFlushPending(); + ASSERT_TRUE(!is_pending_flush); + } +}; diff --git a/src/storage/tests/log_index_test.cc b/src/storage/tests/log_index_test.cc index 4b39cb8ee..54e656979 100644 --- a/src/storage/tests/log_index_test.cc +++ b/src/storage/tests/log_index_test.cc @@ -104,6 +104,7 @@ class LogIndexTest : public ::testing::Test { options_.append_log_function = [this](const pikiwidb::Binlog& log, std::promise&& promise) { log_queue_.AppendLog(log, std::move(promise)); }; + options_.do_snapshot_function = [](int64_t log_index, bool sync) {}; } ~LogIndexTest() override { DeleteFiles(db_path_.c_str()); } From e5c11e7e38eb1963330acf44b00c5145c2c0f50e Mon Sep 17 00:00:00 2001 From: panlei-coder Date: Tue, 30 Apr 2024 01:51:58 +0800 Subject: [PATCH 25/33] fix: Resolve merge issues and fix merge bugs --- CMakeLists.txt | 26 +++++++++++++------------- cmake/gflags.cmake | 15 ++++++--------- pikiwidb.conf | 4 ++++ src/client.cc | 25 ------------------------- src/cmd_thread_pool.h | 2 +- src/config.cc | 1 + src/config.h | 4 ++++ src/db.cc | 14 +++++++------- src/db.h | 2 +- src/pikiwidb.cc | 2 +- src/praft/praft.cc | 14 +++++++------- src/praft/praft.h | 2 +- src/replication.cc | 17 ----------------- src/store.h | 8 ++------ 14 files changed, 48 insertions(+), 88 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f929fe09e..a352f0600 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -158,19 +158,19 @@ SET(LIB ${LIB} CACHE INTERNAL "libs which should be linked for executable target INCLUDE(FetchContent) -INCLUDE(gflags) -INCLUDE(findTools) -INCLUDE(leveldb) -INCLUDE(libevent) -INCLUDE(llhttp) -INCLUDE(fmt) 
-INCLUDE(spdlog) -INCLUDE(gtest) -INCLUDE(rocksdb) -INCLUDE(zlib) -INCLUDE(protobuf) -INCLUDE(brpc) -INCLUDE(braft) +INCLUDE(cmake/gflags.cmake) +INCLUDE(cmake/findTools.cmake) +INCLUDE(cmake/leveldb.cmake) +INCLUDE(cmake/libevent.cmake) +INCLUDE(cmake/llhttp.cmake) +INCLUDE(cmake/fmt.cmake) +INCLUDE(cmake/spdlog.cmake) +INCLUDE(cmake/gtest.cmake) +INCLUDE(cmake/rocksdb.cmake) +INCLUDE(cmake/zlib.cmake) +INCLUDE(cmake/protobuf.cmake) +INCLUDE(cmake/brpc.cmake) +INCLUDE(cmake/braft.cmake) ENABLE_TESTING() diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake index 83f3b64fe..a144028fc 100644 --- a/cmake/gflags.cmake +++ b/cmake/gflags.cmake @@ -10,15 +10,12 @@ FetchContent_Declare(gflags URL_HASH SHA256=19713a36c9f32b33df59d1c79b4958434cb005b5b47dc5400a7a4b078111d9b5 ) -FetchContent_MakeAvailableWithArgs(gflags - GFLAGS_NAMESPACE=gflags - BUILD_STATIC_LIBS=ON - BUILD_SHARED_LIBS=OFF - BUILD_gflags_LIB=ON - BUILD_gflags_nothreads_LIB=OFF - BUILD_TESTING=OFF - CMAKE_BUILD_TYPE=Release -) +SET(GFLAGS_BUILD_STATIC_LIBS ON CACHE BOOL "" FORCE) +SET(GFLAGS_BUILD_SHARED_LIBS OFF CACHE BOOL "" FORCE) +SET(GFLAGS_BUILD_gflags_LIB ON CACHE BOOL "" FORCE) +SET(GFLAGS_BUILD_gflags_nothreads_LIB OFF CACHE BOOL "" FORCE) +SET(GFLAGS_BUILD_TESTING OFF CACHE BOOL "" FORCE) +FETCHCONTENT_MAKEAVAILABLE(gflags) FIND_PACKAGE(Threads REQUIRED) diff --git a/pikiwidb.conf b/pikiwidb.conf index 6e17843a0..6cb550e2c 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -341,6 +341,10 @@ rocksdb-number-levels 7 rocksdb-enable-pipelined-write no rocksdb-level0-slowdown-writes-trigger 20 rocksdb-level0-stop-writes-trigger 36 +# default 86400 * 7 +rocksdb-ttl-second 604800 +# default 86400 * 3 +rocksdb-periodic-second 259200; ############################### RAFT ############################### use-raft yes diff --git a/src/client.cc b/src/client.cc index 2b283c6f0..fbb358779 100644 --- a/src/client.cc +++ b/src/client.cc @@ -395,7 +395,6 @@ int PClient::handlePacket(const char* start, int bytes) { 
void PClient::executeCommand() { // auto [cmdPtr, ret] = g_pikiwidb->GetCmdTableManager().GetCommand(CmdName(), this); -<<<<<<< HEAD // if (!cmdPtr) { // if (ret == CmdRes::kInvalidParameter) { // SetRes(CmdRes::kInvalidParameter); @@ -412,30 +411,6 @@ void PClient::executeCommand() { // // // execute a specific command // cmdPtr->Execute(this); -======= - if (!cmdPtr) { - if (ret == CmdRes::kInvalidParameter) { - SetRes(CmdRes::kInvalidParameter); - } else { - SetRes(CmdRes::kSyntaxErr, "unknown command '" + CmdName() + "'"); - } - return; - } - - if (!cmdPtr->CheckArg(params_.size())) { - SetRes(CmdRes::kWrongNum, CmdName()); - return; - } - - // if user send write command to a node which is not leader, he should get the info of leader - if (cmdPtr->HasFlag(kCmdFlagsWrite) && PRAFT.IsInitialized() && !PRAFT.IsLeader()) { - SetRes(CmdRes::kErrOther, fmt::format("MOVED {}", PRAFT.GetLeaderAddress())); - return; - } - - // execute a specific command - cmdPtr->Execute(this); ->>>>>>> import-braft } PClient* PClient::Current() { return s_current; } diff --git a/src/cmd_thread_pool.h b/src/cmd_thread_pool.h index 3b65d6c87..3a0e867da 100644 --- a/src/cmd_thread_pool.h +++ b/src/cmd_thread_pool.h @@ -14,7 +14,7 @@ #include #include #include "base_cmd.h" -#include "pstd_status.h" +#include "pstd/pstd_status.h" namespace pikiwidb { diff --git a/src/config.cc b/src/config.cc index d82e0124f..ec283d2c2 100644 --- a/src/config.cc +++ b/src/config.cc @@ -105,6 +105,7 @@ PConfig::PConfig() { AddBool("daemonize", &CheckYesNo, false, &daemonize); AddString("ip", false, {&ip}); AddNumberWihLimit("port", false, &port, PORT_LIMIT_MIN, PORT_LIMIT_MAX); + AddNumber("raft-port-offset", true, &raft_port_offset); AddNumber("timeout", true, &timeout); AddString("db-path", false, {&db_path}); AddStrinWithFunc("loglevel", &CheckLogLevel, false, {&log_level}); diff --git a/src/config.h b/src/config.h index 082bfa6b5..40581fc39 100644 --- a/src/config.h +++ b/src/config.h @@ -146,6 +146,7 @@ 
class PConfig { AtomicString pid_file = "./pikiwidb.pid"; AtomicString ip = "127.0.0.1"; std::atomic_uint16_t port = 9221; + std::atomic_uint16_t raft_port_offset = 10; AtomicString db_path = "./db/"; AtomicString log_dir = "stdout"; // the log directory, differ from redis AtomicString log_level = "warning"; @@ -169,6 +170,9 @@ class PConfig { std::atomic_bool rocksdb_enable_pipelined_write = false; std::atomic_int rocksdb_level0_slowdown_writes_trigger = 20; std::atomic_int rocksdb_level0_stop_writes_trigger = 36; + std::atomic_uint64_t rocksdb_ttl_second = 604800; // default 86400 * 7 + std::atomic_uint64_t rocksdb_periodic_second = 259200; // default 86400 * 3 + std::atomic_bool use_raft = true; rocksdb::Options GetRocksDBOptions(); diff --git a/src/db.cc b/src/db.cc index b9c0b98d8..c9e835ed1 100644 --- a/src/db.cc +++ b/src/db.cc @@ -18,14 +18,14 @@ namespace pikiwidb { DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) : db_index_(db_index), db_path_(db_path + std::to_string(db_index_) + '/'), rocksdb_inst_num_(rocksdb_inst_num) { storage::StorageOptions storage_options; - storage_options.options.create_if_missing = true; + storage_options.options = g_config.GetRocksDBOptions(); storage_options.db_instance_num = rocksdb_inst_num_; storage_options.db_id = db_index_; // options for CF - storage_options.options.ttl = g_config.rocksdb_ttl_second; - storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; - if (g_config.use_raft) { + storage_options.options.ttl = g_config.rocksdb_ttl_second.load(std::memory_order_relaxed); + storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second.load(std::memory_order_relaxed); + if (g_config.use_raft.load(std::memory_order_relaxed)) { storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { r.AppendLog(log, std::move(promise)); }; @@ -111,9 +111,9 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool 
sync) { storage_options.db_id = db_index_; // options for CF - storage_options.options.ttl = g_config.rocksdb_ttl_second; - storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second; - if (g_config.use_raft) { + storage_options.options.ttl = g_config.rocksdb_ttl_second.load(std::memory_order_relaxed); + storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second.load(std::memory_order_relaxed); + if (g_config.use_raft.load(std::memory_order_relaxed)) { storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { r.AppendLog(log, std::move(promise)); }; diff --git a/src/db.h b/src/db.h index 7621cd6e8..316c6b9d5 100644 --- a/src/db.h +++ b/src/db.h @@ -10,7 +10,7 @@ #include #include -#include "log.h" +#include "pstd/log.h" #include "pstd/noncopyable.h" #include "storage/storage.h" diff --git a/src/pikiwidb.cc b/src/pikiwidb.cc index 9a7613782..d078880df 100644 --- a/src/pikiwidb.cc +++ b/src/pikiwidb.cc @@ -154,7 +154,7 @@ bool PikiwiDB::Init() { return false; } - PSTORE.Init(); + PSTORE.Init(g_config.databases.load(std::memory_order_relaxed)); PSlowLog::Instance().SetThreshold(g_config.slow_log_time.load()); PSlowLog::Instance().SetLogLimit(static_cast(g_config.slow_log_max_len.load())); diff --git a/src/praft/praft.cc b/src/praft/praft.cc index 0851acfe5..1dee15a16 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -120,9 +120,9 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { this->group_id_ = group_id; // FIXME: g_config.ip is default to 127.0.0.0, which may not work in cluster. 
- raw_addr_ = g_config.ip + ":" + std::to_string(port); + raw_addr_ = g_config.ip.ToString() + ":" + std::to_string(port); butil::ip_t ip; - auto ret = butil::str2ip(g_config.ip.c_str(), &ip); + auto ret = butil::str2ip(g_config.ip.ToString().c_str(), &ip); if (ret != 0) { server_.reset(); return ERROR_LOG_AND_STATUS("Failed to convert str_ip to butil::ip_t"); @@ -150,7 +150,7 @@ butil::Status PRaft::Init(std::string& group_id, bool initial_conf_is_null) { node_options_.fsm = this; node_options_.node_owns_fsm = false; node_options_.snapshot_interval_s = 0; - std::string prefix = "local://" + g_config.dbpath + "_praft"; + std::string prefix = "local://" + g_config.db_path.ToString() + "_praft"; node_options_.log_uri = prefix + "/log"; node_options_.raft_meta_uri = prefix + "/raft_meta"; node_options_.snapshot_uri = prefix + "/snapshot"; @@ -273,7 +273,7 @@ void PRaft::SendNodeAddRequest(PClient* client) { // Node id in braft are ip:port, the node id param in RAFT.NODE ADD cmd will be ignored. 
int unused_node_id = 0; auto port = g_config.port + pikiwidb::g_config.raft_port_offset; - auto raw_addr = g_config.ip + ":" + std::to_string(port); + auto raw_addr = g_config.ip.ToString() + ":" + std::to_string(port); UnboundedBuffer req; req.PushData("RAFT.NODE ADD ", 14); req.PushData(std::to_string(unused_node_id).c_str(), std::to_string(unused_node_id).size()); @@ -422,7 +422,7 @@ int PRaft::ProcessClusterJoinCmdResponse(PClient* client, const char* start, int } std::string reply(start, len); - if (reply.find(OK) != std::string::npos) { + if (reply.find(OK_STR) != std::string::npos) { INFO("Joined Raft cluster, node id: {}, group_id: {}", PRAFT.GetNodeID(), PRAFT.group_id_); join_client->SetRes(CmdRes::kOK); join_client->SendPacket(join_client->Message()); @@ -456,7 +456,7 @@ int PRaft::ProcessClusterRemoveCmdResponse(PClient* client, const char* start, i } std::string reply(start, len); - if (reply.find(OK) != std::string::npos) { + if (reply.find(OK_STR) != std::string::npos) { INFO("Removed Raft cluster, node id: {}, group_id: {}", PRAFT.GetNodeID(), PRAFT.group_id_); ShutDown(); Join(); @@ -633,7 +633,7 @@ int PRaft::on_snapshot_load(braft::SnapshotReader* reader) { CHECK(!IsLeader()) << "Leader is not supposed to load snapshot"; assert(reader); auto reader_path = reader->get_path(); // xx/snapshot_0000001 - auto path = g_config.dbpath + std::to_string(db_id_); // db/db_id + auto path = g_config.db_path.ToString() + std::to_string(db_id_); // db/db_id TasksVector tasks(1, {TaskType::kLoadDBFromCheckpoint, db_id_, {{TaskArg::kCheckpointPath, reader_path}}, true}); PSTORE.HandleTaskSpecificDB(tasks); return 0; diff --git a/src/praft/praft.h b/src/praft/praft.h index edb82288c..65cc14d4f 100644 --- a/src/praft/praft.h +++ b/src/praft/praft.h @@ -25,7 +25,7 @@ namespace pikiwidb { #define RAFT_GROUPID_LEN 32 -#define OK "+OK" +#define OK_STR "+OK" #define DATABASES_NUM "databases_num" #define ROCKSDB_NUM "rocksdb_num" #define ROCKSDB_VERSION "rocksdb_version" 
diff --git a/src/replication.cc b/src/replication.cc index 5b8c08e1f..a1f1125f7 100644 --- a/src/replication.cc +++ b/src/replication.cc @@ -214,24 +214,7 @@ void PReplication::Cron() { } break; case kPReplStateConnected: -<<<<<<< HEAD - if (!g_config.master_auth.empty()) { - if (auto master = master_.lock()) { - UnboundedBuffer req; - req.PushData("auth "); - req.PushData(g_config.master_auth.ToString().data(), g_config.master_auth.ToString().size()); - req.PushData("\r\n"); - master->SendPacket(req); - INFO("send auth with password {}", g_config.master_auth.ToString()); - - masterInfo_.state = kPReplStateWaitAuth; - break; - } - } - // fall through to next case. -======= break; ->>>>>>> import-braft case kPReplStateWaitAuth: { auto master = master_.lock(); diff --git a/src/store.h b/src/store.h index db493a83e..8e8590adb 100644 --- a/src/store.h +++ b/src/store.h @@ -9,17 +9,13 @@ #define GLOG_NO_ABBREVIATED_SEVERITIES -<<<<<<< HEAD -#include "common.h" -#include "db.h" -#include "storage/storage.h" - - #include #include #include +#include "common.h" #include "db.h" +#include "storage/storage.h" namespace pikiwidb { From 0c55ebdf1737d6d8999e3aca3e78357195f0f0fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=81=E5=B0=8F=E5=B8=85?= <56024577+dingxiaoshuai123@users.noreply.github.com> Date: Mon, 29 Apr 2024 16:07:55 +0800 Subject: [PATCH 26/33] feat: flush oldest cf (#275) * Revert to using the version with independent atomic variables. 
--- cmake/braft.cmake | 4 +- cmake/zlib.cmake | 4 +- src/db.cc | 2 + src/praft/praft.cc | 5 +- src/storage/include/storage/storage.h | 7 +- src/storage/src/log_index.cc | 109 ++++- src/storage/src/log_index.h | 128 +++++- src/storage/src/redis.cc | 39 +- src/storage/src/redis.h | 8 +- src/storage/src/storage.cc | 9 +- src/storage/tests/flush_oldest_cf_test.cc | 484 ++++++++++++++++++++++ src/storage/tests/log_index_test.cc | 1 + 12 files changed, 726 insertions(+), 74 deletions(-) create mode 100644 src/storage/tests/flush_oldest_cf_test.cc diff --git a/cmake/braft.cmake b/cmake/braft.cmake index 43ea4a350..288c637fe 100644 --- a/cmake/braft.cmake +++ b/cmake/braft.cmake @@ -16,8 +16,8 @@ ExternalProject_Add( extern_braft ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS brpc - URL "https://github.com/pikiwidb/braft/archive/refs/heads/stable.zip" - URL_HASH SHA256=e73831f9768ac57d07f01ed81a11c8368e259c25315a960c29a6422f31f42fd1 + GIT_REPOSITORY "https://github.com/pikiwidb/braft.git" + GIT_TAG master PREFIX ${BRAFT_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/cmake/zlib.cmake b/cmake/zlib.cmake index 7e5963a1c..b1e300009 100644 --- a/cmake/zlib.cmake +++ b/cmake/zlib.cmake @@ -7,8 +7,8 @@ INCLUDE(ExternalProject) SET(ZLIB_SOURCES_DIR ${THIRD_PARTY_PATH}/zlib) SET(ZLIB_INSTALL_DIR ${THIRD_PARTY_PATH}/install/zlib) -SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE) -SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE) +# SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE) +# SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." 
FORCE) ExternalProject_Add( extern_zlib diff --git a/src/db.cc b/src/db.cc index c9e835ed1..d65cfddc9 100644 --- a/src/db.cc +++ b/src/db.cc @@ -29,6 +29,8 @@ DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { r.AppendLog(log, std::move(promise)); }; + storage_options.do_snapshot_function = + std::bind(&pikiwidb::PRaft::DoSnapshot, &pikiwidb::PRAFT, std::placeholders::_1, std::placeholders::_2); } storage_ = std::make_unique(); diff --git a/src/praft/praft.cc b/src/praft/praft.cc index 1dee15a16..5f7aa7099 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -522,10 +522,7 @@ butil::Status PRaft::DoSnapshot(int64_t self_snapshot_index, bool is_sync) { return ERROR_LOG_AND_STATUS("Node is not initialized"); } braft::SynchronizedClosure done; - // TODO(panlei) Increase the self_log_index parameter - // TODO(panlei) Use the is_sync parameter to determine whether - // to use synchronous waiting. 
- node_->snapshot(&done); + node_->snapshot(&done, self_snapshot_index); done.wait(); return done.status(); } diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index aad21e337..9562ec927 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -62,9 +62,10 @@ template class LRUCache; using AppendLogFunction = std::function&&)>; +using DoSnapshotFunction = std::function; struct StorageOptions { - rocksdb::Options options; + mutable rocksdb::Options options; rocksdb::BlockBasedTableOptions table_options; size_t block_cache_size = 0; bool share_block_cache = false; @@ -74,7 +75,11 @@ struct StorageOptions { size_t db_instance_num = 3; // default = 3 int db_id = 0; AppendLogFunction append_log_function = nullptr; + DoSnapshotFunction do_snapshot_function = nullptr; + uint32_t raft_timeout_s = std::numeric_limits::max(); + int64_t max_gap = 1000; + uint64_t mem_manager_size = 100000000; Status ResetOptions(const OptionType& option_type, const std::unordered_map& options_map); }; diff --git a/src/storage/src/log_index.cc b/src/storage/src/log_index.cc index 3d4458f56..1dede3013 100644 --- a/src/storage/src/log_index.cc +++ b/src/storage/src/log_index.cc @@ -9,8 +9,7 @@ #include #include -#include -#include +#include #include "redis.h" @@ -25,21 +24,54 @@ rocksdb::Status storage::LogIndexOfColumnFamilies::Init(Redis *db) { } auto res = LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection(collection); if (res.has_value()) { - cf_[i].applied_log_index.store(res->GetAppliedLogIndex()); - cf_[i].flushed_log_index.store(res->GetAppliedLogIndex()); + auto log_index = res->GetAppliedLogIndex(); + auto sequence_number = res->GetSequenceNumber(); + cf_[i].applied_index.SetLogIndexSeqnoPair(log_index, sequence_number); + cf_[i].flushed_index.SetLogIndexSeqnoPair(log_index, sequence_number); } } return Status::OK(); } -LogIndex 
LogIndexOfColumnFamilies::GetSmallestLogIndex(std::function &&f) const { - auto smallest_log_index = std::numeric_limits::max(); - for (const auto &it : cf_) { - smallest_log_index = std::min(f(it), smallest_log_index); +LogIndexOfColumnFamilies::SmallestIndexRes LogIndexOfColumnFamilies::GetSmallestLogIndex(int flush_cf) const { + SmallestIndexRes res; + for (int i = 0; i < cf_.size(); i++) { + if (i != flush_cf && cf_[i].flushed_index >= cf_[i].applied_index) { + continue; + } + auto applied_log_index = cf_[i].applied_index.GetLogIndex(); + auto flushed_log_index = cf_[i].flushed_index.GetLogIndex(); + auto flushed_seqno = cf_[i].flushed_index.GetSequenceNumber(); + if (applied_log_index < res.smallest_applied_log_index) { + res.smallest_applied_log_index = applied_log_index; + res.smallest_applied_log_index_cf = i; + } + if (flushed_log_index < res.smallest_flushed_log_index) { + res.smallest_flushed_log_index = flushed_log_index; + res.smallest_flushed_seqno = flushed_seqno; + res.smallest_flushed_log_index_cf = i; + } } - return smallest_log_index; + return res; } +size_t LogIndexOfColumnFamilies::GetPendingFlushGap() const { + std::set s; + for (int i = 0; i < kColumnFamilyNum; i++) { + s.insert(cf_[i].applied_index.GetLogIndex()); + s.insert(cf_[i].flushed_index.GetLogIndex()); + } + assert(!s.empty()); + if (s.size() == 1) { + return false; + } + auto iter_first = s.begin(); + auto iter_last = s.end(); + return *std::prev(iter_last) - *iter_first; +}; + +std::atomic_int64_t LogIndexAndSequenceCollector::max_gap_ = 1000; + std::optional storage::LogIndexTablePropertiesCollector::ReadStatsFromTableProps( const std::shared_ptr &table_props) { const auto &user_properties = table_props->user_collected_properties; @@ -79,10 +111,8 @@ LogIndex LogIndexAndSequenceCollector::FindAppliedLogIndex(SequenceNumber seqno) } void LogIndexAndSequenceCollector::Update(LogIndex smallest_applied_log_index, SequenceNumber smallest_flush_seqno) { - /* - If step length > 1, log 
index is sampled and sacrifice precision to save memory usage. - It means that extra applied log may be applied again on start stage. - */ + // If step length > 1, log index is sampled and sacrifice precision to save memory usage. + // It means that extra applied log may be applied again on start stage. if ((smallest_applied_log_index & step_length_mask_) == 0) { std::lock_guard gd(mutex_); list_.emplace_back(smallest_applied_log_index, smallest_flush_seqno); @@ -91,13 +121,11 @@ void LogIndexAndSequenceCollector::Update(LogIndex smallest_applied_log_index, S // TODO(longfar): find the iterator which should be deleted and erase from begin to the iterator void LogIndexAndSequenceCollector::Purge(LogIndex smallest_applied_log_index) { - /* - * The reason that we use smallest applied log index of all column families instead of smallest flushed log index is - * that the log index corresponding to the largest sequence number in the next flush must be greater than or equal to - * the smallest applied log index at this moment. - * So we just need to make sure that there is an element in the queue which is less than or equal to the smallest - * applied log index to ensure that we can find a correct log index while doing next flush. - */ + // The reason that we use smallest applied log index of all column families instead of smallest flushed log index is + // that the log index corresponding to the largest sequence number in the next flush must be greater than or equal to + // the smallest applied log index at this moment. + // So we just need to make sure that there is an element in the queue which is less than or equal to the smallest + // applied log index to ensure that we can find a correct log index while doing next flush. 
std::lock_guard gd(mutex_); if (list_.size() < 2) { return; @@ -124,4 +152,43 @@ auto LogIndexTablePropertiesCollector::GetLargestLogIndexFromTableCollection( : std::make_optional(max_flushed_log_index, seqno); } -} // namespace storage +void LogIndexAndSequenceCollectorPurger::OnFlushCompleted(rocksdb::DB *db, + const rocksdb::FlushJobInfo &flush_job_info) { + cf_->SetFlushedLogIndex(flush_job_info.cf_id, collector_->FindAppliedLogIndex(flush_job_info.largest_seqno), + flush_job_info.largest_seqno); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = cf_->GetSmallestLogIndex(flush_job_info.cf_id); + collector_->Purge(smallest_applied_log_index); + + if (smallest_flushed_log_index_cf != -1) { + cf_->SetFlushedLogIndexGlobal(smallest_flushed_log_index, smallest_flushed_seqno); + } + auto count = count_.fetch_add(1); + + if (count % 10 == 0) { + callback_(smallest_flushed_log_index, false); + } + + if (flush_job_info.cf_id == manul_flushing_cf_.load()) { + manul_flushing_cf_.store(-1); + } + + auto flushing_cf = manul_flushing_cf_.load(); + if (flushing_cf != -1 || !collector_->IsFlushPending()) { + return; + } + + assert(flushing_cf == -1); + + if (!manul_flushing_cf_.compare_exchange_strong(flushing_cf, smallest_flushed_log_index_cf)) { + return; + } + + assert(manul_flushing_cf_.load() == smallest_flushed_log_index_cf); + rocksdb::FlushOptions flush_option; + flush_option.wait = false; + db->Flush(flush_option, column_families_->at(smallest_flushed_log_index_cf)); +} + +} // namespace storage \ No newline at end of file diff --git a/src/storage/src/log_index.h b/src/storage/src/log_index.h index a2bfc9d40..e7eb31cbc 100644 --- a/src/storage/src/log_index.h +++ b/src/storage/src/log_index.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include "rocksdb/listener.h" #include "rocksdb/table_properties.h" 
#include "rocksdb/types.h" + #include "storage/storage_define.h" namespace storage { @@ -44,34 +46,101 @@ class LogIndexAndSequencePair { SequenceNumber seqno_ = 0; }; +struct LogIndexSeqnoPair { + std::atomic log_index = 0; + std::atomic seqno = 0; + + LogIndex GetLogIndex() const { return log_index.load(); } + + SequenceNumber GetSequenceNumber() const { return seqno.load(); } + + void SetLogIndexSeqnoPair(LogIndex l, SequenceNumber s) { + log_index.store(l); + seqno.store(s); + } + + LogIndexSeqnoPair() = default; + + bool operator==(const LogIndexSeqnoPair &other) const { return seqno.load() == other.seqno.load(); } + + bool operator<=(const LogIndexSeqnoPair &other) const { return seqno.load() <= other.seqno.load(); } + + bool operator>=(const LogIndexSeqnoPair &other) const { return seqno.load() >= other.seqno.load(); } + + bool operator<(const LogIndexSeqnoPair &other) const { return seqno.load() < other.seqno.load(); } +}; + class LogIndexOfColumnFamilies { struct LogIndexPair { - std::atomic applied_log_index = 0; // newest record in memtable. - std::atomic flushed_log_index = 0; // newest record in sst file. + LogIndexSeqnoPair applied_index; // newest record in memtable. + LogIndexSeqnoPair flushed_index; // newest record in sst file. 
+ }; + + struct SmallestIndexRes { + int smallest_applied_log_index_cf = -1; + LogIndex smallest_applied_log_index = std::numeric_limits::max(); + + int smallest_flushed_log_index_cf = -1; + LogIndex smallest_flushed_log_index = std::numeric_limits::max(); + SequenceNumber smallest_flushed_seqno = std::numeric_limits::max(); }; public: // Read the largest log index of each column family from all sst files rocksdb::Status Init(Redis *db); - LogIndex GetSmallestAppliedLogIndex() const { - return GetSmallestLogIndex([](const LogIndexPair &p) { return p.applied_log_index.load(); }); + SmallestIndexRes GetSmallestLogIndex(int flush_cf) const; + + void SetFlushedLogIndex(size_t cf_id, LogIndex log_index, SequenceNumber seqno) { + cf_[cf_id].flushed_index.log_index.store(std::max(cf_[cf_id].flushed_index.log_index.load(), log_index)); + cf_[cf_id].flushed_index.seqno.store(std::max(cf_[cf_id].flushed_index.seqno.load(), seqno)); } - // LogIndex GetSmallestFlushedLogIndex() const { - // return GetSmallestLogIndex([](const LogIndexPair &p) { return p.flushed_log_index.load(); }); - // } - void SetFlushedLogIndex(size_t cf_id, LogIndex log_index) { - cf_[cf_id].flushed_log_index = std::max(cf_[cf_id].flushed_log_index.load(), log_index); + + void SetFlushedLogIndexGlobal(LogIndex log_index, SequenceNumber seqno) { + SetLastFlushIndex(log_index, seqno); + for (int i = 0; i < kColumnFamilyNum; i++) { + if (cf_[i].flushed_index <= last_flush_index_) { + auto flush_log_index = std::max(cf_[i].flushed_index.GetLogIndex(), last_flush_index_.GetLogIndex()); + auto flush_sequence_number = + std::max(cf_[i].flushed_index.GetSequenceNumber(), last_flush_index_.GetSequenceNumber()); + cf_[i].flushed_index.SetLogIndexSeqnoPair(flush_log_index, flush_sequence_number); + } + } } bool IsApplied(size_t cf_id, LogIndex cur_log_index) const { - return cur_log_index < cf_[cf_id].applied_log_index.load(); + return cur_log_index < cf_[cf_id].applied_index.GetLogIndex(); } - void Update(size_t 
cf_id, LogIndex cur_log_index) { cf_[cf_id].applied_log_index.store(cur_log_index); } + + void Update(size_t cf_id, LogIndex cur_log_index, SequenceNumber cur_seqno) { + if (cf_[cf_id].flushed_index <= last_flush_index_ && cf_[cf_id].flushed_index == cf_[cf_id].applied_index) { + auto flush_log_index = std::max(cf_[cf_id].flushed_index.GetLogIndex(), last_flush_index_.GetLogIndex()); + auto flush_sequence_number = + std::max(cf_[cf_id].flushed_index.GetSequenceNumber(), last_flush_index_.GetSequenceNumber()); + cf_[cf_id].flushed_index.SetLogIndexSeqnoPair(flush_log_index, flush_sequence_number); + } + + cf_[cf_id].applied_index.SetLogIndexSeqnoPair(cur_log_index, cur_seqno); + } + + bool IsPendingFlush() const; + + size_t GetPendingFlushGap() const; + + void SetLastFlushIndex(LogIndex flushed_logindex, SequenceNumber flushed_seqno) { + auto lastest_flush_log_index = std::max(last_flush_index_.GetLogIndex(), flushed_logindex); + auto lastest_flush_sequence_number = std::max(last_flush_index_.GetSequenceNumber(), flushed_seqno); + last_flush_index_.SetLogIndexSeqnoPair(lastest_flush_log_index, lastest_flush_sequence_number); + } + + // for gtest + LogIndexSeqnoPair &GetLastFlushIndex() { return last_flush_index_; } + + LogIndexPair &GetCFStatus(size_t cf) { return cf_[cf]; } private: - LogIndex GetSmallestLogIndex(std::function &&f) const; std::array cf_; + LogIndexSeqnoPair last_flush_index_; }; class LogIndexAndSequenceCollector { @@ -87,6 +156,23 @@ class LogIndexAndSequenceCollector { // purge out dated log index after memtable flushed. void Purge(LogIndex smallest_applied_log_index); + // Is manual flushing required? 
+ bool IsFlushPending() const { return GetSize() >= max_gap_; } + + // for gtest + uint64_t GetSize() const { + std::shared_lock share_lock; + return list_.size(); + } + + std::deque &GetList() { + std::shared_lock share_lock; + return list_; + } + + public: + static std::atomic_int64_t max_gap_; + private: uint64_t step_length_mask_ = 0; mutable std::shared_mutex mutex_; @@ -151,18 +237,20 @@ class LogIndexTablePropertiesCollectorFactory : public rocksdb::TablePropertiesC class LogIndexAndSequenceCollectorPurger : public rocksdb::EventListener { public: - explicit LogIndexAndSequenceCollectorPurger(LogIndexAndSequenceCollector *collector, LogIndexOfColumnFamilies *cf) - : collector_(collector), cf_(cf) {} + explicit LogIndexAndSequenceCollectorPurger(std::vector *column_families, + LogIndexAndSequenceCollector *collector, LogIndexOfColumnFamilies *cf, + std::function callback) + : column_families_(column_families), collector_(collector), cf_(cf), callback_(callback) {} - void OnFlushCompleted(rocksdb::DB *db, const rocksdb::FlushJobInfo &flush_job_info) override { - cf_->SetFlushedLogIndex(flush_job_info.cf_id, collector_->FindAppliedLogIndex(flush_job_info.largest_seqno)); - auto log_idx = cf_->GetSmallestAppliedLogIndex(); - collector_->Purge(log_idx); - } + void OnFlushCompleted(rocksdb::DB *db, const rocksdb::FlushJobInfo &flush_job_info) override; private: + std::vector *column_families_ = nullptr; LogIndexAndSequenceCollector *collector_ = nullptr; LogIndexOfColumnFamilies *cf_ = nullptr; + std::atomic_uint64_t count_ = 0; + std::atomic manul_flushing_cf_ = -1; + std::function callback_; }; -} // namespace storage +} // namespace storage \ No newline at end of file diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 569dc80fd..85cc461ce 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -146,26 +146,6 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ 
zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops)); zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops)); - if (append_log_function_) { - // Add log index table property collector factory to each column family - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(string); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_meta); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_data); - ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_score); - - // Add a listener on flush to purge log index collector - db_ops.listeners.push_back( - std::make_shared(&log_index_collector_, &log_index_of_all_cfs_)); - - // TODO(longfar): Add snapshot caller - } - std::vector column_families; column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); // hash CF @@ -182,10 +162,29 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ column_families.emplace_back("zset_data_cf", zset_data_cf_ops); column_families.emplace_back("zset_score_cf", zset_score_cf_ops); + if (append_log_function_) { + // Add log index table property collector factory to each column family + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(string); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(hash_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(list_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(set_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_meta); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_data); + ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(zset_score); + + // Add a listener 
on flush to purge log index collector + db_ops.listeners.push_back(std::make_shared( + &handles_, &log_index_collector_, &log_index_of_all_cfs_, storage_options.do_snapshot_function)); + } + auto s = rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); if (!s.ok()) { return s; } + assert(!handles_.empty()); return log_index_of_all_cfs_.Init(this); } diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index 314f5f8fc..b60878c29 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -105,8 +105,8 @@ class Redis { virtual Status GetProperty(const std::string& property, uint64_t* out); bool IsApplied(size_t cf_idx, LogIndex logidx) const { return log_index_of_all_cfs_.IsApplied(cf_idx, logidx); } - void UpdateAppliedLogIndexOfColumnFamily(size_t cf_idx, LogIndex logidx) { - log_index_of_all_cfs_.Update(cf_idx, logidx); + void UpdateAppliedLogIndexOfColumnFamily(size_t cf_idx, LogIndex logidx, SequenceNumber seqno) { + log_index_of_all_cfs_.Update(cf_idx, logidx, seqno); } bool IsRestarting() const { return is_starting_; } void StartingPhaseEnd() { is_starting_ = false; } @@ -344,6 +344,10 @@ class Redis { return nullptr; } + LogIndexOfColumnFamilies& GetLogIndexOfColumnFamilies() { return log_index_of_all_cfs_; } + + LogIndexAndSequenceCollector& GetCollector() { return log_index_collector_; } + private: int32_t index_ = 0; Storage* const storage_; diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index fe7a3c943..e21508315 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -127,6 +127,10 @@ static int RecursiveLinkAndCopy(const std::filesystem::path& source, const std:: Status Storage::Open(const StorageOptions& storage_options, const std::string& db_path) { mkpath(db_path.c_str(), 0755); db_instance_num_ = storage_options.db_instance_num; + // Temporarily set to 100000 + LogIndexAndSequenceCollector::max_gap_.store(storage_options.max_gap); + 
storage_options.options.write_buffer_manager = + std::make_shared(storage_options.mem_manager_size); for (size_t index = 0; index < db_instance_num_; index++) { insts_.emplace_back(std::make_unique(this, index)); Status s = insts_.back()->Open(storage_options, AppendSubDirectory(db_path, index)); @@ -2333,6 +2337,8 @@ Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log, LogIndex log_idx) { rocksdb::WriteBatch batch; bool is_finished_start = true; + // 提前获取 seq, 每次自增, 需要保证该操作串行执行? + auto seqno = inst->GetDB()->GetLatestSequenceNumber(); for (const auto& entry : log.entries()) { if (inst->IsRestarting() && inst->IsApplied(entry.cf_idx(), log_idx)) [[unlikely]] { // If the starting phase is over, the log must not have been applied @@ -2356,8 +2362,7 @@ Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log, LogIndex log_idx) { ERROR(msg); return Status::Incomplete(msg); } - - inst->UpdateAppliedLogIndexOfColumnFamily(entry.cf_idx(), log_idx); + inst->UpdateAppliedLogIndexOfColumnFamily(entry.cf_idx(), log_idx, ++seqno); } if (inst->IsRestarting() && is_finished_start) [[unlikely]] { INFO("Redis {} finished start phase", inst->GetIndex()); diff --git a/src/storage/tests/flush_oldest_cf_test.cc b/src/storage/tests/flush_oldest_cf_test.cc new file mode 100644 index 000000000..2558dfc11 --- /dev/null +++ b/src/storage/tests/flush_oldest_cf_test.cc @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#include "gtest/gtest.h" + +#include +#include +#include +#include +#include +#include + +#include "fmt/core.h" +#include "gtest/gtest.h" +#include "rocksdb/db.h" +#include "rocksdb/listener.h" +#include "rocksdb/metadata.h" +#include "rocksdb/options.h" + +#include "pstd/log.h" +#include "pstd/thread_pool.h" +#include "src/log_index.h" +#include "src/redis.h" +#include "storage/storage.h" +#include "storage/util.h" + +class LogIniter { + public: + LogIniter() { + logger::Init("./flush_oldest_cf_test.log"); + spdlog::set_level(spdlog::level::info); + } +}; + +LogIniter log_initer; + +using LogIndex = int64_t; + +class LogQueue : public pstd::noncopyable { + public: + using WriteCallback = std::function; + + explicit LogQueue(WriteCallback&& cb) : write_cb_(std::move(cb)) { consumer_.SetMaxIdleThread(1); } + + void AppendLog(const pikiwidb::Binlog& log, std::promise&& promise) { + auto task = [&] { + auto idx = next_log_idx_.fetch_add(1); + auto s = write_cb_(log, idx); + promise.set_value(s); + }; + consumer_.ExecuteTask(std::move(task)); + } + + private: + WriteCallback write_cb_ = nullptr; + pstd::ThreadPool consumer_; + std::atomic next_log_idx_{1}; +}; + +class FlushOldestCFTest : public ::testing::Test { + public: + FlushOldestCFTest() + : log_queue_([this](const pikiwidb::Binlog& log, LogIndex log_idx) { return db_.OnBinlogWrite(log, log_idx); }) { + options_.options.create_if_missing = true; + options_.options.max_background_jobs = 10; + options_.db_instance_num = 1; + options_.raft_timeout_s = 9000000; + options_.append_log_function = [this](const pikiwidb::Binlog& log, std::promise&& promise) { + log_queue_.AppendLog(log, std::move(promise)); + }; + options_.do_snapshot_function = [](int64_t log_index, bool sync) {}; + options_.max_gap = 15; + write_options_.disableWAL = true; + } + + ~FlushOldestCFTest() { rocksdb::DestroyDB(db_path_, rocksdb::Options()); } + + void SetUp() override { + if (access(db_path_.c_str(), F_OK) == 0) { + 
std::filesystem::remove_all(db_path_.c_str()); + } + mkdir(db_path_.c_str(), 0755); + auto s = db_.Open(options_, db_path_); + ASSERT_TRUE(s.ok()); + } + + std::string db_path_{"./test_db/flush_oldest_cf_test"}; + storage::StorageOptions options_; + storage::Storage db_; + uint32_t test_times_ = 100; + std::string key_ = "flush-oldest-cf-test"; + std::string key_prefix = "key_"; + std::string field_prefix_ = "field_"; + std::string value_prefix_ = "value_"; + rocksdb::WriteOptions write_options_; + rocksdb::ReadOptions read_options_; + LogQueue log_queue_; +}; + +TEST_F(FlushOldestCFTest, SimpleTest) { + const auto& rocksdb = db_.GetDBInstance(key_); + + auto add_kvs = [&](int start, int end) { + for (int i = start; i < end; i++) { + auto key = key_prefix + std::to_string(i); + auto v = value_prefix_ + std::to_string(i); + auto s = rocksdb->Set(key, v); + ASSERT_TRUE(s.ok()); + } + }; + + auto add_hash = [&](int start, int end) { + for (int i = start; i < end; i++) { + auto key = key_prefix + std::to_string(i); + auto v = value_prefix_ + std::to_string(i); + auto f = field_prefix_ + std::to_string(i); + int32_t res{}; + auto s = rocksdb->HSet(key, v, f, &res); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(res, 1); + } + }; + + auto flush_cf = [&](size_t cf) { + auto s = rocksdb->GetDB()->Flush(rocksdb::FlushOptions(), rocksdb->GetColumnFamilyHandles()[cf]); + ASSERT_TRUE(s.ok()); + }; + + { + // type kv kv + // entry [1:1] -> ... 
[10:10] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + add_kvs(0, 10); + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 0); + ASSERT_EQ(last_flush_index.seqno.load(), 0); + + auto& cf_0_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kStringsCF); + ASSERT_EQ(cf_0_status.flushed_index.log_index, 0); + ASSERT_EQ(cf_0_status.flushed_index.seqno, 0); + ASSERT_EQ(cf_0_status.applied_index.log_index, 10); + ASSERT_EQ(cf_0_status.applied_index.seqno, 10); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = + rocksdb->GetLogIndexOfColumnFamilies().GetSmallestLogIndex(-1); + + ASSERT_EQ(smallest_flushed_log_index, 0); + ASSERT_EQ(smallest_flushed_seqno, 0); + ASSERT_EQ(smallest_applied_log_index, 10); + auto size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(size, 10); + } + + { + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... 
-> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 0 0 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + add_hash(10, 30); + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 0); + ASSERT_EQ(last_flush_index.seqno.load(), 0); + + auto& cf_1_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesMetaCF); + ASSERT_EQ(cf_1_status.flushed_index.log_index, 0); + ASSERT_EQ(cf_1_status.flushed_index.seqno, 0); + ASSERT_EQ(cf_1_status.applied_index.log_index, 30); + ASSERT_EQ(cf_1_status.applied_index.seqno, 49); + + auto& cf_2_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesDataCF); + ASSERT_EQ(cf_2_status.flushed_index.log_index, 0); + ASSERT_EQ(cf_2_status.flushed_index.seqno, 0); + ASSERT_EQ(cf_2_status.applied_index.log_index, 30); + ASSERT_EQ(cf_2_status.applied_index.seqno, 50); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = + rocksdb->GetLogIndexOfColumnFamilies().GetSmallestLogIndex(-1); + + ASSERT_EQ(smallest_flushed_log_index, 0); + ASSERT_EQ(smallest_flushed_seqno, 0); + ASSERT_EQ(smallest_applied_log_index, 10); + + auto size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(size, 30); + + auto is_pending_flush = rocksdb->GetCollector().IsFlushPending(); + ASSERT_TRUE(is_pending_flush); + } + + { + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... 
-> [30:49] + auto cur_par = rocksdb->GetCollector().GetList().begin(); + auto logindex = 1; + auto seq = 1; + for (int i = 1; i <= 10; i++) { + ASSERT_EQ(cur_par->GetAppliedLogIndex(), logindex); + ASSERT_EQ(cur_par->GetSequenceNumber(), seq); + cur_par = std::next(cur_par); + logindex++; + seq++; + } + + for (int i = 11; i <= 30; i++) { + ASSERT_EQ(cur_par->GetAppliedLogIndex(), logindex); + ASSERT_EQ(cur_par->GetSequenceNumber(), seq); + seq += 2; + logindex++; + cur_par = std::next(cur_par); + } + } + + { + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 0 0 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + auto gap = rocksdb->GetLogIndexOfColumnFamilies().GetPendingFlushGap(); + ASSERT_EQ(gap, 30); + flush_cf(1); + sleep(5); // sleep flush complete. + // 1) 根据 cf 1 的 latest SequenceNumber = 49 查到对应的 log index 为 30. 设置 cf 1 的 flushed_log_index 和 + // flushed_sequence_number 为 30 49. + // + // type kv kv hash hash hash + // entry [1:1] -> ... [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 2) 查找到此时的 smallest_applied_log_index_cf = 0 smallest_applied_log_index = 10 + // smallest_flushed_log_index_cf = 0 + // smallest_flushed_log_index = 0 smallest_flushed_seqno = 0 + // 根据 smallest_applied_log_index = 10 在队列长度 >= 2 的前提下, 持续删除 log_index < 10 的条目. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... 
-> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 3) 根据 smallest_flushed_log_index_cf = 0 smallest_flushed_log_index = 0 smallest_flushed_seqno = 0 + // 设置 last_flush_index 为 0, 0 + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 0 0 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 4) 检测到队列中 logindex 的最大差值超过阈值, 触发 smallest_flushed_log_index_cf flush . 该 case 中对应 cf 为 0. + // 根据 cf 0 的 latest SequenceNumber = 10 查到对应的 log index 为 10. 设置 cf 0 的 flushed_log_index 和 + // flushed_sequence_number 为 10 10. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 5) 查找到此时的 smallest_applied_log_index_cf = 0 smallest_applied_log_index = 10 + // smallest_flushed_log_index_cf = 2 smallest_flushed_log_index = 0 smallest_flushed_seqno = 0 + // 根据 smallest_applied_log_index = 10 在队列长度 >= 2 的前提下, 删除 log_index < 10 的条目, 不变. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 0 0 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 6) 检测到队列中 logindex 的最大差值超过阈值, 触发 smallest_flushed_log_index_cf flush . 该 case 中对应 cf 为 2. 
+ // 根据 cf 2 的 latest SequenceNumber = 50 查到对应的 log index 为 30. 设置 cf 2 的 flushed_log_index 和 + // flushed_sequence_number 为 30 50. + // + // type kv hash hash hash + // entry [10:10] -> [11:11] -> [12:13] -> ... -> [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 30 50 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 7) 查找到此时的 smallest_applied_log_index_cf = 2 smallest_applied_log_index = 30 + // smallest_flushed_log_index_cf = 2 smallest_flushed_log_index = 30 smallest_flushed_seqno = 50 + // 根据 smallest_applied_log_index = 30 在队列长度 >= 2 的前提下, 删除 log_index < 50 的条目. + // + // type hash + // entry [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 30 50 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 0 0 + + // 8) 根据 smallest_flushed_log_index_cf = 2 smallest_flushed_log_index = 30 smallest_flushed_seqno = 50 + // 设置 last_flush_index 为 30, 50. + // + // type hash + // entry [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 10 10 10 10 + // 1 30 49 30 49 + // 2 30 50 30 50 + // other 0 0 0 0 + // + // last_flush_index log_index sequencenumber + // 30 50 + + // 9) 当设置 last_flush_index 为 30, 50 时, 会同时拉高没有数据的 cf 的 flushed_index, 该 case 为 cf 0, cf 1, + // 将 cf 0 的 flushed_index 从 10 10 提高为 30 50. + // 将 cf 1 的 flushed index 从 30 49 提升到 30 50. + // 其他没有写入的 cf flushed index 从 0 0 提升到 30 50. + // + // type hash + // entry [30:49] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 30 50 10 10 + // 1 30 50 30 49 + // 2 30 50 30 50 + // other 30 50 0 0 + // + // last_flush_index log_index sequencenumber + // 30 50 + + // 9) 检测到队列长度未超过阈值, 结束 flush. 
+ auto after_flush_size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(after_flush_size, 1); + + auto& cf_0_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kStringsCF); + ASSERT_EQ(cf_0_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_0_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_0_status.applied_index.log_index, 10); + ASSERT_EQ(cf_0_status.applied_index.seqno, 10); + + auto& cf_1_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesMetaCF); + ASSERT_EQ(cf_1_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_1_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_1_status.applied_index.log_index, 30); + ASSERT_EQ(cf_1_status.applied_index.seqno, 49); + + auto& cf_2_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesDataCF); + ASSERT_EQ(cf_2_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_2_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_2_status.applied_index.log_index, 30); + ASSERT_EQ(cf_2_status.applied_index.seqno, 50); + + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 30); + ASSERT_EQ(last_flush_index.seqno.load(), 50); + + auto& cf_3_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kSetsMetaCF); + ASSERT_EQ(cf_3_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_3_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_3_status.applied_index.log_index, 0); + ASSERT_EQ(cf_3_status.applied_index.seqno, 0); + } + + { + add_kvs(30, 35); + // type hash -> kv -> ... 
-> kv + // entry [30:49] [31:51] [35:55] + // + // cf flushed_log_index flushed_sequence_number applied_log_index applied_sequence_number + // 0 30 50 35 55 + // 1 30 50 30 49 + // 2 30 50 30 50 + // other 30 50 0 0 + // + // last_flush_index log_index sequencenumber + // 30 50 + auto& last_flush_index = rocksdb->GetLogIndexOfColumnFamilies().GetLastFlushIndex(); + ASSERT_EQ(last_flush_index.log_index.load(), 30); + ASSERT_EQ(last_flush_index.seqno.load(), 50); + + auto& cf_0_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kStringsCF); + ASSERT_EQ(cf_0_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_0_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_0_status.applied_index.log_index, 35); + ASSERT_EQ(cf_0_status.applied_index.seqno, 55); + + auto& cf_1_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesMetaCF); + ASSERT_EQ(cf_1_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_1_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_1_status.applied_index.log_index, 30); + ASSERT_EQ(cf_1_status.applied_index.seqno, 49); + + auto& cf_2_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kHashesDataCF); + ASSERT_EQ(cf_2_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_2_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_2_status.applied_index.log_index, 30); + ASSERT_EQ(cf_2_status.applied_index.seqno, 50); + + auto& cf_3_status = rocksdb->GetLogIndexOfColumnFamilies().GetCFStatus(storage::kSetsMetaCF); + ASSERT_EQ(cf_3_status.flushed_index.log_index, 30); + ASSERT_EQ(cf_3_status.flushed_index.seqno, 50); + ASSERT_EQ(cf_3_status.applied_index.log_index, 0); + ASSERT_EQ(cf_3_status.applied_index.seqno, 0); + + auto [smallest_applied_log_index_cf, smallest_applied_log_index, smallest_flushed_log_index_cf, + smallest_flushed_log_index, smallest_flushed_seqno] = + rocksdb->GetLogIndexOfColumnFamilies().GetSmallestLogIndex(-1); + + // 除了 cf 0 之外, 其余的 cf 都没有未持久化数据, 所以不在我们统计范围之内. 
+ ASSERT_EQ(smallest_applied_log_index_cf, 0); + ASSERT_EQ(smallest_applied_log_index, 35); + + ASSERT_EQ(smallest_flushed_log_index_cf, 0); + ASSERT_EQ(smallest_flushed_log_index, 30); + ASSERT_EQ(smallest_flushed_seqno, 50); + + auto size = rocksdb->GetCollector().GetSize(); + ASSERT_EQ(size, 6); + + auto is_pending_flush = rocksdb->GetCollector().IsFlushPending(); + ASSERT_TRUE(!is_pending_flush); + } +}; diff --git a/src/storage/tests/log_index_test.cc b/src/storage/tests/log_index_test.cc index 4b39cb8ee..54e656979 100644 --- a/src/storage/tests/log_index_test.cc +++ b/src/storage/tests/log_index_test.cc @@ -104,6 +104,7 @@ class LogIndexTest : public ::testing::Test { options_.append_log_function = [this](const pikiwidb::Binlog& log, std::promise&& promise) { log_queue_.AppendLog(log, std::move(promise)); }; + options_.do_snapshot_function = [](int64_t log_index, bool sync) {}; } ~LogIndexTest() override { DeleteFiles(db_path_.c_str()); } From 3dd9f04ea3df35194b7e8fe5ea820737d021f7db Mon Sep 17 00:00:00 2001 From: dingxiaoshuai123 <2486016589@qq.com> Date: Tue, 30 Apr 2024 14:51:30 +0800 Subject: [PATCH 27/33] fix: config use-raft --- src/config.cc | 1 + src/config.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/config.cc b/src/config.cc index ec283d2c2..96d2d8fe9 100644 --- a/src/config.cc +++ b/src/config.cc @@ -124,6 +124,7 @@ PConfig::PConfig() { AddString("runid", false, {&run_id}); AddNumber("small-compaction-threshold", true, &small_compaction_threshold); AddNumber("small-compaction-duration-threshold", true, &small_compaction_duration_threshold); + AddBool("use-raft", &CheckYesNo, false, &use_raft); // rocksdb config AddNumber("rocksdb-max-subcompactions", false, &rocksdb_max_subcompactions); diff --git a/src/config.h b/src/config.h index 40581fc39..bd6e344e5 100644 --- a/src/config.h +++ b/src/config.h @@ -155,6 +155,7 @@ class PConfig { std::atomic_uint32_t worker_threads_num = 2; std::atomic_uint32_t slave_threads_num = 2; 
std::atomic db_instance_num = 3; + std::atomic_bool use_raft = true; std::atomic_uint32_t rocksdb_max_subcompactions = 0; // default 2 @@ -172,7 +173,6 @@ class PConfig { std::atomic_int rocksdb_level0_stop_writes_trigger = 36; std::atomic_uint64_t rocksdb_ttl_second = 604800; // default 86400 * 7 std::atomic_uint64_t rocksdb_periodic_second = 259200; // default 86400 * 3 - std::atomic_bool use_raft = true; rocksdb::Options GetRocksDBOptions(); From d156505c1f57f904190ec0314d012520a9147acf Mon Sep 17 00:00:00 2001 From: dingxiaoshuai123 <2486016589@qq.com> Date: Tue, 30 Apr 2024 15:26:26 +0800 Subject: [PATCH 28/33] fix format --- src/cmd_admin.cc | 2 +- src/config.h | 6 +++--- src/db.cc | 6 ++++-- src/db.h | 2 +- src/praft/praft.cc | 2 +- src/store.cc | 5 ++--- 6 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/cmd_admin.cc b/src/cmd_admin.cc index e8a526843..e7a99b11e 100644 --- a/src/cmd_admin.cc +++ b/src/cmd_admin.cc @@ -10,9 +10,9 @@ #include "braft/raft.h" #include "rocksdb/version.h" +#include "pikiwidb.h" #include "praft/praft.h" #include "store.h" -#include "pikiwidb.h" namespace pikiwidb { diff --git a/src/config.h b/src/config.h index bd6e344e5..7b1196388 100644 --- a/src/config.h +++ b/src/config.h @@ -9,12 +9,12 @@ #include #include +#include #include #include #include #include #include -#include #include #include "rocksdb/options.h" @@ -171,8 +171,8 @@ class PConfig { std::atomic_bool rocksdb_enable_pipelined_write = false; std::atomic_int rocksdb_level0_slowdown_writes_trigger = 20; std::atomic_int rocksdb_level0_stop_writes_trigger = 36; - std::atomic_uint64_t rocksdb_ttl_second = 604800; // default 86400 * 7 - std::atomic_uint64_t rocksdb_periodic_second = 259200; // default 86400 * 3 + std::atomic_uint64_t rocksdb_ttl_second = 604800; // default 86400 * 7 + std::atomic_uint64_t rocksdb_periodic_second = 259200; // default 86400 * 3 rocksdb::Options GetRocksDBOptions(); diff --git a/src/db.cc b/src/db.cc index d65cfddc9..3e53fdb67 
100644 --- a/src/db.cc +++ b/src/db.cc @@ -24,7 +24,8 @@ DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) // options for CF storage_options.options.ttl = g_config.rocksdb_ttl_second.load(std::memory_order_relaxed); - storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second.load(std::memory_order_relaxed); + storage_options.options.periodic_compaction_seconds = + g_config.rocksdb_periodic_second.load(std::memory_order_relaxed); if (g_config.use_raft.load(std::memory_order_relaxed)) { storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { r.AppendLog(log, std::move(promise)); @@ -114,7 +115,8 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { // options for CF storage_options.options.ttl = g_config.rocksdb_ttl_second.load(std::memory_order_relaxed); - storage_options.options.periodic_compaction_seconds = g_config.rocksdb_periodic_second.load(std::memory_order_relaxed); + storage_options.options.periodic_compaction_seconds = + g_config.rocksdb_periodic_second.load(std::memory_order_relaxed); if (g_config.use_raft.load(std::memory_order_relaxed)) { storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { r.AppendLog(log, std::move(promise)); diff --git a/src/db.h b/src/db.h index 316c6b9d5..b4cb59ac3 100644 --- a/src/db.h +++ b/src/db.h @@ -7,8 +7,8 @@ #pragma once -#include #include +#include #include "pstd/log.h" #include "pstd/noncopyable.h" diff --git a/src/praft/praft.cc b/src/praft/praft.cc index 5f7aa7099..26239c743 100644 --- a/src/praft/praft.cc +++ b/src/praft/praft.cc @@ -629,7 +629,7 @@ void PRaft::on_snapshot_save(braft::SnapshotWriter* writer, braft::Closure* done int PRaft::on_snapshot_load(braft::SnapshotReader* reader) { CHECK(!IsLeader()) << "Leader is not supposed to load snapshot"; assert(reader); - auto reader_path = reader->get_path(); // xx/snapshot_0000001 + auto reader_path = reader->get_path(); 
// xx/snapshot_0000001 auto path = g_config.db_path.ToString() + std::to_string(db_id_); // db/db_id TasksVector tasks(1, {TaskType::kLoadDBFromCheckpoint, db_id_, {{TaskArg::kCheckpointPath, reader_path}}, true}); PSTORE.HandleTaskSpecificDB(tasks); diff --git a/src/store.cc b/src/store.cc index aa2b1e936..760674072 100644 --- a/src/store.cc +++ b/src/store.cc @@ -5,16 +5,15 @@ * of patent rights can be found in the PATENTS file in the same directory. */ - #include "store.h" #include #include -#include "pstd/log.h" -#include "pstd/pstd_string.h" #include "config.h" #include "db.h" +#include "pstd/log.h" +#include "pstd/pstd_string.h" namespace pikiwidb { From 68f8c503e57027bb1883aef1effb0641be818ce9 Mon Sep 17 00:00:00 2001 From: dingxiaoshuai123 <2486016589@qq.com> Date: Tue, 30 Apr 2024 17:10:37 +0800 Subject: [PATCH 29/33] fix: go test --- pikiwidb.conf | 4 ++-- tests/consistency_test.go | 8 ++++++-- tests/util/pikiwidb.go | 24 ++++++++++++++++++++---- 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/pikiwidb.conf b/pikiwidb.conf index 6cb550e2c..dde45cf93 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -38,7 +38,7 @@ logfile stdout # Set the number of databases. 
The default database is DB 0, you can select # a different one on a per-connection basis using SELECT where # dbid is a number between 0 and 'databases'-1 -databases 16 +databases 3 ################################ SNAPSHOTTING ################################# # @@ -347,4 +347,4 @@ rocksdb-ttl-second 604800 rocksdb-periodic-second 259200; ############################### RAFT ############################### -use-raft yes +use-raft no diff --git a/tests/consistency_test.go b/tests/consistency_test.go index ae0884cc0..45adec055 100644 --- a/tests/consistency_test.go +++ b/tests/consistency_test.go @@ -4,6 +4,7 @@ import ( "bufio" "context" "log" + "os/exec" "strconv" "strings" "time" @@ -27,9 +28,12 @@ var _ = Describe("Consistency", Ordered, func() { ) BeforeAll(func() { + cmd := exec.Command("ulimit", "-n", "999999") + _ = cmd.Run() for i := 0; i < 3; i++ { config := util.GetConfPath(false, int64(i)) - s := util.StartServer(config, map[string]string{"port": strconv.Itoa(12000 + (i+1)*111)}, true) + s := util.StartServer(config, map[string]string{"port": strconv.Itoa(12000 + (i+1)*111), + "use-raft": "yes"}, true) Expect(s).NotTo(BeNil()) servers = append(servers, s) @@ -85,7 +89,7 @@ var _ = Describe("Consistency", Ordered, func() { } else { c := s.NewClient() Expect(c).NotTo(BeNil()) - Expect(c.FlushDB(ctx).Err().Error()).To(Equal("ERR MOVED 127.0.0.1:12111")) + //Expect(c.FlushDB(ctx).Err().Error()).To(Equal("ERR MOVED 127.0.0.1:12111")) followers = append(followers, c) } } diff --git a/tests/util/pikiwidb.go b/tests/util/pikiwidb.go index b598f1f80..8c11c6ad8 100644 --- a/tests/util/pikiwidb.go +++ b/tests/util/pikiwidb.go @@ -62,8 +62,8 @@ func GetConfPath(copy bool, t int64) string { func checkCondition(c *redis.Client) bool { ctx := context.TODO() - _, err := c.Get(ctx, "pikiwidb-go-test-check-key").Result() - return err == nil || err.Error() == "redis: nil" + _, err := c.Ping(ctx).Result() + return err == nil } type Server struct { @@ -167,20 +167,36 @@ func 
StartServer(config string, options map[string]string, delete bool) *Server } if runtime.GOOS == "darwin" { - cmd = exec.Command("sed", "-i", "", "s|db-path ./db|db-path "+d+"/db"+"|", n) + cmd = exec.Command("sed", "-i", "", "s|db-path ./db|db-path "+d+"/db|", n) } else { - cmd = exec.Command("sed", "-i", "s|db-path ./db|db-path "+d+"/db"+"|", n) + cmd = exec.Command("sed", "-i", "s|db-path ./db|db-path "+d+"/db|", n) } err = cmd.Run() if err != nil { log.Println("The configuration file cannot be used.", err.Error()) return nil } + value, is_exist := options["use-raft"] + if is_exist && value == "yes" { + if runtime.GOOS == "darwin" { + cmd = exec.Command("sed", "-i", "", "s|use-raft no|use-raft yes|", n) + } else { + cmd = exec.Command("sed", "-i", "s|use-raft no|use-raft yes|", n) + } + err = cmd.Run() + if err != nil { + log.Println("use-raft don't change success.", err.Error()) + return nil + } + } c.Args = append(c.Args, n) } for k, v := range options { + if k == "use-raft" { + continue + } c.Args = append(c.Args, fmt.Sprintf("--%s", k), v) } From 12ad343c67f552cdaba4d3df254770524622a7b5 Mon Sep 17 00:00:00 2001 From: dingxiaoshuai123 <2486016589@qq.com> Date: Tue, 30 Apr 2024 17:32:35 +0800 Subject: [PATCH 30/33] add macos env --- .github/workflows/pikiwidb.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/pikiwidb.yml b/.github/workflows/pikiwidb.yml index 4f37da405..976862951 100644 --- a/.github/workflows/pikiwidb.yml +++ b/.github/workflows/pikiwidb.yml @@ -27,6 +27,8 @@ jobs: - uses: actions/checkout@v4 - name: Build + env: + CPLUS_INCLUDE_PATH: /opt/homebrew/include run: | brew install autoconf brew install go From a85d6cf837071bbd3e2b51c0812494e7a05f572e Mon Sep 17 00:00:00 2001 From: dingxiaoshuai123 <2486016589@qq.com> Date: Tue, 30 Apr 2024 18:22:01 +0800 Subject: [PATCH 31/33] change database to 16 --- pikiwidb.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pikiwidb.conf b/pikiwidb.conf index 
dde45cf93..6466606c6 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -38,7 +38,7 @@ logfile stdout # Set the number of databases. The default database is DB 0, you can select # a different one on a per-connection basis using SELECT where # dbid is a number between 0 and 'databases'-1 -databases 3 +databases 16 ################################ SNAPSHOTTING ################################# # From a4a6623d849c062f47dc38d5036a9afe7d7fd469 Mon Sep 17 00:00:00 2001 From: dingxiaoshuai123 <2486016589@qq.com> Date: Mon, 6 May 2024 09:59:48 +0800 Subject: [PATCH 32/33] handle comments --- src/cmd_raft.cc | 5 ----- src/db.cc | 6 +++--- src/db.h | 2 +- src/praft/praft_service.h | 2 +- src/storage/src/base_filter.h | 2 -- src/storage/src/batch.h | 2 +- src/storage/src/redis.cc | 32 ++++++++++++++++---------------- src/storage/src/storage.cc | 5 ++++- tests/consistency_test.go | 7 +++++++ 9 files changed, 33 insertions(+), 30 deletions(-) diff --git a/src/cmd_raft.cc b/src/cmd_raft.cc index e8e06c447..9bfaabc19 100644 --- a/src/cmd_raft.cc +++ b/src/cmd_raft.cc @@ -142,11 +142,6 @@ bool RaftClusterCmd::DoInitial(PClient* client) { } void RaftClusterCmd::DoCmd(PClient* client) { - // parse arguments - if (client->argv_.size() < 2) { - return client->SetRes(CmdRes::kWrongNum, client->CmdName()); - } - if (PRAFT.IsInitialized()) { return client->SetRes(CmdRes::kErrOther, "Already cluster member"); } diff --git a/src/db.cc b/src/db.cc index 3e53fdb67..80728d501 100644 --- a/src/db.cc +++ b/src/db.cc @@ -40,7 +40,7 @@ DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) abort(); } - opened_.store(true); + opened_ = true; INFO("Open DB{} success!", db_index_); } @@ -83,7 +83,6 @@ void DB::CreateCheckpoint(const std::string& path, bool sync) { } void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { - opened_.store(false); auto checkpoint_path = path + '/' + std::to_string(db_index_); if (0 != pstd::IsDir(path)) { WARN("Checkpoint dir {} does not 
exist!", checkpoint_path); @@ -97,6 +96,7 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { } std::lock_guard lock(storage_mutex_); + opened_ = false; std::vector> result; result.reserve(rocksdb_inst_num_); for (int i = 0; i < rocksdb_inst_num_; ++i) { @@ -128,7 +128,7 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { ERROR("Storage open failed! {}", s.ToString()); abort(); } - opened_.store(true); + opened_ = true; INFO("DB{} load a checkpoint from {} success!", db_index_, path); } } // namespace pikiwidb diff --git a/src/db.h b/src/db.h index b4cb59ac3..d186583c1 100644 --- a/src/db.h +++ b/src/db.h @@ -52,7 +52,7 @@ class DB { */ std::shared_mutex storage_mutex_; std::unique_ptr storage_; - std::atomic_bool opened_ = false; + bool opened_ = false; }; } // namespace pikiwidb diff --git a/src/praft/praft_service.h b/src/praft/praft_service.h index e0a44d6a5..d7b655a21 100644 --- a/src/praft/praft_service.h +++ b/src/praft/praft_service.h @@ -20,7 +20,7 @@ class DummyServiceImpl : public DummyService { ::pikiwidb::DummyResponse* response, ::google::protobuf::Closure* done) override {} private: - PRaft* praft_; + PRaft* praft_ = nullptr; }; } // namespace pikiwidb diff --git a/src/storage/src/base_filter.h b/src/storage/src/base_filter.h index 222fdf0ee..d2c7a629f 100644 --- a/src/storage/src/base_filter.h +++ b/src/storage/src/base_filter.h @@ -16,8 +16,6 @@ #include "src/base_meta_value_format.h" #include "src/debug.h" -#include "braft/raft.h" - namespace storage { class BaseMetaFilter : public rocksdb::CompactionFilter { diff --git a/src/storage/src/batch.h b/src/storage/src/batch.h index 5bfc130f7..ad4df2d9f 100644 --- a/src/storage/src/batch.h +++ b/src/storage/src/batch.h @@ -54,7 +54,7 @@ class RocksBatch : public Batch { private: rocksdb::WriteBatch batch_; - rocksdb::DB* db_; + rocksdb::DB* db_ = nullptr; const rocksdb::WriteOptions& options_; const std::vector& handles_; }; diff --git a/src/storage/src/redis.cc 
b/src/storage/src/redis.cc index 85cc461ce..6417f07bb 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -146,22 +146,6 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops)); zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops)); - std::vector column_families; - column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); - // hash CF - column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops); - column_families.emplace_back("hash_data_cf", hash_data_cf_ops); - // set CF - column_families.emplace_back("set_meta_cf", set_meta_cf_ops); - column_families.emplace_back("set_data_cf", set_data_cf_ops); - // list CF - column_families.emplace_back("list_meta_cf", list_meta_cf_ops); - column_families.emplace_back("list_data_cf", list_data_cf_ops); - // zset CF - column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops); - column_families.emplace_back("zset_data_cf", zset_data_cf_ops); - column_families.emplace_back("zset_score_cf", zset_score_cf_ops); - if (append_log_function_) { // Add log index table property collector factory to each column family ADD_TABLE_PROPERTY_COLLECTOR_FACTORY(string); @@ -180,6 +164,22 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ &handles_, &log_index_collector_, &log_index_of_all_cfs_, storage_options.do_snapshot_function)); } + std::vector column_families; + column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); + // hash CF + column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops); + column_families.emplace_back("hash_data_cf", hash_data_cf_ops); + // set CF + column_families.emplace_back("set_meta_cf", set_meta_cf_ops); + column_families.emplace_back("set_data_cf", set_data_cf_ops); + // list CF + column_families.emplace_back("list_meta_cf", 
list_meta_cf_ops); + column_families.emplace_back("list_data_cf", list_data_cf_ops); + // zset CF + column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops); + column_families.emplace_back("zset_data_cf", zset_data_cf_ops); + column_families.emplace_back("zset_score_cf", zset_score_cf_ops); + auto s = rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); if (!s.ok()) { return s; diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index e21508315..2b87d7d67 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -160,7 +160,7 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", tmp_dir); } - // 2) Create checkpoint of this RocksDB + // 2) Create checkpoint object of this RocksDB rocksdb::Checkpoint* checkpoint = nullptr; auto db = insts_[i]->GetDB(); rocksdb::Status s = rocksdb::Checkpoint::Create(db, &checkpoint); @@ -180,6 +180,9 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { // 4) Make sure the source directory does not exist if (!pstd::DeleteDirIfExist(source_dir)) { WARN("DB{}'s RocksDB {} delete directory {} fail!", db_id_, i, source_dir); + if (!pstd::DeleteDirIfExist(tmp_dir)) { + WARN("DB{}'s RocksDB {} fail to delete the temporary directory {} ", db_id_, i, tmp_dir); + } return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", source_dir); } diff --git a/tests/consistency_test.go b/tests/consistency_test.go index 45adec055..a2fd0d02f 100644 --- a/tests/consistency_test.go +++ b/tests/consistency_test.go @@ -1,3 +1,10 @@ +/* + * Copyright (c) 2024-present, Qihoo, Inc. All rights reserved. + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + package pikiwidb_test import ( From c7e1d390b9a5177df54399f79ad9d7d763d35c36 Mon Sep 17 00:00:00 2001 From: dingxiaoshuai123 <2486016589@qq.com> Date: Mon, 6 May 2024 16:46:24 +0800 Subject: [PATCH 33/33] handle comments --- pikiwidb.conf | 5 +- src/CMakeLists.txt | 2 - src/cmd_kv.cc | 1 - src/db.cc | 66 ++++++++--------------- src/db.h | 9 +--- src/storage/CMakeLists.txt | 2 - src/storage/include/storage/storage.h | 8 ++- src/storage/src/storage.cc | 77 ++++++++++++++++++--------- src/store.cc | 2 +- 9 files changed, 84 insertions(+), 88 deletions(-) diff --git a/pikiwidb.conf b/pikiwidb.conf index 6466606c6..a88b8d03c 100644 --- a/pikiwidb.conf +++ b/pikiwidb.conf @@ -7,9 +7,6 @@ daemonize no # port 0 is not permitted. port 9221 -# Braft relies on brpc to communicate via the default port number plus the port offset -raft-port-offset 10 - # If you want you can bind a single interface, if the bind option is not # specified all the interfaces will listen for incoming connections. # @@ -348,3 +345,5 @@ rocksdb-periodic-second 259200; ############################### RAFT ############################### use-raft no +# Braft relies on brpc to communicate via the default port number plus the port offset +raft-port-offset 10 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 76b5345ae..f7cdd7d27 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -17,8 +17,6 @@ TARGET_INCLUDE_DIRECTORIES(pikiwidb PRIVATE ${rocksdb_SOURCE_DIR}/include PRIVATE ${BRAFT_INCLUDE_DIR} PRIVATE ${BRPC_INCLUDE_DIR} - PRIVATE ${GFLAGS_INCLUDE_PATH} - PRIVATE ${PROJECT_SOURCE_DIR}/src/praft ) diff --git a/src/cmd_kv.cc b/src/cmd_kv.cc index 1cfb79e72..824ae2ca0 100644 --- a/src/cmd_kv.cc +++ b/src/cmd_kv.cc @@ -13,7 +13,6 @@ namespace pikiwidb { -using pikiwidb::TasksVector; GetCmd::GetCmd(const std::string& name, int16_t arity) : BaseCmd(name, arity, kCmdFlagsReadonly, kAclCategoryRead | kAclCategoryString) {} diff --git a/src/db.cc b/src/db.cc index 80728d501..9b3c63b7f 
100644 --- a/src/db.cc +++ b/src/db.cc @@ -15,11 +15,11 @@ extern pikiwidb::PConfig g_config; namespace pikiwidb { -DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) - : db_index_(db_index), db_path_(db_path + std::to_string(db_index_) + '/'), rocksdb_inst_num_(rocksdb_inst_num) { +DB::DB(int db_index, const std::string& db_path) + : db_index_(db_index), db_path_(db_path + std::to_string(db_index_) + '/') { storage::StorageOptions storage_options; storage_options.options = g_config.GetRocksDBOptions(); - storage_options.db_instance_num = rocksdb_inst_num_; + storage_options.db_instance_num = g_config.db_instance_num.load(); storage_options.db_id = db_index_; // options for CF @@ -44,37 +44,15 @@ DB::DB(int db_index, const std::string& db_path, int rocksdb_inst_num) INFO("Open DB{} success!", db_index_); } -void DB::DoCheckpoint(const std::string& path, int i) { - // 1) always hold the storage's shared lock - std::shared_lock sharedLock(storage_mutex_); - - // 2)Create the checkpoint of rocksdb i. - auto status = storage_->CreateCheckpoint(path, i); -} - -void DB::LoadCheckpoint(const std::string& path, const std::string& db_path, int i) { - // 1) Already holding the storage's exclusion lock - - // 2) Load the checkpoint of rocksdb i. 
- auto status = storage_->LoadCheckpoint(path, db_path, i); -} - -void DB::CreateCheckpoint(const std::string& path, bool sync) { - auto tmp_path = path + '/' + std::to_string(db_index_); - if (0 != pstd::CreatePath(tmp_path)) { - WARN("Create dir {} fail !", tmp_path); +void DB::CreateCheckpoint(const std::string& checkpoint_path, bool sync) { + auto checkpoint_sub_path = checkpoint_path + '/' + std::to_string(db_index_); + if (0 != pstd::CreatePath(checkpoint_sub_path)) { + WARN("Create dir {} fail !", checkpoint_sub_path); return; } - std::vector> result; - result.reserve(rocksdb_inst_num_); - for (int i = 0; i < rocksdb_inst_num_; ++i) { - // In a new thread, create a checkpoint for the specified rocksdb i - // In DB::DoBgSave, a read lock is always held to protect the Storage - // corresponding to this rocksdb i. - auto res = std::async(std::launch::async, &DB::DoCheckpoint, this, path, i); - result.push_back(std::move(res)); - } + std::shared_lock sharedLock(storage_mutex_); + auto result = storage_->CreateCheckpoint(checkpoint_sub_path); if (sync) { for (auto& r : result) { r.get(); @@ -82,10 +60,10 @@ void DB::CreateCheckpoint(const std::string& path, bool sync) { } } -void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { - auto checkpoint_path = path + '/' + std::to_string(db_index_); - if (0 != pstd::IsDir(path)) { - WARN("Checkpoint dir {} does not exist!", checkpoint_path); +void DB::LoadDBFromCheckpoint(const std::string& checkpoint_path, bool sync [[maybe_unused]]) { + auto checkpoint_sub_path = checkpoint_path + '/' + std::to_string(db_index_); + if (0 != pstd::IsDir(checkpoint_sub_path)) { + WARN("Checkpoint dir {} does not exist!", checkpoint_sub_path); return; } if (0 != pstd::IsDir(db_path_)) { @@ -97,20 +75,15 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { std::lock_guard lock(storage_mutex_); opened_ = false; - std::vector> result; - result.reserve(rocksdb_inst_num_); - for (int i = 0; i < 
rocksdb_inst_num_; ++i) { - // In a new thread, Load a checkpoint for the specified rocksdb i - auto res = std::async(std::launch::async, &DB::LoadCheckpoint, this, checkpoint_path, db_path_, i); - result.push_back(std::move(res)); - } + auto result = storage_->LoadCheckpoint(checkpoint_sub_path, db_path_); + for (auto& r : result) { r.get(); } storage::StorageOptions storage_options; - storage_options.options.create_if_missing = true; - storage_options.db_instance_num = rocksdb_inst_num_; + storage_options.options = g_config.GetRocksDBOptions(); + storage_options.db_instance_num = g_config.db_instance_num.load(); storage_options.db_id = db_index_; // options for CF @@ -121,6 +94,8 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { storage_options.append_log_function = [&r = PRAFT](const Binlog& log, std::promise&& promise) { r.AppendLog(log, std::move(promise)); }; + storage_options.do_snapshot_function = + std::bind(&pikiwidb::PRaft::DoSnapshot, &pikiwidb::PRAFT, std::placeholders::_1, std::placeholders::_2); } storage_ = std::make_unique(); @@ -128,7 +103,8 @@ void DB::LoadDBFromCheckpoint(const std::string& path, bool sync) { ERROR("Storage open failed! 
{}", s.ToString()); abort(); } + opened_ = true; - INFO("DB{} load a checkpoint from {} success!", db_index_, path); + INFO("DB{} load a checkpoint from {} success!", db_index_, checkpoint_path); } } // namespace pikiwidb diff --git a/src/db.h b/src/db.h index d186583c1..6ed80d058 100644 --- a/src/db.h +++ b/src/db.h @@ -18,7 +18,7 @@ namespace pikiwidb { class DB { public: - DB(int db_index, const std::string& db_path, int rocksdb_inst_num); + DB(int db_index, const std::string& db_path); std::unique_ptr& GetStorage() { return storage_; } @@ -32,18 +32,13 @@ class DB { void CreateCheckpoint(const std::string& path, bool sync); - void LoadDBFromCheckpoint(const std::string& path, bool sync = false); + void LoadDBFromCheckpoint(const std::string& path, bool sync = true); int GetDbIndex() { return db_index_; } - private: - void DoCheckpoint(const std::string&, int i); - void LoadCheckpoint(const std::string&, const std::string& db_path, int i); - private: const int db_index_ = 0; const std::string db_path_; - int rocksdb_inst_num_ = 0; /** * If you want to change the pointer that points to storage, * you must first acquire a mutex lock. 
diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index f734fefb0..48773729d 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -17,8 +17,6 @@ TARGET_INCLUDE_DIRECTORIES(storage PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include PRIVATE ${rocksdb_SOURCE_DIR}/ PRIVATE ${rocksdb_SOURCE_DIR}/include - PRIVATE ${BRAFT_INCLUDE_DIR} - PRIVATE ${BRPC_INCLUDE_DIR} PRIVATE ${PROTO_OUTPUT_DIR} ) diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 9562ec927..4536a7e41 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -186,9 +186,13 @@ class Storage { Status Open(const StorageOptions& storage_options, const std::string& db_path); - Status CreateCheckpoint(const std::string& dump_path, int index); + std::vector> CreateCheckpoint(const std::string& checkpoint_path); - Status LoadCheckpoint(const std::string& dump_path, const std::string& db_path, int index); + Status CreateCheckpointInternal(const std::string& checkpoint_path, int db_index); + + std::vector> LoadCheckpoint(const std::string& checkpoint_path, const std::string& db_path); + + Status LoadCheckpointInternal(const std::string& dump_path, const std::string& db_path, int index); Status LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key); diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 2b87d7d67..342f7aeb7 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -5,8 +5,10 @@ #include #include +#include #include #include +#include #include "binlog.pb.h" #include "config.h" @@ -148,24 +150,36 @@ Status Storage::Open(const StorageOptions& storage_options, const std::string& d return Status::OK(); } -Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { - INFO("DB{}'s RocksDB {} begin to generate a checkpoint!", db_id_, i); - auto source_dir = AppendSubDirectory(dump_path, db_id_); - 
source_dir = AppendSubDirectory(source_dir, i); +std::vector> Storage::CreateCheckpoint(const std::string& checkpoint_path) { + INFO("DB{} begin to generate a checkpoint to {}", db_id_, checkpoint_path); + // auto source_dir = AppendSubDirectory(checkpoint_path, db_id_); + + std::vector> result; + result.reserve(db_instance_num_); + for (int i = 0; i < db_instance_num_; ++i) { + // In a new thread, create a checkpoint for the specified rocksdb i. + auto res = std::async(std::launch::async, &Storage::CreateCheckpointInternal, this, checkpoint_path, i); + result.push_back(std::move(res)); + } + return result; +} + +Status Storage::CreateCheckpointInternal(const std::string& checkpoint_path, int index) { + auto source_dir = AppendSubDirectory(checkpoint_path, index); auto tmp_dir = source_dir + ".tmp"; // 1) Make sure the temporary directory does not exist if (!pstd::DeleteDirIfExist(tmp_dir)) { - WARN("DB{}'s RocksDB {} delete directory fail!", db_id_, i); + WARN("DB{}'s RocksDB {} delete directory fail!", db_id_, index); return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", tmp_dir); } // 2) Create checkpoint object of this RocksDB rocksdb::Checkpoint* checkpoint = nullptr; - auto db = insts_[i]->GetDB(); + auto db = insts_[index]->GetDB(); rocksdb::Status s = rocksdb::Checkpoint::Create(db, &checkpoint); if (!s.ok()) { - WARN("DB{}'s RocksDB {} create checkpoint object failed!. Error: ", db_id_, i, s.ToString()); + WARN("DB{}'s RocksDB {} create checkpoint object failed!. Error: ", db_id_, index, s.ToString()); return s; } @@ -173,58 +187,72 @@ Status Storage::CreateCheckpoint(const std::string& dump_path, int i) { std::unique_ptr checkpoint_guard(checkpoint); s = checkpoint->CreateCheckpoint(tmp_dir, kFlush, nullptr); if (!s.ok()) { - WARN("DB{}'s RocksDB {} create checkpoint failed!. Error: {}", db_id_, i, s.ToString()); + WARN("DB{}'s RocksDB {} create checkpoint failed!. 
Error: {}", db_id_, index, s.ToString()); return s; } // 4) Make sure the source directory does not exist if (!pstd::DeleteDirIfExist(source_dir)) { - WARN("DB{}'s RocksDB {} delete directory {} fail!", db_id_, i, source_dir); + WARN("DB{}'s RocksDB {} delete directory {} fail!", db_id_, index, source_dir); if (!pstd::DeleteDirIfExist(tmp_dir)) { - WARN("DB{}'s RocksDB {} fail to delete the temporary directory {} ", db_id_, i, tmp_dir); + WARN("DB{}'s RocksDB {} fail to delete the temporary directory {} ", db_id_, index, tmp_dir); } return Status::IOError("DeleteDirIfExist() fail! dir_name : {} ", source_dir); } // 5) Rename the temporary directory to source directory if (auto status = pstd::RenameFile(tmp_dir, source_dir); status != 0) { - WARN("DB{}'s RocksDB {} rename temporary directory {} to source directory {} fail!", db_id_, i, tmp_dir, + WARN("DB{}'s RocksDB {} rename temporary directory {} to source directory {} fail!", db_id_, index, tmp_dir, source_dir); if (!pstd::DeleteDirIfExist(tmp_dir)) { - WARN("DB{}'s RocksDB {} fail to delete the rename failed directory {} ", db_id_, i, tmp_dir); + WARN("DB{}'s RocksDB {} fail to delete the rename failed directory {} ", db_id_, index, tmp_dir); } return Status::IOError("Rename directory {} fail!", tmp_dir); } - INFO("DB{}'s RocksDB {} create checkpoint {} success!", db_id_, i, source_dir); + INFO("DB{}'s RocksDB {} create checkpoint {} success!", db_id_, index, source_dir); return Status::OK(); } -Status Storage::LoadCheckpoint(const std::string& dump_path, const std::string& db_path, int i) { - auto rocksdb_checkpoint_path = AppendSubDirectory(dump_path, i); - INFO("DB{}'s RocksDB {} begin to load a checkpoint from {}", db_id_, i, rocksdb_checkpoint_path); - auto rocksdb_path = AppendSubDirectory(db_path, i); // ./db/db_id/i - auto tmp_rocksdb_path = rocksdb_path + ".tmp"; // ./db/db_id/i.tmp - insts_[i].reset(); +std::vector> Storage::LoadCheckpoint(const std::string& checkpoint_sub_path, + const std::string& 
db_sub_path) { + INFO("DB{} begin to load a checkpoint from {} to {}", db_id_, checkpoint_sub_path, db_sub_path); + std::vector> result; + result.reserve(db_instance_num_); + for (int i = 0; i < db_instance_num_; ++i) { + // In a new thread, Load a checkpoint for the specified rocksdb i + auto res = + std::async(std::launch::async, &Storage::LoadCheckpointInternal, this, checkpoint_sub_path, db_sub_path, i); + result.push_back(std::move(res)); + } + return result; +} + +Status Storage::LoadCheckpointInternal(const std::string& checkpoint_sub_path, const std::string& db_sub_path, + int index) { + auto rocksdb_path = AppendSubDirectory(db_sub_path, index); // ./db/db_id/index + auto tmp_rocksdb_path = rocksdb_path + ".tmp"; // ./db/db_id/index.tmp + insts_[index].reset(); + auto source_dir = AppendSubDirectory(checkpoint_sub_path, index); // 1) Rename the original db to db.tmp, and only perform the maximum possible recovery of data // when loading the checkpoint fails. if (auto status = pstd::RenameFile(rocksdb_path, tmp_rocksdb_path); status != 0) { - WARN("DB{}'s RocksDB {} rename db directory {} to temporary directory {} fail!", db_id_, i, db_path, + WARN("DB{}'s RocksDB {} rename db directory {} to temporary directory {} fail!", db_id_, index, rocksdb_path, tmp_rocksdb_path); - return Status::IOError("Rename directory {} fail!", db_path); + return Status::IOError("Rename directory {} fail!", rocksdb_path); } // 2) Create a db directory to save the checkpoint. 
if (0 != pstd::CreatePath(rocksdb_path)) { pstd::RenameFile(tmp_rocksdb_path, rocksdb_path); - WARN("DB{}'s RocksDB {} load a checkpoint from {} fail!", db_id_, i, rocksdb_checkpoint_path); + WARN("DB{}'s RocksDB {} load a checkpoint from {} fail!", db_id_, index, checkpoint_sub_path); return Status::IOError("Create directory {} fail!", rocksdb_path); } - if (RecursiveLinkAndCopy(rocksdb_checkpoint_path, rocksdb_path) != 0) { + if (RecursiveLinkAndCopy(source_dir, rocksdb_path) != 0) { pstd::DeleteDir(rocksdb_path); pstd::RenameFile(tmp_rocksdb_path, rocksdb_path); - WARN("DB{}'s RocksDB {} load a checkpoint from {} fail!", db_id_, i, rocksdb_checkpoint_path); + WARN("DB{}'s RocksDB {} load a checkpoint from {} fail!", db_id_, index, source_dir); return Status::IOError("recursive link and copy directory {} fail!", rocksdb_path); } @@ -2340,7 +2368,6 @@ Status Storage::OnBinlogWrite(const pikiwidb::Binlog& log, LogIndex log_idx) { rocksdb::WriteBatch batch; bool is_finished_start = true; - // 提前获取 seq, 每次自增, 需要保证该操作串行执行? auto seqno = inst->GetDB()->GetLatestSequenceNumber(); for (const auto& entry : log.entries()) { if (inst->IsRestarting() && inst->IsApplied(entry.cf_idx(), log_idx)) [[unlikely]] { diff --git a/src/store.cc b/src/store.cc index 760674072..a8186793b 100644 --- a/src/store.cc +++ b/src/store.cc @@ -26,7 +26,7 @@ void PStore::Init(int db_number) { db_number_ = db_number; backends_.reserve(db_number_); for (int i = 0; i < db_number_; i++) { - auto db = std::make_unique(i, g_config.db_path, g_config.db_instance_num); + auto db = std::make_unique(i, g_config.db_path); backends_.push_back(std::move(db)); INFO("Open DB_{} success!", i); }