diff --git a/.editorconfig b/.editorconfig
index 2ae56fad..262979b4 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -12,3 +12,7 @@ insert_final_newline=true
 [*.swift]
 indent_style=space
 tab_width=4
+
+[*.yml]
+indent_style=space
+indent_size=2
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 19d884d2..725143b4 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -25,32 +25,19 @@ jobs:
   test:
     name: Build and Test
     runs-on: [self-hosted, linux]
-
     steps:
       - name: Checkout Code
         uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: MsQuic Install Dependencies
-        run: |
-          export DEBIAN_FRONTEND=noninteractive
-          sudo apt-add-repository -y ppa:lttng/stable-2.13
-          sudo apt-get update
-          sudo apt-get install -y lttng-tools lttng-modules-dkms babeltrace2 liblttng-ust-dev python3-babeltrace
-          sudo apt-get install -y cmake
-          sudo apt-get install -y build-essential
-      - name: Get msquic submodule commit hash
-        id: msquic-commit-hash
-        run: |
-          echo "commit-hash=$(git submodule status Networking/Sources/msquic/ | cut -c2-41)" >> $GITHUB_OUTPUT
+      - run: sudo apt-get update
+      - uses: awalsh128/cache-apt-pkgs-action@latest
+        with:
+          packages: librocksdb-dev libzstd-dev libbz2-dev liblz4-dev
       - name: Get blst submodule commit hash
         id: blst-commit-hash
         run: |
           echo "commit-hash=$(git submodule status Utils/Sources/blst/ | cut -c2-41)" >> $GITHUB_OUTPUT
-      - name: Get rocksdb submodule commit hash
-        id: rocksdb-commit-hash
-        run: |
-          echo "commit-hash=$(git submodule status Database/Sources/rocksdb/ | cut -c2-41)" >> $GITHUB_OUTPUT
       - name: Cache SPM
         uses: actions/cache@v4
         with:
@@ -68,13 +55,6 @@ jobs:
             ~/.cargo/git/db/
             Utils/Sources/bandersnatch/target/
           key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
-      - name: Cache msquic static lib
-        uses: actions/cache@v4
-        with:
-          path: .lib/libmsquic.a
-          key: ${{ runner.os }}-libs-msquic-${{ steps.msquic-commit-hash.outputs.commit-hash }}
-          restore-keys: |
-            ${{ runner.os }}-libs-msquic
       - name: Cache blst static lib
         uses: actions/cache@v4
         with:
@@ -89,13 +69,6 @@ jobs:
           key: ${{ runner.os }}-libs-libbandersnatch-${{ hashFiles('Utils/Sources/bandersnatch/**') }}
           restore-keys: |
             ${{ runner.os }}-libs-libbandersnatch
-      - name: Cache rocksdb static lib
-        uses: actions/cache@v4
-        with:
-          path: .lib/librocksdb.a
-          key: ${{ runner.os }}-libs-librocksdb-${{ steps.rocksdb-commit-hash.outputs.commit-hash }}
-          restore-keys: |
-            ${{ runner.os }}-libs-librocksdb
       - name: Cache erasure-coding static lib
         uses: actions/cache@v4
         with:
diff --git a/.gitmodules b/.gitmodules
index 1f3640f8..1556c22d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,9 +4,3 @@
 [submodule "blst"]
 	path = Utils/Sources/blst
 	url = https://github.com/supranational/blst.git
-[submodule "Database/Sources/rocksdb"]
-	path = Database/Sources/rocksdb
-	url = https://github.com/facebook/rocksdb.git
-[submodule "Networking/Sources/msquic"]
-	path = Networking/Sources/msquic
-	url = https://github.com/microsoft/msquic.git
\ No newline at end of file
diff --git a/Blockchain/Package.resolved b/Blockchain/Package.resolved
index 8b21b5c4..7bbc69b7 100644
--- a/Blockchain/Package.resolved
+++ b/Blockchain/Package.resolved
@@ -1,5 +1,5 @@
 {
-  "originHash" : "2a91ceec1663a1ed3fc9b58333d3d32c6ce1131b7d1874346655dbfae0af61f4",
+  "originHash" : "64fa76cb48bfb721e9426cf0d246c06245d80b6c98b16ca1a7c9ba00acfabb1b",
   "pins" : [
     {
       "identity" : "blake2.swift",
@@ -55,6 +55,15 @@
         "version" : "2.5.0"
       }
     },
+    {
+      "identity" : "swift-numerics",
+      "kind" : "remoteSourceControl",
+      "location" : 
"https://github.com/apple/swift-numerics", + "state" : { + "branch" : "main", + "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83" + } + }, { "identity" : "swift-service-context", "kind" : "remoteSourceControl", diff --git a/Blockchain/Sources/Blockchain/Blockchain.swift b/Blockchain/Sources/Blockchain/Blockchain.swift index 0568b9c5..5fb62e07 100644 --- a/Blockchain/Sources/Blockchain/Blockchain.swift +++ b/Blockchain/Sources/Blockchain/Blockchain.swift @@ -4,19 +4,9 @@ import Utils private let logger = Logger(label: "Blockchain") -private struct BlockchainStorage: Sendable { - var bestHead: Data32? - var bestHeadTimeslot: TimeslotIndex? - var finalizedHead: Data32 -} - -/// Holds the state of the blockchain. -/// Includes the canonical chain as well as pending forks. -/// Assume all blocks and states are valid and have been validated. public final class Blockchain: ServiceBase, @unchecked Sendable { - private let storage: ThreadSafeContainer - private let dataProvider: BlockchainDataProvider - private let timeProvider: TimeProvider + public let dataProvider: BlockchainDataProvider + public let timeProvider: TimeProvider public init( config: ProtocolConfigRef, @@ -27,24 +17,6 @@ public final class Blockchain: ServiceBase, @unchecked Sendable { self.dataProvider = dataProvider self.timeProvider = timeProvider - let heads = try await dataProvider.getHeads() - var bestHead: (HeaderRef, Data32)? - for head in heads { - guard let header = try? await dataProvider.getHeader(hash: head) else { - continue - } - if bestHead == nil || header.value.timeslot > bestHead!.0.value.timeslot { - bestHead = (header, head) - } - } - let finalizedHead = try await dataProvider.getFinalizedHead() - - storage = ThreadSafeContainer(.init( - bestHead: bestHead?.1, - bestHeadTimeslot: bestHead?.0.value.timeslot, - finalizedHead: finalizedHead - )) - super.init(config, eventBus) await subscribe(RuntimeEvents.BlockAuthored.self) { [weak self] event in @@ -57,58 +29,28 @@ public final class Blockchain: ServiceBase, @unchecked Sendable { } public func importBlock(_ block: BlockRef) async throws { + logger.debug("importing block: \(block.hash)") + try await withSpan("importBlock") { span in span.attributes.blockHash = block.hash.description let runtime = Runtime(config: config) let parent = try await dataProvider.getState(hash: block.header.parentHash) - let timeslot = timeProvider.getTime() / UInt32(config.value.slotPeriodSeconds) + let timeslot = timeProvider.getTimeslot() let state = try runtime.apply(block: block, state: parent, context: .init(timeslot: timeslot)) - try await dataProvider.add(state: state) - // update best head - if state.value.timeslot > storage.value.bestHeadTimeslot ?? 
0 { - storage.write { storage in - storage.bestHead = block.hash - storage.bestHeadTimeslot = state.value.timeslot - } - } + try await dataProvider.blockImported(block: block, state: state) - await publish(RuntimeEvents.BlockImported(block: block, state: state, parentState: parent)) + publish(RuntimeEvents.BlockImported(block: block, state: state, parentState: parent)) } } public func finalize(hash: Data32) async throws { + logger.debug("finalizing block: \(hash)") + // TODO: purge forks try await dataProvider.setFinalizedHead(hash: hash) - storage.write { storage in - storage.finalizedHead = hash - } - - await publish(RuntimeEvents.BlockFinalized(hash: hash)) - } - - public func getBestBlock() async throws -> BlockRef { - guard let hash = try await dataProvider.getHeads().first else { - try throwUnreachable("no head") - } - return try await dataProvider.getBlock(hash: hash) - } - - public func getBlock(hash: Data32) async throws -> BlockRef? { - try await dataProvider.getBlock(hash: hash) - } - - public func getState(hash: Data32) async throws -> StateRef? { - try await dataProvider.getState(hash: hash) - } - - public var bestHead: Data32 { - storage.value.bestHead ?? Data32() - } - - public var finalizedHead: Data32 { - storage.value.finalizedHead + publish(RuntimeEvents.BlockFinalized(hash: hash)) } } diff --git a/Blockchain/Sources/Blockchain/BlockchainDataProvider/BlockchainDataProvider.swift b/Blockchain/Sources/Blockchain/BlockchainDataProvider/BlockchainDataProvider.swift index f65262ca..6b05cc25 100644 --- a/Blockchain/Sources/Blockchain/BlockchainDataProvider/BlockchainDataProvider.swift +++ b/Blockchain/Sources/Blockchain/BlockchainDataProvider/BlockchainDataProvider.swift @@ -1,37 +1,132 @@ +import TracingUtils import Utils -public enum BlockchainDataProviderError: Error { - case noData +private let logger = Logger(label: "BlockchainDataProvider") + +private struct BlockchainStorage: Sendable { + var bestHead: Data32 + var bestHeadTimeslot: TimeslotIndex + var finalizedHead: Data32 +} + +public final class BlockchainDataProvider { + private let storage: ThreadSafeContainer + private let dataProvider: BlockchainDataProviderProtocol + + public init(_ dataProvider: BlockchainDataProviderProtocol) async throws { + let heads = try await dataProvider.getHeads() + var bestHead: (HeaderRef, Data32)? + for head in heads { + guard let header = try? await dataProvider.getHeader(hash: head) else { + continue + } + if bestHead == nil || header.value.timeslot > bestHead!.0.value.timeslot { + bestHead = (header, head) + } + } + let finalizedHead = try await dataProvider.getFinalizedHead() + + storage = ThreadSafeContainer(.init( + bestHead: bestHead?.1 ?? Data32(), + bestHeadTimeslot: bestHead?.0.value.timeslot ?? 
0, + finalizedHead: finalizedHead + )) + + self.dataProvider = dataProvider + } + + public var bestHead: Data32 { + storage.value.bestHead + } + + public var finalizedHead: Data32 { + storage.value.finalizedHead + } + + public func blockImported(block: BlockRef, state: StateRef) async throws { + try await add(block: block) + try await add(state: state) + try await updateHead(hash: block.hash, parent: block.header.parentHash) + + if block.header.timeslot > storage.value.bestHeadTimeslot { + storage.write { storage in + storage.bestHead = block.hash + storage.bestHeadTimeslot = block.header.timeslot + } + } + + logger.debug("block imported: \(block.hash)") + } } -public protocol BlockchainDataProvider: Sendable { - func hasBlock(hash: Data32) async throws -> Bool - func hasState(hash: Data32) async throws -> Bool - func isHead(hash: Data32) async throws -> Bool +// expose BlockchainDataProviderProtocol +extension BlockchainDataProvider { + public func hasBlock(hash: Data32) async throws -> Bool { + try await dataProvider.hasBlock(hash: hash) + } + + public func hasState(hash: Data32) async throws -> Bool { + try await dataProvider.hasState(hash: hash) + } + + public func isHead(hash: Data32) async throws -> Bool { + try await dataProvider.isHead(hash: hash) + } + + public func getHeader(hash: Data32) async throws -> HeaderRef { + try await dataProvider.getHeader(hash: hash) + } + + public func getBlock(hash: Data32) async throws -> BlockRef { + try await dataProvider.getBlock(hash: hash) + } + + public func getState(hash: Data32) async throws -> StateRef { + try await dataProvider.getState(hash: hash) + } + + public func getFinalizedHead() async throws -> Data32 { + try await dataProvider.getFinalizedHead() + } + + public func getHeads() async throws -> Set { + try await dataProvider.getHeads() + } + + public func getBlockHash(byTimeslot timeslot: TimeslotIndex) async throws -> Set { + try await dataProvider.getBlockHash(byTimeslot: timeslot) + } + + public func add(block: BlockRef) async throws { + logger.debug("adding block: \(block.hash)") + + try await dataProvider.add(block: block) + } - /// throw BlockchainDataProviderError.noData if not found - func getHeader(hash: Data32) async throws -> HeaderRef + public func add(state: StateRef) async throws { + logger.debug("adding state: \(state.value.lastBlockHash)") - /// throw BlockchainDataProviderError.noData if not found - func getBlock(hash: Data32) async throws -> BlockRef + try await dataProvider.add(state: state) + } - /// throw BlockchainDataProviderError.noData if not found - func getState(hash: Data32) async throws -> StateRef + public func setFinalizedHead(hash: Data32) async throws { + logger.debug("setting finalized head: \(hash)") - /// throw BlockchainDataProviderError.noData if not found - func getFinalizedHead() async throws -> Data32 - func getHeads() async throws -> Set + try await dataProvider.setFinalizedHead(hash: hash) + storage.write { storage in + storage.finalizedHead = hash + } + } - /// return empty set if not found - func getBlockHash(byTimeslot timeslot: TimeslotIndex) async throws -> Set + public func updateHead(hash: Data32, parent: Data32) async throws { + logger.debug("updating head: \(hash) with parent: \(parent)") - func add(block: BlockRef) async throws - func add(state: StateRef) async throws - func setFinalizedHead(hash: Data32) async throws + try await dataProvider.updateHead(hash: hash, parent: parent) + } - /// throw BlockchainDataProviderError.noData if parent is not a head - func updateHead(hash: 
Data32, parent: Data32) async throws + public func remove(hash: Data32) async throws { + logger.debug("removing block: \(hash)") - /// remove header, block and state - func remove(hash: Data32) async throws + try await dataProvider.remove(hash: hash) + } } diff --git a/Blockchain/Sources/Blockchain/BlockchainDataProvider/BlockchainDataProviderProtocol.swift b/Blockchain/Sources/Blockchain/BlockchainDataProvider/BlockchainDataProviderProtocol.swift new file mode 100644 index 00000000..c5c207d9 --- /dev/null +++ b/Blockchain/Sources/Blockchain/BlockchainDataProvider/BlockchainDataProviderProtocol.swift @@ -0,0 +1,37 @@ +import Utils + +public enum BlockchainDataProviderError: Error, Equatable { + case noData(hash: Data32) +} + +public protocol BlockchainDataProviderProtocol: Sendable { + func hasBlock(hash: Data32) async throws -> Bool + func hasState(hash: Data32) async throws -> Bool + func isHead(hash: Data32) async throws -> Bool + + /// throw BlockchainDataProviderError.noData if not found + func getHeader(hash: Data32) async throws -> HeaderRef + + /// throw BlockchainDataProviderError.noData if not found + func getBlock(hash: Data32) async throws -> BlockRef + + /// throw BlockchainDataProviderError.noData if not found + func getState(hash: Data32) async throws -> StateRef + + /// throw BlockchainDataProviderError.noData if not found + func getFinalizedHead() async throws -> Data32 + func getHeads() async throws -> Set + + /// return empty set if not found + func getBlockHash(byTimeslot timeslot: TimeslotIndex) async throws -> Set + + func add(block: BlockRef) async throws + func add(state: StateRef) async throws + func setFinalizedHead(hash: Data32) async throws + + /// throw BlockchainDataProviderError.noData if parent is not a head + func updateHead(hash: Data32, parent: Data32) async throws + + /// remove header, block and state + func remove(hash: Data32) async throws +} diff --git a/Blockchain/Sources/Blockchain/BlockchainDataProvider/InMemoryDataProvider.swift b/Blockchain/Sources/Blockchain/BlockchainDataProvider/InMemoryDataProvider.swift index fadcba39..5f59fcf0 100644 --- a/Blockchain/Sources/Blockchain/BlockchainDataProvider/InMemoryDataProvider.swift +++ b/Blockchain/Sources/Blockchain/BlockchainDataProvider/InMemoryDataProvider.swift @@ -17,7 +17,7 @@ public actor InMemoryDataProvider: Sendable { } } -extension InMemoryDataProvider: BlockchainDataProvider { +extension InMemoryDataProvider: BlockchainDataProviderProtocol { public func hasBlock(hash: Data32) -> Bool { blockByHash[hash] != nil } @@ -32,21 +32,21 @@ extension InMemoryDataProvider: BlockchainDataProvider { public func getHeader(hash: Data32) throws -> HeaderRef { guard let header = blockByHash[hash]?.header.asRef() else { - throw BlockchainDataProviderError.noData + throw BlockchainDataProviderError.noData(hash: hash) } return header } public func getBlock(hash: Data32) throws -> BlockRef { guard let block = blockByHash[hash] else { - throw BlockchainDataProviderError.noData + throw BlockchainDataProviderError.noData(hash: hash) } return block } public func getState(hash: Data32) throws -> StateRef { guard let state = stateByBlockHash[hash] else { - throw BlockchainDataProviderError.noData + throw BlockchainDataProviderError.noData(hash: hash) } return state } @@ -79,7 +79,7 @@ extension InMemoryDataProvider: BlockchainDataProvider { public func updateHead(hash: Data32, parent: Data32) throws { guard heads.remove(parent) != nil else { - throw BlockchainDataProviderError.noData + throw 
BlockchainDataProviderError.noData(hash: parent) } heads.insert(hash) } diff --git a/Blockchain/Sources/Blockchain/Config/ProtocolConfig+Preset.swift b/Blockchain/Sources/Blockchain/Config/ProtocolConfig+Preset.swift index 9fe079f8..91399c7b 100644 --- a/Blockchain/Sources/Blockchain/Config/ProtocolConfig+Preset.swift +++ b/Blockchain/Sources/Blockchain/Config/ProtocolConfig+Preset.swift @@ -9,11 +9,11 @@ extension Ref where T == ProtocolConfig { serviceMinBalance: 100, totalNumberOfCores: 341, preimagePurgePeriod: 28800, - epochLength: 600, + epochLength: 12, auditBiasFactor: 2, - coreAccumulationGas: 10_000_000, // TODO: check this - workPackageAuthorizerGas: 10_000_000, // TODO: check this - workPackageRefineGas: 10_000_000, // TODO: check this + coreAccumulationGas: Gas(10_000_000), // TODO: check this + workPackageAuthorizerGas: Gas(10_000_000), // TODO: check this + workPackageRefineGas: Gas(10_000_000), // TODO: check this recentHistorySize: 8, maxWorkItems: 4, maxTicketsPerExtrinsic: 16, @@ -26,13 +26,13 @@ extension Ref where T == ProtocolConfig { coreAssignmentRotationPeriod: 10, maxServiceCodeSize: 4_000_000, preimageReplacementPeriod: 5, - totalNumberOfValidators: 1023, + totalNumberOfValidators: 6, erasureCodedPieceSize: 684, maxWorkPackageManifestEntries: 1 << 11, maxEncodedWorkPackageSize: 12 * 1 << 20, maxEncodedWorkReportSize: 96 * 1 << 10, erasureCodedSegmentSize: 6, - ticketSubmissionEndSlot: 500, + ticketSubmissionEndSlot: 10, pvmDynamicAddressAlignmentFactor: 2, pvmProgramInitInputDataSize: 1 << 24, pvmProgramInitPageSize: 1 << 14, @@ -48,9 +48,9 @@ extension Ref where T == ProtocolConfig { preimagePurgePeriod: 28800, epochLength: 600, auditBiasFactor: 2, - coreAccumulationGas: 10_000_000, // TODO: check this - workPackageAuthorizerGas: 10_000_000, // TODO: check this - workPackageRefineGas: 10_000_000, // TODO: check this + coreAccumulationGas: Gas(10_000_000), // TODO: check this + workPackageAuthorizerGas: Gas(10_000_000), // TODO: check this + workPackageRefineGas: Gas(10_000_000), // TODO: check this recentHistorySize: 8, maxWorkItems: 4, maxTicketsPerExtrinsic: 16, diff --git a/Blockchain/Sources/Blockchain/Config/ProtocolConfig.swift b/Blockchain/Sources/Blockchain/Config/ProtocolConfig.swift index da4b1261..21d61277 100644 --- a/Blockchain/Sources/Blockchain/Config/ProtocolConfig.swift +++ b/Blockchain/Sources/Blockchain/Config/ProtocolConfig.swift @@ -248,7 +248,7 @@ extension ProtocolConfig { } } - public enum CoreAccumulationGas: ReadUInt64 { + public enum CoreAccumulationGas: ReadGas { public typealias TConfig = ProtocolConfigRef public typealias TOutput = Gas public static func read(config: ProtocolConfigRef) -> Gas { @@ -256,7 +256,7 @@ extension ProtocolConfig { } } - public enum WorkPackageAuthorizerGas: ReadUInt64 { + public enum WorkPackageAuthorizerGas: ReadGas { public typealias TConfig = ProtocolConfigRef public typealias TOutput = Gas public static func read(config: ProtocolConfigRef) -> Gas { @@ -264,7 +264,7 @@ extension ProtocolConfig { } } - public enum WorkPackageRefineGas: ReadUInt64 { + public enum WorkPackageRefineGas: ReadGas { public typealias TConfig = ProtocolConfigRef public typealias TOutput = Gas public static func read(config: ProtocolConfigRef) -> Gas { diff --git a/Blockchain/Sources/Blockchain/RuntimeProtocols/AccumulateFunction.swift b/Blockchain/Sources/Blockchain/RuntimeProtocols/AccumulateFunction.swift index 1570fd71..7fcbe2d5 100644 --- a/Blockchain/Sources/Blockchain/RuntimeProtocols/AccumulateFunction.swift +++ 
b/Blockchain/Sources/Blockchain/RuntimeProtocols/AccumulateFunction.swift @@ -1,7 +1,7 @@ import Foundation import Utils -public struct AccumulateArguments { +public struct AccumulateArguments: Codable { public var result: WorkResult public var paylaodHash: Data32 public var packageHash: Data32 @@ -23,11 +23,11 @@ public struct DeferredTransfers: Codable { // a public var amount: Balance // m - public var memo: Data64 + public var memo: Data128 // g public var gasLimit: Gas - public init(sender: ServiceIndex, destination: ServiceIndex, amount: Balance, memo: Data64, gasLimit: Gas) { + public init(sender: ServiceIndex, destination: ServiceIndex, amount: Balance, memo: Data128, gasLimit: Gas) { self.sender = sender self.destination = destination self.amount = amount @@ -59,15 +59,55 @@ public struct AccumlateResultContext { public var newAccounts: [ServiceIndex: ServiceAccount] // p public var privilegedServices: PrivilegedServices + + public init( + account: ServiceAccount?, + authorizationQueue: ConfigFixedSizeArray< + ConfigFixedSizeArray< + Data32, + ProtocolConfig.MaxAuthorizationsQueueItems + >, + ProtocolConfig.TotalNumberOfCores + >, + validatorQueue: ConfigFixedSizeArray< + ValidatorKey, ProtocolConfig.TotalNumberOfValidators + >, + serviceIndex: ServiceIndex, + transfers: [DeferredTransfers], + newAccounts: [ServiceIndex: ServiceAccount], + privilegedServices: PrivilegedServices + ) { + self.account = account + self.authorizationQueue = authorizationQueue + self.validatorQueue = validatorQueue + self.serviceIndex = serviceIndex + self.transfers = transfers + self.newAccounts = newAccounts + self.privilegedServices = privilegedServices + } } public protocol AccumulateFunction { func invoke( config: ProtocolConfigRef, - service: ServiceIndex, + serviceIndex: ServiceIndex, code: Data, serviceAccounts: [ServiceIndex: ServiceAccount], gas: Gas, - arguments: [AccumulateArguments] + arguments: [AccumulateArguments], + // other inputs needed (not directly in GP's Accumulation function signature) + validatorQueue: ConfigFixedSizeArray< + ValidatorKey, ProtocolConfig.TotalNumberOfValidators + >, + authorizationQueue: ConfigFixedSizeArray< + ConfigFixedSizeArray< + Data32, + ProtocolConfig.MaxAuthorizationsQueueItems + >, + ProtocolConfig.TotalNumberOfCores + >, + privilegedServices: PrivilegedServices, + initialIndex: ServiceIndex, + timeslot: TimeslotIndex ) throws -> (ctx: AccumlateResultContext, result: Data32?) 
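+    // a non-nil result is the accumulation commitment hash collected for the service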
} diff --git a/Blockchain/Sources/Blockchain/RuntimeProtocols/Accumulation.swift b/Blockchain/Sources/Blockchain/RuntimeProtocols/Accumulation.swift index e5111fce..7148d09b 100644 --- a/Blockchain/Sources/Blockchain/RuntimeProtocols/Accumulation.swift +++ b/Blockchain/Sources/Blockchain/RuntimeProtocols/Accumulation.swift @@ -23,13 +23,24 @@ public struct AccumulationOutput { public protocol Accumulation { var privilegedServices: PrivilegedServices { get } + var validatorQueue: ConfigFixedSizeArray< + ValidatorKey, ProtocolConfig.TotalNumberOfValidators + > { get } + var authorizationQueue: ConfigFixedSizeArray< + ConfigFixedSizeArray< + Data32, + ProtocolConfig.MaxAuthorizationsQueueItems + >, + ProtocolConfig.TotalNumberOfCores + > { get } + var entropyPool: EntropyPool { get } var serviceAccounts: [ServiceIndex: ServiceAccount] { get } var accumlateFunction: AccumulateFunction { get } var onTransferFunction: OnTransferFunction { get } } extension Accumulation { - public func update(config: ProtocolConfigRef, workReports: [WorkReport]) throws -> AccumulationOutput { + public func update(config: ProtocolConfigRef, block: BlockRef, workReports: [WorkReport]) throws -> AccumulationOutput { var servicesGasRatio: [ServiceIndex: Gas] = [:] var servicesGas: [ServiceIndex: Gas] = [:] @@ -38,20 +49,22 @@ extension Accumulation { servicesGas[service] = gas } - let totalGasRatio = workReports.flatMap(\.results).reduce(0) { $0 + $1.gasRatio } + let totalGasRatio = workReports.flatMap(\.results).reduce(Gas(0)) { $0 + $1.gasRatio } let totalMinimalGas = try workReports.flatMap(\.results) - .reduce(0) { try $0 + serviceAccounts[$1.serviceIndex].unwrap(orError: AccumulationError.invalidServiceIndex).minAccumlateGas } + .reduce(Gas(0)) { + try $0 + serviceAccounts[$1.serviceIndex].unwrap(orError: AccumulationError.invalidServiceIndex).minAccumlateGas + } for report in workReports { for result in report.results { - servicesGasRatio[result.serviceIndex, default: 0] += result.gasRatio - servicesGas[result.serviceIndex, default: 0] += try serviceAccounts[result.serviceIndex] + servicesGasRatio[result.serviceIndex, default: Gas(0)] += result.gasRatio + servicesGas[result.serviceIndex, default: Gas(0)] += try serviceAccounts[result.serviceIndex] .unwrap(orError: AccumulationError.invalidServiceIndex).minAccumlateGas } } let remainingGas = config.value.coreAccumulationGas - totalMinimalGas for (service, gas) in servicesGas { - servicesGas[service] = gas + servicesGasRatio[service, default: 0] * remainingGas / totalGasRatio + servicesGas[service] = gas + servicesGasRatio[service, default: Gas(0)] * remainingGas / totalGasRatio } var serviceArguments: [ServiceIndex: [AccumulateArguments]] = [:] @@ -100,11 +113,17 @@ extension Accumulation { } let (ctx, commitment) = try accumlateFunction.invoke( config: config, - service: service, + serviceIndex: service, code: code, serviceAccounts: serviceAccounts, gas: gas, - arguments: arguments + arguments: arguments, + validatorQueue: validatorQueue, + authorizationQueue: authorizationQueue, + privilegedServices: privilegedServices, + initialIndex: Blake2b256.hash(service.encode(), entropyPool.t0.data, block.header.timeslot.encode()) + .data.decode(UInt32.self), + timeslot: block.header.timeslot ) if let commitment { commitments.append((service, commitment)) @@ -151,10 +170,9 @@ extension Accumulation { return .init( commitments: commitments, - // those cannot be nil because priviledge services are always called - privilegedServices: newPrivilegedServices!, - 
validatorQueue: newValidatorQueue!, - authorizationQueue: newAuthorizationQueue!, + privilegedServices: newPrivilegedServices ?? privilegedServices, + validatorQueue: newValidatorQueue ?? validatorQueue, + authorizationQueue: newAuthorizationQueue ?? authorizationQueue, serviceAccounts: newServiceAccounts ) } diff --git a/Blockchain/Sources/Blockchain/RuntimeProtocols/Guaranteeing.swift b/Blockchain/Sources/Blockchain/RuntimeProtocols/Guaranteeing.swift index 92f5d055..4416b5ac 100644 --- a/Blockchain/Sources/Blockchain/RuntimeProtocols/Guaranteeing.swift +++ b/Blockchain/Sources/Blockchain/RuntimeProtocols/Guaranteeing.swift @@ -93,7 +93,7 @@ extension Guaranteeing { var workReportHashes = Set() - var totalMinGasRequirement: Gas = 0 + var totalMinGasRequirement = Gas(0) for guarantee in extrinsic.guarantees { let report = guarantee.workReport diff --git a/Blockchain/Sources/Blockchain/RuntimeProtocols/Runtime.swift b/Blockchain/Sources/Blockchain/RuntimeProtocols/Runtime.swift index 3c347c91..6da6d5f5 100644 --- a/Blockchain/Sources/Blockchain/RuntimeProtocols/Runtime.swift +++ b/Blockchain/Sources/Blockchain/RuntimeProtocols/Runtime.swift @@ -1,4 +1,5 @@ import Codec +import Foundation import Utils // the STF @@ -6,7 +7,7 @@ public final class Runtime { public enum Error: Swift.Error { case safroleError(SafroleError) case DisputeError(DisputeError) - case invalidTimeslot + case invalidTimeslot(got: TimeslotIndex, context: TimeslotIndex) case invalidReportAuthorizer case encodeError(any Swift.Error) case invalidExtrinsicHash @@ -61,7 +62,7 @@ public final class Runtime { } guard block.header.timeslot <= context.timeslot else { - throw Error.invalidTimeslot + throw Error.invalidTimeslot(got: block.header.timeslot, context: context.timeslot) } // epoch is validated at apply time by Safrole @@ -73,10 +74,11 @@ public final class Runtime { // validate block.header.seal let vrfOutput: Data32 let blockAuthorKey = try Result { - try Bandersnatch.PublicKey(data: state.value.validatorQueue[Int(block.header.authorIndex)].bandersnatch) + try Bandersnatch.PublicKey(data: state.value.currentValidators[Int(block.header.authorIndex)].bandersnatch) }.mapError(Error.invalidBlockSeal).get() let index = block.header.timeslot % UInt32(config.value.epochLength) - let encodedHeader = try Result { try JamEncoder.encode(block.header) }.mapError(Error.invalidBlockSeal).get() + let encodedHeader = try Result { try JamEncoder.encode(block.header.unsigned) }.mapError(Error.invalidBlockSeal).get() + let entropyVRFInputData: Data switch state.value.safroleState.ticketsOrKeys { case let .left(tickets): let ticket = tickets[Int(index)] @@ -92,6 +94,8 @@ public final class Runtime { throw Error.notBlockAuthor } + entropyVRFInputData = SigningContext.entropyInputData(entropy: vrfOutput) + case let .right(keys): let key = keys[Int(index)] guard key == blockAuthorKey.data else { @@ -105,11 +109,12 @@ public final class Runtime { signature: block.header.seal ) }.mapError(Error.invalidBlockSeal).get() + + entropyVRFInputData = SigningContext.fallbackSealInputData(entropy: state.value.entropyPool.t3) } - let vrfInputData = SigningContext.entropyInputData(entropy: vrfOutput) _ = try Result { - try blockAuthorKey.ietfVRFVerify(vrfInputData: vrfInputData, signature: block.header.vrfSignature) + try blockAuthorKey.ietfVRFVerify(vrfInputData: entropyVRFInputData, signature: block.header.vrfSignature) }.mapError { _ in Error.invalidVrfSignature }.get() } @@ -149,7 +154,7 @@ public final class Runtime { // depends on Safrole and 
Disputes let availableReports = try updateReports(block: block, state: &newState) - let res = try newState.update(config: config, workReports: availableReports) + let res = try newState.update(config: config, block: block, workReports: availableReports) newState.privilegedServices = res.privilegedServices newState.serviceAccounts = res.serviceAccounts newState.authorizationQueue = res.authorizationQueue @@ -181,7 +186,7 @@ public final class Runtime { public func updateRecentHistory(block: BlockRef, state newState: inout State) throws { let workReportHashes = block.extrinsic.reports.guarantees.map(\.workReport.packageSpecification.workPackageHash) try newState.recentHistory.update( - headerHash: block.header.parentHash, + headerHash: block.hash, parentStateRoot: block.header.priorStateRoot, accumulateRoot: Data32(), // TODO: calculate accumulation result workReportHashes: ConfigLimitedSizeArray(config: config, array: workReportHashes) diff --git a/Blockchain/Sources/Blockchain/RuntimeProtocols/Safrole.swift b/Blockchain/Sources/Blockchain/RuntimeProtocols/Safrole.swift index 892d3867..76deae39 100644 --- a/Blockchain/Sources/Blockchain/RuntimeProtocols/Safrole.swift +++ b/Blockchain/Sources/Blockchain/RuntimeProtocols/Safrole.swift @@ -252,7 +252,7 @@ extension Safrole { let newCommitment = { try Bandersnatch.RingCommitment( - ring: validatorQueueWithoutOffenders.map { try Bandersnatch.PublicKey(data: $0.bandersnatch) }, + ring: validatorQueueWithoutOffenders.map { try? Bandersnatch.PublicKey(data: $0.bandersnatch) }, ctx: ctx ).data } diff --git a/Blockchain/Sources/Blockchain/Scheduler/DispatchQueueScheduler.swift b/Blockchain/Sources/Blockchain/Scheduler/DispatchQueueScheduler.swift index 95a88d40..7af1ceb3 100644 --- a/Blockchain/Sources/Blockchain/Scheduler/DispatchQueueScheduler.swift +++ b/Blockchain/Sources/Blockchain/Scheduler/DispatchQueueScheduler.swift @@ -1,26 +1,29 @@ -@preconcurrency import Foundation +@preconcurrency import Dispatch +import Foundation import TracingUtils private let logger = Logger(label: "Scheduler") public final class DispatchQueueScheduler: Scheduler { public let timeProvider: TimeProvider - private let queue: DispatchQueue - public init(timeProvider: TimeProvider, queue: DispatchQueue = .global()) { + public init(timeProvider: TimeProvider) { self.timeProvider = timeProvider - self.queue = queue } public func schedule( delay: TimeInterval, repeats: Bool, - task: @escaping @Sendable () -> Void, + task: @escaping @Sendable () async -> Void, onCancel: (@Sendable () -> Void)? ) -> Cancellable { logger.trace("scheduling task in \(delay) seconds, repeats: \(repeats)") - let timer = DispatchSource.makeTimerSource(queue: queue) - timer.setEventHandler(handler: task) + let timer = DispatchSource.makeTimerSource(queue: .global()) + timer.setEventHandler { + Task { + await task() + } + } timer.setCancelHandler(handler: onCancel) timer.schedule(deadline: .now() + delay, repeating: repeats ? 
delay : .infinity) timer.activate() diff --git a/Blockchain/Sources/Blockchain/Scheduler/Scheduler.swift b/Blockchain/Sources/Blockchain/Scheduler/Scheduler.swift index 5ebc0e24..d172fb22 100644 --- a/Blockchain/Sources/Blockchain/Scheduler/Scheduler.swift +++ b/Blockchain/Sources/Blockchain/Scheduler/Scheduler.swift @@ -1,4 +1,7 @@ import Foundation +import TracingUtils + +private let logger = Logger(label: "Scheduler") public final class Cancellable: Sendable, Hashable { private let fn: @Sendable () -> Void @@ -25,7 +28,7 @@ public protocol Scheduler: Sendable { func schedule( delay: TimeInterval, repeats: Bool, - task: @escaping @Sendable () -> Void, + task: @escaping @Sendable () async -> Void, onCancel: (@Sendable () -> Void)? ) -> Cancellable } @@ -34,7 +37,7 @@ extension Scheduler { func schedule( delay: TimeInterval, repeats: Bool = false, - task: @escaping @Sendable () -> Void, + task: @escaping @Sendable () async -> Void, onCancel: (@Sendable () -> Void)? = nil ) -> Cancellable { schedule(delay: delay, repeats: repeats, task: task, onCancel: onCancel) @@ -42,11 +45,21 @@ extension Scheduler { public func schedule( at timeslot: TimeslotIndex, - task: @escaping @Sendable () -> Void, + task: @escaping @Sendable () async -> Void, onCancel: (@Sendable () -> Void)? = nil ) -> Cancellable { + let nowTimeslot = timeProvider.getTimeslot() + if timeslot == nowTimeslot { + return schedule(delay: 0, repeats: false, task: task, onCancel: onCancel) + } + let deadline = timeProvider.timeslotToTime(timeslot) - return schedule(delay: TimeInterval(deadline - timeProvider.getTime()), repeats: false, task: task, onCancel: onCancel) + let now = timeProvider.getTime() + if deadline < now { + logger.error("scheduling task in the past", metadata: ["deadline": "\(deadline)", "now": "\(now)"]) + return Cancellable {} + } + return schedule(delay: TimeInterval(deadline - now), repeats: false, task: task, onCancel: onCancel) } } diff --git a/Blockchain/Sources/Blockchain/Types/ServiceAccount.swift b/Blockchain/Sources/Blockchain/Types/ServiceAccount.swift index 236e466a..5971db8a 100644 --- a/Blockchain/Sources/Blockchain/Types/ServiceAccount.swift +++ b/Blockchain/Sources/Blockchain/Types/ServiceAccount.swift @@ -52,9 +52,9 @@ extension ServiceAccount: Dummy { preimages: [:], preimageInfos: [:], codeHash: Data32(), - balance: 0, - minAccumlateGas: 0, - minOnTransferGas: 0 + balance: Balance(0), + minAccumlateGas: Gas(0), + minOnTransferGas: Gas(0) ) } } diff --git a/Blockchain/Sources/Blockchain/Types/State+Genesis.swift b/Blockchain/Sources/Blockchain/Types/State+Genesis.swift new file mode 100644 index 00000000..df08f188 --- /dev/null +++ b/Blockchain/Sources/Blockchain/Types/State+Genesis.swift @@ -0,0 +1,37 @@ +import Utils + +extension State { + public static func devGenesis(config: ProtocolConfigRef) throws -> State { + var devKeys = [ValidatorKey]() + + var state = State.dummy(config: config) + + for i in 0 ..< config.value.totalNumberOfValidators { + let keySet = try DevKeyStore.getDevKey(seed: UInt32(i)) + devKeys.append(ValidatorKey( + bandersnatch: keySet.bandersnatch.data, + ed25519: keySet.ed25519.data, + bls: Data144(), // TODO: figure out BLS pub key size + metadata: Data128() + )) + } + state.safroleState.nextValidators = try ConfigFixedSizeArray(config: config, array: devKeys) + state.validatorQueue = try ConfigFixedSizeArray(config: config, array: devKeys) + state.currentValidators = try ConfigFixedSizeArray(config: config, array: devKeys) + + var epochKeys = [BandersnatchPublicKey]() + 
for i in 0 ..< config.value.epochLength { + epochKeys.append(devKeys[i % config.value.totalNumberOfValidators].bandersnatch) + } + state.safroleState.ticketsOrKeys = try .right(ConfigFixedSizeArray(config: config, array: epochKeys)) + + let ctx = try Bandersnatch.RingContext(size: UInt(config.value.totalNumberOfValidators)) + let commitment = try Bandersnatch.RingCommitment( + ring: devKeys.map { try Bandersnatch.PublicKey(data: $0.bandersnatch) }, + ctx: ctx + ) + state.safroleState.ticketsVerifier = commitment.data + + return state + } +} diff --git a/Blockchain/Sources/Blockchain/Types/State.swift b/Blockchain/Sources/Blockchain/Types/State.swift index efc83d2b..6d1f443e 100644 --- a/Blockchain/Sources/Blockchain/Types/State.swift +++ b/Blockchain/Sources/Blockchain/Types/State.swift @@ -211,11 +211,24 @@ extension State: Guaranteeing { struct DummyFunction: AccumulateFunction, OnTransferFunction { func invoke( config _: ProtocolConfigRef, - service _: ServiceIndex, + serviceIndex _: ServiceIndex, code _: Data, serviceAccounts _: [ServiceIndex: ServiceAccount], gas _: Gas, - arguments _: [AccumulateArguments] + arguments _: [AccumulateArguments], + validatorQueue _: ConfigFixedSizeArray< + ValidatorKey, ProtocolConfig.TotalNumberOfValidators + >, + authorizationQueue _: ConfigFixedSizeArray< + ConfigFixedSizeArray< + Data32, + ProtocolConfig.MaxAuthorizationsQueueItems + >, + ProtocolConfig.TotalNumberOfCores + >, + privilegedServices _: PrivilegedServices, + initialIndex _: ServiceIndex, + timeslot _: TimeslotIndex ) throws -> (ctx: AccumlateResultContext, result: Data32?) { fatalError("not implemented") } diff --git a/Blockchain/Sources/Blockchain/Types/ValidatorKey.swift b/Blockchain/Sources/Blockchain/Types/ValidatorKey.swift index 9c9dde0a..914faee7 100644 --- a/Blockchain/Sources/Blockchain/Types/ValidatorKey.swift +++ b/Blockchain/Sources/Blockchain/Types/ValidatorKey.swift @@ -1,6 +1,11 @@ +import Foundation import Utils public struct ValidatorKey: Sendable, Equatable, Codable { + public enum Error: Swift.Error { + case invalidDataLength + } + public var bandersnatch: BandersnatchPublicKey public var ed25519: Ed25519PublicKey public var bls: BLSKey @@ -18,6 +23,16 @@ public struct ValidatorKey: Sendable, Equatable, Codable { self.metadata = metadata } + public init(data: Data) throws { + guard data.count == 336 else { + throw Error.invalidDataLength + } + bandersnatch = BandersnatchPublicKey(data[0 ..< 32])! + ed25519 = Ed25519PublicKey(data[32 ..< 64])! + bls = BLSKey(data[64 ..< 64 + 144])! + metadata = Data128(data[208 ..< 208 + 128])! 
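+        // fixed 336-byte layout: bandersnatch (32) + ed25519 (32) + bls (144) + metadata (128)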
+ } + public init() { bandersnatch = BandersnatchPublicKey() ed25519 = Ed25519PublicKey() diff --git a/Blockchain/Sources/Blockchain/Types/WorkItem.swift b/Blockchain/Sources/Blockchain/Types/WorkItem.swift index b50020d1..41bfa59b 100644 --- a/Blockchain/Sources/Blockchain/Types/WorkItem.swift +++ b/Blockchain/Sources/Blockchain/Types/WorkItem.swift @@ -60,7 +60,7 @@ extension WorkItem: Dummy { serviceIndex: 0, codeHash: Data32(), payloadBlob: Data(), - gasLimit: 0, + gasLimit: Gas(0), inputs: [], outputs: [], outputDataSegmentsCount: 0 diff --git a/Blockchain/Sources/Blockchain/Types/WorkResult.swift b/Blockchain/Sources/Blockchain/Types/WorkResult.swift index a9822016..f9e324fd 100644 --- a/Blockchain/Sources/Blockchain/Types/WorkResult.swift +++ b/Blockchain/Sources/Blockchain/Types/WorkResult.swift @@ -43,7 +43,7 @@ extension WorkResult: Dummy { serviceIndex: 0, codeHash: Data32(), payloadHash: Data32(), - gas: 0, + gas: Gas(0), output: .init(.success(Data())) ) } diff --git a/Blockchain/Sources/Blockchain/Types/primitives.swift b/Blockchain/Sources/Blockchain/Types/primitives.swift index f280b693..0a0ec673 100644 --- a/Blockchain/Sources/Blockchain/Types/primitives.swift +++ b/Blockchain/Sources/Blockchain/Types/primitives.swift @@ -1,9 +1,9 @@ import Utils -public typealias Balance = UInt64 +public typealias Balance = Utils.Balance public typealias ServiceIndex = UInt32 public typealias TimeslotIndex = UInt32 -public typealias Gas = UInt64 +public typealias Gas = Utils.Gas public typealias DataLength = UInt32 public typealias ValidatorIndex = UInt16 @@ -20,13 +20,13 @@ public typealias BandersnatchRingVRFRoot = Data144 public typealias BLSKey = Data144 extension TimeslotIndex { - public func toEpochIndex(config: ProtocolConfigRef) -> EpochIndex { + public func timeslotToEpochIndex(config: ProtocolConfigRef) -> EpochIndex { self / EpochIndex(config.value.epochLength) } } extension EpochIndex { - public func toTimeslotIndex(config: ProtocolConfigRef) -> TimeslotIndex { + public func epochToTimeslotIndex(config: ProtocolConfigRef) -> TimeslotIndex { self * TimeslotIndex(config.value.epochLength) } } diff --git a/Blockchain/Sources/Blockchain/VMInvocations/Constants.swift b/Blockchain/Sources/Blockchain/VMInvocations/Constants.swift new file mode 100644 index 00000000..7b29d5ef --- /dev/null +++ b/Blockchain/Sources/Blockchain/VMInvocations/Constants.swift @@ -0,0 +1 @@ +public let serviceIndexModValue = UInt32((1 << 32) - (1 << 9)) diff --git a/Blockchain/Sources/Blockchain/VMInvocations/Error.swift b/Blockchain/Sources/Blockchain/VMInvocations/Error.swift index 27fe3874..1d0d4672 100644 --- a/Blockchain/Sources/Blockchain/VMInvocations/Error.swift +++ b/Blockchain/Sources/Blockchain/VMInvocations/Error.swift @@ -1,3 +1,6 @@ public enum VMInvocationsError: Error { case serviceAccountNotFound + case checkMaxDepthLimit + case checkIndexTooSmall + case forceHalt } diff --git a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCall.swift b/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCall.swift new file mode 100644 index 00000000..c9c3cffa --- /dev/null +++ b/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCall.swift @@ -0,0 +1,48 @@ +import PolkaVM +import TracingUtils + +private let logger = Logger(label: "HostCall") + +public protocol HostCall { + static var identifier: UInt8 { get } + + func gasCost(state: VMState) -> Gas + func _callImpl(config: ProtocolConfigRef, state: VMState) throws +} + +extension HostCall { + public func call(config: 
ProtocolConfigRef, state: VMState) -> ExecOutcome { + guard hasEnoughGas(state: state) else { + logger.debug("not enough gas") + return .exit(.outOfGas) + } + state.consumeGas(gasCost(state: state)) + logger.debug("consumed \(gasCost(state: state)) gas") + + do { + try _callImpl(config: config, state: state) + return .continued + } catch let e as Memory.Error { + logger.error("memory error: \(e)") + return .exit(.pageFault(e.address)) + } catch VMInvocationsError.forceHalt { + logger.debug("force halt") + return .exit(.halt) + } catch let e as VMInvocationsError { + logger.error("invocation error: \(e)") + return .exit(.panic(.trap)) + } catch let e { + logger.error("unknown error: \(e)") + return .exit(.panic(.trap)) + } + } + + // TODO: host-calls will have different gas costs later on + public func gasCost(state _: VMState) -> Gas { + Gas(10) + } + + func hasEnoughGas(state: VMState) -> Bool { + Gas(state.getGas()) >= gasCost(state: state) + } +} diff --git a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunction+Helpers.swift b/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunction+Helpers.swift deleted file mode 100644 index 719235cb..00000000 --- a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunction+Helpers.swift +++ /dev/null @@ -1,7 +0,0 @@ -import PolkaVM - -extension HostCallFunction { - public static func hasEnoughGas(state: VMState) -> Bool { - state.getGas() >= gasCost - } -} diff --git a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunction.swift b/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunction.swift deleted file mode 100644 index eff107f0..00000000 --- a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunction.swift +++ /dev/null @@ -1,11 +0,0 @@ -import PolkaVM - -public protocol HostCallFunction { - static var identifier: UInt8 { get } - static var gasCost: UInt64 { get } - - associatedtype Input - associatedtype Output - - static func call(state: VMState, input: Input) throws -> Output -} diff --git a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunctions.swift b/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunctions.swift deleted file mode 100644 index 81b2a3ea..00000000 --- a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCallFunctions.swift +++ /dev/null @@ -1,234 +0,0 @@ -import Codec -import Foundation -import PolkaVM -import Utils - -public class GasFn: HostCallFunction { - public static var identifier: UInt8 { 0 } - public static var gasCost: UInt64 { 10 } - - public typealias Input = Void - public typealias Output = Void - - public static func call(state: VMState, input _: Input) throws -> Output { - guard hasEnoughGas(state: state) else { - return - } - state.consumeGas(gasCost) - - state.writeRegister(Registers.Index(raw: 0), UInt32(bitPattern: Int32(state.getGas() & 0xFFFF_FFFF))) - state.writeRegister(Registers.Index(raw: 1), UInt32(bitPattern: Int32(state.getGas() >> 32))) - } -} - -public class Lookup: HostCallFunction { - public static var identifier: UInt8 { 1 } - public static var gasCost: UInt64 { 10 } - - public typealias Input = (ServiceAccount, ServiceIndex, [ServiceIndex: ServiceAccount]) - public typealias Output = Void - - public static func call(state: VMState, input: Input) throws -> Output { - guard hasEnoughGas(state: state) else { - return - } - state.consumeGas(gasCost) - - let (serviceAccount, serviceIndex, serviceAccounts) = input - - var account: ServiceAccount? 
- let reg0 = state.readRegister(Registers.Index(raw: 0)) - if reg0 == serviceIndex || reg0 == Int32.max { - account = serviceAccount - } else { - account = serviceAccounts[reg0] - } - - let regs = state.readRegisters(in: 1 ..< 4) - - let preimageHash = try? Blake2b256.hash(state.readMemory(address: regs[0], length: 32)) - - let value: Data? = if let account, let preimageHash { - account.preimages[preimageHash] - } else { - nil - } - - let isWritable = state.isMemoryWritable(address: regs[1], length: Int(regs[2])) - if let value, isWritable { - let maxLen = min(regs[2], UInt32(value.count)) - try state.writeMemory(address: regs[1], values: value[0 ..< Int(maxLen)]) - } - - if preimageHash != nil, isWritable { - if let value { - state.writeRegister(Registers.Index(raw: 0), UInt32(value.count)) - } else { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.NONE.rawValue) - } - } else { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) - } - } -} - -public class Read: HostCallFunction { - public static var identifier: UInt8 { 2 } - public static var gasCost: UInt64 { 10 } - - public typealias Input = (ServiceAccount, ServiceIndex, [ServiceIndex: ServiceAccount]) - public typealias Output = Void - - public static func call(state: VMState, input: Input) throws -> Output { - guard hasEnoughGas(state: state) else { - return - } - state.consumeGas(gasCost) - - let (serviceAccount, serviceIndex, serviceAccounts) = input - - var account: ServiceAccount? - let reg0 = state.readRegister(Registers.Index(raw: 0)) - if reg0 == serviceIndex || reg0 == Int32.max { - account = serviceAccount - } else { - account = serviceAccounts[reg0] - } - - let regs = state.readRegisters(in: 1 ..< 5) - - let key = try? Blake2b256.hash(serviceIndex.encode(), state.readMemory(address: regs[0], length: Int(regs[1]))) - - let value: Data? = if let account, let key { - account.storage[key] - } else { - nil - } - - let isWritable = state.isMemoryWritable(address: regs[2], length: Int(regs[3])) - if let value, isWritable { - let maxLen = min(regs[3], UInt32(value.count)) - try state.writeMemory(address: regs[2], values: value[0 ..< Int(maxLen)]) - } - - if key != nil, isWritable { - if let value { - state.writeRegister(Registers.Index(raw: 0), UInt32(value.count)) - } else { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.NONE.rawValue) - } - } else { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) - } - } -} - -public class Write: HostCallFunction { - public static var identifier: UInt8 { 3 } - public static var gasCost: UInt64 { 10 } - - public typealias Input = (ProtocolConfigRef, ServiceAccount, ServiceIndex) - public typealias Output = ServiceAccount - - public static func call(state: VMState, input: Input) throws -> Output { - let (config, serviceAccount, serviceIndex) = input - guard hasEnoughGas(state: state) else { - return serviceAccount - } - - state.consumeGas(gasCost) - - let regs = state.readRegisters(in: 0 ..< 4) - - let key = try? Blake2b256.hash(serviceIndex.encode(), state.readMemory(address: regs[0], length: Int(regs[1]))) - - var account: ServiceAccount? 
- if let key, state.isMemoryReadable(address: regs[2], length: Int(regs[3])) { - account = serviceAccount - if regs[3] == 0 { - account?.storage.removeValue(forKey: key) - } else { - account?.storage[key] = try state.readMemory(address: regs[2], length: Int(regs[3])) - } - } else { - account = nil - } - - let l = if let key, serviceAccount.storage.keys.contains(key) { - UInt32(serviceAccount.storage[key]!.count) - } else { - HostCallResultCode.NONE.rawValue - } - - if key != nil, let account, account.thresholdBalance(config: config) <= account.balance { - state.writeRegister(Registers.Index(raw: 0), l) - return account - } else if let account, account.thresholdBalance(config: config) > account.balance { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.FULL.rawValue) - return serviceAccount - } else { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) - return serviceAccount - } - } -} - -public class Info: HostCallFunction { - public static var identifier: UInt8 { 4 } - public static var gasCost: UInt64 { 10 } - - public typealias Input = ( - ProtocolConfigRef, - ServiceAccount, - ServiceIndex, - [ServiceIndex: ServiceAccount], - [ServiceIndex: ServiceAccount] - ) - public typealias Output = Void - - public static func call(state: VMState, input: Input) throws -> Output { - guard hasEnoughGas(state: state) else { - return - } - state.consumeGas(gasCost) - - let (config, serviceAccount, serviceIndex, serviceAccounts, newServiceAccounts) = input - - var account: ServiceAccount? - let reg0 = state.readRegister(Registers.Index(raw: 0)) - if reg0 == serviceIndex || reg0 == Int32.max { - account = serviceAccount - } else { - let accounts = serviceAccounts.merging(newServiceAccounts) { _, new in new } - account = accounts[reg0] - } - - let o = state.readRegister(Registers.Index(raw: 1)) - - let m: Data? 
- if let account { - // codeHash, balance, thresholdBalance, minAccumlateGas, minOnTransferGas, totalByteLength, itemsCount - let capacity = 32 + 8 * 5 + 4 - let encoder = JamEncoder(capacity: capacity) - try encoder.encode(account.codeHash) - try encoder.encode(account.balance) - try encoder.encode(account.thresholdBalance(config: config)) - try encoder.encode(account.minAccumlateGas) - try encoder.encode(account.minOnTransferGas) - try encoder.encode(account.totalByteLength) - try encoder.encode(account.itemsCount) - m = encoder.data - } else { - m = nil - } - - if let m, state.isMemoryWritable(address: o, length: Int(m.count)) { - try state.writeMemory(address: o, values: m) - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) - } else if m == nil { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.NONE.rawValue) - } else { - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) - } - } -} diff --git a/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCalls.swift b/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCalls.swift new file mode 100644 index 00000000..036e46f1 --- /dev/null +++ b/Blockchain/Sources/Blockchain/VMInvocations/HostCall/HostCalls.swift @@ -0,0 +1,619 @@ +import Codec +import Foundation +import PolkaVM +import Utils + +// MARK: - General + +/// Get gas remaining +public class GasFn: HostCall { + public static var identifier: UInt8 { 0 } + + public func _callImpl(config _: ProtocolConfigRef, state: VMState) throws { + state.writeRegister(Registers.Index(raw: 0), UInt32(bitPattern: Int32(state.getGas().value & 0xFFFF_FFFF))) + state.writeRegister(Registers.Index(raw: 1), UInt32(bitPattern: Int32(state.getGas().value >> 32))) + } +} + +/// Lookup a preimage from a service account +public class Lookup: HostCall { + public static var identifier: UInt8 { 1 } + + public let serviceAccount: ServiceAccount + public let serviceIndex: ServiceIndex + public let serviceAccounts: [ServiceIndex: ServiceAccount] + + public init(account: ServiceAccount, serviceIndex: ServiceIndex, accounts: [ServiceIndex: ServiceAccount]) { + serviceAccount = account + self.serviceIndex = serviceIndex + serviceAccounts = accounts + } + + public func _callImpl(config _: ProtocolConfigRef, state: VMState) throws { + var account: ServiceAccount? + let reg0 = state.readRegister(Registers.Index(raw: 0)) + if reg0 == serviceIndex || reg0 == Int32.max { + account = serviceAccount + } else { + account = serviceAccounts[reg0] + } + + let regs = state.readRegisters(in: 1 ..< 4) + + let preimageHash = try? Blake2b256.hash(state.readMemory(address: regs[0], length: 32)) + + let value: Data? 
= if let account, let preimageHash { + account.preimages[preimageHash] + } else { + nil + } + + let isWritable = state.isMemoryWritable(address: regs[1], length: Int(regs[2])) + if let value, isWritable { + let maxLen = min(regs[2], UInt32(value.count)) + try state.writeMemory(address: regs[1], values: value[0 ..< Int(maxLen)]) + } + + if preimageHash != nil, isWritable { + if let value { + state.writeRegister(Registers.Index(raw: 0), UInt32(value.count)) + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.NONE.rawValue) + } + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } + } +} + +/// Read a service account storage +public class Read: HostCall { + public static var identifier: UInt8 { 2 } + + public let serviceAccount: ServiceAccount + public let serviceIndex: ServiceIndex + public let serviceAccounts: [ServiceIndex: ServiceAccount] + + public init(account: ServiceAccount, serviceIndex: ServiceIndex, accounts: [ServiceIndex: ServiceAccount]) { + serviceAccount = account + self.serviceIndex = serviceIndex + serviceAccounts = accounts + } + + public func _callImpl(config _: ProtocolConfigRef, state: VMState) throws { + var account: ServiceAccount? + let reg0 = state.readRegister(Registers.Index(raw: 0)) + if reg0 == serviceIndex || reg0 == Int32.max { + account = serviceAccount + } else { + account = serviceAccounts[reg0] + } + + let regs = state.readRegisters(in: 1 ..< 5) + + let key = try? Blake2b256.hash(serviceIndex.encode(), state.readMemory(address: regs[0], length: Int(regs[1]))) + + let value: Data? = if let account, let key { + account.storage[key] + } else { + nil + } + + let isWritable = state.isMemoryWritable(address: regs[2], length: Int(regs[3])) + if let value, isWritable { + let maxLen = min(regs[3], UInt32(value.count)) + try state.writeMemory(address: regs[2], values: value[0 ..< Int(maxLen)]) + } + + if key != nil, isWritable { + if let value { + state.writeRegister(Registers.Index(raw: 0), UInt32(value.count)) + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.NONE.rawValue) + } + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } + } +} + +/// Write to a service account storage +public class Write: HostCall { + public static var identifier: UInt8 { 3 } + + public var serviceAccount: ServiceAccount + public let serviceIndex: ServiceIndex + + public init(account: inout ServiceAccount, serviceIndex: ServiceIndex) { + serviceAccount = account + self.serviceIndex = serviceIndex + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + let regs = state.readRegisters(in: 0 ..< 4) + + let key = try? Blake2b256.hash(serviceIndex.encode(), state.readMemory(address: regs[0], length: Int(regs[1]))) + + var account: ServiceAccount? 
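+        // regs[3] == 0 deletes the key; a non-zero length copies the value from guest memory into storage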
+ if let key, state.isMemoryReadable(address: regs[2], length: Int(regs[3])) { + account = serviceAccount + if regs[3] == 0 { + account?.storage.removeValue(forKey: key) + } else { + account?.storage[key] = try state.readMemory(address: regs[2], length: Int(regs[3])) + } + } else { + account = nil + } + + let l = if let key, serviceAccount.storage.keys.contains(key) { + UInt32(serviceAccount.storage[key]!.count) + } else { + HostCallResultCode.NONE.rawValue + } + + if key != nil, let account, account.thresholdBalance(config: config) <= account.balance { + state.writeRegister(Registers.Index(raw: 0), l) + serviceAccount = account + } else if let account, account.thresholdBalance(config: config) > account.balance { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.FULL.rawValue) + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } + } +} + +/// Get information about a service account +public class Info: HostCall { + public static var identifier: UInt8 { 4 } + + public let serviceAccount: ServiceAccount + public let serviceIndex: ServiceIndex + public let serviceAccounts: [ServiceIndex: ServiceAccount] + // only used in accumulation x.n + public let newServiceAccounts: [ServiceIndex: ServiceAccount] + + public init( + account: ServiceAccount, + serviceIndex: ServiceIndex, + accounts: [ServiceIndex: ServiceAccount], + newAccounts: [ServiceIndex: ServiceAccount] + ) { + serviceAccount = account + self.serviceIndex = serviceIndex + serviceAccounts = accounts + newServiceAccounts = newAccounts + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + var account: ServiceAccount? + let reg0 = state.readRegister(Registers.Index(raw: 0)) + if reg0 == serviceIndex || reg0 == Int32.max { + account = serviceAccount + } else { + let accounts = serviceAccounts.merging(newServiceAccounts) { _, new in new } + account = accounts[reg0] + } + + let o = state.readRegister(Registers.Index(raw: 1)) + + let m: Data? 
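+        // nil m (unknown account) maps to NONE below; an unwritable destination maps to OOB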
+ if let account { + // codeHash, balance, thresholdBalance, minAccumlateGas, minOnTransferGas, totalByteLength, itemsCount + let capacity = 32 + 8 * 5 + 4 + let encoder = JamEncoder(capacity: capacity) + try encoder.encode(account.codeHash) + try encoder.encode(account.balance) + try encoder.encode(account.thresholdBalance(config: config)) + try encoder.encode(account.minAccumlateGas) + try encoder.encode(account.minOnTransferGas) + try encoder.encode(account.totalByteLength) + try encoder.encode(account.itemsCount) + m = encoder.data + } else { + m = nil + } + + if let m, state.isMemoryWritable(address: o, length: Int(m.count)) { + try state.writeMemory(address: o, values: m) + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) + } else if m == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.NONE.rawValue) + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } + } +} + +// MARK: - Accumulate + +/// Set privileged services details +public class Empower: HostCall { + public static var identifier: UInt8 { 5 } + + public var x: AccumlateResultContext + + public init(x: inout AccumlateResultContext) { + self.x = x + } + + public func _callImpl(config _: ProtocolConfigRef, state: VMState) throws { + let regs = state.readRegisters(in: 0 ..< 5) + + var basicGas: [ServiceIndex: Gas] = [:] + let length = 12 * Int(regs[4]) + if state.isMemoryReadable(address: regs[3], length: length) { + let data = try state.readMemory(address: regs[3], length: length) + for i in stride(from: 0, to: length, by: 12) { + let serviceIndex = ServiceIndex(data[i ..< i + 4].decode(UInt32.self)) + let gas = Gas(data[i + 4 ..< i + 12].decode(UInt64.self)) + basicGas[serviceIndex] = gas + } + } + + if basicGas.count != 0 { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) + x.privilegedServices.empower = regs[0] + x.privilegedServices.assign = regs[1] + x.privilegedServices.designate = regs[2] + x.privilegedServices.basicGas = basicGas + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } + } +} + +/// Set authorization queue for a service account +public class Assign: HostCall { + public static var identifier: UInt8 { 6 } + + public var x: AccumlateResultContext + + public init(x: inout AccumlateResultContext) { + self.x = x + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + let (targetCoreIndex, startAddr) = state.readRegister(Registers.Index(raw: 0), Registers.Index(raw: 1)) + + var authorizationQueue: [Data32] = [] + let length = 32 * config.value.maxAuthorizationsQueueItems + if state.isMemoryReadable(address: startAddr, length: length) { + let data = try state.readMemory(address: startAddr, length: length) + for i in stride(from: 0, to: length, by: 32) { + authorizationQueue.append(Data32(data[i ..< i + 32])!) 
+ } + } + + if targetCoreIndex < config.value.totalNumberOfCores, !authorizationQueue.isEmpty { + x.authorizationQueue[targetCoreIndex] = try ConfigFixedSizeArray(config: config, array: authorizationQueue) + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) + } else if authorizationQueue.isEmpty { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.CORE.rawValue) + } + } +} + +/// Set validator queue for a service account +public class Designate: HostCall { + public static var identifier: UInt8 { 7 } + + public var x: AccumlateResultContext + + public init(x: inout AccumlateResultContext) { + self.x = x + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + let startAddr = state.readRegister(Registers.Index(raw: 0)) + + var validatorQueue: [ValidatorKey] = [] + let length = 336 * config.value.totalNumberOfValidators + if state.isMemoryReadable(address: startAddr, length: length) { + let data = try state.readMemory(address: startAddr, length: length) + for i in stride(from: 0, to: length, by: 336) { + try validatorQueue.append(ValidatorKey(data: Data(data[i ..< i + 336]))) + } + } + + if !validatorQueue.isEmpty { + x.validatorQueue = try ConfigFixedSizeArray(config: config, array: validatorQueue) + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } + } +} + +/// Save a checkpoint +public class Checkpoint: HostCall { + public static var identifier: UInt8 { 8 } + + public let x: AccumlateResultContext + public var y: AccumlateResultContext + + public init(x: AccumlateResultContext, y: inout AccumlateResultContext) { + self.x = x + self.y = y + } + + public func _callImpl(config _: ProtocolConfigRef, state: VMState) throws { + state.writeRegister(Registers.Index(raw: 0), UInt32(bitPattern: Int32(state.getGas().value & 0xFFFF_FFFF))) + state.writeRegister(Registers.Index(raw: 1), UInt32(bitPattern: Int32(state.getGas().value >> 32))) + + y = x + } +} + +/// Create a new service account +public class New: HostCall { + public static var identifier: UInt8 { 9 } + + public var x: AccumlateResultContext + public let accounts: [ServiceIndex: ServiceAccount] + + public init(x: inout AccumlateResultContext, accounts: [ServiceIndex: ServiceAccount]) { + self.x = x + self.accounts = accounts + } + + private static func bump(i: ServiceIndex) -> ServiceIndex { + 256 + ((i - 256 + 42) & (serviceIndexModValue - 1)) + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + let regs = state.readRegisters(in: 0 ..< 6) + + let codeHash: Data32? = try? Data32(state.readMemory(address: regs[0], length: 32)) + let minAccumlateGas = Gas(0x1_0000_0000) * Gas(regs[3]) + Gas(regs[2]) + let minOnTransferGas = Gas(0x1_0000_0000) * Gas(regs[5]) + Gas(regs[4]) + + var newAccount: ServiceAccount? + if let codeHash { + newAccount = ServiceAccount( + storage: [:], + preimages: [:], + preimageInfos: [HashAndLength(hash: codeHash, length: regs[1]): []], + codeHash: codeHash, + balance: Balance(0), + minAccumlateGas: minAccumlateGas, + minOnTransferGas: minOnTransferGas + ) + newAccount!.balance = newAccount!.thresholdBalance(config: config) + } + + let newBalance = (x.account?.balance ?? 
Balance(0)) - (newAccount?.balance ?? Balance(0))
+
+        if let newAccount, x.account != nil, newBalance >= x.account!.thresholdBalance(config: config) {
+            state.writeRegister(Registers.Index(raw: 0), x.serviceIndex)
+            x.newAccounts[x.serviceIndex] = newAccount
+            x.account!.balance = newBalance
+            x.serviceIndex = try AccumulateContext.check(i: New.bump(i: x.serviceIndex), serviceAccounts: accounts)
+        } else if codeHash == nil {
+            state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue)
+        } else {
+            state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.CASH.rawValue)
+        }
+    }
+}
+
+/// Upgrade a service account
+public class Upgrade: HostCall {
+    public static var identifier: UInt8 { 10 }
+
+    public var x: AccumlateResultContext
+    public let serviceIndex: ServiceIndex
+
+    public init(x: inout AccumlateResultContext, serviceIndex: ServiceIndex) {
+        self.x = x
+        self.serviceIndex = serviceIndex
+    }
+
+    public func _callImpl(config _: ProtocolConfigRef, state: VMState) throws {
+        let regs = state.readRegisters(in: 0 ..< 5)
+
+        let codeHash: Data32? = try? Data32(state.readMemory(address: regs[0], length: 32))
+        let minAccumlateGas = Gas(0x1_0000_0000) * Gas(regs[1]) + Gas(regs[2])
+        let minOnTransferGas = Gas(0x1_0000_0000) * Gas(regs[3]) + Gas(regs[4])
+
+        if let codeHash {
+            state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue)
+            x.newAccounts[serviceIndex]?.codeHash = codeHash
+            x.newAccounts[serviceIndex]?.minAccumlateGas = minAccumlateGas
+            x.newAccounts[serviceIndex]?.minOnTransferGas = minOnTransferGas
+        } else {
+            state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue)
+        }
+    }
+}
+
+/// Add a new transfer
+public class Transfer: HostCall {
+    public static var identifier: UInt8 { 11 }
+
+    public var x: AccumlateResultContext
+    public let serviceIndex: ServiceIndex
+    public let accounts: [ServiceIndex: ServiceAccount]
+
+    public init(x: inout AccumlateResultContext, serviceIndex: ServiceIndex, accounts: [ServiceIndex: ServiceAccount]) {
+        self.x = x
+        self.serviceIndex = serviceIndex
+        self.accounts = accounts
+    }
+
+    public func gasCost(state: VMState) -> Gas {
+        let (reg1, reg2) = state.readRegister(Registers.Index(raw: 1), Registers.Index(raw: 2))
+        return Gas(10) + Gas(reg1) + Gas(0x1_0000_0000) * Gas(reg2)
+    }
+
+    public func _callImpl(config: ProtocolConfigRef, state: VMState) throws {
+        let regs = state.readRegisters(in: 0 ..< 6)
+        let amount = Balance(0x1_0000_0000) * Balance(regs[2]) + Balance(regs[1])
+        let gasLimit = Gas(0x1_0000_0000) * Gas(regs[4]) + Gas(regs[3])
+        let memo = try? state.readMemory(address: regs[5], length: config.value.transferMemoSize)
+        let dest = regs[0]
+        let allAccounts = accounts.merging(x.newAccounts) { _, new in new }
+
+        let newBalance = (x.account?.balance ??
Balance(0)) - amount + + if memo == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } else if allAccounts[dest] == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.WHO.rawValue) + } else if gasLimit < allAccounts[dest]!.minOnTransferGas { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.LOW.rawValue) + } else if Gas(state.getGas()) < gasLimit { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.HIGH.rawValue) + } else if newBalance < x.account!.thresholdBalance(config: config) { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.CASH.rawValue) + } else { + x.account!.balance = newBalance + x.transfers.append(DeferredTransfers( + sender: serviceIndex, + destination: dest, + amount: amount, + memo: Data128(memo!)!, + gasLimit: gasLimit + )) + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) + } + } +} + +/// Quit (remove) a service account +public class Quit: HostCall { + public static var identifier: UInt8 { 12 } + + public var x: AccumlateResultContext + public let serviceIndex: ServiceIndex + public let accounts: [ServiceIndex: ServiceAccount] + + public init(x: inout AccumlateResultContext, serviceIndex: ServiceIndex, accounts: [ServiceIndex: ServiceAccount]) { + self.x = x + self.serviceIndex = serviceIndex + self.accounts = accounts + } + + public func gasCost(state: VMState) -> Gas { + let (reg1, reg2) = state.readRegister(Registers.Index(raw: 1), Registers.Index(raw: 2)) + return Gas(10) + Gas(reg1) + Gas(0x1_0000_0000) * Gas(reg2) + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + let (reg0, reg1) = state.readRegister(Registers.Index(raw: 0), Registers.Index(raw: 1)) + let allAccounts = accounts.merging(x.newAccounts) { _, new in new } + let amount = (x.account?.balance ?? Balance(0)) - x.account! + .thresholdBalance(config: config) + Balance(config.value.serviceMinBalance) + let gasLimit = Gas(state.getGas()) + let dest = reg0 + + let isValidDest = dest == serviceIndex || dest == Int32.max + let memoData = try? state.readMemory(address: reg1, length: config.value.transferMemoSize) + let memo = memoData != nil ? try JamDecoder.decode(Data128.self, from: memoData!) 
: nil + + if isValidDest { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) + x.account = nil + throw VMInvocationsError.forceHalt + } else if memo == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } else if allAccounts[dest] == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.WHO.rawValue) + } else if gasLimit < allAccounts[dest]!.minOnTransferGas { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.LOW.rawValue) + } else { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OK.rawValue) + x.account = nil + x.transfers.append(DeferredTransfers( + sender: serviceIndex, + destination: dest, + amount: amount, + memo: memo!, + gasLimit: gasLimit + )) + throw VMInvocationsError.forceHalt + } + } +} + +/// Solicit data to be made available in-core (through preimage lookups) +public class Solicit: HostCall { + public static var identifier: UInt8 { 13 } + + public var x: AccumlateResultContext + public let timeslot: TimeslotIndex + + public init(x: inout AccumlateResultContext, timeslot: TimeslotIndex) { + self.x = x + self.timeslot = timeslot + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + let (startAddr, length) = state.readRegister(Registers.Index(raw: 0), Registers.Index(raw: 1)) + let hash = try? state.readMemory(address: startAddr, length: 32) + var account: ServiceAccount? + if let hash { + let hashAndLength = HashAndLength(hash: Data32(hash)!, length: length) + account = x.account + if account?.preimageInfos[hashAndLength] == nil { + account?.preimageInfos[hashAndLength] = [] + } else if account?.preimageInfos[hashAndLength]!.count == 2 { + account?.preimageInfos[hashAndLength]!.append(timeslot) + } + } + + if hash == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } else if account == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.HUH.rawValue) + } else if account!.balance < account!.thresholdBalance(config: config) { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.FULL.rawValue) + } else { + x.account = account + } + } +} + +/// Forget data made available in-core (through preimage lookups) +public class Forget: HostCall { + public static var identifier: UInt8 { 14 } + + public var x: AccumlateResultContext + public let timeslot: TimeslotIndex + + public init(x: inout AccumlateResultContext, timeslot: TimeslotIndex) { + self.x = x + self.timeslot = timeslot + } + + public func _callImpl(config: ProtocolConfigRef, state: VMState) throws { + let (startAddr, length) = state.readRegister(Registers.Index(raw: 0), Registers.Index(raw: 1)) + let hash = try? state.readMemory(address: startAddr, length: 32) + var account: ServiceAccount? 
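+        // The timeslot list encodes the preimage's availability history; depending on its length, forget either purges entries older than the purge period or records the current timeslot as the moment the preimage became unavailable.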
+ if let hash { + let hashAndLength = HashAndLength(hash: Data32(hash)!, length: length) + account = x.account + let value = account?.preimageInfos[hashAndLength] + let minHoldPeriod = TimeslotIndex(config.value.preimagePurgePeriod) + + if value?.count == 0 || (value?.count == 2 && value![1] < timeslot - minHoldPeriod) { + account?.preimageInfos.removeValue(forKey: hashAndLength) + account?.preimages.removeValue(forKey: hashAndLength.hash) + } else if value?.count == 1 { + account?.preimageInfos[hashAndLength]!.append(timeslot) + } else if value?.count == 3, value![1] < timeslot - minHoldPeriod { + account?.preimageInfos[hashAndLength] = [value![2], timeslot] + } + } + + if hash == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.OOB.rawValue) + } else if account == nil { + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.HUH.rawValue) + } else { + x.account = account + } + } +} diff --git a/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/AccumulateContext.swift b/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/AccumulateContext.swift new file mode 100644 index 00000000..2c3c37f3 --- /dev/null +++ b/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/AccumulateContext.swift @@ -0,0 +1,99 @@ +import Foundation +import PolkaVM +import TracingUtils + +private let logger = Logger(label: "AccumulateContext") + +public class AccumulateContext: InvocationContext { + public typealias ContextType = ( + x: AccumlateResultContext, + y: AccumlateResultContext, // only set in checkpoint function + serviceIndex: ServiceIndex, + accounts: [ServiceIndex: ServiceAccount], + timeslot: TimeslotIndex + ) + + public var config: ProtocolConfigRef + public var context: ContextType + + public init(context: ContextType, config: ProtocolConfigRef) { + self.config = config + self.context = context + } + + public func dispatch(index: UInt32, state: VMState) -> ExecOutcome { + logger.debug("dispatching host-call: \(index)") + guard context.x.account != nil else { + fatalError("context x.account is nil") + } + switch UInt8(index) { + case Read.identifier: + return Read(account: context.x.account!, serviceIndex: context.serviceIndex, accounts: context.accounts) + .call(config: config, state: state) + case Write.identifier: + return Write(account: &context.x.account!, serviceIndex: context.serviceIndex) + .call(config: config, state: state) + case Lookup.identifier: + return Lookup(account: context.x.account!, serviceIndex: context.serviceIndex, accounts: context.accounts) + .call(config: config, state: state) + case GasFn.identifier: + return GasFn().call(config: config, state: state) + case Info.identifier: + return Info( + account: context.x.account!, + serviceIndex: context.serviceIndex, + accounts: context.accounts, + newAccounts: context.x.newAccounts + ) + .call(config: config, state: state) + case Empower.identifier: + return Empower(x: &context.x).call(config: config, state: state) + case Assign.identifier: + return Assign(x: &context.x).call(config: config, state: state) + case Designate.identifier: + return Designate(x: &context.x).call(config: config, state: state) + case Checkpoint.identifier: + return Checkpoint(x: context.x, y: &context.y).call(config: config, state: state) + case New.identifier: + return New(x: &context.x, accounts: context.accounts).call(config: config, state: state) + case Upgrade.identifier: + return Upgrade(x: &context.x, serviceIndex: context.serviceIndex) + .call(config: config, state: state) + case 
Transfer.identifier: + return Transfer(x: &context.x, serviceIndex: context.serviceIndex, accounts: context.accounts) + .call(config: config, state: state) + case Quit.identifier: + return Quit(x: &context.x, serviceIndex: context.serviceIndex, accounts: context.accounts) + .call(config: config, state: state) + case Solicit.identifier: + return Solicit(x: &context.x, timeslot: context.timeslot).call(config: config, state: state) + case Forget.identifier: + return Forget(x: &context.x, timeslot: context.timeslot).call(config: config, state: state) + default: + state.consumeGas(Gas(10)) + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.WHAT.rawValue) + return .continued + } + } + + // a check function to find the first such index in this sequence which does not already represent a service + public static func check(i: ServiceIndex, serviceAccounts: [ServiceIndex: ServiceAccount]) throws -> ServiceIndex { + var currentIndex = i + let maxIter = serviceIndexModValue + var iter = 0 + + guard currentIndex >= 255 else { + throw VMInvocationsError.checkIndexTooSmall + } + + while serviceAccounts.keys.contains(currentIndex) { + currentIndex = (currentIndex - 255) & (serviceIndexModValue - 1) + 256 + iter += 1 + + if iter > maxIter { + throw VMInvocationsError.checkMaxDepthLimit + } + } + return currentIndex + } +} diff --git a/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/IsAuthorizedContext.swift b/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/IsAuthorizedContext.swift index d06027de..2152dbb3 100644 --- a/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/IsAuthorizedContext.swift +++ b/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/IsAuthorizedContext.swift @@ -8,27 +8,19 @@ public class IsAuthorizedContext: InvocationContext { public typealias ContextType = Void public var context: ContextType = () + public let config: ProtocolConfigRef - public init() {} + public init(config: ProtocolConfigRef) { + self.config = config + } public func dispatch(index: UInt32, state: VMState) -> ExecOutcome { - do { - if index == GasFn.identifier { - try GasFn.call(state: state, input: ()) - } else { - state.consumeGas(10) - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.WHAT.rawValue) - } + if index == GasFn.identifier { + return GasFn().call(config: config, state: state) + } else { + state.consumeGas(Gas(10)) + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.WHAT.rawValue) return .continued - } catch let e as Memory.Error { - logger.error("invocation memory error: \(e)") - return .exit(.pageFault(e.address)) - } catch let e as VMInvocationsError { - logger.error("invocation dispatch error: \(e)") - return .exit(.panic(.trap)) - } catch let e { - logger.error("invocation unknown error: \(e)") - return .exit(.panic(.trap)) } } } diff --git a/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/OnTransferContext.swift b/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/OnTransferContext.swift index 6167f45d..d8323753 100644 --- a/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/OnTransferContext.swift +++ b/Blockchain/Sources/Blockchain/VMInvocations/InvocationContexts/OnTransferContext.swift @@ -5,7 +5,11 @@ import TracingUtils private let logger = Logger(label: "OnTransferContext") public class OnTransferContext: InvocationContext { - public typealias ContextType = (ServiceAccount, ServiceIndex, [ServiceIndex: ServiceAccount]) + public typealias ContextType = ( + 
account: ServiceAccount, + index: ServiceIndex, + accounts: [ServiceIndex: ServiceAccount] + ) public var config: ProtocolConfigRef public var context: ContextType @@ -16,32 +20,31 @@ public class OnTransferContext: InvocationContext { } public func dispatch(index: UInt32, state: VMState) -> ExecOutcome { - do { - switch UInt8(index) { - case Lookup.identifier: - try Lookup.call(state: state, input: (context.0, context.1, context.2)) - case Read.identifier: - try Read.call(state: state, input: (context.0, context.1, context.2)) - case Write.identifier: - context.0 = try Write.call(state: state, input: (config, context.0, context.1)) - case GasFn.identifier: - try GasFn.call(state: state, input: ()) - case Info.identifier: - try Info.call(state: state, input: (config, context.0, context.1, context.2, [:])) - default: - state.consumeGas(10) - state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.WHAT.rawValue) - } + logger.debug("dispatching host-call: \(index)") + switch UInt8(index) { + case Lookup.identifier: + return Lookup(account: context.account, serviceIndex: context.index, accounts: context.accounts) + .call(config: config, state: state) + case Read.identifier: + return Read(account: context.account, serviceIndex: context.index, accounts: context.accounts) + .call(config: config, state: state) + case Write.identifier: + return Write(account: &context.account, serviceIndex: context.index) + .call(config: config, state: state) + case GasFn.identifier: + return GasFn().call(config: config, state: state) + case Info.identifier: + return Info( + account: context.account, + serviceIndex: context.index, + accounts: context.accounts, + newAccounts: [:] + ) + .call(config: config, state: state) + default: + state.consumeGas(Gas(10)) + state.writeRegister(Registers.Index(raw: 0), HostCallResultCode.WHAT.rawValue) return .continued - } catch let e as Memory.Error { - logger.error("invocation memory error: \(e)") - return .exit(.pageFault(e.address)) - } catch let e as VMInvocationsError { - logger.error("invocation dispatch error: \(e)") - return .exit(.panic(.trap)) - } catch let e { - logger.error("invocation unknown error: \(e)") - return .exit(.panic(.trap)) } } } diff --git a/Blockchain/Sources/Blockchain/VMInvocations/Invocations/AccumulateInvocation.swift b/Blockchain/Sources/Blockchain/VMInvocations/Invocations/AccumulateInvocation.swift new file mode 100644 index 00000000..62ed3a9d --- /dev/null +++ b/Blockchain/Sources/Blockchain/VMInvocations/Invocations/AccumulateInvocation.swift @@ -0,0 +1,87 @@ +import Codec +import Foundation +import PolkaVM +import Utils + +extension AccumulateFunction { + public func invoke( + config: ProtocolConfigRef, + serviceIndex: ServiceIndex, + code _: Data, + serviceAccounts: [ServiceIndex: ServiceAccount], + gas: Gas, + arguments: [AccumulateArguments], + validatorQueue: ConfigFixedSizeArray< + ValidatorKey, ProtocolConfig.TotalNumberOfValidators + >, + authorizationQueue: ConfigFixedSizeArray< + ConfigFixedSizeArray< + Data32, + ProtocolConfig.MaxAuthorizationsQueueItems + >, + ProtocolConfig.TotalNumberOfCores + >, + privilegedServices: PrivilegedServices, + initialIndex: ServiceIndex, + timeslot: TimeslotIndex + ) throws -> (ctx: AccumlateResultContext, result: Data32?) 
{ + var defaultCtx = AccumlateResultContext( + account: serviceAccounts[serviceIndex], + authorizationQueue: authorizationQueue, + validatorQueue: validatorQueue, + serviceIndex: serviceIndex, + transfers: [], + newAccounts: [:], + privilegedServices: privilegedServices + ) + + if serviceAccounts[serviceIndex]?.codeHash.data == nil { + return (ctx: defaultCtx, result: nil) + } + + defaultCtx.serviceIndex = try AccumulateContext.check( + i: initialIndex & (serviceIndexModValue - 1) + 256, + serviceAccounts: serviceAccounts + ) + + let ctx = AccumulateContext( + context: ( + x: defaultCtx, + y: defaultCtx, + serviceIndex: serviceIndex, + accounts: serviceAccounts, + timeslot: timeslot + ), + config: config + ) + let argument = try JamEncoder.encode(arguments) + + let (exitReason, _, _, output) = invokePVM( + config: config, + blob: serviceAccounts[serviceIndex]!.codeHash.data, + pc: 10, + gas: gas, + argumentData: argument, + ctx: ctx + ) + + return try collapse(exitReason: exitReason, output: output, context: ctx.context) + } + + // collapse function C selects one of the two dimensions of context depending on whether the virtual + // machine’s halt was regular or exceptional + private func collapse( + exitReason: ExitReason, output: Data?, context: AccumulateContext.ContextType + ) throws -> (ctx: AccumlateResultContext, result: Data32?) { + switch exitReason { + case .halt: + if let output, let o = Data32(output) { + (ctx: context.x, result: o) + } else { + (ctx: context.x, result: nil) + } + default: + (ctx: context.y, result: nil) + } + } +} diff --git a/Blockchain/Sources/Blockchain/VMInvocations/IsAuthorizedInvocation.swift b/Blockchain/Sources/Blockchain/VMInvocations/Invocations/IsAuthorizedInvocation.swift similarity index 95% rename from Blockchain/Sources/Blockchain/VMInvocations/IsAuthorizedInvocation.swift rename to Blockchain/Sources/Blockchain/VMInvocations/Invocations/IsAuthorizedInvocation.swift index e271d773..457425d2 100644 --- a/Blockchain/Sources/Blockchain/VMInvocations/IsAuthorizedInvocation.swift +++ b/Blockchain/Sources/Blockchain/VMInvocations/Invocations/IsAuthorizedInvocation.swift @@ -13,7 +13,7 @@ public protocol IsAuthorizedFunction { extension IsAuthorizedFunction { public func invoke(config: ProtocolConfigRef, package: WorkPackage, coreIndex: CoreIndex) throws -> Result { let args = try JamEncoder.encode(package) + JamEncoder.encode(coreIndex) - let ctx = IsAuthorizedContext() + let ctx = IsAuthorizedContext(config: config) let (exitReason, _, _, output) = invokePVM( config: config, diff --git a/Blockchain/Sources/Blockchain/VMInvocations/OnTransferInvocation.swift b/Blockchain/Sources/Blockchain/VMInvocations/Invocations/OnTransferInvocation.swift similarity index 83% rename from Blockchain/Sources/Blockchain/VMInvocations/OnTransferInvocation.swift rename to Blockchain/Sources/Blockchain/VMInvocations/Invocations/OnTransferInvocation.swift index 9f8f90a0..e6db3e8d 100644 --- a/Blockchain/Sources/Blockchain/VMInvocations/OnTransferInvocation.swift +++ b/Blockchain/Sources/Blockchain/VMInvocations/Invocations/OnTransferInvocation.swift @@ -1,6 +1,7 @@ import Codec import Foundation import PolkaVM +import Utils extension OnTransferFunction { public func invoke( @@ -14,20 +15,20 @@ extension OnTransferFunction { throw VMInvocationsError.serviceAccountNotFound } - account.balance += transfers.reduce(0) { $0 + $1.amount } + account.balance += transfers.reduce(Balance(0)) { $0 + $1.amount } if account.codeHash.data.isEmpty || transfers.isEmpty { return account } 
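+        // Run the service's on_transfer entry point once for the whole batch, with gas equal to the sum of the incoming transfers' gas limits.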
let ctx = OnTransferContext(context: (account, service, serviceAccounts), config: config) - let gasLimitSum = transfers.reduce(0) { $0 + $1.gasLimit } + let gasLimitSum = transfers.reduce(Balance(0)) { $0 + $1.gasLimit } let argument = try JamEncoder.encode(transfers) _ = invokePVM( config: config, blob: account.codeHash.data, - pc: 3, + pc: 15, gas: gasLimitSum, argumentData: argument, ctx: ctx diff --git a/Blockchain/Sources/Blockchain/Validator/BlockAuthor.swift b/Blockchain/Sources/Blockchain/Validator/BlockAuthor.swift index a3816247..5decab33 100644 --- a/Blockchain/Sources/Blockchain/Validator/BlockAuthor.swift +++ b/Blockchain/Sources/Blockchain/Validator/BlockAuthor.swift @@ -6,24 +6,25 @@ import Utils private let logger = Logger(label: "BlockAuthor") public final class BlockAuthor: ServiceBase2, @unchecked Sendable { - private let blockchain: Blockchain + private let dataProvider: BlockchainDataProvider private let keystore: KeyStore private let extrinsicPool: ExtrinsicPoolService private var tickets: ThreadSafeContainer<[RuntimeEvents.SafroleTicketsGenerated]> = .init([]) public init( - blockchain: Blockchain, + config: ProtocolConfigRef, + dataProvider: BlockchainDataProvider, eventBus: EventBus, keystore: KeyStore, scheduler: Scheduler, extrinsicPool: ExtrinsicPoolService ) async { - self.blockchain = blockchain + self.dataProvider = dataProvider self.keystore = keystore self.extrinsicPool = extrinsicPool - super.init(blockchain.config, eventBus, scheduler) + super.init(config, eventBus, scheduler) await subscribe(RuntimeEvents.SafroleTicketsGenerated.self) { [weak self] event in try await self?.on(safroleTicketsGenerated: event) @@ -31,18 +32,16 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { } private func scheduleForNextEpoch() async { - let now = timeProvider.getTime() / UInt32(config.value.slotPeriodSeconds) - let nextEpoch = now.toEpochIndex(config: config) + 1 - let timeslot = nextEpoch.toTimeslotIndex(config: config) + let now = timeProvider.getTimeslot() + let nextEpoch = now.timeslotToEpochIndex(config: config) + 1 + let timeslot = nextEpoch.epochToTimeslotIndex(config: config) // at end of an epoch, try to determine the block author of next epoch // and schedule new block task schedule(at: timeslot - 1) { [weak self] in if let self { - Task { - await self.onBeforeEpoch(timeslot: timeslot) - await self.scheduleForNextEpoch() - } + await onBeforeEpoch(timeslot: timeslot) + await scheduleForNextEpoch() } } } @@ -55,13 +54,34 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { public func createNewBlock(claim: Either<(TicketItemAndOutput, Bandersnatch.PublicKey), Bandersnatch.PublicKey>) async throws -> BlockRef { - let parentHash = blockchain.bestHead - let state = try await blockchain.getState(hash: parentHash) - guard let state else { - try throwUnreachable("no state for best head") - } - - let extrinsic = Extrinsic.dummy(config: config) + let parentHash = dataProvider.bestHead + let state = try await dataProvider.getState(hash: parentHash) + let timeslot = timeProvider.getTimeslot() + let epoch = timeslot.timeslotToEpochIndex(config: config) + + let pendingTickets = await extrinsicPool.getPendingTickets(epoch: epoch) + let existingTickets = SortedArray(sortedUnchecked: state.value.safroleState.ticketsAccumulator.array.map(\.id)) + let tickets = pendingTickets.array + .lazy + .filter { ticket in + !existingTickets.contains(ticket.output) + } + .trimmingPrefix { ticket in + guard let last = existingTickets.array.last else { + 
return true + } + return ticket.output < last + } + .prefix(config.value.maxTicketsPerExtrinsic) + .map(\.ticket) + + let extrinsic = try Extrinsic( + tickets: ExtrinsicTickets(tickets: ConfigLimitedSizeArray(config: config, array: Array(tickets))), + judgements: ExtrinsicDisputes.dummy(config: config), // TODO: + preimages: ExtrinsicPreimages.dummy(config: config), // TODO: + availability: ExtrinsicAvailability.dummy(config: config), // TODO: + reports: ExtrinsicGuarantees.dummy(config: config) // TODO: + ) let (ticket, publicKey): (TicketItemAndOutput?, Bandersnatch.PublicKey) = switch claim { case let .left((ticket, publicKey)): @@ -93,13 +113,21 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { try throwUnreachable("author not in current validator") } + let safroleResult = try state.value.updateSafrole( + config: config, + slot: timeslot, + entropy: state.value.entropyPool.t0, + offenders: state.value.judgements.punishSet, + extrinsics: extrinsic.tickets + ) + let unsignedHeader = Header.Unsigned( parentHash: parentHash, priorStateRoot: state.stateRoot, extrinsicsHash: extrinsic.hash(), - timeslot: timeProvider.getTime().toTimeslotIndex(config: config), - epoch: nil, // TODO: - winningTickets: nil, // TODO: + timeslot: timeslot, + epoch: safroleResult.epochMark, + winningTickets: safroleResult.ticketsMark, offendersMarkers: [], // TODO: authorIndex: ValidatorIndex(authorIndex), vrfSignature: vrfSignature @@ -110,7 +138,7 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { let seal = if let ticket { try secretKey.ietfVRFSign( vrfInputData: SigningContext.safroleTicketInputData( - entropy: vrfOutput, + entropy: state.value.entropyPool.t3, attempt: ticket.ticket.attempt ), auxData: encodedHeader @@ -134,7 +162,7 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { await withSpan("BlockAuthor.newBlock", logger: logger) { _ in let block = try await createNewBlock(claim: claim) logger.info("New block created: \(block.hash)") - await publish(RuntimeEvents.BlockAuthored(block: block)) + publish(RuntimeEvents.BlockAuthored(block: block)) } } @@ -146,11 +174,8 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { await withSpan("BlockAuthor.onBeforeEpoch", logger: logger) { _ in tickets.value = [] - let bestHead = blockchain.bestHead - let state = try await blockchain.getState(hash: bestHead) - guard let state else { - try throwUnreachable("no state for best head") - } + let bestHead = dataProvider.bestHead + let state = try await dataProvider.getState(hash: bestHead) // simulate next block to determine the block authors for next epoch let res = try state.value.updateSafrole( @@ -167,9 +192,9 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { private func scheduleNewBlocks(ticketsOrKeys: SafroleTicketsOrKeys) async { let selfTickets = tickets.value - let now = timeProvider.getTime() - let epochBase = now.toEpochIndex(config: config) - let timeslotBase = epochBase.toTimeslotIndex(config: config) + let now = timeProvider.getTimeslot() + let epochBase = now.timeslotToEpochIndex(config: config) + let timeslotBase = epochBase.epochToTimeslotIndex(config: config) switch ticketsOrKeys { case let .left(tickets): if selfTickets.isEmpty { @@ -178,12 +203,13 @@ public final class BlockAuthor: ServiceBase2, @unchecked Sendable { for (idx, ticket) in tickets.enumerated() { if let claim = selfTickets.first(withOutput: ticket.id) { let timeslot = timeslotBase + TimeslotIndex(idx) - logger.info("Scheduling new block 
task at timeslot \(timeslot))")
+                        if timeslot <= now {
+                            continue
+                        }
+                        logger.debug("Scheduling new block task at timeslot \(timeslot)")
                         schedule(at: timeslot) { [weak self] in
                             if let self {
-                                Task {
-                                    await self.newBlock(claim: .left(claim))
-                                }
+                                await newBlock(claim: .left(claim))
                             }
                         }
                     }
@@ -193,12 +219,13 @@
                 let pubkey = try? Bandersnatch.PublicKey(data: key)
                 if let pubkey, await keystore.contains(publicKey: pubkey) {
                     let timeslot = timeslotBase + TimeslotIndex(idx)
-                    logger.info("Scheduling new block task at timeslot \(timeslot))")
+                    if timeslot <= now {
+                        continue
+                    }
+                    logger.debug("Scheduling new block task at timeslot \(timeslot)")
                     schedule(at: timeslot) { [weak self] in
                         if let self {
-                            Task {
-                                await self.newBlock(claim: .right(pubkey))
-                            }
+                            await newBlock(claim: .right(pubkey))
                         }
                     }
                 }
diff --git a/Blockchain/Sources/Blockchain/Validator/DevKeyStore.swift b/Blockchain/Sources/Blockchain/Validator/DevKeyStore.swift
index 2828a68e..4a8dd0cc 100644
--- a/Blockchain/Sources/Blockchain/Validator/DevKeyStore.swift
+++ b/Blockchain/Sources/Blockchain/Validator/DevKeyStore.swift
@@ -4,11 +4,11 @@ import Utils
 public final class DevKeyStore: KeyStore {
     private let keystore: InMemoryKeyStore
-    public init(devKeysCount: UInt32 = 12) async throws {
+    public init(devKeysCount: Int = 12) async throws {
         keystore = InMemoryKeyStore()
         for i in 0 ..< devKeysCount {
-            _ = try await addDevKeys(seed: i)
+            _ = try await addDevKeys(seed: UInt32(i))
         }
     }
diff --git a/Blockchain/Sources/Blockchain/Validator/ExtrinsicPoolService.swift b/Blockchain/Sources/Blockchain/Validator/ExtrinsicPoolService.swift
index abe31570..2f13a5e6 100644
--- a/Blockchain/Sources/Blockchain/Validator/ExtrinsicPoolService.swift
+++ b/Blockchain/Sources/Blockchain/Validator/ExtrinsicPoolService.swift
@@ -7,7 +7,7 @@ private let logger = Logger(label: "ExtrinsicPoolService")
 private actor ServiceStorage {
     // sorted array ordered by output
-    var pendingTickets: SortedArray<TicketItemAndOutput> = .init()
+    var pendingTickets: SortedUniqueArray<TicketItemAndOutput> = .init()
     var epoch: EpochIndex = 0
     var verifier: Bandersnatch.Verifier!
     var entropy: Data32 = .init()
@@ -17,8 +17,12 @@
         self.ringContext = ringContext
     }
-    func add(tickets: [TicketItem]) {
+    func add(tickets: [TicketItem], config: ProtocolConfigRef) {
         for ticket in tickets {
+            if (try? ticket.validate(config: config)) == nil {
+                logger.info("Received invalid ticket: \(ticket)")
+                continue
+            }
             let inputData = SigningContext.safroleTicketInputData(entropy: entropy, attempt: ticket.attempt)
             let output = try?
verifier.ringVRFVerify(vrfInputData: inputData, signature: ticket.signature)
             guard let output else {
@@ -34,36 +38,48 @@
     }
     func update(state: StateRef, config: ProtocolConfigRef) throws {
-        let epoch = state.value.timeslot.toEpochIndex(config: config)
+        let epoch = state.value.timeslot.timeslotToEpochIndex(config: config)
         if verifier == nil || self.epoch != epoch {
             let commitment = try Bandersnatch.RingCommitment(data: state.value.safroleState.ticketsVerifier)
             let verifier = Bandersnatch.Verifier(ctx: ringContext, commitment: commitment)
             self.epoch = epoch
             self.verifier = verifier
-            entropy = state.value.entropyPool.t2
+            entropy = state.value.entropyPool.t3
             pendingTickets.removeAll()
         }
     }
     func removeTickets(tickets: [TicketItem]) {
         pendingTickets.remove { ticket in
-            !tickets.contains { $0 == ticket.ticket }
+            tickets.contains { $0 == ticket.ticket }
+        }
+    }
+
+    func getPendingTickets(epoch: EpochIndex) -> SortedUniqueArray<TicketItemAndOutput> {
+        if epoch != self.epoch {
+            .init()
+        } else {
+            pendingTickets
+        }
+    }
 }
 public final class ExtrinsicPoolService: ServiceBase, @unchecked Sendable {
     private var storage: ServiceStorage
-    private let blockchain: Blockchain
+    private let dataProvider: BlockchainDataProvider
-    public init(blockchain: Blockchain, eventBus: EventBus) async {
-        self.blockchain = blockchain
+    public init(
+        config: ProtocolConfigRef,
+        dataProvider: BlockchainDataProvider,
+        eventBus: EventBus
+    ) async {
+        self.dataProvider = dataProvider
-        let ringContext = try! Bandersnatch.RingContext(size: UInt(blockchain.config.value.totalNumberOfValidators))
+        let ringContext = try! Bandersnatch.RingContext(size: UInt(config.value.totalNumberOfValidators))
         storage = ServiceStorage(ringContext: ringContext)
-        super.init(blockchain.config, eventBus)
+        super.init(config, eventBus)
         await subscribe(RuntimeEvents.SafroleTicketsGenerated.self) { [weak self] event in
             try await self?.on(safroleTicketsGenerated: event)
@@ -82,36 +98,25 @@
         // Safrole VRF commitments only changes every epoch
         // and we should never receive tickets at very beginning and very end of an epoch
         // so it is safe to use best head state without worrying about forks or edge cases
-        let state = try await blockchain.getState(hash: blockchain.bestHead)
-        guard let state else {
-            try throwUnreachable("no state for best head")
-        }
-
-        try await storage.update(state: state, config: blockchain.config)
+        let state = try await dataProvider.getState(hash: dataProvider.bestHead)
+        try await storage.update(state: state, config: config)
         await storage.add(tickets: tickets.items)
     }
     private func on(safroleTicketsReceived tickets: RuntimeEvents.SafroleTicketsReceived) async throws {
-        let state = try await blockchain.getState(hash: blockchain.bestHead)
-        guard let state else {
-            try throwUnreachable("no state for best head")
-        }
+        let state = try await dataProvider.getState(hash: dataProvider.bestHead)
-        try await storage.update(state: state, config: blockchain.config)
-        await storage.add(tickets: tickets.items)
+        try await storage.update(state: state, config: config)
+        await storage.add(tickets: tickets.items, config: config)
     }
     private func on(blockFinalized event: RuntimeEvents.BlockFinalized) async throws {
-        let block = try await blockchain.getBlock(hash: event.hash)
-        guard let block else {
-            try throwUnreachable("no block for finalized head")
-        }
+        let block = try await dataProvider.getBlock(hash: event.hash)
+
         await storage.removeTickets(tickets:
block.extrinsic.tickets.tickets.array)
     }
-    public var pendingTickets: SortedArray<TicketItemAndOutput> {
-        get async {
-            await storage.pendingTickets
-        }
+    public func getPendingTickets(epoch: EpochIndex) async -> SortedUniqueArray<TicketItemAndOutput> {
+        await storage.getPendingTickets(epoch: epoch)
     }
 }
diff --git a/Blockchain/Sources/Blockchain/Validator/SafroleService.swift b/Blockchain/Sources/Blockchain/Validator/SafroleService.swift
index 82ced72a..be76160b 100644
--- a/Blockchain/Sources/Blockchain/Validator/SafroleService.swift
+++ b/Blockchain/Sources/Blockchain/Validator/SafroleService.swift
@@ -49,13 +49,13 @@
     }
     private func generateAndSubmitTickets(state: StateRef) async throws {
-        let events = try await generateTickets(state: state)
+        let events = try await generateTicketEvents(state: state)
         for event in events {
-            await publish(event)
+            publish(event)
         }
     }
-    private func generateTickets(state: StateRef) async throws -> [RuntimeEvents.SafroleTicketsGenerated] {
+    private func generateTicketEvents(state: StateRef) async throws -> [RuntimeEvents.SafroleTicketsGenerated] {
         var events = [RuntimeEvents.SafroleTicketsGenerated]()
         for (idx, validator) in state.value.nextValidators.enumerated() {
@@ -70,31 +70,17 @@
             logger.debug("Generating tickets for validator \(pubkey)")
             try withSpan("generateTickets") { _ in
-                let pubkeys = try state.value.nextValidators.map {
-                    try Bandersnatch.PublicKey(data: $0.bandersnatch)
-                }
-
-                let prover = Bandersnatch.Prover(sercret: secret, ring: pubkeys, proverIdx: UInt(idx), ctx: ringContext)
-                let verifier = try Bandersnatch.Verifier(
-                    ctx: ringContext,
-                    commitment: Bandersnatch.RingCommitment(data: state.value.safroleState.ticketsVerifier)
+                let tickets = try SafroleService.generateTickets(
+                    count: TicketIndex(config.value.ticketEntriesPerValidator),
+                    validators: state.value.nextValidators.array,
+                    entropy: state.value.entropyPool.t2,
+                    ringContext: ringContext,
+                    secret: secret,
+                    idx: UInt32(idx)
                 )
-                var vrfInputData = SigningContext.safroleTicketInputData(entropy: state.value.entropyPool.t2, attempt: 0)
-
-                let sig1 = try prover.ringVRFSign(vrfInputData: vrfInputData)
-                let out1 = try verifier.ringVRFVerify(vrfInputData: vrfInputData, signature: sig1)
-
-                vrfInputData[vrfInputData.count - 1] = TicketIndex(1)
-
-                let sig2 = try prover.ringVRFSign(vrfInputData: vrfInputData)
-                let out2 = try verifier.ringVRFVerify(vrfInputData: vrfInputData, signature: sig2)
-
                 events.append(.init(
-                    items: [
-                        .init(ticket: .init(attempt: 0, signature: sig1), output: out1),
-                        .init(ticket: .init(attempt: 1, signature: sig2), output: out2),
-                    ],
+                    items: tickets,
                     publicKey: secret.publicKey
                 ))
             }
@@ -106,4 +92,32 @@
         return events
     }
+
+    public static func generateTickets(
+        count: TicketIndex,
+        validators: [ValidatorKey],
+        entropy: Data32,
+        ringContext: Bandersnatch.RingContext,
+        secret: Bandersnatch.SecretKey,
+        idx: UInt32
+    ) throws -> [TicketItemAndOutput] {
+        let pubkeys = try validators.map {
+            try Bandersnatch.PublicKey(data: $0.bandersnatch)
+        }
+
+        let prover = Bandersnatch.Prover(sercret: secret, ring: pubkeys, proverIdx: UInt(idx), ctx: ringContext)
+
+        var vrfInputData = SigningContext.safroleTicketInputData(entropy: entropy, attempt: 0)
+
+        var tickets: [TicketItemAndOutput] = []
+
+        for i in 0 ..< count {
+            vrfInputData[vrfInputData.count - 1] = TicketIndex(i)
+            let sig = try
prover.ringVRFSign(vrfInputData: vrfInputData) + let out = try secret.getOutput(vrfInputData: vrfInputData) + tickets.append(.init(ticket: .init(attempt: TicketIndex(i), signature: sig), output: out)) + } + + return tickets + } } diff --git a/Blockchain/Sources/Blockchain/Validator/ServiceBase.swift b/Blockchain/Sources/Blockchain/Validator/ServiceBase.swift index f2e31efd..3bf5fc5c 100644 --- a/Blockchain/Sources/Blockchain/Validator/ServiceBase.swift +++ b/Blockchain/Sources/Blockchain/Validator/ServiceBase.swift @@ -26,8 +26,8 @@ public class ServiceBase { await eventBus.unsubscribe(token: token) } - func publish(_ event: some Event) async { - await eventBus.publish(event) + func publish(_ event: some Event) { + eventBus.publish(event) } deinit { diff --git a/Blockchain/Sources/Blockchain/Validator/ServiceBase2.swift b/Blockchain/Sources/Blockchain/Validator/ServiceBase2.swift index e11125e4..6be3938d 100644 --- a/Blockchain/Sources/Blockchain/Validator/ServiceBase2.swift +++ b/Blockchain/Sources/Blockchain/Validator/ServiceBase2.swift @@ -30,23 +30,31 @@ public class ServiceBase2: ServiceBase { } @discardableResult - public func schedule(delay: TimeInterval, repeats: Bool = false, task: @escaping @Sendable () -> Void) -> Cancellable { + public func schedule(delay: TimeInterval, repeats: Bool = false, task: @escaping @Sendable () async -> Void) -> Cancellable { let id = Self.idGenerator.loadThenWrappingIncrement(ordering: .relaxed) let cancellables = cancellables - let cancellable = scheduler.schedule(delay: delay, repeats: repeats, task: task, onCancel: { + let cancellable = scheduler.schedule(delay: delay, repeats: repeats) { + if !repeats { + cancellables.write { $0.remove(IdCancellable(id: id, cancellable: nil)) } + } + await task() + } onCancel: { cancellables.write { $0.remove(IdCancellable(id: id, cancellable: nil)) } - }) + } cancellables.write { $0.insert(IdCancellable(id: id, cancellable: cancellable)) } return cancellable } @discardableResult - public func schedule(at timeslot: TimeslotIndex, task: @escaping @Sendable () -> Void) -> Cancellable { + public func schedule(at timeslot: TimeslotIndex, task: @escaping @Sendable () async -> Void) -> Cancellable { let id = Self.idGenerator.loadThenWrappingIncrement(ordering: .relaxed) let cancellables = cancellables - let cancellable = scheduler.schedule(at: timeslot, task: task, onCancel: { + let cancellable = scheduler.schedule(at: timeslot) { + cancellables.write { $0.remove(IdCancellable(id: id, cancellable: nil)) } + await task() + } onCancel: { cancellables.write { $0.remove(IdCancellable(id: id, cancellable: nil)) } - }) + } cancellables.write { $0.insert(IdCancellable(id: id, cancellable: cancellable)) } return cancellable } diff --git a/Blockchain/Sources/Blockchain/Validator/Validator.swift b/Blockchain/Sources/Blockchain/Validator/ValidatorService.swift similarity index 79% rename from Blockchain/Sources/Blockchain/Validator/Validator.swift rename to Blockchain/Sources/Blockchain/Validator/ValidatorService.swift index fa974246..2ff891c5 100644 --- a/Blockchain/Sources/Blockchain/Validator/Validator.swift +++ b/Blockchain/Sources/Blockchain/Validator/ValidatorService.swift @@ -1,7 +1,7 @@ import Foundation import Utils -public class Validator { +public class ValidatorService { private let blockchain: Blockchain private let keystore: KeyStore private let safrole: SafroleService @@ -12,7 +12,8 @@ public class Validator { blockchain: Blockchain, keystore: KeyStore, eventBus: EventBus, - scheduler: Scheduler + scheduler: 
Scheduler, + dataProvider: BlockchainDataProvider ) async { self.blockchain = blockchain self.keystore = keystore @@ -24,12 +25,14 @@ public class Validator { ) extrinsicPool = await ExtrinsicPoolService( - blockchain: blockchain, + config: blockchain.config, + dataProvider: dataProvider, eventBus: eventBus ) blockAuthor = await BlockAuthor( - blockchain: blockchain, + config: blockchain.config, + dataProvider: dataProvider, eventBus: eventBus, keystore: keystore, scheduler: scheduler, diff --git a/Blockchain/Tests/BlockchainTests/BlockAuthorTests.swift b/Blockchain/Tests/BlockchainTests/BlockAuthorTests.swift new file mode 100644 index 00000000..96e797dc --- /dev/null +++ b/Blockchain/Tests/BlockchainTests/BlockAuthorTests.swift @@ -0,0 +1,128 @@ +import Foundation +import Testing +import TracingUtils +import Utils + +@testable import Blockchain + +struct BlockAuthorTests { + let config: ProtocolConfigRef + let timeProvider: MockTimeProvider + let dataProvider: BlockchainDataProvider + let eventBus: EventBus + let scheduler: MockScheduler + let keystore: KeyStore + let blockAuthor: BlockAuthor + let runtime: Runtime + let storeMiddleware: StoreMiddleware + + init() async throws { + config = ProtocolConfigRef.dev + timeProvider = MockTimeProvider(slotPeriodSeconds: UInt32(config.value.slotPeriodSeconds), time: 1000) + + dataProvider = try await BlockchainDataProvider(InMemoryDataProvider(genesis: StateRef(State.devGenesis(config: config)))) + + storeMiddleware = StoreMiddleware() + eventBus = EventBus(eventMiddleware: Middleware(storeMiddleware)) + + scheduler = MockScheduler(timeProvider: timeProvider) + + keystore = try await DevKeyStore(devKeysCount: config.value.totalNumberOfValidators) + + blockAuthor = await BlockAuthor( + config: config, + dataProvider: dataProvider, + eventBus: eventBus, + keystore: keystore, + scheduler: scheduler, + extrinsicPool: ExtrinsicPoolService(config: config, dataProvider: dataProvider, eventBus: eventBus) + ) + + runtime = Runtime(config: config) + + // setupTestLogger() + } + + @Test + func createNewBlockWithFallbackKey() async throws { + let genesisState = try await dataProvider.getState(hash: Data32()) + + // get the validator key + let idx = scheduler.timeProvider.getTimeslot() % UInt32(config.value.totalNumberOfValidators) + let devKey = try DevKeyStore.getDevKey(seed: idx) + + // Create a new block + let block = try await blockAuthor.createNewBlock(claim: .right(devKey.bandersnatch)) + + // Verify block + try _ = runtime.apply(block: block, state: genesisState, context: .init(timeslot: timeProvider.getTimeslot() + 1)) + } + + @Test + func createNewBlockWithTicket() async throws { + let genesisState = try await dataProvider.getState(hash: Data32()) + var state = genesisState.value + + state.safroleState.ticketsVerifier = try Bandersnatch.RingCommitment( + ring: state.currentValidators.map { try Bandersnatch.PublicKey(data: $0.bandersnatch) }, + ctx: Bandersnatch.RingContext(size: UInt(config.value.totalNumberOfValidators)) + ).data + + // get the validator key + let idx = scheduler.timeProvider.getTimeslot() % UInt32(config.value.epochLength) + let devKey = try DevKeyStore.getDevKey(seed: idx % UInt32(config.value.totalNumberOfValidators)) + let secretKey = await keystore.get(Bandersnatch.self, publicKey: devKey.bandersnatch)! 
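+        // Generate a single ticket with the validator's secret key and place it into the epoch's winning-ticket list so the block author can claim it.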
+
+        let ticket = try SafroleService.generateTickets(
+            count: 1,
+            validators: state.currentValidators.array,
+            entropy: state.entropyPool.t2,
+            ringContext: Bandersnatch.RingContext(size: UInt(config.value.totalNumberOfValidators)),
+            secret: secretKey,
+            idx: UInt32(idx)
+        )[0]
+
+        var validatorTickets = Array(repeating: Ticket.dummy(config: config), count: config.value.epochLength)
+
+        validatorTickets[Int(idx)] = Ticket(
+            id: ticket.output,
+            attempt: ticket.ticket.attempt
+        )
+
+        state.safroleState.ticketsOrKeys = try .left(ConfigFixedSizeArray(config: config, array: validatorTickets))
+
+        let newStateRef = StateRef(state)
+        // modify genesis state
+        try await dataProvider.add(state: newStateRef)
+
+        // Create a new block
+        let block = try await blockAuthor.createNewBlock(claim: .left((ticket, devKey.bandersnatch)))
+
+        // Verify block
+        try _ = runtime.apply(block: block, state: newStateRef, context: .init(timeslot: timeProvider.getTimeslot() + 1))
+    }
+
+    @Test
+    func scheduleNewBlocks() async throws {
+        let genesisState = try await dataProvider.getState(hash: Data32())
+
+        await blockAuthor.on(genesis: genesisState)
+
+        #expect(scheduler.storage.value.tasks.count > 0)
+
+        await scheduler.advance(by: 6)
+
+        let events = await storeMiddleware.wait()
+        #expect(events.count == 1)
+        #expect(events.first is RuntimeEvents.BlockAuthored)
+
+        let block = events.first as! RuntimeEvents.BlockAuthored
+
+        // Verify block
+        try _ = runtime.apply(block: block.block, state: genesisState, context: .init(timeslot: timeProvider.getTimeslot() + 1))
+    }
+
+    // TODO: test including extrinsic tickets from extrinsic pool
+    // TODO: test when ticketsAccumulator is full
+    // TODO: test when none of the items in pool are small enough
+}
diff --git a/Blockchain/Tests/BlockchainTests/DispatchQueueSchedulerTests.swift b/Blockchain/Tests/BlockchainTests/DispatchQueueSchedulerTests.swift
new file mode 100644
index 00000000..a99b258d
--- /dev/null
+++ b/Blockchain/Tests/BlockchainTests/DispatchQueueSchedulerTests.swift
@@ -0,0 +1,110 @@
+import Foundation
+import Testing
+import Utils
+
+@testable import Blockchain
+
+struct DispatchQueueSchedulerTests {
+    let scheduler = DispatchQueueScheduler(timeProvider: SystemTimeProvider(slotPeriodSeconds: 6))
+
+    @Test func scheduleTaskWithoutDelay() async throws {
+        try await confirmation { confirm in
+            let cancel = scheduler.schedule(delay: 0, repeats: false) {
+                confirm()
+            }
+
+            try await Task.sleep(for: .milliseconds(10))
+
+            _ = cancel
+        }
+    }
+
+    @Test func scheduleDelayedTask() async throws {
+        try await confirmation { confirm in
+            let delay = 0.05
+            let now = Date()
+            let end: ThreadSafeContainer<Date?> = .init(nil)
+            let cancel = scheduler.schedule(delay: delay, repeats: false) {
+                end.value = Date()
+                confirm()
+            }
+
+            try await Task.sleep(for: .seconds(0.06))
+
+            _ = cancel
+
+            let diff = end.value!.timeIntervalSince(now) - delay
+            let diffAbs = abs(diff)
+            #expect(diffAbs < 0.01)
+        }
+    }
+
+    @Test func scheduleRepeatingTask() async throws {
+        try await confirmation(expectedCount: 3) { confirm in
+            let delay = 0.05
+            let now = Date()
+            let executionTimes = ThreadSafeContainer<[Date]>([])
+            let expectedExecutions = 3
+
+            let cancel = scheduler.schedule(delay: delay, repeats: true) {
+                executionTimes.value.append(Date())
+                confirm()
+            }
+
+            try await Task.sleep(for: .seconds(0.16))
+
+            _ = cancel
+
+            #expect(executionTimes.value.count == expectedExecutions)
+
+            for (index, time) in executionTimes.value.enumerated() {
+                let expectedInterval = delay * Double(index + 1)
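+                // The k-th firing of a repeating task is expected roughly (k + 1) * delay after scheduling.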
+                let actualInterval = time.timeIntervalSince(now)
+                let difference = abs(actualInterval - expectedInterval)
+                #expect(difference < 0.01)
+            }
+        }
+    }
+
+    @Test func cancelTask() async throws {
+        try await confirmation(expectedCount: 0) { confirm in
+            let cancel = scheduler.schedule(delay: 0, repeats: false) {
+                confirm()
+            }
+
+            cancel.cancel()
+
+            try await Task.sleep(for: .seconds(0.1))
+        }
+    }
+
+    @Test func cancelRepeatingTask() async throws {
+        try await confirmation(expectedCount: 2) { confirm in
+            let delay = 0.05
+
+            let cancel = scheduler.schedule(delay: delay, repeats: true) {
+                confirm()
+            }
+
+            try await Task.sleep(for: .seconds(0.11))
+
+            cancel.cancel()
+
+            try await Task.sleep(for: .seconds(0.01))
+        }
+    }
+
+    @Test func onCancelHandler() async throws {
+        try await confirmation(expectedCount: 1) { confirm in
+            let cancel = scheduler.schedule(delay: 0.01, repeats: false, task: {
+                Issue.record("Task executed")
+            }, onCancel: {
+                confirm()
+            })
+
+            cancel.cancel()
+
+            try await Task.sleep(for: .seconds(0.02))
+        }
+    }
+}
diff --git a/Blockchain/Tests/BlockchainTests/ExtrinsicPoolServiceTests.swift b/Blockchain/Tests/BlockchainTests/ExtrinsicPoolServiceTests.swift
new file mode 100644
index 00000000..07a61394
--- /dev/null
+++ b/Blockchain/Tests/BlockchainTests/ExtrinsicPoolServiceTests.swift
@@ -0,0 +1,227 @@
+import Foundation
+import Testing
+import TracingUtils
+import Utils
+
+@testable import Blockchain
+
+struct ExtrinsicPoolServiceTests {
+    let config: ProtocolConfigRef
+    let timeProvider: MockTimeProvider
+    let dataProvider: BlockchainDataProvider
+    let eventBus: EventBus
+    let keystore: KeyStore
+    let storeMiddleware: StoreMiddleware
+    let extrinsicPoolService: ExtrinsicPoolService
+    let ringContext: Bandersnatch.RingContext
+
+    init() async throws {
+        config = ProtocolConfigRef.dev.mutate { config in
+            config.ticketEntriesPerValidator = 4
+        }
+        timeProvider = MockTimeProvider(slotPeriodSeconds: UInt32(config.value.slotPeriodSeconds), time: 1000)
+
+        dataProvider = try await BlockchainDataProvider(InMemoryDataProvider(genesis: StateRef(State.devGenesis(config: config))))
+
+        storeMiddleware = StoreMiddleware()
+        eventBus = EventBus(eventMiddleware: Middleware(storeMiddleware))
+
+        keystore = try await DevKeyStore(devKeysCount: config.value.totalNumberOfValidators)
+
+        extrinsicPoolService = await ExtrinsicPoolService(config: config, dataProvider: dataProvider, eventBus: eventBus)
+
+        ringContext = try Bandersnatch.RingContext(size: UInt(config.value.totalNumberOfValidators))
+
+        // setupTestLogger()
+    }
+
+    @Test
+    func testAddAndRetrieveTickets() async throws {
+        let state = try await dataProvider.getState(hash: dataProvider.bestHead)
+
+        var allTickets = SortedUniqueArray<TicketItemAndOutput>()
+
+        for (i, validatorKey) in state.value.nextValidators.enumerated() {
+            let secretKey = try await keystore.get(Bandersnatch.self, publicKey: Bandersnatch.PublicKey(data: validatorKey.bandersnatch))!
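+            // Each validator generates its tickets and publishes them on the event bus; the pool should accumulate all of them in sorted order.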
+
+            let tickets = try SafroleService.generateTickets(
+                count: TicketIndex(config.value.ticketEntriesPerValidator),
+                validators: state.value.nextValidators.array,
+                entropy: state.value.entropyPool.t3,
+                ringContext: ringContext,
+                secret: secretKey,
+                idx: UInt32(i)
+            )
+
+            allTickets.append(contentsOf: tickets)
+
+            let event = RuntimeEvents.SafroleTicketsGenerated(items: tickets, publicKey: secretKey.publicKey)
+            await eventBus.publish(event)
+
+            // Wait for the event to be processed
+            await storeMiddleware.wait()
+
+            let pendingTickets = await extrinsicPoolService
+                .getPendingTickets(epoch: state.value.timeslot.timeslotToEpochIndex(config: config))
+            #expect(pendingTickets == allTickets)
+        }
+    }
+
+    @Test
+    func testAddAndInvalidTickets() async throws {
+        let state = try await dataProvider.getState(hash: dataProvider.bestHead)
+
+        var allTickets = SortedUniqueArray<TicketItemAndOutput>()
+
+        let validatorKey = state.value.currentValidators[0]
+        let secretKey = try await keystore.get(Bandersnatch.self, publicKey: Bandersnatch.PublicKey(data: validatorKey.bandersnatch))!
+
+        var tickets = try SafroleService.generateTickets(
+            count: TicketIndex(config.value.ticketEntriesPerValidator) + 2,
+            validators: state.value.nextValidators.array,
+            entropy: state.value.entropyPool.t3,
+            ringContext: ringContext,
+            secret: secretKey,
+            idx: 0
+        )
+
+        tickets.append(tickets[0]) // duplicate
+
+        let invalidTicket = TicketItemAndOutput(
+            ticket: ExtrinsicTickets.TicketItem(
+                attempt: 0,
+                signature: Data784()
+            ),
+            output: Data32()
+        )
+        tickets.append(invalidTicket)
+
+        allTickets.append(contentsOf: tickets[.. Void
+    let task: @Sendable () async -> Void
     let cancel: (@Sendable () -> Void)?

     init(
         id: Int,
         scheduleTime: UInt32,
         repeats: TimeInterval?,
-        task: @escaping @Sendable () -> Void,
+        task: @escaping @Sendable () async -> Void,
         cancel: (@Sendable () -> Void)?
     ) {
         self.id = id
@@ -24,29 +25,29 @@
     }
 }

-private struct Storage: Sendable {
+struct Storage: Sendable {
     var tasks: [SchedulerTask] = []
     var prevTime: UInt32 = 0
 }

-public final class MockScheduler: Scheduler, Sendable {
+final class MockScheduler: Scheduler, Sendable {
     static let idGenerator = ManagedAtomic(0)

-    private let mockTimeProvider: MockTimeProvider
-    public var timeProvider: TimeProvider {
+    let mockTimeProvider: MockTimeProvider
+    var timeProvider: TimeProvider {
         mockTimeProvider
     }

-    private let storage: ThreadSafeContainer<Storage> = .init(.init())
+    let storage: ThreadSafeContainer<Storage> = .init(.init())

-    public init(timeProvider: MockTimeProvider) {
+    init(timeProvider: MockTimeProvider) {
        mockTimeProvider = timeProvider
     }

-    public func schedule(
+    func schedule(
         delay: TimeInterval,
         repeats: Bool,
-        task: @escaping @Sendable () -> Void,
+        task: @escaping @Sendable () async -> Void,
         onCancel: (@Sendable () -> Void)?
) -> Cancellable { let now = timeProvider.getTime() @@ -66,12 +67,12 @@ public final class MockScheduler: Scheduler, Sendable { } } - public func advance(by interval: UInt32) { + func advance(by interval: UInt32) async { mockTimeProvider.advance(by: interval) - trigger() + await trigger() } - public func trigger() { + func trigger() async { let now = timeProvider.getTime() let tasks = storage.mutate { storage in var tasksToDispatch: [SchedulerTask] = [] @@ -102,7 +103,7 @@ public final class MockScheduler: Scheduler, Sendable { } for task in tasks { - task.task() + await task.task() } } } diff --git a/Blockchain/Tests/BlockchainTests/SafroleServiceTests.swift b/Blockchain/Tests/BlockchainTests/SafroleServiceTests.swift new file mode 100644 index 00000000..45c858d2 --- /dev/null +++ b/Blockchain/Tests/BlockchainTests/SafroleServiceTests.swift @@ -0,0 +1,103 @@ +import Foundation +import Testing +import TracingUtils +import Utils + +@testable import Blockchain + +struct SafroleServiceTests { + let config: ProtocolConfigRef + let timeProvider: MockTimeProvider + let eventBus: EventBus + let keystore: KeyStore + let storeMiddleware: StoreMiddleware + let safroleService: SafroleService + let ringContext: Bandersnatch.RingContext + let genesisState: StateRef + + init() async throws { + config = ProtocolConfigRef.dev.mutate { config in + config.ticketEntriesPerValidator = 4 + } + timeProvider = MockTimeProvider(slotPeriodSeconds: UInt32(config.value.slotPeriodSeconds), time: 1000) + + genesisState = try StateRef(State.devGenesis(config: config)) + + storeMiddleware = StoreMiddleware() + eventBus = EventBus(eventMiddleware: Middleware(storeMiddleware)) + + keystore = try await DevKeyStore(devKeysCount: 2) + + safroleService = await SafroleService(config: config, eventBus: eventBus, keystore: keystore) + + ringContext = try Bandersnatch.RingContext(size: UInt(config.value.totalNumberOfValidators)) + + // setupTestLogger() + } + + @Test + func testGenerateTicketsOnGenesis() async throws { + await safroleService.on(genesis: genesisState) + + let events = await storeMiddleware.wait() + #expect(events.count == 2) + + for event in events { + #expect(event is RuntimeEvents.SafroleTicketsGenerated) + let ticketEvent = event as! RuntimeEvents.SafroleTicketsGenerated + #expect(ticketEvent.items.count == config.value.ticketEntriesPerValidator) + } + } + + @Test + func testGenerateTicketsOnEpochChange() async throws { + // Simulate an epoch change + let newBlock = BlockRef.dummy(config: config).mutate { + $0.header.unsigned.timeslot += TimeslotIndex(config.value.epochLength) + } + + let newState = try genesisState.mutate { + $0.timeslot = newBlock.header.timeslot + try $0.recentHistory.items.append(RecentHistory.HistoryItem( + headerHash: newBlock.hash, + mmr: MMR([]), + stateRoot: Data32(), + workReportHashes: ConfigLimitedSizeArray(config: config) + )) + } + + await eventBus.publish(RuntimeEvents.BlockImported(block: newBlock, state: newState, parentState: genesisState)) + + let events = await storeMiddleware.wait() + #expect(events.count == 3) // first event is BlockImported + + for event in events[1...] { + #expect(event is RuntimeEvents.SafroleTicketsGenerated) + let ticketEvent = event as! 
RuntimeEvents.SafroleTicketsGenerated
+            #expect(ticketEvent.items.count == config.value.ticketEntriesPerValidator)
+        }
+    }
+
+    @Test
+    func testNoTicketGenerationMidEpoch() async throws {
+        // Simulate a mid-epoch block
+        let newBlock = BlockRef.dummy(config: config).mutate {
+            $0.header.unsigned.timeslot += 1
+        }
+
+        let newState = try genesisState.mutate {
+            $0.timeslot = newBlock.header.timeslot
+            try $0.recentHistory.items.append(RecentHistory.HistoryItem(
+                headerHash: newBlock.hash,
+                mmr: MMR([]),
+                stateRoot: Data32(),
+                workReportHashes: ConfigLimitedSizeArray(config: config)
+            ))
+        }
+
+        await eventBus.publish(RuntimeEvents.BlockImported(block: newBlock, state: newState, parentState: genesisState))
+
+        let events = await storeMiddleware.wait()
+        #expect(events.count == 1)
+    }
+}
diff --git a/Blockchain/Tests/BlockchainTests/StoreMiddleware.swift b/Blockchain/Tests/BlockchainTests/StoreMiddleware.swift
new file mode 100644
index 00000000..9f40d10c
--- /dev/null
+++ b/Blockchain/Tests/BlockchainTests/StoreMiddleware.swift
@@ -0,0 +1,40 @@
+import TracingUtils
+import Utils
+
+private let logger = Logger(label: "StoreMiddleware")
+
+struct StoreMiddleware: MiddlewareProtocol {
+    let storage: ThreadSafeContainer<[(Sendable, Task<Void, Error>)]> = .init([])
+
+    init() {}
+
+    func handle<T: Sendable>(_ event: T, next: @escaping MiddlewareHandler<T>) async throws {
+        logger.debug(">>> dispatching event: \(event)")
+        let task = Task { try await next(event) }
+        storage.mutate { storage in
+            storage.append((event, task))
+        }
+        try await task.value
+        logger.debug("<<< event dispatched: \(event)")
+    }
+
+    @discardableResult
+    func wait() async -> [Sendable] {
+        try? await Task.sleep(for: .milliseconds(5))
+
+        let value = storage.value
+
+        for (_, task) in value {
+            try? await task.value
+        }
+
+        // new events may be published from within event handlers;
+        // wait for those handlers to be executed as well
+        let newValue = storage.value
+        if newValue.count != value.count {
+            return await wait()
+        }
+
+        return newValue.map(\.0)
+    }
+}
diff --git a/Blockchain/Tests/BlockchainTests/ValidatorServiceTests.swift b/Blockchain/Tests/BlockchainTests/ValidatorServiceTests.swift
new file mode 100644
index 00000000..3989a835
--- /dev/null
+++ b/Blockchain/Tests/BlockchainTests/ValidatorServiceTests.swift
@@ -0,0 +1,102 @@
+import Foundation
+import Testing
+import TracingUtils
+import Utils
+
+@testable import Blockchain
+
+struct ValidatorServiceTests {
+    let config: ProtocolConfigRef
+    let timeProvider: MockTimeProvider
+    let dataProvider: BlockchainDataProvider
+    let eventBus: EventBus
+    let scheduler: MockScheduler
+    let keystore: KeyStore
+    let validatorService: ValidatorService
+    let storeMiddleware: StoreMiddleware
+
+    init() async throws {
+        config = ProtocolConfigRef.dev
+        timeProvider = MockTimeProvider(slotPeriodSeconds: UInt32(config.value.slotPeriodSeconds), time: 1000)
+
+        dataProvider = try await BlockchainDataProvider(InMemoryDataProvider(genesis: StateRef(State.devGenesis(config: config))))
+
+        storeMiddleware = StoreMiddleware()
+        eventBus = EventBus(eventMiddleware: Middleware(storeMiddleware))
+
+        scheduler = MockScheduler(timeProvider: timeProvider)
+
+        keystore = try await DevKeyStore(devKeysCount: config.value.totalNumberOfValidators)
+
+        let blockchain = try await Blockchain(
+            config: config,
+            dataProvider: dataProvider,
+            timeProvider: timeProvider,
+            eventBus: eventBus
+        )
+
+        validatorService = await ValidatorService(
+            blockchain: blockchain,
+            keystore: keystore,
+            eventBus: eventBus,
+            scheduler: scheduler,
+            dataProvider: dataProvider
+        )
+
+        // setupTestLogger()
+    }
+
+    @Test
+    func testOnGenesis() async throws {
+        let genesisState = try await dataProvider.getState(hash: Data32())
+
+        await validatorService.on(genesis: genesisState)
+
+        let events = await storeMiddleware.wait()
+
+        // Check if SafroleTicketsGenerated events were published
+        let safroleEvents = events.filter { $0 is RuntimeEvents.SafroleTicketsGenerated }
+        #expect(safroleEvents.count == config.value.totalNumberOfValidators)
+
+        // Check if block author tasks were scheduled
+        #expect(scheduler.storage.value.tasks.count > 0)
+    }
+
+    @Test
+    func testBlockProductionCycle() async throws {
+        let genesisState = try await dataProvider.getState(hash: Data32())
+
+        await validatorService.on(genesis: genesisState)
+
+        // Advance time to trigger block production
+        await scheduler.advance(by: UInt32(config.value.slotPeriodSeconds))
+
+        let events = await storeMiddleware.wait()
+
+        // Check if a BlockAuthored event was published
+        let blockAuthoredEvent = events.first { $0 is RuntimeEvents.BlockAuthored }
+        #expect(blockAuthoredEvent != nil)
+
+        let blockEvent = blockAuthoredEvent as! RuntimeEvents.BlockAuthored
+        // Verify the produced block
+        let block = blockEvent.block
+        #expect(block.header.timeslot == timeProvider.getTimeslot())
+        #expect(block.header.parentHash == genesisState.value.lastBlockHash)
+
+        // Check if the block author is one of the validators
+        let authorIndex = Int(block.header.authorIndex)
+
+        let authorKey = genesisState.value.currentValidators[authorIndex]
+        let publicKey = try Bandersnatch.PublicKey(data: authorKey.bandersnatch)
+        #expect(await keystore.contains(publicKey: publicKey))
+
+        // Check the blockchain head is updated
+        #expect(dataProvider.bestHead == block.hash)
+
+        // Check block is stored in database
+        #expect(try await dataProvider.hasBlock(hash: block.hash))
+        #expect(try await dataProvider.getBlock(hash: block.hash) == block)
+        _ = try await dataProvider.getState(hash: block.hash) // check can get state
+        #expect(try await dataProvider.getHeads().contains(block.hash))
+    }
+}
diff --git a/Boka/Package.resolved b/Boka/Package.resolved
index 40af118c..78caa8ae 100644
--- a/Boka/Package.resolved
+++ b/Boka/Package.resolved
@@ -1,5 +1,5 @@
 {
-  "originHash" : "5f96de0d238fc265f9b7cb284904cef36f852479ed255f8905cbf984f61ae78b",
+  "originHash" : "374b7e6e0a436000edb551e3149c1a5a7546e03c66996cbebd99bc9ef520cbed",
   "pins" : [
     {
       "identity" : "async-http-client",
@@ -202,10 +202,10 @@
     {
       "identity" : "swift-numerics",
       "kind" : "remoteSourceControl",
-      "location" : "https://github.com/apple/swift-numerics.git",
+      "location" : "https://github.com/apple/swift-numerics",
      "state" : {
-        "revision" : "0a5bc04095a675662cf24757cc0640aa2204253b",
-        "version" : "1.0.2"
+        "branch" : "main",
+        "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83"
      }
     },
     {
diff --git a/Boka/Sources/BokaLogger.swift b/Boka/Sources/BokaLogger.swift
new file mode 100644
index 00000000..9e2daf17
--- /dev/null
+++ b/Boka/Sources/BokaLogger.swift
@@ -0,0 +1,91 @@
+import ConsoleKit
+import Logging
+import Utils
+
+public struct BokaLogger<T: LoggerFragment>: LogHandler, Sendable {
+    public let label: String
+    public var metadata: Logger.Metadata
+    public var metadataProvider: Logger.MetadataProvider?
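    // The `filters` dictionary (declared below) maps lowercased label prefixes to
    // levels, and levelFor(label:) caches the first prefix match per label. A
    // hedged usage sketch; the label and levels here are illustrative, not Boka
    // defaults:
    //
    //     let handler = BokaLogger(
    //         fragment: LogFragment(),
    //         label: "blockchain.blockauthor",
    //         metadataProvider: nil,
    //         defaultLevel: .info,
    //         filters: ["blockchain": .debug]
    //     )
    //     handler.levelFor(label: "Blockchain.BlockAuthor") // .debug via prefix match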
+ public var logLevel: Logger.Level + public let console: Terminal + public var fragment: T + + private let defaultLevel: Logger.Level + private let filters: ThreadSafeContainer<[String: Logger.Level]> + + public init( + fragment: T, + label: String, + level: Logger.Level = .debug, + metadata: Logger.Metadata = [:], + metadataProvider: Logger.MetadataProvider?, + defaultLevel: Logger.Level = .info, + filters: [String: Logger.Level] = [:] + ) { + self.fragment = fragment + self.label = label + self.metadata = metadata + logLevel = level + console = Terminal() + self.metadataProvider = metadataProvider + self.defaultLevel = defaultLevel + self.filters = .init(filters) + } + + public subscript(metadataKey key: String) -> Logger.Metadata.Value? { + get { metadata[key] } + set { metadata[key] = newValue } + } + + public func log( + level: Logger.Level, + message: Logger.Message, + metadata: Logger.Metadata?, + source: String, + file: String, + function: String, + line: UInt + ) { + let labelLevel = levelFor(label: label) + if labelLevel > level { + return + } + + var output = FragmentOutput() + var record = LogRecord( + level: level, + message: message, + metadata: metadata, + source: source, + file: file, + function: function, + line: line, + label: label, + loggerLevel: logLevel, + loggerMetadata: self.metadata, + metadataProvider: metadataProvider + ) + + fragment.write(&record, to: &output) + + console.output(output.text) + } + + public func levelFor(label: String) -> Logger.Level { + let label = label.lowercased() + let level: Logger.Level? = filters.read { filters in filters[label] } + if let level { + return level + } + + let defaultLevel = defaultLevel + return filters.mutate { filters in + for (key, value) in filters where label.hasPrefix(key) { + filters[label] = value + return value + } + filters[label] = defaultLevel + return defaultLevel + } + } +} diff --git a/Boka/Sources/LogFragment.swift b/Boka/Sources/LogFragment.swift new file mode 100644 index 00000000..8521e638 --- /dev/null +++ b/Boka/Sources/LogFragment.swift @@ -0,0 +1,63 @@ +import ConsoleKit +import Logging +import Utils + +private struct SourceLocationFragment: LoggerFragment { + public func write(_ record: inout LogRecord, to output: inout FragmentOutput) { + output += "(\(record.file):\(record.line))".consoleText(ConsoleStyle(color: .brightBlack)) + output.needsSeparator = true + } +} + +private struct InnerFragment: LoggerFragment { + func write(_ record: inout LogRecord, to output: inout FragmentOutput) { + output += "\(levelName(record.level))".consoleText(levelStyle(record.level)) + output += "\t| " + output += record.label.consoleText(ConsoleStyle(color: .brightBlack)) + output += "\t|" + output.needsSeparator = true + } + + private func levelStyle(_ level: Logger.Level) -> ConsoleStyle { + switch level { + case .trace: ConsoleStyle(color: .brightBlack) + case .debug: .plain + case .info, .notice: .info + case .warning: .warning + case .error: .error + case .critical: ConsoleStyle(color: .brightRed) + } + } + + private func levelName(_ level: Logger.Level) -> String { + switch level { + case .trace: "TRACE" + case .debug: "DEBUG" + case .info: "INFO" + case .notice: "NOTICE" + case .warning: "WARN" + case .error: "ERROR" + case .critical: "CRITIC" + } + } +} + +public final class LogFragment: LoggerFragment { + private let inner: LoggerFragment + + public init() { + inner = TimestampFragment() + .and(InnerFragment().separated(" ")) + .and(MessageFragment().separated(" ")) + .and(MetadataFragment().separated(" 
")) + .and(SourceLocationFragment().separated(" ").maxLevel(.debug)) + } + + public func hasContent(record: inout LogRecord) -> Bool { + inner.hasContent(record: &record) + } + + public func write(_ record: inout LogRecord, to: inout FragmentOutput) { + inner.write(&record, to: &to) + } +} diff --git a/Boka/Sources/Tracing.swift b/Boka/Sources/Tracing.swift index b864b134..359b5900 100644 --- a/Boka/Sources/Tracing.swift +++ b/Boka/Sources/Tracing.swift @@ -1,18 +1,77 @@ import ConsoleKit +import Foundation import OTel import OTLPGRPC import ServiceLifecycle import TracingUtils +public func parse(from: String) -> ( + filters: [String: Logger.Level], + defaultLevel: Logger.Level, + minimalLevel: Logger.Level +)? { + var defaultLevel: Logger.Level? + var lowestLevel = Logger.Level.critical + var filters: [String: Logger.Level] = [:] + let parts = from.split(separator: ",") + for part in parts { + let entry = part.split(separator: "=") + switch entry.count { + case 1: + guard let level = parseLevel(String(entry[0])) else { + return nil + } + defaultLevel = level + case 2: + guard let level = parseLevel(String(entry[1])) else { + return nil + } + filters[String(entry[0].lowercased())] = level + lowestLevel = min(lowestLevel, level) + default: + return nil + } + } + + return ( + filters: filters, + defaultLevel: defaultLevel ?? .info, + minimalLevel: min(lowestLevel, defaultLevel ?? .info) + ) +} + +private func parseLevel(_ level: String) -> Logger.Level? { + switch level.lowercased().trimmingCharacters(in: .whitespaces) { + case "trace": .trace + case "debug": .debug + case "info": .info + case "notice": .notice + case "warn", "warning": .warning + case "error": .error + case "critical": .critical + default: nil + } +} + public enum Tracing { public static func bootstrap(_ serviceName: String, loggerOnly: Bool = false) async throws -> [Service] { - // Bootstrap the logging backend with the OTel metadata provider which includes span IDs in logging messages. - LoggingSystem.bootstrap( - fragment: timestampDefaultLoggerFragment(), - console: Terminal(), - level: .trace, - metadataProvider: .otel - ) + let env = ProcessInfo.processInfo.environment + + let (filters, defaultLevel, minimalLevel) = parse(from: env["LOG_LEVEL"] ?? "") ?? { + print("Invalid LOG_LEVEL, using default") + return (filters: [:], defaultLevel: .info, minimalLevel: .info) + }() + + LoggingSystem.bootstrap({ label, metadataProvider in + BokaLogger( + fragment: LogFragment(), + label: label, + level: minimalLevel, + metadataProvider: metadataProvider, + defaultLevel: defaultLevel, + filters: filters + ) + }, metadataProvider: .otel) if loggerOnly { return [] diff --git a/Database/Package.swift b/Database/Package.swift index 2c76e4f7..2bdc378a 100644 --- a/Database/Package.swift +++ b/Database/Package.swift @@ -6,7 +6,7 @@ import PackageDescription let package = Package( name: "Database", platforms: [ - .macOS(.v14), + .macOS(.v15), ], products: [ // Products define the executables and libraries a package produces, making them visible to other packages. 
@@ -27,9 +27,11 @@ let package = Package( "rocksdb", ], linkerSettings: [ - .unsafeFlags(["-L../.lib"]), + .unsafeFlags(["-L../.lib", "-L/opt/homebrew/lib"]), .linkedLibrary("z"), .linkedLibrary("bz2"), + .linkedLibrary("zstd"), + .linkedLibrary("lz4"), ] ), .systemLibrary( diff --git a/Database/Sources/include/rocksdb.h b/Database/Sources/include/rocksdb.h new file mode 100644 index 00000000..ed403a6e --- /dev/null +++ b/Database/Sources/include/rocksdb.h @@ -0,0 +1,3126 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +/* Copyright (c) 2011 The LevelDB Authors. All rights reserved. + Use of this source code is governed by a BSD-style license that can be + found in the LICENSE file. See the AUTHORS file for names of contributors. + + C bindings for rocksdb. May be useful as a stable ABI that can be + used by programs that keep rocksdb in a shared library, or for + a JNI api. + + Does not support: + . getters for the option types + . custom comparators that implement key shortening + . capturing post-write-snapshot + . custom iter, db, env, cache implementations using just the C bindings + + Some conventions: + + (1) We expose just opaque struct pointers and functions to clients. + This allows us to change internal representations without having to + recompile clients. + + (2) For simplicity, there is no equivalent to the Slice type. Instead, + the caller has to pass the pointer and length as separate + arguments. + + (3) Errors are represented by a null-terminated c string. NULL + means no error. All operations that can raise an error are passed + a "char** errptr" as the last argument. One of the following must + be true on entry: + *errptr == NULL + *errptr points to a malloc()ed null-terminated error message + On success, a leveldb routine leaves *errptr unchanged. + On failure, leveldb frees the old value of *errptr and + set *errptr to a malloc()ed error message. + + (4) Bools have the type unsigned char (0 == false; rest == true) + + (5) All of the pointer arguments must be non-NULL. 
+*/
+
+#pragma once
+
+#ifdef _WIN32
+#ifdef ROCKSDB_DLL
+#ifdef ROCKSDB_LIBRARY_EXPORTS
+#define ROCKSDB_LIBRARY_API __declspec(dllexport)
+#else
+#define ROCKSDB_LIBRARY_API __declspec(dllimport)
+#endif
+#else
+#define ROCKSDB_LIBRARY_API
+#endif
+#else
+#define ROCKSDB_LIBRARY_API
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Exported types */
+
+typedef struct rocksdb_t rocksdb_t;
+typedef struct rocksdb_backup_engine_t rocksdb_backup_engine_t;
+typedef struct rocksdb_backup_engine_info_t rocksdb_backup_engine_info_t;
+typedef struct rocksdb_backup_engine_options_t rocksdb_backup_engine_options_t;
+typedef struct rocksdb_restore_options_t rocksdb_restore_options_t;
+typedef struct rocksdb_memory_allocator_t rocksdb_memory_allocator_t;
+typedef struct rocksdb_lru_cache_options_t rocksdb_lru_cache_options_t;
+typedef struct rocksdb_hyper_clock_cache_options_t
+    rocksdb_hyper_clock_cache_options_t;
+typedef struct rocksdb_cache_t rocksdb_cache_t;
+typedef struct rocksdb_write_buffer_manager_t rocksdb_write_buffer_manager_t;
+typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t;
+typedef struct rocksdb_compactionfiltercontext_t
+    rocksdb_compactionfiltercontext_t;
+typedef struct rocksdb_compactionfilterfactory_t
+    rocksdb_compactionfilterfactory_t;
+typedef struct rocksdb_comparator_t rocksdb_comparator_t;
+typedef struct rocksdb_dbpath_t rocksdb_dbpath_t;
+typedef struct rocksdb_env_t rocksdb_env_t;
+typedef struct rocksdb_fifo_compaction_options_t
+    rocksdb_fifo_compaction_options_t;
+typedef struct rocksdb_filelock_t rocksdb_filelock_t;
+typedef struct rocksdb_filterpolicy_t rocksdb_filterpolicy_t;
+typedef struct rocksdb_flushoptions_t rocksdb_flushoptions_t;
+typedef struct rocksdb_iterator_t rocksdb_iterator_t;
+typedef struct rocksdb_logger_t rocksdb_logger_t;
+typedef struct rocksdb_mergeoperator_t rocksdb_mergeoperator_t;
+typedef struct rocksdb_options_t rocksdb_options_t;
+typedef struct rocksdb_compactoptions_t rocksdb_compactoptions_t;
+typedef struct rocksdb_block_based_table_options_t
+    rocksdb_block_based_table_options_t;
+typedef struct rocksdb_cuckoo_table_options_t rocksdb_cuckoo_table_options_t;
+typedef struct rocksdb_randomfile_t rocksdb_randomfile_t;
+typedef struct rocksdb_readoptions_t rocksdb_readoptions_t;
+typedef struct rocksdb_seqfile_t rocksdb_seqfile_t;
+typedef struct rocksdb_slicetransform_t rocksdb_slicetransform_t;
+typedef struct rocksdb_snapshot_t rocksdb_snapshot_t;
+typedef struct rocksdb_writablefile_t rocksdb_writablefile_t;
+typedef struct rocksdb_writebatch_t rocksdb_writebatch_t;
+typedef struct rocksdb_writebatch_wi_t rocksdb_writebatch_wi_t;
+typedef struct rocksdb_writeoptions_t rocksdb_writeoptions_t;
+typedef struct rocksdb_universal_compaction_options_t
+    rocksdb_universal_compaction_options_t;
+typedef struct rocksdb_livefiles_t rocksdb_livefiles_t;
+typedef struct rocksdb_column_family_handle_t rocksdb_column_family_handle_t;
+typedef struct rocksdb_column_family_metadata_t
+    rocksdb_column_family_metadata_t;
+typedef struct rocksdb_level_metadata_t rocksdb_level_metadata_t;
+typedef struct rocksdb_sst_file_metadata_t rocksdb_sst_file_metadata_t;
+typedef struct rocksdb_envoptions_t rocksdb_envoptions_t;
+typedef struct rocksdb_ingestexternalfileoptions_t
+    rocksdb_ingestexternalfileoptions_t;
+typedef struct rocksdb_sstfilewriter_t rocksdb_sstfilewriter_t;
+typedef struct rocksdb_ratelimiter_t rocksdb_ratelimiter_t;
+typedef struct rocksdb_perfcontext_t rocksdb_perfcontext_t;
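/* The errptr convention documented above applies to every fallible call in this
   header. A hedged usage sketch, not part of the header's API (it additionally
   assumes <stdio.h> and <stdlib.h>):

     char* err = NULL;
     rocksdb_t* db = rocksdb_open(options, "/tmp/example-db", &err);
     if (err != NULL) {
       fprintf(stderr, "open failed: %s\n", err);
       free(err);  // error strings are malloc()ed by the library
       err = NULL;
     }
*/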
+typedef struct rocksdb_pinnableslice_t rocksdb_pinnableslice_t; +typedef struct rocksdb_transactiondb_options_t rocksdb_transactiondb_options_t; +typedef struct rocksdb_transactiondb_t rocksdb_transactiondb_t; +typedef struct rocksdb_transaction_options_t rocksdb_transaction_options_t; +typedef struct rocksdb_optimistictransactiondb_t + rocksdb_optimistictransactiondb_t; +typedef struct rocksdb_optimistictransaction_options_t + rocksdb_optimistictransaction_options_t; +typedef struct rocksdb_transaction_t rocksdb_transaction_t; +typedef struct rocksdb_checkpoint_t rocksdb_checkpoint_t; +typedef struct rocksdb_wal_iterator_t rocksdb_wal_iterator_t; +typedef struct rocksdb_wal_readoptions_t rocksdb_wal_readoptions_t; +typedef struct rocksdb_memory_consumers_t rocksdb_memory_consumers_t; +typedef struct rocksdb_memory_usage_t rocksdb_memory_usage_t; +typedef struct rocksdb_statistics_histogram_data_t + rocksdb_statistics_histogram_data_t; +typedef struct rocksdb_wait_for_compact_options_t + rocksdb_wait_for_compact_options_t; + +/* DB operations */ + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_with_ttl( + const rocksdb_options_t* options, const char* name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_for_read_only( + const rocksdb_options_t* options, const char* name, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open( + const rocksdb_options_t* options, const char* path, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* +rocksdb_backup_engine_open_opts(const rocksdb_backup_engine_options_t* options, + rocksdb_env_t* env, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup( + rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup_flush( + rocksdb_backup_engine_t* be, rocksdb_t* db, + unsigned char flush_before_backup, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_purge_old_backups( + rocksdb_backup_engine_t* be, uint32_t num_backups_to_keep, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_restore_options_t* +rocksdb_restore_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_destroy( + rocksdb_restore_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_set_keep_log_files( + rocksdb_restore_options_t* opt, int v); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_verify_backup( + rocksdb_backup_engine_t* be, uint32_t backup_id, char** errptr); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_restore_db_from_latest_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_restore_db_from_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, const uint32_t backup_id, + char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* +rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); + 
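/* A minimal backup round trip over the engine API above, as a hedged sketch
   (the path is illustrative; each call reports failure via the errptr
   convention):

     char* err = NULL;
     rocksdb_backup_engine_t* be =
         rocksdb_backup_engine_open(options, "/tmp/db-backups", &err);
     if (err == NULL) {
       rocksdb_backup_engine_create_new_backup(be, db, &err);
       rocksdb_backup_engine_close(be);
     }
*/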
+extern ROCKSDB_LIBRARY_API int rocksdb_backup_engine_info_count( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API int64_t rocksdb_backup_engine_info_timestamp( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_backup_id( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_backup_engine_info_size( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_number_files( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close( + rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, const char* val, size_t vallen, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_singledelete_cf_with_ts( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* ts, size_t tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_increase_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* ts_low, size_t ts_lowlen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_full_history_ts_low( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + size_t* ts_lowlen, char** errptr); + +/* BackupEngineOptions */ + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_options_t* +rocksdb_backup_engine_options_create(const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_backup_dir( + rocksdb_backup_engine_options_t* options, const char* backup_dir); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_env( + rocksdb_backup_engine_options_t* options, rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_table_files( + 
rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_share_table_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_set_sync( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_backup_engine_options_get_sync( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_destroy_old_data( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_destroy_old_data( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_log_files( + rocksdb_backup_engine_options_t* options, unsigned char val); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_backup_engine_options_get_backup_log_files( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_backup_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_backup_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_restore_rate_limit( + rocksdb_backup_engine_options_t* options, uint64_t limit); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_restore_rate_limit( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_background_operations( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_background_operations( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options, uint64_t size); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_options_get_callback_trigger_interval_size( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_max_valid_backups_to_open( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_options_set_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* options, int val); + +extern ROCKSDB_LIBRARY_API int +rocksdb_backup_engine_options_get_share_files_with_checksum_naming( + rocksdb_backup_engine_options_t* options); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_options_destroy( + rocksdb_backup_engine_options_t*); + +/* Checkpoint */ + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_checkpoint_object_create(rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_create( + rocksdb_checkpoint_t* checkpoint, const char* checkpoint_dir, + uint64_t log_size_for_flush, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_checkpoint_object_destroy( + rocksdb_checkpoint_t* checkpoint); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_and_trim_history( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* 
column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char* trim_ts, + size_t trim_tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families_with_ttl( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, const int* ttls, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_open_for_read_only_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, + unsigned char error_if_wal_file_exists, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_as_secondary_column_families( + const rocksdb_options_t* options, const char* name, + const char* secondary_path, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API char** rocksdb_list_column_families( + const rocksdb_options_t* options, const char* name, size_t* lencf, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_list_column_families_destroy( + char** list, size_t len); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family(rocksdb_t* db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t** +rocksdb_create_column_families(rocksdb_t* db, + const rocksdb_options_t* column_family_options, + int num_column_families, + const char* const* column_family_names, + size_t* lencfs, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_create_column_families_destroy( + rocksdb_column_family_handle_t** list); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family_with_ttl( + rocksdb_t* db, const rocksdb_options_t* column_family_options, + const char* column_family_name, int ttl, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_drop_column_family( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* + rocksdb_get_default_column_family_handle(rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_handle_destroy( + rocksdb_column_family_handle_t*); + +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_column_family_handle_get_id(rocksdb_column_family_handle_t* handle); + +extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_handle_get_name( + rocksdb_column_family_handle_t* handle, size_t* name_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_close(rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_put( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + 
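/* Sketch of a put/get round trip: rocksdb_put above writes a key, and
   rocksdb_get (declared below) returns a malloc()ed buffer the caller must
   free. Keys, values, and option handles are illustrative:

     char* err = NULL;
     rocksdb_put(db, woptions, "key", 3, "value", 5, &err);
     if (err == NULL) {
       size_t vlen = 0;
       char* val = rocksdb_get(db, roptions, "key", 3, &vlen, &err);
       if (val != NULL) {
         // val[0..vlen) holds the stored bytes
         free(val);
       }
     }
*/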
+extern ROCKSDB_LIBRARY_API void rocksdb_put_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_range_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* start_key, + size_t start_key_len, const char* end_key, size_t end_key_len, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_write( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +/* Returns NULL if not found. A malloc()ed array otherwise. + Stores the length of the array in *vallen. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** ts, size_t* tslen, char** errptr); + +/** + * Returns a malloc() buffer with the DB identity, assigning the length to + * *id_len. Returns NULL if an error occurred. + */ +extern ROCKSDB_LIBRARY_API char* rocksdb_get_db_identity(rocksdb_t* db, + size_t* id_len); + +// if values_list[i] == NULL and errs[i] == NULL, +// then we got status.IsNotFound(), which we will not return. +// all errors except status status.ok() and status.IsNotFound() are returned. +// +// errs, values_list and values_list_sizes must be num_keys in length, +// allocated by the caller. +// errs is a list of strings as opposed to the conventional one error, +// where errs[i] is the status for retrieval of keys_list[i]. +// each non-NULL errs entry is a malloc()ed, null terminated string. +// each non-NULL values_list entry is a malloc()ed array, with +// the length for each stored in values_list_sizes[i]. 
+extern ROCKSDB_LIBRARY_API void rocksdb_multi_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** timestamp_list, + size_t* timestamp_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf_with_ts( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** timestamps_list, + size_t* timestamps_list_sizes, char** errs); + +// The MultiGet API that improves performance by batching operations +// in the read path for greater efficiency. Currently, only the block based +// table format with full filters are supported. Other table formats such +// as plain table, block based table with block based filters and +// partitioned indexes will still work, but will not get any performance +// benefits. +// +// Note that all the keys passed to this API are restricted to a single +// column family. +// +// Parameters - +// db - the RocksDB instance. +// options - ReadOptions +// column_family - ColumnFamilyHandle* that the keys belong to. All the keys +// passed to the API are restricted to a single column family +// num_keys - Number of keys to lookup +// keys_list - Pointer to C style array of keys with num_keys elements +// keys_list_sizes - Pointer to C style array of the size of corresponding key +// in key_list with num_keys elements. +// values - Pointer to C style array of PinnableSlices with num_keys elements +// statuses - Pointer to C style array of Status with num_keys elements +// sorted_input - If true, it means the input keys are already sorted by key +// order, so the MultiGet() API doesn't have to sort them +// again. If false, the keys will be copied and sorted +// internally by the API - the input array will not be +// modified +extern ROCKSDB_LIBRARY_API void rocksdb_batched_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + rocksdb_pinnableslice_t** values, char** errs, const bool sorted_input); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +// The value is only allocated (using malloc) and returned if it is found and +// value_found isn't NULL. In that case the user is responsible for freeing it. 
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_key_may_exist_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t key_len, char** value, size_t* val_len, const char* timestamp, + size_t timestamp_len, unsigned char* value_found); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( + rocksdb_t* db, const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_wal_iterator_t* rocksdb_get_updates_since( + rocksdb_t* db, uint64_t seq_number, + const rocksdb_wal_readoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_create_iterators( + rocksdb_t* db, rocksdb_readoptions_t* opts, + rocksdb_column_family_handle_t** column_families, + rocksdb_iterator_t** iterators, size_t size, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* rocksdb_create_snapshot( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_release_snapshot( + rocksdb_t* db, const rocksdb_snapshot_t* snapshot); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_snapshot_get_sequence_number(const rocksdb_snapshot_t* snapshot); + +/* Returns NULL if property name is unknown. + Else returns a pointer to a malloc()-ed null-terminated value. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value(rocksdb_t* db, + const char* propname); +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int(rocksdb_t* db, + const char* propname, + uint64_t* out_val); + +/* returns 0 on success, -1 otherwise */ +extern ROCKSDB_LIBRARY_API int rocksdb_property_int_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname, uint64_t* out_val); + +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes( + rocksdb_t* db, int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes, char** errptr); + +enum { + rocksdb_size_approximation_flags_none = 0, + rocksdb_size_approximation_flags_include_memtable = 1 << 0, + rocksdb_size_approximation_flags_include_files = 1 << 1, +}; + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf_with_flags( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint8_t include_flags, uint64_t* sizes, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db, + const char* start_key, + size_t start_key_len, + const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, 
size_t start_key_len, const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range( + rocksdb_t* db, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_suggest_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_opt( + rocksdb_t* db, rocksdb_compactoptions_t* opt, const char* start_key, + size_t start_key_len, const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf_opt( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + rocksdb_compactoptions_t* opt, const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file(rocksdb_t* db, + const char* name); + +extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush( + rocksdb_t* db, const rocksdb_flushoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cf( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_cfs( + rocksdb_t* db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_family, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush_wal(rocksdb_t* db, + unsigned char sync, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_file_deletions(rocksdb_t* db, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_file_deletions(rocksdb_t* db, + char** errptr); + +/* Management operations */ + +extern ROCKSDB_LIBRARY_API void rocksdb_destroy_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_repair_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +/* Iterator */ + +extern ROCKSDB_LIBRARY_API void rocksdb_iter_destroy(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_iter_valid( + const rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_first(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_last(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek(rocksdb_iterator_t*, + const char* k, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_for_prev(rocksdb_iterator_t*, + const char* k, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_next(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_prev(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_key( + const rocksdb_iterator_t*, size_t* klen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_value( + const rocksdb_iterator_t*, size_t* vlen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_timestamp( + const rocksdb_iterator_t*, size_t* tslen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_get_error( + const rocksdb_iterator_t*, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_refresh( + const rocksdb_iterator_t* iter, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_next( + rocksdb_wal_iterator_t* 
iter); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_wal_iter_valid( + const rocksdb_wal_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_status( + const rocksdb_wal_iterator_t* iter, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_wal_iter_get_batch( + const rocksdb_wal_iterator_t* iter, uint64_t* seq); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_get_latest_sequence_number(rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_wal_iter_destroy( + const rocksdb_wal_iterator_t* iter); + +/* Write batch */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create( + void); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create_from( + const char* rep, size_t size); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* +rocksdb_writebatch_create_with_params(size_t reserved_bytes, size_t max_bytes, + size_t protection_bytes_per_key, + size_t default_cf_ts_sz); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_destroy( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_clear(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_count(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf_with_ts( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* ts, size_t tslen, const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete(rocksdb_writebatch_t*, + const char* key, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_singledelete( + rocksdb_writebatch_t* b, const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf( + rocksdb_writebatch_t*, 
+
+/* Write batch with index */
+
+extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t*
+rocksdb_writebatch_wi_create(size_t reserved_bytes,
+    unsigned char overwrite_keys);
+extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t*
+rocksdb_writebatch_wi_create_from(const char* rep, size_t size);
+extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t*
+rocksdb_writebatch_wi_create_with_params(
+    rocksdb_comparator_t* backup_index_comparator, size_t reserved_bytes,
+    unsigned char overwrite_key, size_t max_bytes,
+    size_t protection_bytes_per_key);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_destroy(
+    rocksdb_writebatch_wi_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_clear(
+    rocksdb_writebatch_wi_t*);
+extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_wi_count(
+    rocksdb_writebatch_wi_t* b);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put(
+    rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val,
+    size_t vlen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_cf(
+    rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family,
+    const char* key, size_t klen, const char* val, size_t vlen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv(
+    rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list,
+    const size_t* keys_list_sizes, int num_values,
+    const char* const* values_list, const size_t* values_list_sizes);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_putv_cf(
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
+    int num_values, const char* const* values_list,
+    const size_t* values_list_sizes);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge(
+    rocksdb_writebatch_wi_t*, const char* key, size_t klen, const char* val,
+    size_t vlen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_merge_cf(
+    rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family,
+    const char* key, size_t klen, const char* val, size_t vlen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev(
+    rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list,
+    const size_t* keys_list_sizes, int num_values,
+    const char* const* values_list, const size_t* values_list_sizes);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_mergev_cf(
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes,
+    int num_values, const char* const* values_list,
+    const size_t* values_list_sizes);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete(
+    rocksdb_writebatch_wi_t*, const char* key, size_t klen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete(
+    rocksdb_writebatch_wi_t*, const char* key, size_t klen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_cf(
+    rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family,
+    const char* key, size_t klen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_singledelete_cf(
+    rocksdb_writebatch_wi_t*, rocksdb_column_family_handle_t* column_family,
+    const char* key, size_t klen);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev(
+    rocksdb_writebatch_wi_t* b, int num_keys, const char* const* keys_list,
+    const size_t* keys_list_sizes);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_deletev_cf(
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    int num_keys, const char* const* keys_list, const size_t* keys_list_sizes);
+// DO NOT USE - rocksdb_writebatch_wi_delete_range is not yet supported
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range(
+    rocksdb_writebatch_wi_t* b, const char* start_key, size_t start_key_len,
+    const char* end_key, size_t end_key_len);
+// DO NOT USE - rocksdb_writebatch_wi_delete_range_cf is not yet supported
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_range_cf(
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    const char* start_key, size_t start_key_len, const char* end_key,
+    size_t end_key_len);
+// DO NOT USE - rocksdb_writebatch_wi_delete_rangev is not yet supported
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev(
+    rocksdb_writebatch_wi_t* b, int num_keys,
+    const char* const* start_keys_list, const size_t* start_keys_list_sizes,
+    const char* const* end_keys_list, const size_t* end_keys_list_sizes);
+// DO NOT USE - rocksdb_writebatch_wi_delete_rangev_cf is not yet supported
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_delete_rangev_cf(
+    rocksdb_writebatch_wi_t* b, rocksdb_column_family_handle_t* column_family,
+    int num_keys, const char* const* start_keys_list,
+    const size_t* start_keys_list_sizes, const char* const* end_keys_list,
+    const size_t* end_keys_list_sizes);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_put_log_data(
+    rocksdb_writebatch_wi_t*, const char* blob, size_t len);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_iterate(
+    rocksdb_writebatch_wi_t* b, void* state,
+    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
+    void (*deleted)(void*, const char* k, size_t klen));
+extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_wi_data(
+    rocksdb_writebatch_wi_t* b, size_t* size);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_set_save_point(
+    rocksdb_writebatch_wi_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_rollback_to_save_point(
+    rocksdb_writebatch_wi_t*, char** errptr);
+extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch(
+    rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options,
+    const char* key, size_t keylen, size_t* vallen, char** errptr);
+extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_cf(
+    rocksdb_writebatch_wi_t* wbwi, const rocksdb_options_t* options,
+    rocksdb_column_family_handle_t* column_family, const char* key,
+    size_t keylen, size_t* vallen, char** errptr);
+extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db(
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db,
+    const rocksdb_readoptions_t* options, const char* key, size_t keylen,
+    size_t* vallen, char** errptr);
+extern ROCKSDB_LIBRARY_API char* rocksdb_writebatch_wi_get_from_batch_and_db_cf(
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_t* db,
+    const rocksdb_readoptions_t* options,
+    rocksdb_column_family_handle_t* column_family, const char* key,
+    size_t keylen, size_t* vallen, char** errptr);
+extern ROCKSDB_LIBRARY_API void rocksdb_write_writebatch_wi(
+    rocksdb_t* db, const rocksdb_writeoptions_t* options,
+    rocksdb_writebatch_wi_t* wbwi, char** errptr);
+extern ROCKSDB_LIBRARY_API rocksdb_iterator_t*
+rocksdb_writebatch_wi_create_iterator_with_base(
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator);
+extern ROCKSDB_LIBRARY_API rocksdb_iterator_t*
+rocksdb_writebatch_wi_create_iterator_with_base_cf(
+    rocksdb_writebatch_wi_t* wbwi, rocksdb_iterator_t* base_iterator,
+    rocksdb_column_family_handle_t* cf);
+extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_wi_update_timestamps(
+    rocksdb_writebatch_wi_t* wbwi, const char* ts, size_t tslen, void* state,
+    size_t (*get_ts_size)(void*, uint32_t), char** errptr);
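Not part of the vendored header: a sketch of read-your-own-writes through a write batch with index, assuming an open `db`, read options `ro`, and write options `wo`.

    /* Sketch: a pending put is visible via get_from_batch_and_db before commit. */
    char* err = NULL;
    rocksdb_writebatch_wi_t* wbwi = rocksdb_writebatch_wi_create(
        0 /* reserved_bytes */, 1 /* overwrite_keys */);
    rocksdb_writebatch_wi_put(wbwi, "k", 1, "v", 1);
    size_t vlen = 0;
    char* v = rocksdb_writebatch_wi_get_from_batch_and_db(
        wbwi, db, ro, "k", 1, &vlen, &err); /* sees the staged "v" */
    free(v); /* returned value is malloc()-ed */
    rocksdb_write_writebatch_wi(db, wo, wbwi, &err); /* commit */
    rocksdb_writebatch_wi_destroy(wbwi);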
+
+/* Options utils */
+
+// Load the latest rocksdb options from the specified db_path.
+//
+// On success, num_column_families will be updated with a non-zero
+// number indicating the number of column families.
+// The returned db_options, column_family_names, and column_family_options
+// should be released via rocksdb_load_latest_options_destroy().
+//
+// On error, a non-null errptr that includes the error message will be
+// returned. db_options, column_family_names, and column_family_options
+// will be set to NULL.
+extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options(
+    const char* db_path, rocksdb_env_t* env, bool ignore_unknown_options,
+    rocksdb_cache_t* cache, rocksdb_options_t** db_options,
+    size_t* num_column_families, char*** column_family_names,
+    rocksdb_options_t*** column_family_options, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_load_latest_options_destroy(
+    rocksdb_options_t* db_options, char** list_column_family_names,
+    rocksdb_options_t** list_column_family_options, size_t len);
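Not part of the vendored header: a sketch of the option-loading pattern documented above; the db path, `env`, and `cache` arguments are placeholders.

    /* Sketch: recover the persisted options of an existing database. */
    char* err = NULL;
    rocksdb_options_t* db_opts = NULL;
    size_t cf_count = 0;
    char** cf_names = NULL;
    rocksdb_options_t** cf_opts = NULL;
    rocksdb_load_latest_options("/path/to/db", env, false /* ignore_unknown */,
                                cache, &db_opts, &cf_count, &cf_names,
                                &cf_opts, &err);
    if (err == NULL) {
      /* inspect or reuse db_opts and cf_opts[0..cf_count) ... */
      rocksdb_load_latest_options_destroy(db_opts, cf_names, cf_opts, cf_count);
    }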
+
+/* Block based table options */
+
+extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t*
+rocksdb_block_based_options_create(void);
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_destroy(
+    rocksdb_block_based_table_options_t* options);
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_checksum(
+    rocksdb_block_based_table_options_t*, char);
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_size(
+    rocksdb_block_based_table_options_t* options, size_t block_size);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_block_size_deviation(
+    rocksdb_block_based_table_options_t* options, int block_size_deviation);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_block_restart_interval(
+    rocksdb_block_based_table_options_t* options, int block_restart_interval);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_index_block_restart_interval(
+    rocksdb_block_based_table_options_t* options,
+    int index_block_restart_interval);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_metadata_block_size(
+    rocksdb_block_based_table_options_t* options, uint64_t metadata_block_size);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_partition_filters(
+    rocksdb_block_based_table_options_t* options,
+    unsigned char partition_filters);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_optimize_filters_for_memory(
+    rocksdb_block_based_table_options_t* options,
+    unsigned char optimize_filters_for_memory);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_use_delta_encoding(
+    rocksdb_block_based_table_options_t* options,
+    unsigned char use_delta_encoding);
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_filter_policy(
+    rocksdb_block_based_table_options_t* options,
+    rocksdb_filterpolicy_t* filter_policy);
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache(
+    rocksdb_block_based_table_options_t* options, unsigned char no_block_cache);
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache(
+    rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_whole_key_filtering(
+    rocksdb_block_based_table_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version(
+    rocksdb_block_based_table_options_t*, int);
+enum {
+  rocksdb_block_based_table_index_type_binary_search = 0,
+  rocksdb_block_based_table_index_type_hash_search = 1,
+  rocksdb_block_based_table_index_type_two_level_index_search = 2,
+};
+extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_index_type(
+    rocksdb_block_based_table_options_t*, int);  // uses one of the above enums
+enum {
+  rocksdb_block_based_table_data_block_index_type_binary_search = 0,
+  rocksdb_block_based_table_data_block_index_type_binary_search_and_hash = 1,
+};
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_data_block_index_type(
+    rocksdb_block_based_table_options_t*, int);  // uses one of the above enums
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_data_block_hash_ratio(
+    rocksdb_block_based_table_options_t* options, double v);
+// rocksdb_block_based_options_set_hash_index_allow_collision()
+// is removed since BlockBasedTableOptions.hash_index_allow_collision()
+// is removed
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_cache_index_and_filter_blocks(
+    rocksdb_block_based_table_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority(
+    rocksdb_block_based_table_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
+    rocksdb_block_based_table_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_pin_top_level_index_and_filter(
+    rocksdb_block_based_table_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_block_based_table_factory(
+    rocksdb_options_t* opt, rocksdb_block_based_table_options_t* table_options);
+enum {
+  rocksdb_block_based_k_fallback_pinning_tier = 0,
+  rocksdb_block_based_k_none_pinning_tier = 1,
+  rocksdb_block_based_k_flush_and_similar_pinning_tier = 2,
+  rocksdb_block_based_k_all_pinning_tier = 3,
+};
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_top_level_index_pinning_tier(
+    rocksdb_block_based_table_options_t*, int);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_partition_pinning_tier(
+    rocksdb_block_based_table_options_t*, int);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_block_based_options_set_unpartitioned_pinning_tier(
+    rocksdb_block_based_table_options_t*, int);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_manager(
+    rocksdb_options_t* opt, rocksdb_write_buffer_manager_t* wbm);
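Not part of the vendored header: a sketch of wiring up block-based table options with a bloom filter and block cache; `opts` is an assumed `rocksdb_options_t*` (the filter-policy and cache constructors appear later in this header).

    /* Sketch: table options are copied into the factory, so they can be
       destroyed after rocksdb_options_set_block_based_table_factory. */
    rocksdb_block_based_table_options_t* bbto =
        rocksdb_block_based_options_create();
    rocksdb_block_based_options_set_block_size(bbto, 16 * 1024);
    rocksdb_block_based_options_set_filter_policy(
        bbto, rocksdb_filterpolicy_create_bloom(10.0));
    rocksdb_block_based_options_set_cache_index_and_filter_blocks(bbto, 1);
    rocksdb_block_based_options_set_block_cache(
        bbto, rocksdb_cache_create_lru(512 * 1024 * 1024));
    rocksdb_options_set_block_based_table_factory(opts, bbto);
    rocksdb_block_based_options_destroy(bbto);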
+
+/* Cuckoo table options */
+
+extern ROCKSDB_LIBRARY_API rocksdb_cuckoo_table_options_t*
+rocksdb_cuckoo_options_create(void);
+extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_destroy(
+    rocksdb_cuckoo_table_options_t* options);
+extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_hash_ratio(
+    rocksdb_cuckoo_table_options_t* options, double v);
+extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_max_search_depth(
+    rocksdb_cuckoo_table_options_t* options, uint32_t v);
+extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_cuckoo_block_size(
+    rocksdb_cuckoo_table_options_t* options, uint32_t v);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_cuckoo_options_set_identity_as_first_hash(
+    rocksdb_cuckoo_table_options_t* options, unsigned char v);
+extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_use_module_hash(
+    rocksdb_cuckoo_table_options_t* options, unsigned char v);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cuckoo_table_factory(
+    rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options);
+
+/* Options */
+extern ROCKSDB_LIBRARY_API void rocksdb_set_options(rocksdb_t* db, int count,
+    const char* const keys[], const char* const values[], char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_set_options_cf(
+    rocksdb_t* db, rocksdb_column_family_handle_t* handle, int count,
+    const char* const keys[], const char* const values[], char** errptr);
+
+extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create(void);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_destroy(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create_copy(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_increase_parallelism(
+    rocksdb_options_t* opt, int total_threads);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_for_point_lookup(
+    rocksdb_options_t* opt, uint64_t block_cache_size_mb);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_level_style_compaction(
+    rocksdb_options_t* opt, uint64_t memtable_memory_budget);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_optimize_universal_style_compaction(
+    rocksdb_options_t* opt, uint64_t memtable_memory_budget);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_ingest_behind(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_allow_ingest_behind(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter(
+    rocksdb_options_t*, rocksdb_compactionfilter_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter_factory(
+    rocksdb_options_t*, rocksdb_compactionfilterfactory_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_compaction_readahead_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_compaction_readahead_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_comparator(
+    rocksdb_options_t*, rocksdb_comparator_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_merge_operator(
+    rocksdb_options_t*, rocksdb_mergeoperator_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_uint64add_merge_operator(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_per_level(
+    rocksdb_options_t* opt, const int* level_values, size_t num_levels);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_create_if_missing(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_create_if_missing(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_create_missing_column_families(rocksdb_options_t*,
+    unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_create_missing_column_families(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_error_if_exists(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_paranoid_checks(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_paths(
+    rocksdb_options_t*, const rocksdb_dbpath_t** path_values, size_t num_paths);
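Not part of the vendored header: a sketch of the options lifecycle, plus changing a mutable option on a live db with rocksdb_set_options (declared at the top of the Options block above); `db` is assumed open.

    /* Sketch: static options at construction time... */
    rocksdb_options_t* opts = rocksdb_options_create();
    rocksdb_options_set_create_if_missing(opts, 1);
    rocksdb_options_increase_parallelism(opts, 4);
    /* ...and a dynamic change after open, via string key/value pairs. */
    char* err = NULL;
    const char* keys[] = {"max_write_buffer_number", "disable_auto_compactions"};
    const char* vals[] = {"4", "true"};
    rocksdb_set_options(db, 2, keys, vals, &err);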
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cf_paths(
+    rocksdb_options_t*, const rocksdb_dbpath_t** path_values, size_t num_paths);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*,
+    rocksdb_env_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*,
+    rocksdb_logger_t*);
+extern ROCKSDB_LIBRARY_API rocksdb_logger_t* rocksdb_options_get_info_log(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log_level(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_info_log_level(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API rocksdb_logger_t*
+rocksdb_logger_create_stderr_logger(int log_level, const char* prefix);
+extern ROCKSDB_LIBRARY_API rocksdb_logger_t*
+rocksdb_logger_create_callback_logger(int log_level,
+    void (*)(void* priv, unsigned lev, char* msg, size_t len), void* priv);
+extern ROCKSDB_LIBRARY_API void rocksdb_logger_destroy(
+    rocksdb_logger_t* logger);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_write_buffer_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_write_buffer_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_db_write_buffer_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_open_files(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_open_files(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_file_opening_threads(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_file_opening_threads(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_total_wal_size(
+    rocksdb_options_t* opt, uint64_t n);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_max_total_wal_size(rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_options(
+    rocksdb_options_t*, int, int, int, int);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_compression_options_zstd_max_train_bytes(rocksdb_options_t*,
+    int);
+extern ROCKSDB_LIBRARY_API int
+rocksdb_options_get_compression_options_zstd_max_train_bytes(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_compression_options_use_zstd_dict_trainer(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_compression_options_use_zstd_dict_trainer(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_compression_options_parallel_threads(rocksdb_options_t*,
+    int);
+extern ROCKSDB_LIBRARY_API int
+rocksdb_options_get_compression_options_parallel_threads(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_compression_options_max_dict_buffer_bytes(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_compression_options_max_dict_buffer_bytes(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_bottommost_compression_options(rocksdb_options_t*, int, int,
+    int, int, unsigned char);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
+    rocksdb_options_t*, int, unsigned char);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_bottommost_compression_options_use_zstd_dict_trainer(
+    rocksdb_options_t*, unsigned char, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_bottommost_compression_options_use_zstd_dict_trainer(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_bottommost_compression_options_max_dict_buffer_bytes(
+    rocksdb_options_t*, uint64_t, unsigned char);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prefix_extractor(
+    rocksdb_options_t*, rocksdb_slicetransform_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_num_levels(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_num_levels(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_level0_file_num_compaction_trigger(rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int
+rocksdb_options_get_level0_file_num_compaction_trigger(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int
+rocksdb_options_get_level0_slowdown_writes_trigger(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_level0_stop_writes_trigger(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_level0_stop_writes_trigger(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_base(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_target_file_size_base(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_multiplier(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_target_file_size_multiplier(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_bytes_for_level_base(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_max_bytes_for_level_base(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_level_compaction_dynamic_level_bytes(rocksdb_options_t*,
+    unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_level_compaction_dynamic_level_bytes(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t*, double);
+extern ROCKSDB_LIBRARY_API double
+rocksdb_options_get_max_bytes_for_level_multiplier(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_max_bytes_for_level_multiplier_additional(
+    rocksdb_options_t*, int* level_values, size_t num_levels);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ttl(rocksdb_options_t*,
+    uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t rocksdb_options_get_ttl(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_periodic_compaction_seconds(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_periodic_compaction_seconds(rocksdb_options_t*);
+
+enum {
+  rocksdb_statistics_level_disable_all = 0,
+  rocksdb_statistics_level_except_tickers =
+      rocksdb_statistics_level_disable_all,
+  rocksdb_statistics_level_except_histogram_or_timers = 1,
+  rocksdb_statistics_level_except_timers = 2,
+  rocksdb_statistics_level_except_detailed_timers = 3,
+  rocksdb_statistics_level_except_time_for_mutex = 4,
+  rocksdb_statistics_level_all = 5,
+};
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_statistics_level(
+    rocksdb_options_t*, int level);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_statistics_level(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_skip_stats_update_on_db_open(rocksdb_options_t* opt,
+    unsigned char val);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_skip_stats_update_on_db_open(rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
+    rocksdb_options_t* opt, unsigned char val);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_skip_checking_sst_file_sizes_on_db_open(
+    rocksdb_options_t* opt);
+
+/* Blob Options Settings */
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_files(
+    rocksdb_options_t* opt, unsigned char val);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_files(
+    rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_blob_size(
+    rocksdb_options_t* opt, uint64_t val);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_min_blob_size(rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_size(
+    rocksdb_options_t* opt, uint64_t val);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_blob_file_size(rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_compression_type(
+    rocksdb_options_t* opt, int val);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_compression_type(
+    rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_blob_gc(
+    rocksdb_options_t* opt, unsigned char val);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_enable_blob_gc(
+    rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_age_cutoff(
+    rocksdb_options_t* opt, double val);
+extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_age_cutoff(
+    rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_gc_force_threshold(
+    rocksdb_options_t* opt, double val);
+extern ROCKSDB_LIBRARY_API double rocksdb_options_get_blob_gc_force_threshold(
+    rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_blob_compaction_readahead_size(rocksdb_options_t* opt,
+    uint64_t val);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_blob_compaction_readahead_size(rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_file_starting_level(
+    rocksdb_options_t* opt, int val);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_blob_file_starting_level(
+    rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_blob_cache(
+    rocksdb_options_t* opt, rocksdb_cache_t* blob_cache);
+
+enum {
+  rocksdb_prepopulate_blob_disable = 0,
+  rocksdb_prepopulate_blob_flush_only = 1
+};
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prepopulate_blob_cache(
+    rocksdb_options_t* opt, int val);
+
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_prepopulate_blob_cache(
+    rocksdb_options_t* opt);
+
+/* returns a pointer to a malloc()-ed, null terminated string */
+extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API uint64_t rocksdb_options_statistics_get_ticker_count(
+    rocksdb_options_t* opt, uint32_t ticker_type);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_statistics_get_histogram_data(
+    rocksdb_options_t* opt, uint32_t histogram_type,
+    rocksdb_statistics_histogram_data_t* const data);
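Not part of the vendored header: a sketch of enabling statistics and dumping them, using the statistics-level enum above; `opts` is an assumed `rocksdb_options_t*` used to open the db.

    /* Sketch: turn statistics on before open, then read them back as text. */
    rocksdb_options_enable_statistics(opts);
    rocksdb_options_set_statistics_level(
        opts, rocksdb_statistics_level_except_timers);
    /* ... open the db with opts and do some work ... */
    char* stats = rocksdb_options_statistics_get_string(opts);
    printf("%s\n", stats);
    free(stats); /* malloc()-ed, null-terminated (see comment above) */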
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_write_buffer_number(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_write_buffer_number(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int
+rocksdb_options_get_min_write_buffer_number_to_merge(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_max_write_buffer_number_to_maintain(rocksdb_options_t*,
+    int);
+extern ROCKSDB_LIBRARY_API int
+rocksdb_options_get_max_write_buffer_number_to_maintain(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_max_write_buffer_size_to_maintain(rocksdb_options_t*,
+    int64_t);
+extern ROCKSDB_LIBRARY_API int64_t
+rocksdb_options_get_max_write_buffer_size_to_maintain(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_enable_pipelined_write(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_enable_pipelined_write(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_unordered_write(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_unordered_write(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_subcompactions(
+    rocksdb_options_t*, uint32_t);
+extern ROCKSDB_LIBRARY_API uint32_t
+rocksdb_options_get_max_subcompactions(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_jobs(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_jobs(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_compactions(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_max_background_flushes(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_max_log_file_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_log_file_time_to_roll(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_keep_log_file_num(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_recycle_log_file_num(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt,
+    size_t v);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_soft_pending_compaction_bytes_limit(rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt,
+    size_t v);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_hard_pending_compaction_bytes_limit(rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_max_manifest_file_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_table_cache_numshardbits(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_arena_block_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_use_fsync(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir(
+    rocksdb_options_t*, const char*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*,
+    const char*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_WAL_ttl_seconds(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_WAL_size_limit_MB(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_manifest_preallocation_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_reads(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_allow_mmap_writes(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_direct_reads(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_direct_reads(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_use_direct_io_for_flush_and_compaction(rocksdb_options_t*,
+    unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_use_direct_io_for_flush_and_compaction(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_is_fd_close_on_exec(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec(
+    rocksdb_options_t*, unsigned int);
+extern ROCKSDB_LIBRARY_API unsigned int
+rocksdb_options_get_stats_dump_period_sec(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_persist_period_sec(
+    rocksdb_options_t*, unsigned int);
+extern ROCKSDB_LIBRARY_API unsigned int
+rocksdb_options_get_stats_persist_period_sec(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_advise_random_on_open(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_use_adaptive_mutex(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_bytes_per_sync(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_bytes_per_sync(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_wal_bytes_per_sync(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_writable_file_max_buffer_size(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t*,
+    unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_allow_concurrent_memtable_write(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_enable_write_thread_adaptive_yield(rocksdb_options_t*,
+    unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_enable_write_thread_adaptive_yield(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*,
+    uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_max_sequential_skip_in_iterations(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_disable_auto_compactions(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_optimize_filters_for_hits(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_optimize_filters_for_hits(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*,
+    uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_delete_obsolete_files_period_micros(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_memtable_prefix_bloom_size_ratio(rocksdb_options_t*,
+    double);
+extern ROCKSDB_LIBRARY_API double
+rocksdb_options_get_memtable_prefix_bloom_size_ratio(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_compaction_bytes(
+    rocksdb_options_t*, uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_options_get_max_compaction_bytes(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep(
+    rocksdb_options_t*, size_t, int32_t, int32_t);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_plain_table_factory(
+    rocksdb_options_t*, uint32_t, int, double, size_t, size_t, char,
+    unsigned char, unsigned char);
+
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_write_dbid_to_manifest(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_dbid_to_manifest(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_write_identity_file(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_identity_file(
+    rocksdb_options_t*, unsigned char);
+
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_track_and_verify_wals_in_manifest(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_track_and_verify_wals_in_manifest(rocksdb_options_t*,
+    unsigned char);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress(
+    rocksdb_options_t* opt, int level);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_huge_page_size(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_memtable_huge_page_size(rocksdb_options_t*);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_max_successive_merges(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality(
+    rocksdb_options_t*, uint32_t);
+extern ROCKSDB_LIBRARY_API uint32_t
+rocksdb_options_get_bloom_locality(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support(
+    rocksdb_options_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_inplace_update_support(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks(
+    rocksdb_options_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_options_get_inplace_update_num_locks(rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_report_bg_io_stats(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_avoid_unnecessary_blocking_io(rocksdb_options_t*,
+    unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_options_get_avoid_unnecessary_blocking_io(rocksdb_options_t*);
+
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_experimental_mempurge_threshold(rocksdb_options_t*, double);
+extern ROCKSDB_LIBRARY_API double
+rocksdb_options_get_experimental_mempurge_threshold(rocksdb_options_t*);
+
+enum {
+  rocksdb_tolerate_corrupted_tail_records_recovery = 0,
+  rocksdb_absolute_consistency_recovery = 1,
+  rocksdb_point_in_time_recovery = 2,
+  rocksdb_skip_any_corrupted_records_recovery = 3
+};
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_recovery_mode(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_recovery_mode(
+    rocksdb_options_t*);
+
+enum {
+  rocksdb_no_compression = 0,
+  rocksdb_snappy_compression = 1,
+  rocksdb_zlib_compression = 2,
+  rocksdb_bz2_compression = 3,
+  rocksdb_lz4_compression = 4,
+  rocksdb_lz4hc_compression = 5,
+  rocksdb_xpress_compression = 6,
+  rocksdb_zstd_compression = 7
+};
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compression(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bottommost_compression(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_bottommost_compression(
+    rocksdb_options_t*);
+
+enum {
+  rocksdb_level_compaction = 0,
+  rocksdb_universal_compaction = 1,
+  rocksdb_fifo_compaction = 2
+};
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_style(
+    rocksdb_options_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_set_universal_compaction_options(
+    rocksdb_options_t*, rocksdb_universal_compaction_options_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_fifo_compaction_options(
+    rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo);
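Not part of the vendored header: a sketch using the compression and compaction-style enums above on an assumed `rocksdb_options_t* opts` (availability of lz4/zstd depends on how the library was built).

    /* Sketch: cheap compression in hot levels, zstd at the bottommost level. */
    rocksdb_options_set_compression(opts, rocksdb_lz4_compression);
    rocksdb_options_set_bottommost_compression(opts, rocksdb_zstd_compression);
    rocksdb_options_set_compaction_style(opts, rocksdb_level_compaction);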
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_ratelimiter(
+    rocksdb_options_t* opt, rocksdb_ratelimiter_t* limiter);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_atomic_flush(
+    rocksdb_options_t* opt, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_atomic_flush(
+    rocksdb_options_t* opt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_row_cache(
+    rocksdb_options_t* opt, rocksdb_cache_t* cache);
+
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_add_compact_on_deletion_collector_factory(
+    rocksdb_options_t*, size_t window_size, size_t num_dels_trigger);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
+    rocksdb_options_t*, size_t window_size, size_t num_dels_trigger,
+    double deletion_ratio);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manual_wal_flush(
+    rocksdb_options_t* opt, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_options_get_manual_wal_flush(
+    rocksdb_options_t* opt);
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_compression(
+    rocksdb_options_t* opt, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_wal_compression(
+    rocksdb_options_t* opt);
+
+enum {
+  rocksdb_k_by_compensated_size_compaction_pri = 0,
+  rocksdb_k_oldest_largest_seq_first_compaction_pri = 1,
+  rocksdb_k_oldest_smallest_seq_first_compaction_pri = 2,
+  rocksdb_k_min_overlapping_ratio_compaction_pri = 3,
+  rocksdb_k_round_robin_compaction_pri = 4
+};
+extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_pri(
+    rocksdb_options_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_options_get_compaction_pri(
+    rocksdb_options_t*);
+
+/* RateLimiter */
+extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t* rocksdb_ratelimiter_create(
+    int64_t rate_bytes_per_sec, int64_t refill_period_us, int32_t fairness);
+extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t*
+rocksdb_ratelimiter_create_auto_tuned(int64_t rate_bytes_per_sec,
+    int64_t refill_period_us, int32_t fairness);
+extern ROCKSDB_LIBRARY_API rocksdb_ratelimiter_t*
+rocksdb_ratelimiter_create_with_mode(int64_t rate_bytes_per_sec,
+    int64_t refill_period_us, int32_t fairness, int mode, bool auto_tuned);
+extern ROCKSDB_LIBRARY_API void rocksdb_ratelimiter_destroy(
+    rocksdb_ratelimiter_t*);
+
+/* PerfContext */
+enum {
+  rocksdb_uninitialized = 0,
+  rocksdb_disable = 1,
+  rocksdb_enable_count = 2,
+  rocksdb_enable_time_except_for_mutex = 3,
+  rocksdb_enable_time = 4,
+  rocksdb_out_of_bounds = 5
+};
+
+enum {
+  rocksdb_user_key_comparison_count = 0,
+  rocksdb_block_cache_hit_count,
+  rocksdb_block_read_count,
+  rocksdb_block_read_byte,
+  rocksdb_block_read_time,
+  rocksdb_block_checksum_time,
+  rocksdb_block_decompress_time,
+  rocksdb_get_read_bytes,
+  rocksdb_multiget_read_bytes,
+  rocksdb_iter_read_bytes,
+  rocksdb_internal_key_skipped_count,
+  rocksdb_internal_delete_skipped_count,
+  rocksdb_internal_recent_skipped_count,
+  rocksdb_internal_merge_count,
+  rocksdb_get_snapshot_time,
+  rocksdb_get_from_memtable_time,
+  rocksdb_get_from_memtable_count,
+  rocksdb_get_post_process_time,
+  rocksdb_get_from_output_files_time,
+  rocksdb_seek_on_memtable_time,
+  rocksdb_seek_on_memtable_count,
+  rocksdb_next_on_memtable_count,
+  rocksdb_prev_on_memtable_count,
+  rocksdb_seek_child_seek_time,
+  rocksdb_seek_child_seek_count,
+  rocksdb_seek_min_heap_time,
+  rocksdb_seek_max_heap_time,
+  rocksdb_seek_internal_seek_time,
+  rocksdb_find_next_user_entry_time,
+  rocksdb_write_wal_time,
+  rocksdb_write_memtable_time,
+  rocksdb_write_delay_time,
+  rocksdb_write_pre_and_post_process_time,
+  rocksdb_db_mutex_lock_nanos,
+  rocksdb_db_condition_wait_nanos,
+  rocksdb_merge_operator_time_nanos,
+  rocksdb_read_index_block_nanos,
+  rocksdb_read_filter_block_nanos,
+  rocksdb_new_table_block_iter_nanos,
+  rocksdb_new_table_iterator_nanos,
+  rocksdb_block_seek_nanos,
+  rocksdb_find_table_nanos,
+  rocksdb_bloom_memtable_hit_count,
+  rocksdb_bloom_memtable_miss_count,
+  rocksdb_bloom_sst_hit_count,
+  rocksdb_bloom_sst_miss_count,
+  rocksdb_key_lock_wait_time,
+  rocksdb_key_lock_wait_count,
+  rocksdb_env_new_sequential_file_nanos,
+  rocksdb_env_new_random_access_file_nanos,
+  rocksdb_env_new_writable_file_nanos,
+  rocksdb_env_reuse_writable_file_nanos,
+  rocksdb_env_new_random_rw_file_nanos,
+  rocksdb_env_new_directory_nanos,
+  rocksdb_env_file_exists_nanos,
+  rocksdb_env_get_children_nanos,
+  rocksdb_env_get_children_file_attributes_nanos,
+  rocksdb_env_delete_file_nanos,
+  rocksdb_env_create_dir_nanos,
+  rocksdb_env_create_dir_if_missing_nanos,
+  rocksdb_env_delete_dir_nanos,
+  rocksdb_env_get_file_size_nanos,
+  rocksdb_env_get_file_modification_time_nanos,
+  rocksdb_env_rename_file_nanos,
+  rocksdb_env_link_file_nanos,
+  rocksdb_env_lock_file_nanos,
+  rocksdb_env_unlock_file_nanos,
+  rocksdb_env_new_logger_nanos,
+  rocksdb_number_async_seek,
+  rocksdb_blob_cache_hit_count,
+  rocksdb_blob_read_count,
+  rocksdb_blob_read_byte,
+  rocksdb_blob_read_time,
+  rocksdb_blob_checksum_time,
+  rocksdb_blob_decompress_time,
+  rocksdb_internal_range_del_reseek_count,
+  rocksdb_block_read_cpu_time,
+  rocksdb_total_metric_count = 79
+};
+
+extern ROCKSDB_LIBRARY_API void rocksdb_set_perf_level(int);
+extern ROCKSDB_LIBRARY_API rocksdb_perfcontext_t* rocksdb_perfcontext_create(
+    void);
+extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_reset(
+    rocksdb_perfcontext_t* context);
+extern ROCKSDB_LIBRARY_API char* rocksdb_perfcontext_report(
+    rocksdb_perfcontext_t* context, unsigned char exclude_zero_counters);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_perfcontext_metric(rocksdb_perfcontext_t* context, int metric);
+extern ROCKSDB_LIBRARY_API void rocksdb_perfcontext_destroy(
+    rocksdb_perfcontext_t* context);
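Not part of the vendored header: a sketch of per-operation profiling with the PerfContext API and the metric enum above.

    /* Sketch: enable perf counters, run an operation, then read them back. */
    rocksdb_set_perf_level(rocksdb_enable_time_except_for_mutex);
    rocksdb_perfcontext_t* perf = rocksdb_perfcontext_create();
    rocksdb_perfcontext_reset(perf);
    /* ... run a Get/Seek against the db here ... */
    uint64_t cmps =
        rocksdb_perfcontext_metric(perf, rocksdb_user_key_comparison_count);
    char* report = rocksdb_perfcontext_report(perf, 1 /* exclude zeros */);
    free(report); /* malloc()-ed */
    rocksdb_perfcontext_destroy(perf);
    rocksdb_set_perf_level(rocksdb_disable);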
+
+/* Compaction Filter */
+
+extern ROCKSDB_LIBRARY_API rocksdb_compactionfilter_t*
+rocksdb_compactionfilter_create(
+    void* state, void (*destructor)(void*),
+    unsigned char (*filter)(void*, int level, const char* key,
+        size_t key_length, const char* existing_value, size_t value_length,
+        char** new_value, size_t* new_value_length,
+        unsigned char* value_changed),
+    const char* (*name)(void*));
+extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_set_ignore_snapshots(
+    rocksdb_compactionfilter_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_destroy(
+    rocksdb_compactionfilter_t*);
+
+/* Compaction Filter Context */
+
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_compactionfiltercontext_is_full_compaction(
+    rocksdb_compactionfiltercontext_t* context);
+
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_compactionfiltercontext_is_manual_compaction(
+    rocksdb_compactionfiltercontext_t* context);
+
+/* Compaction Filter Factory */
+
+extern ROCKSDB_LIBRARY_API rocksdb_compactionfilterfactory_t*
+rocksdb_compactionfilterfactory_create(
+    void* state, void (*destructor)(void*),
+    rocksdb_compactionfilter_t* (*create_compaction_filter)(
+        void*, rocksdb_compactionfiltercontext_t* context),
+    const char* (*name)(void*));
+extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilterfactory_destroy(
+    rocksdb_compactionfilterfactory_t*);
+
+/* Comparator */
+
+extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* rocksdb_comparator_create(
+    void* state, void (*destructor)(void*),
+    int (*compare)(void*, const char* a, size_t alen, const char* b,
+        size_t blen),
+    const char* (*name)(void*));
+extern ROCKSDB_LIBRARY_API void rocksdb_comparator_destroy(
+    rocksdb_comparator_t*);
+
+extern ROCKSDB_LIBRARY_API rocksdb_comparator_t*
+rocksdb_comparator_with_ts_create(
+    void* state, void (*destructor)(void*),
+    int (*compare)(void*, const char* a, size_t alen, const char* b,
+        size_t blen),
+    int (*compare_ts)(void*, const char* a_ts, size_t a_tslen, const char* b_ts,
+        size_t b_tslen),
+    int (*compare_without_ts)(void*, const char* a, size_t alen,
+        unsigned char a_has_ts, const char* b, size_t blen,
+        unsigned char b_has_ts),
+    const char* (*name)(void*), size_t timestamp_size);
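Not part of the vendored header: a sketch of a state-free custom comparator built from the C callbacks above; the callback names are illustrative.

    /* Sketch: bytewise comparison with length tiebreak, as C callbacks. */
    static int cmp_bytes(void* state, const char* a, size_t alen,
                         const char* b, size_t blen) {
      size_t n = alen < blen ? alen : blen;
      int r = memcmp(a, b, n);
      if (r != 0) return r;
      return alen < blen ? -1 : (alen > blen ? 1 : 0);
    }
    static const char* cmp_name(void* state) { return "example.bytewise"; }
    static void cmp_destroy(void* state) {}

    rocksdb_comparator_t* cmp =
        rocksdb_comparator_create(NULL, cmp_destroy, cmp_bytes, cmp_name);
    rocksdb_options_set_comparator(opts, cmp); /* opts assumed from above */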
+
+/* Filter policy */
+
+extern ROCKSDB_LIBRARY_API void rocksdb_filterpolicy_destroy(
+    rocksdb_filterpolicy_t*);
+
+extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t*
+rocksdb_filterpolicy_create_bloom(double bits_per_key);
+extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t*
+rocksdb_filterpolicy_create_bloom_full(double bits_per_key);
+extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t*
+rocksdb_filterpolicy_create_ribbon(double bloom_equivalent_bits_per_key);
+extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t*
+rocksdb_filterpolicy_create_ribbon_hybrid(double bloom_equivalent_bits_per_key,
+    int bloom_before_level);
+
+/* Merge Operator */
+
+extern ROCKSDB_LIBRARY_API rocksdb_mergeoperator_t*
+rocksdb_mergeoperator_create(
+    void* state, void (*destructor)(void*),
+    char* (*full_merge)(void*, const char* key, size_t key_length,
+        const char* existing_value, size_t existing_value_length,
+        const char* const* operands_list, const size_t* operands_list_length,
+        int num_operands, unsigned char* success, size_t* new_value_length),
+    char* (*partial_merge)(void*, const char* key, size_t key_length,
+        const char* const* operands_list, const size_t* operands_list_length,
+        int num_operands, unsigned char* success, size_t* new_value_length),
+    void (*delete_value)(void*, const char* value, size_t value_length),
+    const char* (*name)(void*));
+extern ROCKSDB_LIBRARY_API void rocksdb_mergeoperator_destroy(
+    rocksdb_mergeoperator_t*);
+
+/* Read options */
+
+extern ROCKSDB_LIBRARY_API rocksdb_readoptions_t* rocksdb_readoptions_create(
+    void);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_destroy(
+    rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_verify_checksums(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_readoptions_get_verify_checksums(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_fill_cache(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_fill_cache(
+    rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_snapshot(
+    rocksdb_readoptions_t*, const rocksdb_snapshot_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_upper_bound(
+    rocksdb_readoptions_t*, const char* key, size_t keylen);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_lower_bound(
+    rocksdb_readoptions_t*, const char* key, size_t keylen);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_read_tier(
+    rocksdb_readoptions_t*, int);
+extern ROCKSDB_LIBRARY_API int rocksdb_readoptions_get_read_tier(
+    rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_tailing(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_tailing(
+    rocksdb_readoptions_t*);
+// The functionality that this option controlled has been removed.
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_managed(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_readahead_size(
+    rocksdb_readoptions_t*, size_t);
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_readoptions_get_readahead_size(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_prefix_same_as_start(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_readoptions_get_prefix_same_as_start(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_pin_data(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_pin_data(
+    rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_total_order_seek(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_readoptions_get_total_order_seek(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_readoptions_set_max_skippable_internal_keys(rocksdb_readoptions_t*,
+    uint64_t);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_readoptions_get_max_skippable_internal_keys(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void
+rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_readoptions_get_background_purge_on_iterator_cleanup(
+    rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_ignore_range_deletions(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char
+rocksdb_readoptions_get_ignore_range_deletions(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_deadline(
+    rocksdb_readoptions_t*, uint64_t microseconds);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_readoptions_get_deadline(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_io_timeout(
+    rocksdb_readoptions_t*, uint64_t microseconds);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_readoptions_get_io_timeout(rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_async_io(
+    rocksdb_readoptions_t*, unsigned char);
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_readoptions_get_async_io(
+    rocksdb_readoptions_t*);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_timestamp(
+    rocksdb_readoptions_t*, const char* ts, size_t tslen);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iter_start_ts(
+    rocksdb_readoptions_t*, const char* ts, size_t tslen);
+extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_auto_readahead_size(
+    rocksdb_readoptions_t*, unsigned char);
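Not part of the vendored header: a sketch of a bounded, checksum-verified scan using the read options above (seek/next as in the earlier iterator sketch).

    /* Sketch: restrict an iterator to the half-open key range ["a", "n"). */
    rocksdb_readoptions_t* ro = rocksdb_readoptions_create();
    rocksdb_readoptions_set_verify_checksums(ro, 1);
    rocksdb_readoptions_set_iterate_lower_bound(ro, "a", 1); /* inclusive */
    rocksdb_readoptions_set_iterate_upper_bound(ro, "n", 1); /* exclusive */
    rocksdb_iterator_t* it = rocksdb_create_iterator(db, ro);
    /* ... rocksdb_iter_seek_to_first / rocksdb_iter_next ... */
    rocksdb_iter_destroy(it);
    rocksdb_readoptions_destroy(ro);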
rocksdb_writeoptions_get_sync( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_disable_WAL( + rocksdb_writeoptions_t* opt, int disable); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_disable_WAL( + rocksdb_writeoptions_t* opt); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_ignore_missing_column_families(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_ignore_missing_column_families( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_no_slowdown( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_no_slowdown( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_low_pri( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_writeoptions_get_low_pri( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_writeoptions_set_memtable_insert_hint_per_batch(rocksdb_writeoptions_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_writeoptions_get_memtable_insert_hint_per_batch( + rocksdb_writeoptions_t*); + +/* Compact range options */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactoptions_t* +rocksdb_compactoptions_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_destroy( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_exclusive_manual_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_exclusive_manual_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_compactoptions_set_bottommost_level_compaction( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_bottommost_level_compaction( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_change_level( + rocksdb_compactoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactoptions_get_change_level(rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_target_level( + rocksdb_compactoptions_t*, int); +extern ROCKSDB_LIBRARY_API int rocksdb_compactoptions_get_target_level( + rocksdb_compactoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_compactoptions_set_full_history_ts_low( + rocksdb_compactoptions_t*, char* ts, size_t tslen); + +/* Flush options */ + +extern ROCKSDB_LIBRARY_API rocksdb_flushoptions_t* rocksdb_flushoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_destroy( + rocksdb_flushoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_set_wait( + rocksdb_flushoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API unsigned char rocksdb_flushoptions_get_wait( + rocksdb_flushoptions_t*); + +/* Memory allocator */ + +extern ROCKSDB_LIBRARY_API rocksdb_memory_allocator_t* +rocksdb_jemalloc_nodump_allocator_create(char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_allocator_destroy( + rocksdb_memory_allocator_t*); + +/* Cache */ + +extern ROCKSDB_LIBRARY_API rocksdb_lru_cache_options_t* +rocksdb_lru_cache_options_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_destroy( + rocksdb_lru_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_capacity( + rocksdb_lru_cache_options_t*, size_t); 
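The LRU cache options above follow the create/configure/destroy pattern used throughout this header. A minimal usage sketch in C (illustrative only, not part of the vendored header; rocksdb_lru_cache_options_set_num_shard_bits and rocksdb_cache_create_lru_opts are declared just below, the 512 MiB capacity is an arbitrary example value, and the sketch assumes the options object may be destroyed once the cache has been created):

    rocksdb_lru_cache_options_t* copts = rocksdb_lru_cache_options_create();
    rocksdb_lru_cache_options_set_capacity(copts, (size_t)512 * 1024 * 1024); /* 512 MiB */
    rocksdb_lru_cache_options_set_num_shard_bits(copts, 6); /* 2^6 = 64 shards */
    rocksdb_cache_t* cache = rocksdb_cache_create_lru_opts(copts);
    rocksdb_lru_cache_options_destroy(copts); /* assumed: options are not retained by the cache */
    /* ... hand the cache to block-based table options, then ... */
    rocksdb_cache_destroy(cache);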
+extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_num_shard_bits( + rocksdb_lru_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_lru_cache_options_set_memory_allocator( + rocksdb_lru_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru( + size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_lru_with_strict_capacity_limit(size_t capacity); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru_opts( + const rocksdb_lru_cache_options_t*); + +extern ROCKSDB_LIBRARY_API void rocksdb_cache_destroy(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_disown_data( + rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_set_capacity( + rocksdb_cache_t* cache, size_t capacity); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_capacity(const rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_usage(const rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_pinned_usage(const rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_table_address_count(const rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_cache_get_occupancy_count(const rocksdb_cache_t* cache); + +/* WriteBufferManager */ + +extern ROCKSDB_LIBRARY_API rocksdb_write_buffer_manager_t* +rocksdb_write_buffer_manager_create(size_t buffer_size, bool allow_stall); +extern ROCKSDB_LIBRARY_API rocksdb_write_buffer_manager_t* +rocksdb_write_buffer_manager_create_with_cache(size_t buffer_size, + const rocksdb_cache_t* cache, + bool allow_stall); + +extern ROCKSDB_LIBRARY_API void rocksdb_write_buffer_manager_destroy( + rocksdb_write_buffer_manager_t* wbm); +extern ROCKSDB_LIBRARY_API bool rocksdb_write_buffer_manager_enabled( + rocksdb_write_buffer_manager_t* wbm); +extern ROCKSDB_LIBRARY_API bool rocksdb_write_buffer_manager_cost_to_cache( + rocksdb_write_buffer_manager_t* wbm); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_write_buffer_manager_memory_usage(rocksdb_write_buffer_manager_t* wbm); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_write_buffer_manager_mutable_memtable_memory_usage( + rocksdb_write_buffer_manager_t* wbm); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_write_buffer_manager_dummy_entries_in_cache_usage( + rocksdb_write_buffer_manager_t* wbm); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_write_buffer_manager_buffer_size(rocksdb_write_buffer_manager_t* wbm); +extern ROCKSDB_LIBRARY_API void rocksdb_write_buffer_manager_set_buffer_size( + rocksdb_write_buffer_manager_t* wbm, size_t new_size); +extern ROCKSDB_LIBRARY_API void rocksdb_write_buffer_manager_set_allow_stall( + rocksdb_write_buffer_manager_t* wbm, bool new_allow_stall); + +/* HyperClockCache */ + +extern ROCKSDB_LIBRARY_API rocksdb_hyper_clock_cache_options_t* +rocksdb_hyper_clock_cache_options_create(size_t capacity, + size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_destroy( + rocksdb_hyper_clock_cache_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_hyper_clock_cache_options_set_capacity( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_estimated_entry_charge( + rocksdb_hyper_clock_cache_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_hyper_clock_cache_options_set_num_shard_bits( + rocksdb_hyper_clock_cache_options_t*, int); +extern ROCKSDB_LIBRARY_API 
void +rocksdb_hyper_clock_cache_options_set_memory_allocator( + rocksdb_hyper_clock_cache_options_t*, rocksdb_memory_allocator_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_hyper_clock( + size_t capacity, size_t estimated_entry_charge); +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* +rocksdb_cache_create_hyper_clock_opts( + const rocksdb_hyper_clock_cache_options_t*); + +/* DBPath */ + +extern ROCKSDB_LIBRARY_API rocksdb_dbpath_t* rocksdb_dbpath_create( + const char* path, uint64_t target_size); +extern ROCKSDB_LIBRARY_API void rocksdb_dbpath_destroy(rocksdb_dbpath_t*); + +/* Env */ + +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(void); +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env(void); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_high_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_set_low_priority_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int rocksdb_env_get_low_priority_background_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_bottom_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API int +rocksdb_env_get_bottom_priority_background_threads(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_io_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_io_priority(rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_lower_thread_pool_cpu_priority( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_lower_high_priority_thread_pool_cpu_priority(rocksdb_env_t* env); + +extern ROCKSDB_LIBRARY_API void rocksdb_env_destroy(rocksdb_env_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_envoptions_t* rocksdb_envoptions_create( + void); +extern ROCKSDB_LIBRARY_API void rocksdb_envoptions_destroy( + rocksdb_envoptions_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_create_dir_if_missing( + rocksdb_env_t* env, const char* path, char** errptr); + +/* SstFile */ + +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create(const rocksdb_envoptions_t* env, + const rocksdb_options_t* io_options); +extern ROCKSDB_LIBRARY_API rocksdb_sstfilewriter_t* +rocksdb_sstfilewriter_create_with_comparator( + const rocksdb_envoptions_t* env, const rocksdb_options_t* io_options, + const rocksdb_comparator_t* comparator); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_open( + rocksdb_sstfilewriter_t* writer, const char* name, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_add( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_put_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, const char* val, size_t 
vallen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_merge( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* val, size_t vallen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_with_ts( + rocksdb_sstfilewriter_t* writer, const char* key, size_t keylen, + const char* ts, size_t tslen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_delete_range( + rocksdb_sstfilewriter_t* writer, const char* begin_key, size_t begin_keylen, + const char* end_key, size_t end_keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_finish( + rocksdb_sstfilewriter_t* writer, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_file_size( + rocksdb_sstfilewriter_t* writer, uint64_t* file_size); +extern ROCKSDB_LIBRARY_API void rocksdb_sstfilewriter_destroy( + rocksdb_sstfilewriter_t* writer); +extern ROCKSDB_LIBRARY_API rocksdb_ingestexternalfileoptions_t* +rocksdb_ingestexternalfileoptions_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_move_files( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char move_files); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_snapshot_consistency( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char snapshot_consistency); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_global_seqno( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char allow_global_seqno); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_allow_blocking_flush( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char allow_blocking_flush); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_ingest_behind( + rocksdb_ingestexternalfileoptions_t* opt, unsigned char ingest_behind); +extern ROCKSDB_LIBRARY_API void +rocksdb_ingestexternalfileoptions_set_fail_if_not_bottommost_level( + rocksdb_ingestexternalfileoptions_t* opt, + unsigned char fail_if_not_bottommost_level); + +extern ROCKSDB_LIBRARY_API void rocksdb_ingestexternalfileoptions_destroy( + rocksdb_ingestexternalfileoptions_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file( + rocksdb_t* db, const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_ingest_external_file_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, + const char* const* file_list, const size_t list_len, + const rocksdb_ingestexternalfileoptions_t* opt, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_try_catch_up_with_primary( + rocksdb_t* db, char** errptr); + +/* SliceTransform */ + +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create( + void* state, void (*destructor)(void*), + char* (*transform)(void*, const char* key, size_t length, + size_t* dst_length), + unsigned char (*in_domain)(void*, const char* key, size_t length), + unsigned char (*in_range)(void*, const char* key, size_t length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* + rocksdb_slicetransform_create_fixed_prefix(size_t); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create_noop(void); +extern ROCKSDB_LIBRARY_API void 
rocksdb_slicetransform_destroy( + rocksdb_slicetransform_t*); + +/* Universal Compaction options */ + +enum { + rocksdb_similar_size_compaction_stop_style = 0, + rocksdb_total_size_compaction_stop_style = 1 +}; + +extern ROCKSDB_LIBRARY_API rocksdb_universal_compaction_options_t* +rocksdb_universal_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_size_ratio( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_size_ratio( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_min_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_min_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_merge_width( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_compression_size_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_compression_size_percent( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_stop_style( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API int +rocksdb_universal_compaction_options_get_stop_style( + rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy( + rocksdb_universal_compaction_options_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_fifo_compaction_options_t* +rocksdb_fifo_compaction_options_create(void); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts, + unsigned char allow_compaction); +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_fifo_compaction_options_get_allow_compaction( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_fifo_compaction_options_get_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts); +extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy( + rocksdb_fifo_compaction_options_t* fifo_opts); + +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_count( + const rocksdb_livefiles_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_column_family_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_level( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_livefiles_size(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* 
rocksdb_livefiles_smallestkey(
+    const rocksdb_livefiles_t*, int index, size_t* size);
+extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_largestkey(
+    const rocksdb_livefiles_t*, int index, size_t* size);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_livefiles_entries(const rocksdb_livefiles_t*, int index);
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_livefiles_deletions(const rocksdb_livefiles_t*, int index);
+extern ROCKSDB_LIBRARY_API void rocksdb_livefiles_destroy(
+    const rocksdb_livefiles_t*);
+
+/* Utility Helpers */
+
+extern ROCKSDB_LIBRARY_API void rocksdb_get_options_from_string(
+    const rocksdb_options_t* base_options, const char* opts_str,
+    rocksdb_options_t* new_options, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range(
+    rocksdb_t* db, const char* start_key, size_t start_key_len,
+    const char* limit_key, size_t limit_key_len, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range_cf(
+    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
+    const char* start_key, size_t start_key_len, const char* limit_key,
+    size_t limit_key_len, char** errptr);
+
+/* MetaData */
+
+extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t*
+rocksdb_get_column_family_metadata(rocksdb_t* db);
+
+/**
+ * Returns the rocksdb_column_family_metadata_t of the specified
+ * column family.
+ *
+ * Note that the caller is responsible for releasing the returned memory
+ * using rocksdb_column_family_metadata_destroy.
+ */
+extern ROCKSDB_LIBRARY_API rocksdb_column_family_metadata_t*
+rocksdb_get_column_family_metadata_cf(
+    rocksdb_t* db, rocksdb_column_family_handle_t* column_family);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_column_family_metadata_destroy(
+    rocksdb_column_family_metadata_t* cf_meta);
+
+extern ROCKSDB_LIBRARY_API uint64_t rocksdb_column_family_metadata_get_size(
+    rocksdb_column_family_metadata_t* cf_meta);
+
+extern ROCKSDB_LIBRARY_API size_t rocksdb_column_family_metadata_get_file_count(
+    rocksdb_column_family_metadata_t* cf_meta);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_column_family_metadata_get_name(
+    rocksdb_column_family_metadata_t* cf_meta);
+
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_column_family_metadata_get_level_count(
+    rocksdb_column_family_metadata_t* cf_meta);
+
+/**
+ * Returns the rocksdb_level_metadata_t of the ith level from the specified
+ * column family metadata.
+ *
+ * If the specified i is greater than or equal to the number of levels
+ * in the specified column family, then NULL will be returned.
+ *
+ * Note that the caller is responsible for releasing the returned memory
+ * using rocksdb_level_metadata_destroy before releasing its parent
+ * rocksdb_column_family_metadata_t.
+ */
+extern ROCKSDB_LIBRARY_API rocksdb_level_metadata_t*
+rocksdb_column_family_metadata_get_level_metadata(
+    rocksdb_column_family_metadata_t* cf_meta, size_t i);
+
+/**
+ * Releases the specified rocksdb_level_metadata_t.
+ *
+ * Note that the specified rocksdb_level_metadata_t must be released
+ * before the release of its parent rocksdb_column_family_metadata_t.
+ */
+extern ROCKSDB_LIBRARY_API void rocksdb_level_metadata_destroy(
+    rocksdb_level_metadata_t* level_meta);
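The comments above fix the destroy ordering for the whole metadata hierarchy: children before parents. A sketch of a traversal that honors it (illustrative only, not part of the vendored header; db is assumed to be an open rocksdb_t* handle, and the file-level accessors used here are declared just below):

    rocksdb_column_family_metadata_t* cf_meta =
        rocksdb_get_column_family_metadata(db);
    size_t num_levels = rocksdb_column_family_metadata_get_level_count(cf_meta);
    for (size_t l = 0; l < num_levels; l++) {
        rocksdb_level_metadata_t* level_meta =
            rocksdb_column_family_metadata_get_level_metadata(cf_meta, l);
        size_t num_files = rocksdb_level_metadata_get_file_count(level_meta);
        for (size_t f = 0; f < num_files; f++) {
            rocksdb_sst_file_metadata_t* file_meta =
                rocksdb_level_metadata_get_sst_file_metadata(level_meta, f);
            /* ... inspect file name, size, key range ... */
            rocksdb_sst_file_metadata_destroy(file_meta); /* child first */
        }
        rocksdb_level_metadata_destroy(level_meta); /* then its level */
    }
    rocksdb_column_family_metadata_destroy(cf_meta); /* parent last */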
+
+extern ROCKSDB_LIBRARY_API int rocksdb_level_metadata_get_level(
+    rocksdb_level_metadata_t* level_meta);
+
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_level_metadata_get_size(rocksdb_level_metadata_t* level_meta);
+
+extern ROCKSDB_LIBRARY_API size_t
+rocksdb_level_metadata_get_file_count(rocksdb_level_metadata_t* level_meta);
+
+/**
+ * Returns the rocksdb_sst_file_metadata_t of the ith file from the
+ * specified level metadata.
+ *
+ * If the specified i is greater than or equal to the number of files
+ * in the specified level, then NULL will be returned.
+ *
+ * Note that the caller is responsible for releasing the returned memory
+ * using rocksdb_sst_file_metadata_destroy before releasing its
+ * parent rocksdb_level_metadata_t.
+ */
+extern ROCKSDB_LIBRARY_API rocksdb_sst_file_metadata_t*
+rocksdb_level_metadata_get_sst_file_metadata(
+    rocksdb_level_metadata_t* level_meta, size_t i);
+
+/**
+ * Releases the specified rocksdb_sst_file_metadata_t.
+ *
+ * Note that the specified rocksdb_sst_file_metadata_t must be released
+ * before the release of its parent rocksdb_level_metadata_t.
+ */
+extern ROCKSDB_LIBRARY_API void rocksdb_sst_file_metadata_destroy(
+    rocksdb_sst_file_metadata_t* file_meta);
+
+extern ROCKSDB_LIBRARY_API char*
+rocksdb_sst_file_metadata_get_relative_filename(
+    rocksdb_sst_file_metadata_t* file_meta);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_directory(
+    rocksdb_sst_file_metadata_t* file_meta);
+
+extern ROCKSDB_LIBRARY_API uint64_t
+rocksdb_sst_file_metadata_get_size(rocksdb_sst_file_metadata_t* file_meta);
+
+/**
+ * Returns the smallest key of the specified sst file.
+ * The caller is responsible for releasing the returned memory.
+ *
+ * @param file_meta the metadata of an SST file to obtain its smallest key.
+ * @param len the out value which will contain the length of the returned key
+ *     after the function call.
+ */
+extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_smallestkey(
+    rocksdb_sst_file_metadata_t* file_meta, size_t* len);
+
+/**
+ * Returns the largest key of the specified sst file.
+ * The caller is responsible for releasing the returned memory.
+ *
+ * @param file_meta the metadata of an SST file to obtain its largest key.
+ * @param len the out value which will contain the length of the returned key
+ *     after the function call.
+ */
+extern ROCKSDB_LIBRARY_API char* rocksdb_sst_file_metadata_get_largestkey(
+    rocksdb_sst_file_metadata_t* file_meta, size_t* len);
+
+/* Transactions */
+
+extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t*
+rocksdb_transactiondb_create_column_family(
+    rocksdb_transactiondb_t* txn_db,
+    const rocksdb_options_t* column_family_options,
+    const char* column_family_name, char** errptr);
+
+extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t* rocksdb_transactiondb_open(
+    const rocksdb_options_t* options,
+    const rocksdb_transactiondb_options_t* txn_db_options, const char* name,
+    char** errptr);
+
+extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_t*
+rocksdb_transactiondb_open_column_families(
+    const rocksdb_options_t* options,
+    const rocksdb_transactiondb_options_t* txn_db_options, const char* name,
+    int num_column_families, const char* const* column_family_names,
+    const rocksdb_options_t* const* column_family_options,
+    rocksdb_column_family_handle_t** column_family_handles, char** errptr);
+
+extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t*
+rocksdb_transactiondb_create_snapshot(rocksdb_transactiondb_t* txn_db);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_release_snapshot(
+    rocksdb_transactiondb_t* txn_db, const rocksdb_snapshot_t* snapshot);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_property_value(
+    rocksdb_transactiondb_t* db, const char* propname);
+
+extern ROCKSDB_LIBRARY_API int rocksdb_transactiondb_property_int(
+    rocksdb_transactiondb_t* db, const char* propname, uint64_t* out_val);
+
+extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_transactiondb_get_base_db(
+    rocksdb_transactiondb_t* txn_db);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close_base_db(
+    rocksdb_t* base_db);
+
+extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* rocksdb_transaction_begin(
+    rocksdb_transactiondb_t* txn_db,
+    const rocksdb_writeoptions_t* write_options,
+    const rocksdb_transaction_options_t* txn_options,
+    rocksdb_transaction_t* old_txn);
+
+extern ROCKSDB_LIBRARY_API rocksdb_transaction_t**
+rocksdb_transactiondb_get_prepared_transactions(rocksdb_transactiondb_t* txn_db,
+    size_t* cnt);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_name(
+    rocksdb_transaction_t* txn, const char* name, size_t name_len,
+    char** errptr);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_name(
+    rocksdb_transaction_t* txn, size_t* name_len);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_prepare(
+    rocksdb_transaction_t* txn, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_commit(
+    rocksdb_transaction_t* txn, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback(
+    rocksdb_transaction_t* txn, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_savepoint(
+    rocksdb_transaction_t* txn);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rollback_to_savepoint(
+    rocksdb_transaction_t* txn, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_destroy(
+    rocksdb_transaction_t* txn);
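The declarations above define the basic lifecycle of a pessimistic transaction: begin, write, then commit or roll back, and always destroy the handle. A minimal round trip in C (illustrative only, not part of the vendored header; txn_db, write_opts and txn_opts are assumed to come from rocksdb_transactiondb_open, rocksdb_writeoptions_create and rocksdb_transaction_options_create, while rocksdb_transaction_put and rocksdb_free are declared further below in this header):

    char* err = NULL;
    rocksdb_transaction_t* txn =
        rocksdb_transaction_begin(txn_db, write_opts, txn_opts, NULL);
    rocksdb_transaction_put(txn, "key", 3, "value", 5, &err);
    if (err == NULL) {
        rocksdb_transaction_commit(txn, &err);
    } else {
        char* rollback_err = NULL;
        rocksdb_transaction_rollback(txn, &rollback_err);
        rocksdb_free(rollback_err);
    }
    rocksdb_free(err); /* a no-op when no error occurred (free(NULL) semantics) */
    rocksdb_transaction_destroy(txn); /* required after commit and rollback alike */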
+
+// The returned rocksdb_writebatch_wi_t should be freed with rocksdb_free
+extern ROCKSDB_LIBRARY_API rocksdb_writebatch_wi_t*
+rocksdb_transaction_get_writebatch_wi(rocksdb_transaction_t* txn);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch(
+    rocksdb_transaction_t* txn, rocksdb_writebatch_t* writebatch,
+    char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_rebuild_from_writebatch_wi(
+    rocksdb_transaction_t* txn, rocksdb_writebatch_wi_t* wi, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_set_commit_timestamp(
+    rocksdb_transaction_t* txn, uint64_t commit_timestamp);
+
+extern ROCKSDB_LIBRARY_API void
+rocksdb_transaction_set_read_timestamp_for_validation(
+    rocksdb_transaction_t* txn, uint64_t read_timestamp);
+
+// The returned snapshot should be freed using rocksdb_free
+extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t*
+rocksdb_transaction_get_snapshot(rocksdb_transaction_t* txn);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    const char* key, size_t klen, size_t* vlen, char** errptr);
+
+extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t*
+rocksdb_transaction_get_pinned(rocksdb_transaction_t* txn,
+    const rocksdb_readoptions_t* options,
+    const char* key, size_t klen, char** errptr);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_cf(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
+    size_t* vlen, char** errptr);
+
+extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t*
+rocksdb_transaction_get_pinned_cf(rocksdb_transaction_t* txn,
+    const rocksdb_readoptions_t* options,
+    rocksdb_column_family_handle_t* column_family,
+    const char* key, size_t klen, char** errptr);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    const char* key, size_t klen, size_t* vlen, unsigned char exclusive,
+    char** errptr);
+
+extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t*
+rocksdb_transaction_get_pinned_for_update(rocksdb_transaction_t* txn,
+    const rocksdb_readoptions_t* options,
+    const char* key, size_t klen,
+    unsigned char exclusive,
+    char** errptr);
+
+extern ROCKSDB_LIBRARY_API char* rocksdb_transaction_get_for_update_cf(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
+    size_t* vlen, unsigned char exclusive, char** errptr);
+
+extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t*
+rocksdb_transaction_get_pinned_for_update_cf(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    rocksdb_column_family_handle_t* column_family, const char* key, size_t klen,
+    unsigned char exclusive, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    size_t num_keys, const char* const* keys_list,
+    const size_t* keys_list_sizes, char** values_list,
+    size_t* values_list_sizes, char** errs);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get_for_update(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    size_t num_keys, const char* const* keys_list,
+    const size_t* keys_list_sizes, char** values_list,
+    size_t* values_list_sizes, char** errs);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get_cf(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    const rocksdb_column_family_handle_t* const* column_families,
+    size_t num_keys, const char* const* keys_list,
+    const size_t* keys_list_sizes, char** values_list,
+    size_t* values_list_sizes, char** errs);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_transaction_multi_get_for_update_cf(
+    rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options,
+    const rocksdb_column_family_handle_t* const*
column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const char* key, size_t klen, size_t* vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_transactiondb_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* +rocksdb_transactiondb_get_pinned_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_multi_get_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_put_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_put_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_write( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge( + rocksdb_transaction_t* txn, const char* key, size_t klen, const char* val, + size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_merge_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_merge_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + 
rocksdb_column_family_handle_t* column_family, const char* key, size_t klen, + const char* val, size_t vlen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete( + rocksdb_transaction_t* txn, const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_delete_cf( + rocksdb_transaction_t* txn, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + const char* key, size_t klen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_delete_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator(rocksdb_transaction_t* txn, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transaction_create_iterator_cf( + rocksdb_transaction_t* txn, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator(rocksdb_transactiondb_t* txn_db, + const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* +rocksdb_transactiondb_create_iterator_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_close( + rocksdb_transactiondb_t* txn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cf( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t* column_family, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_cfs( + rocksdb_transactiondb_t* txn_db, const rocksdb_flushoptions_t* options, + rocksdb_column_family_handle_t** column_families, int num_column_families, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_flush_wal( + rocksdb_transactiondb_t* txn_db, unsigned char sync, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_transactiondb_checkpoint_object_create(rocksdb_transactiondb_t* txn_db, + char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open(const rocksdb_options_t* options, + const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransactiondb_t* +rocksdb_optimistictransactiondb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char* const* column_family_names, + const rocksdb_options_t* const* column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_optimistictransactiondb_get_base_db( + rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close_base_db( + rocksdb_t* base_db); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_t* +rocksdb_optimistictransaction_begin( + rocksdb_optimistictransactiondb_t* otxn_db, + const 
rocksdb_writeoptions_t* write_options, + const rocksdb_optimistictransaction_options_t* otxn_options, + rocksdb_transaction_t* old_txn); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_write( + rocksdb_optimistictransactiondb_t* otxn_db, + const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransactiondb_close( + rocksdb_optimistictransactiondb_t* otxn_db); + +extern ROCKSDB_LIBRARY_API rocksdb_checkpoint_t* +rocksdb_optimistictransactiondb_checkpoint_object_create( + rocksdb_optimistictransactiondb_t* otxn_db, char** errptr); + +/* Transaction Options */ + +extern ROCKSDB_LIBRARY_API rocksdb_transactiondb_options_t* +rocksdb_transactiondb_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_destroy( + rocksdb_transactiondb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_max_num_locks( + rocksdb_transactiondb_options_t* opt, int64_t max_num_locks); + +extern ROCKSDB_LIBRARY_API void rocksdb_transactiondb_options_set_num_stripes( + rocksdb_transactiondb_options_t* opt, size_t num_stripes); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_transaction_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t txn_lock_timeout); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transactiondb_options_set_default_lock_timeout( + rocksdb_transactiondb_options_t* opt, int64_t default_lock_timeout); + +extern ROCKSDB_LIBRARY_API rocksdb_transaction_options_t* +rocksdb_transaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_destroy( + rocksdb_transaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_set_snapshot( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_deadlock_detect( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_lock_timeout( + rocksdb_transaction_options_t* opt, int64_t lock_timeout); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_expiration( + rocksdb_transaction_options_t* opt, int64_t expiration); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_deadlock_detect_depth( + rocksdb_transaction_options_t* opt, int64_t depth); + +extern ROCKSDB_LIBRARY_API void +rocksdb_transaction_options_set_max_write_batch_size( + rocksdb_transaction_options_t* opt, size_t size); + +extern ROCKSDB_LIBRARY_API void rocksdb_transaction_options_set_skip_prepare( + rocksdb_transaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API rocksdb_optimistictransaction_options_t* +rocksdb_optimistictransaction_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_optimistictransaction_options_destroy( + rocksdb_optimistictransaction_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_optimistictransaction_options_set_set_snapshot( + rocksdb_optimistictransaction_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API char* rocksdb_optimistictransactiondb_property_value( + rocksdb_optimistictransactiondb_t* db, const char* propname); + +extern ROCKSDB_LIBRARY_API int rocksdb_optimistictransactiondb_property_int( + rocksdb_optimistictransactiondb_t* db, const char* propname, + uint64_t* out_val); + +// referring to convention (3), this should be used by client +// to free memory that was malloc()ed +extern 
ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr); + +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API rocksdb_pinnableslice_t* rocksdb_get_pinned_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_pinnableslice_destroy( + rocksdb_pinnableslice_t* v); +extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value( + const rocksdb_pinnableslice_t* t, size_t* vlen); + +extern ROCKSDB_LIBRARY_API rocksdb_memory_consumers_t* +rocksdb_memory_consumers_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_db( + rocksdb_memory_consumers_t* consumers, rocksdb_t* db); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_add_cache( + rocksdb_memory_consumers_t* consumers, rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_memory_consumers_destroy( + rocksdb_memory_consumers_t* consumers); +extern ROCKSDB_LIBRARY_API rocksdb_memory_usage_t* +rocksdb_approximate_memory_usage_create(rocksdb_memory_consumers_t* consumers, + char** errptr); +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_memory_usage_destroy( + rocksdb_memory_usage_t* usage); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_unflushed( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_mem_table_readers_total( + rocksdb_memory_usage_t* memory_usage); +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_approximate_memory_usage_get_cache_total( + rocksdb_memory_usage_t* memory_usage); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_dump_malloc_stats( + rocksdb_options_t*, unsigned char); + +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_memtable_whole_key_filtering(rocksdb_options_t*, + unsigned char); + +extern ROCKSDB_LIBRARY_API void rocksdb_cancel_all_background_work( + rocksdb_t* db, unsigned char wait); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_manual_compaction( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_manual_compaction(rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API rocksdb_statistics_histogram_data_t* +rocksdb_statistics_histogram_data_create(void); +extern ROCKSDB_LIBRARY_API void rocksdb_statistics_histogram_data_destroy( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API double rocksdb_statistics_histogram_data_get_median( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API double rocksdb_statistics_histogram_data_get_p95( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API double rocksdb_statistics_histogram_data_get_p99( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API double rocksdb_statistics_histogram_data_get_average( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API double rocksdb_statistics_histogram_data_get_std_dev( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API double rocksdb_statistics_histogram_data_get_max( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_statistics_histogram_data_get_count( + 
rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API uint64_t rocksdb_statistics_histogram_data_get_sum( + rocksdb_statistics_histogram_data_t* data); +extern ROCKSDB_LIBRARY_API double rocksdb_statistics_histogram_data_get_min( + rocksdb_statistics_histogram_data_t* data); + +extern ROCKSDB_LIBRARY_API void rocksdb_wait_for_compact( + rocksdb_t* db, rocksdb_wait_for_compact_options_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_wait_for_compact_options_t* +rocksdb_wait_for_compact_options_create(void); + +extern ROCKSDB_LIBRARY_API void rocksdb_wait_for_compact_options_destroy( + rocksdb_wait_for_compact_options_t* opt); + +extern ROCKSDB_LIBRARY_API void +rocksdb_wait_for_compact_options_set_abort_on_pause( + rocksdb_wait_for_compact_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_wait_for_compact_options_get_abort_on_pause( + rocksdb_wait_for_compact_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_wait_for_compact_options_set_flush( + rocksdb_wait_for_compact_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_wait_for_compact_options_get_flush( + rocksdb_wait_for_compact_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_wait_for_compact_options_set_close_db( + rocksdb_wait_for_compact_options_t* opt, unsigned char v); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_wait_for_compact_options_get_close_db( + rocksdb_wait_for_compact_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_wait_for_compact_options_set_timeout( + rocksdb_wait_for_compact_options_t* opt, uint64_t microseconds); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_wait_for_compact_options_get_timeout( + rocksdb_wait_for_compact_options_t* opt); + +#ifdef __cplusplus +} /* end extern "C" */ +#endif diff --git a/Database/Sources/module.modulemap b/Database/Sources/module.modulemap index c823b667..6d8f958c 100644 --- a/Database/Sources/module.modulemap +++ b/Database/Sources/module.modulemap @@ -1,4 +1,4 @@ module rocksdb { - header "rocksdb/include/rocksdb/c.h" + header "include/rocksdb.h" link "rocksdb" } diff --git a/Database/Sources/rocksdb b/Database/Sources/rocksdb deleted file mode 160000 index 5f003e4a..00000000 --- a/Database/Sources/rocksdb +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 5f003e4a22d2e48e37c98d9620241237cd30dd24 diff --git a/JAMTests/Package.resolved b/JAMTests/Package.resolved index f8e67f26..784ad7cf 100644 --- a/JAMTests/Package.resolved +++ b/JAMTests/Package.resolved @@ -1,5 +1,5 @@ { - "originHash" : "ea57294dc40bb2d6c41fd2ad02e6e7f4cc107b8698f88168f6f409fd7857b774", + "originHash" : "cd9cc1e5d46c44090826877a95ecb2fbfa27868633e11d6438dda8ee66ea8452", "pins" : [ { "identity" : "blake2.swift", @@ -55,6 +55,15 @@ "version" : "2.5.0" } }, + { + "identity" : "swift-numerics", + "kind" : "remoteSourceControl", + "location" : "https://github.com/apple/swift-numerics", + "state" : { + "branch" : "main", + "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83" + } + }, { "identity" : "swift-service-context", "kind" : "remoteSourceControl", diff --git a/JAMTests/Tests/JAMTests/CodecTests.swift b/JAMTests/Tests/JAMTests/CodecTests.swift index acdbc44c..490081e2 100644 --- a/JAMTests/Tests/JAMTests/CodecTests.swift +++ b/JAMTests/Tests/JAMTests/CodecTests.swift @@ -175,8 +175,8 @@ struct CodecTests { return [ "hash": json["workPackageHash"]!, "len": json["length"]!, - "root": json["erasureRoot"]!, - "segments": json["segmentRoot"]!, + "erasure_root": 
json["erasureRoot"]!, + "exports_root": json["segmentRoot"]!, ].json } if let value = value as? ExtrinsicGuarantees { diff --git a/JAMTests/Tests/JAMTests/PVMTests.swift b/JAMTests/Tests/JAMTests/PVMTests.swift index 95147ff3..2db3b9a1 100644 --- a/JAMTests/Tests/JAMTests/PVMTests.swift +++ b/JAMTests/Tests/JAMTests/PVMTests.swift @@ -34,13 +34,13 @@ struct PolkaVMTestcase: Codable, CustomStringConvertible { var initialPC: UInt32 var initialPageMap: [PageMap] var initialMemory: [MemoryChunk] - var initialGas: Int64 + var initialGas: Gas var program: [UInt8] var expectedStatus: Status var expectedRegs: [UInt32] var expectedPC: UInt32 var expectedMemory: [MemoryChunk] - var expectedGas: Int64 + var expectedGas: GasInt enum CodingKeys: String, CodingKey { case name @@ -92,7 +92,7 @@ struct PVMTests { program: program, pc: testCase.initialPC, registers: Registers(testCase.initialRegs), - gas: UInt64(testCase.initialGas), + gas: testCase.initialGas, memory: memory ) let engine = Engine(config: DefaultPvmConfig()) diff --git a/JAMTests/Tests/JAMTests/RecentHistoryTests.swift b/JAMTests/Tests/JAMTests/RecentHistoryTests.swift index 073ae389..e6636ab9 100644 --- a/JAMTests/Tests/JAMTests/RecentHistoryTests.swift +++ b/JAMTests/Tests/JAMTests/RecentHistoryTests.swift @@ -21,24 +21,22 @@ struct RecentHisoryTestcase: Codable { struct RecentHistoryTests { static func loadTests() throws -> [Testcase] { - try TestLoader.getTestcases(path: "history/data", extension: "scale") + try TestLoader.getTestcases(path: "history/data", extension: "bin") } @Test(arguments: try loadTests()) func recentHistory(_ testcase: Testcase) throws { - withKnownIssue("wait for codec to be updated") { - let config = ProtocolConfigRef.mainnet - let testcase = try JamDecoder.decode(RecentHisoryTestcase.self, from: testcase.data, withConfig: config) + let config = ProtocolConfigRef.mainnet + let testcase = try JamDecoder.decode(RecentHisoryTestcase.self, from: testcase.data, withConfig: config) - var state = testcase.preState - try state.update( - headerHash: testcase.input.headerHash, - parentStateRoot: testcase.input.parentStateRoot, - accumulateRoot: testcase.input.accumulateRoot, - workReportHashes: ConfigLimitedSizeArray(config: config, array: testcase.input.workPackages) - ) + var state = testcase.preState + try state.update( + headerHash: testcase.input.headerHash, + parentStateRoot: testcase.input.parentStateRoot, + accumulateRoot: testcase.input.accumulateRoot, + workReportHashes: ConfigLimitedSizeArray(config: config, array: testcase.input.workPackages) + ) - #expect(state == testcase.postState) - } + #expect(state == testcase.postState) } } diff --git a/JAMTests/Tests/JAMTests/SafroleTests.swift b/JAMTests/Tests/JAMTests/SafroleTests.swift index c060d03c..bc51a975 100644 --- a/JAMTests/Tests/JAMTests/SafroleTests.swift +++ b/JAMTests/Tests/JAMTests/SafroleTests.swift @@ -9,8 +9,8 @@ import Utils struct SafroleInput: Codable { var slot: UInt32 var entropy: Data32 - var offenders: [Ed25519PublicKey] var extrinsics: ExtrinsicTickets + var offenders: [Ed25519PublicKey] } struct OutputMarks: Codable { @@ -98,14 +98,12 @@ enum SafroleTestVariants: String, CaseIterable { case tiny case full - static let tinyConfig = { - var config = ProtocolConfigRef.mainnet.value + static let tinyConfig = ProtocolConfigRef.mainnet.mutate { config in config.totalNumberOfValidators = 6 config.epochLength = 12 // 10 = 12 * 500/600, not sure what this should be for tiny, but this passes tests config.ticketSubmissionEndSlot = 10 - return 
Ref(config) - }() + } var config: ProtocolConfigRef { switch self { @@ -119,7 +117,7 @@ enum SafroleTestVariants: String, CaseIterable { struct SafroleTests { static func loadTests(variant: SafroleTestVariants) throws -> [Testcase] { - try TestLoader.getTestcases(path: "safrole/\(variant)", extension: "scale") + try TestLoader.getTestcases(path: "safrole/\(variant)", extension: "bin") } func safroleTests(_ input: Testcase, variant: SafroleTestVariants) throws { @@ -161,15 +159,11 @@ struct SafroleTests { @Test(arguments: try SafroleTests.loadTests(variant: .tiny)) func tinyTests(_ testcase: Testcase) throws { - withKnownIssue("waiting for codec to be updated", isIntermittent: true) { - try safroleTests(testcase, variant: .tiny) - } + try safroleTests(testcase, variant: .tiny) } @Test(arguments: try SafroleTests.loadTests(variant: .full)) func fullTests(_ testcase: Testcase) throws { - withKnownIssue("waiting for codec to be updated", isIntermittent: true) { - try safroleTests(testcase, variant: .full) - } + try safroleTests(testcase, variant: .full) } } diff --git a/JAMTests/jamtestvectors b/JAMTests/jamtestvectors index 03685594..4fdcf95a 160000 --- a/JAMTests/jamtestvectors +++ b/JAMTests/jamtestvectors @@ -1 +1 @@ -Subproject commit 0368559435b2ae2431520b6aa3897dae6b42cf6b +Subproject commit 4fdcf95aeed04d53bb4198373925d34f63069059 diff --git a/Makefile b/Makefile index 09274c09..47972aab 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ default: build githooks: .git/hooks/pre-commit .PHONY: deps -deps: .lib/libblst.a .lib/libbandersnatch_vrfs.a .lib/librocksdb.a .lib/libec.a .lib/libmsquic.a +deps: .lib/libblst.a .lib/libbandersnatch_vrfs.a .lib/libec.a .lib/libmsquic.a .lib/libblst.a: ./scripts/blst.sh @@ -19,12 +19,9 @@ deps: .lib/libblst.a .lib/libbandersnatch_vrfs.a .lib/librocksdb.a .lib/libec.a .lib/libec.a: $(wildcard Utils/Sources/erasure-coding/src/*) ./scripts/erasure-coding.sh -.lib/librocksdb.a: - ./scripts/rocksdb.sh - .lib/libmsquic.a: - ./scripts/msquic.sh - + ./scripts/external-libs.sh + .PHONY: test test: githooks deps ./scripts/runTests.sh test diff --git a/Networking/Sources/include/msquic.h b/Networking/Sources/include/msquic.h new file mode 100644 index 00000000..d9c25b89 --- /dev/null +++ b/Networking/Sources/include/msquic.h @@ -0,0 +1,1905 @@ +/*++ + + Copyright (c) Microsoft Corporation. + Licensed under the MIT License. + +Abstract: + + Declarations for the MsQuic API, which enables applications and drivers to + create QUIC connections as a client or server. + + For more detailed information, see ../docs/API.md + +Supported Platforms: + + Windows User mode + Windows Kernel mode + Linux User mode + +--*/ + +#ifndef _MSQUIC_ +#define _MSQUIC_ + +#ifdef _WIN32 +#pragma once + +#pragma warning(disable:4201) // nonstandard extension used: nameless struct/union +#pragma warning(disable:4214) // nonstandard extension used: bit field types other than int +#endif + +#ifdef _KERNEL_MODE +#include "msquic_winkernel.h" +#elif _WIN32 +#include "msquic_winuser.h" +#elif __linux__ || __APPLE__ || __FreeBSD__ +#include "msquic_posix.h" +#else +#error "Unsupported Platform" +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef struct QUIC_HANDLE *HQUIC; + +// +// The maximum value that can be encoded in a 62-bit integer. +// +#define QUIC_UINT62_MAX ((1ULL << 62U) - 1) + +// +// Represents a 62-bit integer. +// +typedef _In_range_(0, QUIC_UINT62_MAX) uint64_t QUIC_UINT62; + +// +// An ALPN must not exceed 255 bytes, and must not be zero-length. 
+// +#define QUIC_MAX_ALPN_LENGTH 255 + +// +// A server name must not exceed 65535 bytes. +// +#define QUIC_MAX_SNI_LENGTH 65535 + +// +// The maximum number of bytes of application data a server application can +// send in a resumption ticket. +// +#define QUIC_MAX_RESUMPTION_APP_DATA_LENGTH 1000 + +// +// The number of bytes of stateless reset key. +// +#define QUIC_STATELESS_RESET_KEY_LENGTH 32 + +typedef enum QUIC_TLS_PROVIDER { + QUIC_TLS_PROVIDER_SCHANNEL = 0x0000, + QUIC_TLS_PROVIDER_OPENSSL = 0x0001, +} QUIC_TLS_PROVIDER; + +typedef enum QUIC_EXECUTION_PROFILE { + QUIC_EXECUTION_PROFILE_LOW_LATENCY, // Default + QUIC_EXECUTION_PROFILE_TYPE_MAX_THROUGHPUT, + QUIC_EXECUTION_PROFILE_TYPE_SCAVENGER, + QUIC_EXECUTION_PROFILE_TYPE_REAL_TIME, +} QUIC_EXECUTION_PROFILE; + +typedef enum QUIC_LOAD_BALANCING_MODE { + QUIC_LOAD_BALANCING_DISABLED, // Default + QUIC_LOAD_BALANCING_SERVER_ID_IP, // Encodes IP address in Server ID + QUIC_LOAD_BALANCING_SERVER_ID_FIXED, // Encodes a fixed 4-byte value in Server ID + QUIC_LOAD_BALANCING_COUNT, // The number of supported load balancing modes + // MUST BE LAST +} QUIC_LOAD_BALANCING_MODE; + +typedef enum QUIC_TLS_ALERT_CODES { + QUIC_TLS_ALERT_CODE_SUCCESS = 0xFFFF, // Not a real TlsAlert + QUIC_TLS_ALERT_CODE_UNEXPECTED_MESSAGE = 10, + QUIC_TLS_ALERT_CODE_BAD_CERTIFICATE = 42, + QUIC_TLS_ALERT_CODE_UNSUPPORTED_CERTIFICATE = 43, + QUIC_TLS_ALERT_CODE_CERTIFICATE_REVOKED = 44, + QUIC_TLS_ALERT_CODE_CERTIFICATE_EXPIRED = 45, + QUIC_TLS_ALERT_CODE_CERTIFICATE_UNKNOWN = 46, + QUIC_TLS_ALERT_CODE_ILLEGAL_PARAMETER = 47, + QUIC_TLS_ALERT_CODE_UNKNOWN_CA = 48, + QUIC_TLS_ALERT_CODE_ACCESS_DENIED = 49, + QUIC_TLS_ALERT_CODE_INSUFFICIENT_SECURITY = 71, + QUIC_TLS_ALERT_CODE_INTERNAL_ERROR = 80, + QUIC_TLS_ALERT_CODE_USER_CANCELED = 90, + QUIC_TLS_ALERT_CODE_CERTIFICATE_REQUIRED = 116, + QUIC_TLS_ALERT_CODE_MAX = 255, +} QUIC_TLS_ALERT_CODES; + +typedef enum QUIC_CREDENTIAL_TYPE { + QUIC_CREDENTIAL_TYPE_NONE, + QUIC_CREDENTIAL_TYPE_CERTIFICATE_HASH, + QUIC_CREDENTIAL_TYPE_CERTIFICATE_HASH_STORE, + QUIC_CREDENTIAL_TYPE_CERTIFICATE_CONTEXT, + QUIC_CREDENTIAL_TYPE_CERTIFICATE_FILE, + QUIC_CREDENTIAL_TYPE_CERTIFICATE_FILE_PROTECTED, + QUIC_CREDENTIAL_TYPE_CERTIFICATE_PKCS12, +} QUIC_CREDENTIAL_TYPE; + +typedef enum QUIC_CREDENTIAL_FLAGS { + QUIC_CREDENTIAL_FLAG_NONE = 0x00000000, + QUIC_CREDENTIAL_FLAG_CLIENT = 0x00000001, // Lack of client flag indicates server. 
+ QUIC_CREDENTIAL_FLAG_LOAD_ASYNCHRONOUS = 0x00000002, + QUIC_CREDENTIAL_FLAG_NO_CERTIFICATE_VALIDATION = 0x00000004, + QUIC_CREDENTIAL_FLAG_ENABLE_OCSP = 0x00000008, // Schannel only currently + QUIC_CREDENTIAL_FLAG_INDICATE_CERTIFICATE_RECEIVED = 0x00000010, + QUIC_CREDENTIAL_FLAG_DEFER_CERTIFICATE_VALIDATION = 0x00000020, + QUIC_CREDENTIAL_FLAG_REQUIRE_CLIENT_AUTHENTICATION = 0x00000040, + QUIC_CREDENTIAL_FLAG_USE_TLS_BUILTIN_CERTIFICATE_VALIDATION = 0x00000080, // OpenSSL only currently + QUIC_CREDENTIAL_FLAG_REVOCATION_CHECK_END_CERT = 0x00000100, // Schannel only currently + QUIC_CREDENTIAL_FLAG_REVOCATION_CHECK_CHAIN = 0x00000200, // Schannel only currently + QUIC_CREDENTIAL_FLAG_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT = 0x00000400, // Schannel only currently + QUIC_CREDENTIAL_FLAG_IGNORE_NO_REVOCATION_CHECK = 0x00000800, // Schannel only currently + QUIC_CREDENTIAL_FLAG_IGNORE_REVOCATION_OFFLINE = 0x00001000, // Schannel only currently + QUIC_CREDENTIAL_FLAG_SET_ALLOWED_CIPHER_SUITES = 0x00002000, + QUIC_CREDENTIAL_FLAG_USE_PORTABLE_CERTIFICATES = 0x00004000, + QUIC_CREDENTIAL_FLAG_USE_SUPPLIED_CREDENTIALS = 0x00008000, // Schannel only + QUIC_CREDENTIAL_FLAG_USE_SYSTEM_MAPPER = 0x00010000, // Schannel only + QUIC_CREDENTIAL_FLAG_CACHE_ONLY_URL_RETRIEVAL = 0x00020000, // Windows only currently + QUIC_CREDENTIAL_FLAG_REVOCATION_CHECK_CACHE_ONLY = 0x00040000, // Windows only currently + QUIC_CREDENTIAL_FLAG_INPROC_PEER_CERTIFICATE = 0x00080000, // Schannel only + QUIC_CREDENTIAL_FLAG_SET_CA_CERTIFICATE_FILE = 0x00100000, // OpenSSL only currently +} QUIC_CREDENTIAL_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_CREDENTIAL_FLAGS) + +typedef enum QUIC_ALLOWED_CIPHER_SUITE_FLAGS { + QUIC_ALLOWED_CIPHER_SUITE_NONE = 0x0, + QUIC_ALLOWED_CIPHER_SUITE_AES_128_GCM_SHA256 = 0x1, + QUIC_ALLOWED_CIPHER_SUITE_AES_256_GCM_SHA384 = 0x2, + QUIC_ALLOWED_CIPHER_SUITE_CHACHA20_POLY1305_SHA256 = 0x4, // Not supported on Schannel +} QUIC_ALLOWED_CIPHER_SUITE_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_ALLOWED_CIPHER_SUITE_FLAGS); + +typedef enum QUIC_CERTIFICATE_HASH_STORE_FLAGS { + QUIC_CERTIFICATE_HASH_STORE_FLAG_NONE = 0x0000, + QUIC_CERTIFICATE_HASH_STORE_FLAG_MACHINE_STORE = 0x0001, +} QUIC_CERTIFICATE_HASH_STORE_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_CERTIFICATE_HASH_STORE_FLAGS) + +typedef enum QUIC_CONNECTION_SHUTDOWN_FLAGS { + QUIC_CONNECTION_SHUTDOWN_FLAG_NONE = 0x0000, + QUIC_CONNECTION_SHUTDOWN_FLAG_SILENT = 0x0001, // Don't send the close frame over the network. +} QUIC_CONNECTION_SHUTDOWN_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_CONNECTION_SHUTDOWN_FLAGS) + +typedef enum QUIC_SERVER_RESUMPTION_LEVEL { + QUIC_SERVER_NO_RESUME, + QUIC_SERVER_RESUME_ONLY, + QUIC_SERVER_RESUME_AND_ZERORTT, +} QUIC_SERVER_RESUMPTION_LEVEL; + +typedef enum QUIC_SEND_RESUMPTION_FLAGS { + QUIC_SEND_RESUMPTION_FLAG_NONE = 0x0000, + QUIC_SEND_RESUMPTION_FLAG_FINAL = 0x0001, // Free TLS state after sending this ticket. +} QUIC_SEND_RESUMPTION_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_SEND_RESUMPTION_FLAGS) + +typedef enum QUIC_STREAM_SCHEDULING_SCHEME { + QUIC_STREAM_SCHEDULING_SCHEME_FIFO = 0x0000, // Sends stream data first come, first served. (Default) + QUIC_STREAM_SCHEDULING_SCHEME_ROUND_ROBIN = 0x0001, // Sends stream data evenly multiplexed. + QUIC_STREAM_SCHEDULING_SCHEME_COUNT, // The number of stream scheduling schemes. 
+} QUIC_STREAM_SCHEDULING_SCHEME; + +typedef enum QUIC_STREAM_OPEN_FLAGS { + QUIC_STREAM_OPEN_FLAG_NONE = 0x0000, + QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL = 0x0001, // Indicates the stream is unidirectional. + QUIC_STREAM_OPEN_FLAG_0_RTT = 0x0002, // The stream was opened via a 0-RTT packet. + QUIC_STREAM_OPEN_FLAG_DELAY_ID_FC_UPDATES = 0x0004, // Indicates stream ID flow control limit updates for the + // connection should be delayed to StreamClose. +} QUIC_STREAM_OPEN_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_STREAM_OPEN_FLAGS) + +typedef enum QUIC_STREAM_START_FLAGS { + QUIC_STREAM_START_FLAG_NONE = 0x0000, + QUIC_STREAM_START_FLAG_IMMEDIATE = 0x0001, // Immediately informs peer that stream is open. + QUIC_STREAM_START_FLAG_FAIL_BLOCKED = 0x0002, // Only opens the stream if flow control allows. + QUIC_STREAM_START_FLAG_SHUTDOWN_ON_FAIL = 0x0004, // Shutdown the stream immediately after start failure. + QUIC_STREAM_START_FLAG_INDICATE_PEER_ACCEPT = 0x0008, // Indicate PEER_ACCEPTED event if not accepted at start. + QUIC_STREAM_START_FLAG_PRIORITY_WORK = 0x0010, // Higher priority than other connection work. +} QUIC_STREAM_START_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_STREAM_START_FLAGS) + +typedef enum QUIC_STREAM_SHUTDOWN_FLAGS { + QUIC_STREAM_SHUTDOWN_FLAG_NONE = 0x0000, + QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL = 0x0001, // Cleanly closes the send path. + QUIC_STREAM_SHUTDOWN_FLAG_ABORT_SEND = 0x0002, // Abruptly closes the send path. + QUIC_STREAM_SHUTDOWN_FLAG_ABORT_RECEIVE = 0x0004, // Abruptly closes the receive path. + QUIC_STREAM_SHUTDOWN_FLAG_ABORT = 0x0006, // Abruptly closes both send and receive paths. + QUIC_STREAM_SHUTDOWN_FLAG_IMMEDIATE = 0x0008, // Immediately sends completion events to app. + QUIC_STREAM_SHUTDOWN_FLAG_INLINE = 0x0010, // Process the shutdown immediately inline. Only for calls on callbacks. + // WARNING: Can cause reentrant callbacks! +} QUIC_STREAM_SHUTDOWN_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_STREAM_SHUTDOWN_FLAGS) + +typedef enum QUIC_RECEIVE_FLAGS { + QUIC_RECEIVE_FLAG_NONE = 0x0000, + QUIC_RECEIVE_FLAG_0_RTT = 0x0001, // Data was encrypted with 0-RTT key. + QUIC_RECEIVE_FLAG_FIN = 0x0002, // FIN was included with this data. +} QUIC_RECEIVE_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_RECEIVE_FLAGS) + +typedef enum QUIC_SEND_FLAGS { + QUIC_SEND_FLAG_NONE = 0x0000, + QUIC_SEND_FLAG_ALLOW_0_RTT = 0x0001, // Allows the use of encrypting with 0-RTT key. + QUIC_SEND_FLAG_START = 0x0002, // Asynchronously starts the stream with the sent data. + QUIC_SEND_FLAG_FIN = 0x0004, // Indicates the request is the one last sent on the stream. + QUIC_SEND_FLAG_DGRAM_PRIORITY = 0x0008, // Indicates the datagram is higher priority than others. + QUIC_SEND_FLAG_DELAY_SEND = 0x0010, // Indicates the send should be delayed because more will be queued soon. + QUIC_SEND_FLAG_CANCEL_ON_LOSS = 0x0020, // Indicates that a stream is to be cancelled when packet loss is detected. + QUIC_SEND_FLAG_PRIORITY_WORK = 0x0040, // Higher priority than other connection work. +} QUIC_SEND_FLAGS; + +DEFINE_ENUM_FLAG_OPERATORS(QUIC_SEND_FLAGS) + +typedef enum QUIC_DATAGRAM_SEND_STATE { + QUIC_DATAGRAM_SEND_UNKNOWN, // Not yet sent. 
+    QUIC_DATAGRAM_SEND_SENT, // Sent and awaiting acknowledgment
+    QUIC_DATAGRAM_SEND_LOST_SUSPECT, // Suspected as lost, but still tracked
+    QUIC_DATAGRAM_SEND_LOST_DISCARDED, // Lost and no longer being tracked
+    QUIC_DATAGRAM_SEND_ACKNOWLEDGED, // Acknowledged
+    QUIC_DATAGRAM_SEND_ACKNOWLEDGED_SPURIOUS, // Acknowledged after being suspected lost
+    QUIC_DATAGRAM_SEND_CANCELED, // Canceled before send
+} QUIC_DATAGRAM_SEND_STATE;
+
+//
+// Helper to determine if a datagram's state is final, and no longer tracked
+// by MsQuic.
+//
+#define QUIC_DATAGRAM_SEND_STATE_IS_FINAL(State) \
+    ((State) >= QUIC_DATAGRAM_SEND_LOST_DISCARDED)
+
+typedef enum QUIC_EXECUTION_CONFIG_FLAGS {
+    QUIC_EXECUTION_CONFIG_FLAG_NONE = 0x0000,
+#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES
+    QUIC_EXECUTION_CONFIG_FLAG_QTIP = 0x0001,
+    QUIC_EXECUTION_CONFIG_FLAG_RIO = 0x0002,
+    QUIC_EXECUTION_CONFIG_FLAG_XDP = 0x0004,
+    QUIC_EXECUTION_CONFIG_FLAG_NO_IDEAL_PROC = 0x0008,
+    QUIC_EXECUTION_CONFIG_FLAG_HIGH_PRIORITY = 0x0010,
+#endif
+} QUIC_EXECUTION_CONFIG_FLAGS;
+
+DEFINE_ENUM_FLAG_OPERATORS(QUIC_EXECUTION_CONFIG_FLAGS)
+
+//
+// A custom configuration for thread execution in QUIC.
+//
+typedef struct QUIC_EXECUTION_CONFIG {
+
+    QUIC_EXECUTION_CONFIG_FLAGS Flags;
+    uint32_t PollingIdleTimeoutUs; // Time before a polling thread, with no work to do, sleeps.
+    uint32_t ProcessorCount;
+    _Field_size_(ProcessorCount)
+    uint16_t ProcessorList[1]; // List of processors to use for threads.
+
+} QUIC_EXECUTION_CONFIG;
+
+#define QUIC_EXECUTION_CONFIG_MIN_SIZE \
+    (uint32_t)FIELD_OFFSET(QUIC_EXECUTION_CONFIG, ProcessorList)
+
+typedef struct QUIC_REGISTRATION_CONFIG { // All fields may be NULL/zero.
+    const char* AppName;
+    QUIC_EXECUTION_PROFILE ExecutionProfile;
+} QUIC_REGISTRATION_CONFIG;
+
+typedef
+_IRQL_requires_max_(PASSIVE_LEVEL)
+_Function_class_(QUIC_CREDENTIAL_LOAD_COMPLETE)
+void
+(QUIC_API QUIC_CREDENTIAL_LOAD_COMPLETE)(
+    _In_ HQUIC Configuration,
+    _In_opt_ void* Context,
+    _In_ QUIC_STATUS Status
+    );
+
+typedef QUIC_CREDENTIAL_LOAD_COMPLETE *QUIC_CREDENTIAL_LOAD_COMPLETE_HANDLER;
+
+typedef struct QUIC_CERTIFICATE_HASH {
+    uint8_t ShaHash[20];
+} QUIC_CERTIFICATE_HASH;
+
+typedef struct QUIC_CERTIFICATE_HASH_STORE {
+    QUIC_CERTIFICATE_HASH_STORE_FLAGS Flags;
+    uint8_t ShaHash[20];
+    char StoreName[128];
+} QUIC_CERTIFICATE_HASH_STORE;
+
+typedef struct QUIC_CERTIFICATE_FILE {
+    const char *PrivateKeyFile;
+    const char *CertificateFile;
+} QUIC_CERTIFICATE_FILE;
+
+typedef struct QUIC_CERTIFICATE_FILE_PROTECTED {
+    const char *PrivateKeyFile;
+    const char *CertificateFile;
+    const char *PrivateKeyPassword;
+} QUIC_CERTIFICATE_FILE_PROTECTED;
+
+typedef struct QUIC_CERTIFICATE_PKCS12 {
+    const uint8_t *Asn1Blob;
+    uint32_t Asn1BlobLength;
+    const char *PrivateKeyPassword; // Optional: used if provided.
Ignored if NULL +} QUIC_CERTIFICATE_PKCS12; + +typedef void QUIC_CERTIFICATE; // Platform specific certificate object +typedef void QUIC_CERTIFICATE_CHAIN; // Platform specific certificate chain object + +typedef struct QUIC_CREDENTIAL_CONFIG { + QUIC_CREDENTIAL_TYPE Type; + QUIC_CREDENTIAL_FLAGS Flags; + union { + QUIC_CERTIFICATE_HASH* CertificateHash; + QUIC_CERTIFICATE_HASH_STORE* CertificateHashStore; + QUIC_CERTIFICATE* CertificateContext; + QUIC_CERTIFICATE_FILE* CertificateFile; + QUIC_CERTIFICATE_FILE_PROTECTED* CertificateFileProtected; + QUIC_CERTIFICATE_PKCS12* CertificatePkcs12; + }; + const char* Principal; + void* Reserved; // Currently unused + QUIC_CREDENTIAL_LOAD_COMPLETE_HANDLER AsyncHandler; // Optional + QUIC_ALLOWED_CIPHER_SUITE_FLAGS AllowedCipherSuites;// Optional + const char* CaCertificateFile; // Optional +} QUIC_CREDENTIAL_CONFIG; + +// +// The maximum number of QUIC_TICKET_KEY_CONFIG that can be used at one time. +// +#define QUIC_MAX_TICKET_KEY_COUNT 16 + +// +// TLS New Session Ticket encryption key configuration. +// +typedef struct QUIC_TICKET_KEY_CONFIG { + uint8_t Id[16]; + uint8_t Material[64]; + uint8_t MaterialLength; +} QUIC_TICKET_KEY_CONFIG; + +// +// A single contiguous buffer. +// +typedef struct QUIC_BUFFER { + uint32_t Length; + _Field_size_bytes_(Length) + uint8_t* Buffer; +} QUIC_BUFFER; + +// +// All the available information describing a new incoming connection. +// +typedef struct QUIC_NEW_CONNECTION_INFO { + uint32_t QuicVersion; + const QUIC_ADDR* LocalAddress; + const QUIC_ADDR* RemoteAddress; + uint32_t CryptoBufferLength; + uint16_t ClientAlpnListLength; + uint16_t ServerNameLength; + uint8_t NegotiatedAlpnLength; + _Field_size_bytes_(CryptoBufferLength) + const uint8_t* CryptoBuffer; + _Field_size_bytes_(ClientAlpnListLength) + const uint8_t* ClientAlpnList; + _Field_size_bytes_(NegotiatedAlpnLength) + const uint8_t* NegotiatedAlpn; + _Field_size_bytes_opt_(ServerNameLength) + const char* ServerName; +} QUIC_NEW_CONNECTION_INFO; + +typedef enum QUIC_TLS_PROTOCOL_VERSION { + QUIC_TLS_PROTOCOL_UNKNOWN = 0, + QUIC_TLS_PROTOCOL_1_3 = 0x3000, +} QUIC_TLS_PROTOCOL_VERSION; + +typedef enum QUIC_CIPHER_ALGORITHM { + QUIC_CIPHER_ALGORITHM_NONE = 0, + QUIC_CIPHER_ALGORITHM_AES_128 = 0x660E, + QUIC_CIPHER_ALGORITHM_AES_256 = 0x6610, + QUIC_CIPHER_ALGORITHM_CHACHA20 = 0x6612, // Not supported on Schannel/BCrypt +} QUIC_CIPHER_ALGORITHM; + +typedef enum QUIC_HASH_ALGORITHM { + QUIC_HASH_ALGORITHM_NONE = 0, + QUIC_HASH_ALGORITHM_SHA_256 = 0x800C, + QUIC_HASH_ALGORITHM_SHA_384 = 0x800D, +} QUIC_HASH_ALGORITHM; + +typedef enum QUIC_KEY_EXCHANGE_ALGORITHM { + QUIC_KEY_EXCHANGE_ALGORITHM_NONE = 0, +} QUIC_KEY_EXCHANGE_ALGORITHM; + +typedef enum QUIC_CIPHER_SUITE { + QUIC_CIPHER_SUITE_TLS_AES_128_GCM_SHA256 = 0x1301, + QUIC_CIPHER_SUITE_TLS_AES_256_GCM_SHA384 = 0x1302, + QUIC_CIPHER_SUITE_TLS_CHACHA20_POLY1305_SHA256 = 0x1303, // Not supported on Schannel +} QUIC_CIPHER_SUITE; + +typedef enum QUIC_CONGESTION_CONTROL_ALGORITHM { + QUIC_CONGESTION_CONTROL_ALGORITHM_CUBIC, +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES + QUIC_CONGESTION_CONTROL_ALGORITHM_BBR, +#endif + QUIC_CONGESTION_CONTROL_ALGORITHM_MAX, +} QUIC_CONGESTION_CONTROL_ALGORITHM; + +// +// All the available information describing a handshake. 
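QUIC_CREDENTIAL_CONFIG above is the one structure every application must fill in before a handshake can run, through ConfigurationLoadCredential declared later in this header. A minimal server-side sketch; the helper name and the file paths are hypothetical placeholders, not anything this repository ships:

```c
#include "msquic.h"

// Sketch: load a PEM certificate/key pair into a configuration handle.
// "server.key" and "server.pem" are illustrative paths only.
static QUIC_STATUS LoadServerCredential(const QUIC_API_TABLE* MsQuic, HQUIC Configuration)
{
    QUIC_CERTIFICATE_FILE CertFile = {0};
    CertFile.PrivateKeyFile = "server.key";
    CertFile.CertificateFile = "server.pem";

    QUIC_CREDENTIAL_CONFIG CredConfig = {0};
    CredConfig.Type = QUIC_CREDENTIAL_TYPE_CERTIFICATE_FILE;
    CredConfig.Flags = QUIC_CREDENTIAL_FLAG_NONE; // absence of the CLIENT flag selects server mode
    CredConfig.CertificateFile = &CertFile;

    return MsQuic->ConfigurationLoadCredential(Configuration, &CredConfig);
}
```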
+// +typedef struct QUIC_HANDSHAKE_INFO { + QUIC_TLS_PROTOCOL_VERSION TlsProtocolVersion; + QUIC_CIPHER_ALGORITHM CipherAlgorithm; + int32_t CipherStrength; + QUIC_HASH_ALGORITHM Hash; + int32_t HashStrength; + QUIC_KEY_EXCHANGE_ALGORITHM KeyExchangeAlgorithm; + int32_t KeyExchangeStrength; + QUIC_CIPHER_SUITE CipherSuite; +} QUIC_HANDSHAKE_INFO; + +// +// All statistics available to query about a connection. +// +typedef struct QUIC_STATISTICS { + uint64_t CorrelationId; + uint32_t VersionNegotiation : 1; + uint32_t StatelessRetry : 1; + uint32_t ResumptionAttempted : 1; + uint32_t ResumptionSucceeded : 1; + uint32_t Rtt; // In microseconds + uint32_t MinRtt; // In microseconds + uint32_t MaxRtt; // In microseconds + struct { + uint64_t Start; + uint64_t InitialFlightEnd; // Processed all peer's Initial packets + uint64_t HandshakeFlightEnd; // Processed all peer's Handshake packets + } Timing; + struct { + uint32_t ClientFlight1Bytes; // Sum of TLS payloads + uint32_t ServerFlight1Bytes; // Sum of TLS payloads + uint32_t ClientFlight2Bytes; // Sum of TLS payloads + } Handshake; + struct { + uint16_t PathMtu; // Current path MTU. + uint64_t TotalPackets; // QUIC packets; could be coalesced into fewer UDP datagrams. + uint64_t RetransmittablePackets; + uint64_t SuspectedLostPackets; + uint64_t SpuriousLostPackets; // Actual lost is (SuspectedLostPackets - SpuriousLostPackets) + uint64_t TotalBytes; // Sum of UDP payloads + uint64_t TotalStreamBytes; // Sum of stream payloads + uint32_t CongestionCount; // Number of congestion events + uint32_t PersistentCongestionCount; // Number of persistent congestion events + } Send; + struct { + uint64_t TotalPackets; // QUIC packets; could be coalesced into fewer UDP datagrams. + uint64_t ReorderedPackets; // Packets where packet number is less than highest seen. + uint64_t DroppedPackets; // Includes DuplicatePackets. + uint64_t DuplicatePackets; + uint64_t TotalBytes; // Sum of UDP payloads + uint64_t TotalStreamBytes; // Sum of stream payloads + uint64_t DecryptionFailures; // Count of packet decryption failures. + uint64_t ValidAckFrames; // Count of receive ACK frames. + } Recv; + struct { + uint32_t KeyUpdateCount; + } Misc; +} QUIC_STATISTICS; + +// +// N.B. Consumers of this struct depend on it being the same for 32-bit and +// 64-bit systems. DO NOT include any fields that have different sizes on those +// platforms, such as size_t or pointers. +// +typedef struct QUIC_STATISTICS_V2 { + + uint64_t CorrelationId; + uint32_t VersionNegotiation : 1; + uint32_t StatelessRetry : 1; + uint32_t ResumptionAttempted : 1; + uint32_t ResumptionSucceeded : 1; + uint32_t GreaseBitNegotiated : 1; // Set if we negotiated the GREASE bit. + uint32_t EcnCapable : 1; + uint32_t EncryptionOffloaded : 1; // At least one path successfully offloaded encryption + uint32_t RESERVED : 25; + uint32_t Rtt; // In microseconds + uint32_t MinRtt; // In microseconds + uint32_t MaxRtt; // In microseconds + + uint64_t TimingStart; + uint64_t TimingInitialFlightEnd; // Processed all peer's Initial packets + uint64_t TimingHandshakeFlightEnd; // Processed all peer's Handshake packets + + uint32_t HandshakeClientFlight1Bytes; // Sum of TLS payloads + uint32_t HandshakeServerFlight1Bytes; // Sum of TLS payloads + uint32_t HandshakeClientFlight2Bytes; // Sum of TLS payloads + + uint16_t SendPathMtu; // Current path MTU. + uint64_t SendTotalPackets; // QUIC packets; could be coalesced into fewer UDP datagrams. 
+    uint64_t SendRetransmittablePackets;
+    uint64_t SendSuspectedLostPackets;
+    uint64_t SendSpuriousLostPackets; // Actual lost is (SuspectedLostPackets - SpuriousLostPackets)
+    uint64_t SendTotalBytes; // Sum of UDP payloads
+    uint64_t SendTotalStreamBytes; // Sum of stream payloads
+    uint32_t SendCongestionCount; // Number of congestion events
+    uint32_t SendPersistentCongestionCount; // Number of persistent congestion events
+
+    uint64_t RecvTotalPackets; // QUIC packets; could be coalesced into fewer UDP datagrams.
+    uint64_t RecvReorderedPackets; // Packets where packet number is less than highest seen.
+    uint64_t RecvDroppedPackets; // Includes DuplicatePackets.
+    uint64_t RecvDuplicatePackets;
+    uint64_t RecvTotalBytes; // Sum of UDP payloads
+    uint64_t RecvTotalStreamBytes; // Sum of stream payloads
+    uint64_t RecvDecryptionFailures; // Count of packet decryption failures.
+    uint64_t RecvValidAckFrames; // Count of receive ACK frames.
+
+    uint32_t KeyUpdateCount;
+
+    uint32_t SendCongestionWindow; // Congestion window size
+
+    uint32_t DestCidUpdateCount; // Number of times the destination CID changed.
+
+    uint32_t SendEcnCongestionCount; // Number of congestion events caused by ECN.
+
+    // N.B. New fields must be appended to end
+
+} QUIC_STATISTICS_V2;
+
+#define QUIC_STRUCT_SIZE_THRU_FIELD(Struct, Field) \
+    (FIELD_OFFSET(Struct, Field) + sizeof(((Struct*)0)->Field))
+
+#define QUIC_STATISTICS_V2_SIZE_1 QUIC_STRUCT_SIZE_THRU_FIELD(QUIC_STATISTICS_V2, KeyUpdateCount) // v2.0 final size
+#define QUIC_STATISTICS_V2_SIZE_2 QUIC_STRUCT_SIZE_THRU_FIELD(QUIC_STATISTICS_V2, DestCidUpdateCount) // v2.1 final size
+#define QUIC_STATISTICS_V2_SIZE_3 QUIC_STRUCT_SIZE_THRU_FIELD(QUIC_STATISTICS_V2, SendEcnCongestionCount) // v2.2 final size
+
+typedef struct QUIC_LISTENER_STATISTICS {
+
+    uint64_t TotalAcceptedConnections;
+    uint64_t TotalRejectedConnections;
+
+    uint64_t BindingRecvDroppedPackets;
+
+} QUIC_LISTENER_STATISTICS;
+
+typedef enum QUIC_PERFORMANCE_COUNTERS {
+    QUIC_PERF_COUNTER_CONN_CREATED, // Total connections ever allocated.
+    QUIC_PERF_COUNTER_CONN_HANDSHAKE_FAIL, // Total connections that failed during handshake.
+    QUIC_PERF_COUNTER_CONN_APP_REJECT, // Total connections rejected by the application.
+    QUIC_PERF_COUNTER_CONN_RESUMED, // Total connections resumed.
+    QUIC_PERF_COUNTER_CONN_ACTIVE, // Connections currently allocated.
+    QUIC_PERF_COUNTER_CONN_CONNECTED, // Connections currently in the connected state.
+    QUIC_PERF_COUNTER_CONN_PROTOCOL_ERRORS, // Total connections shutdown with a protocol error.
+    QUIC_PERF_COUNTER_CONN_NO_ALPN, // Total connection attempts with no matching ALPN.
+    QUIC_PERF_COUNTER_STRM_ACTIVE, // Current streams allocated.
+    QUIC_PERF_COUNTER_PKTS_SUSPECTED_LOST, // Total suspected packets lost
+    QUIC_PERF_COUNTER_PKTS_DROPPED, // Total packets dropped for any reason.
+    QUIC_PERF_COUNTER_PKTS_DECRYPTION_FAIL, // Total packets with decryption failures.
+    QUIC_PERF_COUNTER_UDP_RECV, // Total UDP datagrams received.
+    QUIC_PERF_COUNTER_UDP_SEND, // Total UDP datagrams sent.
+    QUIC_PERF_COUNTER_UDP_RECV_BYTES, // Total UDP payload bytes received.
+    QUIC_PERF_COUNTER_UDP_SEND_BYTES, // Total UDP payload bytes sent.
+    QUIC_PERF_COUNTER_UDP_RECV_EVENTS, // Total UDP receive events.
+    QUIC_PERF_COUNTER_UDP_SEND_CALLS, // Total UDP send API calls.
+    QUIC_PERF_COUNTER_APP_SEND_BYTES, // Total bytes sent by applications.
+    QUIC_PERF_COUNTER_APP_RECV_BYTES, // Total bytes received by applications.
+ QUIC_PERF_COUNTER_CONN_QUEUE_DEPTH, // Current connections queued for processing. + QUIC_PERF_COUNTER_CONN_OPER_QUEUE_DEPTH,// Current connection operations queued. + QUIC_PERF_COUNTER_CONN_OPER_QUEUED, // Total connection operations queued ever. + QUIC_PERF_COUNTER_CONN_OPER_COMPLETED, // Total connection operations processed ever. + QUIC_PERF_COUNTER_WORK_OPER_QUEUE_DEPTH,// Current worker operations queued. + QUIC_PERF_COUNTER_WORK_OPER_QUEUED, // Total worker operations queued ever. + QUIC_PERF_COUNTER_WORK_OPER_COMPLETED, // Total worker operations processed ever. + QUIC_PERF_COUNTER_PATH_VALIDATED, // Total path challenges that succeed ever. + QUIC_PERF_COUNTER_PATH_FAILURE, // Total path challenges that fail ever. + QUIC_PERF_COUNTER_SEND_STATELESS_RESET, // Total stateless reset packets sent ever. + QUIC_PERF_COUNTER_SEND_STATELESS_RETRY, // Total stateless retry packets sent ever. + QUIC_PERF_COUNTER_CONN_LOAD_REJECT, // Total connections rejected due to worker load. + QUIC_PERF_COUNTER_MAX, +} QUIC_PERFORMANCE_COUNTERS; + +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES +typedef struct QUIC_VERSION_SETTINGS { + + const uint32_t* AcceptableVersions; + const uint32_t* OfferedVersions; + const uint32_t* FullyDeployedVersions; + uint32_t AcceptableVersionsLength; + uint32_t OfferedVersionsLength; + uint32_t FullyDeployedVersionsLength; + +} QUIC_VERSION_SETTINGS; +#endif + +typedef struct QUIC_GLOBAL_SETTINGS { + union { + uint64_t IsSetFlags; + struct { + uint64_t RetryMemoryLimit : 1; + uint64_t LoadBalancingMode : 1; + uint64_t FixedServerID : 1; + uint64_t RESERVED : 61; + } IsSet; + }; + uint16_t RetryMemoryLimit; + uint16_t LoadBalancingMode; + uint32_t FixedServerID; +} QUIC_GLOBAL_SETTINGS; + +typedef struct QUIC_SETTINGS { + + union { + uint64_t IsSetFlags; + struct { + uint64_t MaxBytesPerKey : 1; + uint64_t HandshakeIdleTimeoutMs : 1; + uint64_t IdleTimeoutMs : 1; + uint64_t MtuDiscoverySearchCompleteTimeoutUs : 1; + uint64_t TlsClientMaxSendBuffer : 1; + uint64_t TlsServerMaxSendBuffer : 1; + uint64_t StreamRecvWindowDefault : 1; + uint64_t StreamRecvBufferDefault : 1; + uint64_t ConnFlowControlWindow : 1; + uint64_t MaxWorkerQueueDelayUs : 1; + uint64_t MaxStatelessOperations : 1; + uint64_t InitialWindowPackets : 1; + uint64_t SendIdleTimeoutMs : 1; + uint64_t InitialRttMs : 1; + uint64_t MaxAckDelayMs : 1; + uint64_t DisconnectTimeoutMs : 1; + uint64_t KeepAliveIntervalMs : 1; + uint64_t CongestionControlAlgorithm : 1; + uint64_t PeerBidiStreamCount : 1; + uint64_t PeerUnidiStreamCount : 1; + uint64_t MaxBindingStatelessOperations : 1; + uint64_t StatelessOperationExpirationMs : 1; + uint64_t MinimumMtu : 1; + uint64_t MaximumMtu : 1; + uint64_t SendBufferingEnabled : 1; + uint64_t PacingEnabled : 1; + uint64_t MigrationEnabled : 1; + uint64_t DatagramReceiveEnabled : 1; + uint64_t ServerResumptionLevel : 1; + uint64_t MaxOperationsPerDrain : 1; + uint64_t MtuDiscoveryMissingProbeCount : 1; + uint64_t DestCidUpdateIdleTimeoutMs : 1; + uint64_t GreaseQuicBitEnabled : 1; + uint64_t EcnEnabled : 1; + uint64_t HyStartEnabled : 1; + uint64_t StreamRecvWindowBidiLocalDefault : 1; + uint64_t StreamRecvWindowBidiRemoteDefault : 1; + uint64_t StreamRecvWindowUnidiDefault : 1; +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES + uint64_t EncryptionOffloadAllowed : 1; + uint64_t ReliableResetEnabled : 1; + uint64_t OneWayDelayEnabled : 1; + uint64_t NetStatsEventEnabled : 1; + uint64_t StreamMultiReceiveEnabled : 1; + uint64_t RESERVED : 21; +#else + uint64_t RESERVED : 26; +#endif + } IsSet; + 
}; + + uint64_t MaxBytesPerKey; + uint64_t HandshakeIdleTimeoutMs; + uint64_t IdleTimeoutMs; + uint64_t MtuDiscoverySearchCompleteTimeoutUs; + uint32_t TlsClientMaxSendBuffer; + uint32_t TlsServerMaxSendBuffer; + uint32_t StreamRecvWindowDefault; + uint32_t StreamRecvBufferDefault; + uint32_t ConnFlowControlWindow; + uint32_t MaxWorkerQueueDelayUs; + uint32_t MaxStatelessOperations; + uint32_t InitialWindowPackets; + uint32_t SendIdleTimeoutMs; + uint32_t InitialRttMs; + uint32_t MaxAckDelayMs; + uint32_t DisconnectTimeoutMs; + uint32_t KeepAliveIntervalMs; + uint16_t CongestionControlAlgorithm; // QUIC_CONGESTION_CONTROL_ALGORITHM + uint16_t PeerBidiStreamCount; + uint16_t PeerUnidiStreamCount; + uint16_t MaxBindingStatelessOperations; + uint16_t StatelessOperationExpirationMs; + uint16_t MinimumMtu; + uint16_t MaximumMtu; + uint8_t SendBufferingEnabled : 1; + uint8_t PacingEnabled : 1; + uint8_t MigrationEnabled : 1; + uint8_t DatagramReceiveEnabled : 1; + uint8_t ServerResumptionLevel : 2; // QUIC_SERVER_RESUMPTION_LEVEL + uint8_t GreaseQuicBitEnabled : 1; + uint8_t EcnEnabled : 1; + uint8_t MaxOperationsPerDrain; + uint8_t MtuDiscoveryMissingProbeCount; + uint32_t DestCidUpdateIdleTimeoutMs; + union { + uint64_t Flags; + struct { + uint64_t HyStartEnabled : 1; +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES + uint64_t EncryptionOffloadAllowed : 1; + uint64_t ReliableResetEnabled : 1; + uint64_t OneWayDelayEnabled : 1; + uint64_t NetStatsEventEnabled : 1; + uint64_t StreamMultiReceiveEnabled : 1; + uint64_t ReservedFlags : 58; +#else + uint64_t ReservedFlags : 63; +#endif + }; + }; + uint32_t StreamRecvWindowBidiLocalDefault; + uint32_t StreamRecvWindowBidiRemoteDefault; + uint32_t StreamRecvWindowUnidiDefault; + +} QUIC_SETTINGS; + +// +// This struct enables QUIC applications to support SSLKEYLOGFILE +// for debugging packet captures with e.g. Wireshark. +// + +#define QUIC_TLS_SECRETS_MAX_SECRET_LEN 64 +typedef struct QUIC_TLS_SECRETS { + uint8_t SecretLength; + struct { + uint8_t ClientRandom : 1; + uint8_t ClientEarlyTrafficSecret : 1; + uint8_t ClientHandshakeTrafficSecret : 1; + uint8_t ServerHandshakeTrafficSecret : 1; + uint8_t ClientTrafficSecret0 : 1; + uint8_t ServerTrafficSecret0 : 1; + } IsSet; + uint8_t ClientRandom[32]; + uint8_t ClientEarlyTrafficSecret[QUIC_TLS_SECRETS_MAX_SECRET_LEN]; + uint8_t ClientHandshakeTrafficSecret[QUIC_TLS_SECRETS_MAX_SECRET_LEN]; + uint8_t ServerHandshakeTrafficSecret[QUIC_TLS_SECRETS_MAX_SECRET_LEN]; + uint8_t ClientTrafficSecret0[QUIC_TLS_SECRETS_MAX_SECRET_LEN]; + uint8_t ServerTrafficSecret0[QUIC_TLS_SECRETS_MAX_SECRET_LEN]; +} QUIC_TLS_SECRETS; + +typedef struct QUIC_STREAM_STATISTICS { + uint64_t ConnBlockedBySchedulingUs; + uint64_t ConnBlockedByPacingUs; + uint64_t ConnBlockedByAmplificationProtUs; + uint64_t ConnBlockedByCongestionControlUs; + uint64_t ConnBlockedByFlowControlUs; + uint64_t StreamBlockedByIdFlowControlUs; + uint64_t StreamBlockedByFlowControlUs; + uint64_t StreamBlockedByAppUs; +} QUIC_STREAM_STATISTICS; + +// +// Functions for associating application contexts with QUIC handles. MsQuic +// provides no explicit synchronization between parallel calls to these +// functions. +// + +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +void +(QUIC_API * QUIC_SET_CONTEXT_FN)( + _In_ _Pre_defensive_ HQUIC Handle, + _In_opt_ void* Context + ); + +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +void* +(QUIC_API * QUIC_GET_CONTEXT_FN)( + _In_ _Pre_defensive_ HQUIC Handle + ); + +// +// Sets the event handler for the QUIC handle. 
The type of the handler must be +// appropriate for the type of the handle. MsQuic provides no explicit +// synchronization between parallel calls to this function or the ones above. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +void +(QUIC_API * QUIC_SET_CALLBACK_HANDLER_FN)( + _In_ _Pre_defensive_ HQUIC Handle, + _In_ void* Handler, + _In_opt_ void* Context + ); + +// +// Get and Set parameters on a handle. +// + +#define QUIC_PARAM_PREFIX_GLOBAL 0x01000000 +#define QUIC_PARAM_PREFIX_REGISTRATION 0x02000000 +#define QUIC_PARAM_PREFIX_CONFIGURATION 0x03000000 +#define QUIC_PARAM_PREFIX_LISTENER 0x04000000 +#define QUIC_PARAM_PREFIX_CONNECTION 0x05000000 +#define QUIC_PARAM_PREFIX_TLS 0x06000000 +#define QUIC_PARAM_PREFIX_TLS_SCHANNEL 0x07000000 +#define QUIC_PARAM_PREFIX_STREAM 0x08000000 + +#define QUIC_PARAM_HIGH_PRIORITY 0x40000000 // Combine with any param to make it high priority. + +#define QUIC_PARAM_IS_GLOBAL(Param) ((Param & 0x3F000000) == QUIC_PARAM_PREFIX_GLOBAL) + +// +// Parameters for Global. +// +#define QUIC_PARAM_GLOBAL_RETRY_MEMORY_PERCENT 0x01000000 // uint16_t +#define QUIC_PARAM_GLOBAL_SUPPORTED_VERSIONS 0x01000001 // uint32_t[] - network byte order +#define QUIC_PARAM_GLOBAL_LOAD_BALACING_MODE 0x01000002 // uint16_t - QUIC_LOAD_BALANCING_MODE +#define QUIC_PARAM_GLOBAL_PERF_COUNTERS 0x01000003 // uint64_t[] - Array size is QUIC_PERF_COUNTER_MAX +#define QUIC_PARAM_GLOBAL_LIBRARY_VERSION 0x01000004 // uint32_t[4] +#define QUIC_PARAM_GLOBAL_SETTINGS 0x01000005 // QUIC_SETTINGS +#define QUIC_PARAM_GLOBAL_GLOBAL_SETTINGS 0x01000006 // QUIC_GLOBAL_SETTINGS +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES +#define QUIC_PARAM_GLOBAL_VERSION_SETTINGS 0x01000007 // QUIC_VERSION_SETTINGS +#endif +#define QUIC_PARAM_GLOBAL_LIBRARY_GIT_HASH 0x01000008 // char[64] +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES +#define QUIC_PARAM_GLOBAL_EXECUTION_CONFIG 0x01000009 // QUIC_EXECUTION_CONFIG +#endif +#define QUIC_PARAM_GLOBAL_TLS_PROVIDER 0x0100000A // QUIC_TLS_PROVIDER +#define QUIC_PARAM_GLOBAL_STATELESS_RESET_KEY 0x0100000B // uint8_t[] - Array size is QUIC_STATELESS_RESET_KEY_LENGTH +// +// Parameters for Registration. +// + +// +// Parameters for Configuration. +// +#define QUIC_PARAM_CONFIGURATION_SETTINGS 0x03000000 // QUIC_SETTINGS +#define QUIC_PARAM_CONFIGURATION_TICKET_KEYS 0x03000001 // QUIC_TICKET_KEY_CONFIG[] +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES +#define QUIC_PARAM_CONFIGURATION_VERSION_SETTINGS 0x03000002 // QUIC_VERSION_SETTINGS +#endif +// Schannel-specific Configuration parameter +typedef struct QUIC_SCHANNEL_CREDENTIAL_ATTRIBUTE_W { + unsigned long Attribute; + unsigned long BufferLength; + void* Buffer; +} QUIC_SCHANNEL_CREDENTIAL_ATTRIBUTE_W; +#define QUIC_PARAM_CONFIGURATION_SCHANNEL_CREDENTIAL_ATTRIBUTE_W 0x03000003 // QUIC_SCHANNEL_CREDENTIAL_ATTRIBUTE_W + +// +// Parameters for Listener. +// +#define QUIC_PARAM_LISTENER_LOCAL_ADDRESS 0x04000000 // QUIC_ADDR +#define QUIC_PARAM_LISTENER_STATS 0x04000001 // QUIC_LISTENER_STATISTICS +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES +#define QUIC_PARAM_LISTENER_CIBIR_ID 0x04000002 // uint8_t[] {offset, id[]} +#endif + +// +// Parameters for Connection. 
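Before moving on to the connection parameters: the prefix scheme above implies that global parameters are read and written with a NULL handle. A short sketch of querying one of the global values defined above, assuming an API table previously returned by MsQuicOpen2; the helper name is hypothetical:

```c
#include <stdio.h>
#include "msquic.h"

// Sketch: read the library version through the global parameter space.
static void PrintLibraryVersion(const QUIC_API_TABLE* MsQuic)
{
    uint32_t Version[4];
    uint32_t BufferLength = sizeof(Version);
    if (QUIC_SUCCEEDED(MsQuic->GetParam(
            NULL,                              // global parameters take no handle
            QUIC_PARAM_GLOBAL_LIBRARY_VERSION,
            &BufferLength,
            Version))) {
        printf("msquic %u.%u.%u.%u\n", Version[0], Version[1], Version[2], Version[3]);
    }
}
```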
+// +#define QUIC_PARAM_CONN_QUIC_VERSION 0x05000000 // uint32_t +#define QUIC_PARAM_CONN_LOCAL_ADDRESS 0x05000001 // QUIC_ADDR +#define QUIC_PARAM_CONN_REMOTE_ADDRESS 0x05000002 // QUIC_ADDR +#define QUIC_PARAM_CONN_IDEAL_PROCESSOR 0x05000003 // uint16_t +#define QUIC_PARAM_CONN_SETTINGS 0x05000004 // QUIC_SETTINGS +#define QUIC_PARAM_CONN_STATISTICS 0x05000005 // QUIC_STATISTICS +#define QUIC_PARAM_CONN_STATISTICS_PLAT 0x05000006 // QUIC_STATISTICS +#define QUIC_PARAM_CONN_SHARE_UDP_BINDING 0x05000007 // uint8_t (BOOLEAN) +#define QUIC_PARAM_CONN_LOCAL_BIDI_STREAM_COUNT 0x05000008 // uint16_t +#define QUIC_PARAM_CONN_LOCAL_UNIDI_STREAM_COUNT 0x05000009 // uint16_t +#define QUIC_PARAM_CONN_MAX_STREAM_IDS 0x0500000A // uint64_t[4] +#define QUIC_PARAM_CONN_CLOSE_REASON_PHRASE 0x0500000B // char[] +#define QUIC_PARAM_CONN_STREAM_SCHEDULING_SCHEME 0x0500000C // QUIC_STREAM_SCHEDULING_SCHEME +#define QUIC_PARAM_CONN_DATAGRAM_RECEIVE_ENABLED 0x0500000D // uint8_t (BOOLEAN) +#define QUIC_PARAM_CONN_DATAGRAM_SEND_ENABLED 0x0500000E // uint8_t (BOOLEAN) +#ifdef QUIC_API_ENABLE_INSECURE_FEATURES +#define QUIC_PARAM_CONN_DISABLE_1RTT_ENCRYPTION 0x0500000F // uint8_t (BOOLEAN) +#endif +#define QUIC_PARAM_CONN_RESUMPTION_TICKET 0x05000010 // uint8_t[] +#define QUIC_PARAM_CONN_PEER_CERTIFICATE_VALID 0x05000011 // uint8_t (BOOLEAN) +#define QUIC_PARAM_CONN_LOCAL_INTERFACE 0x05000012 // uint32_t +#define QUIC_PARAM_CONN_TLS_SECRETS 0x05000013 // QUIC_TLS_SECRETS (SSLKEYLOGFILE compatible) +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES +#define QUIC_PARAM_CONN_VERSION_SETTINGS 0x05000014 // QUIC_VERSION_SETTINGS +#define QUIC_PARAM_CONN_CIBIR_ID 0x05000015 // uint8_t[] {offset, id[]} +#endif +#define QUIC_PARAM_CONN_STATISTICS_V2 0x05000016 // QUIC_STATISTICS_V2 +#define QUIC_PARAM_CONN_STATISTICS_V2_PLAT 0x05000017 // QUIC_STATISTICS_V2 +#define QUIC_PARAM_CONN_ORIG_DEST_CID 0x05000018 // uint8_t[] + +// +// Parameters for TLS. +// +#define QUIC_PARAM_TLS_HANDSHAKE_INFO 0x06000000 // QUIC_HANDSHAKE_INFO +#define QUIC_PARAM_TLS_NEGOTIATED_ALPN 0x06000001 // uint8_t[] (max 255 bytes) + +#ifdef WIN32 // Schannel specific TLS parameters +typedef struct QUIC_SCHANNEL_CONTEXT_ATTRIBUTE_W { + unsigned long Attribute; + void* Buffer; +} QUIC_SCHANNEL_CONTEXT_ATTRIBUTE_W; +#define QUIC_PARAM_TLS_SCHANNEL_CONTEXT_ATTRIBUTE_W 0x07000000 // QUIC_SCHANNEL_CONTEXT_ATTRIBUTE_W +typedef struct QUIC_SCHANNEL_CONTEXT_ATTRIBUTE_EX_W { + unsigned long Attribute; + unsigned long BufferLength; + void* Buffer; +} QUIC_SCHANNEL_CONTEXT_ATTRIBUTE_EX_W; +#define QUIC_PARAM_TLS_SCHANNEL_CONTEXT_ATTRIBUTE_EX_W 0x07000001 // QUIC_SCHANNEL_CONTEXT_ATTRIBUTE_EX_W +#define QUIC_PARAM_TLS_SCHANNEL_SECURITY_CONTEXT_TOKEN 0x07000002 // HANDLE +#endif + +// +// Parameters for Stream. 
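One of the connection parameters above, QUIC_PARAM_CONN_TLS_SECRETS, pairs with the QUIC_TLS_SECRETS struct defined earlier in this header: the application registers a buffer before starting the connection, and MsQuic fills it with the handshake secrets for SSLKEYLOGFILE-style debugging. A hedged sketch; the helper name is illustrative:

```c
#include "msquic.h"

// Sketch: opt a connection into TLS secret capture for packet-capture
// debugging. The QUIC_TLS_SECRETS storage must outlive the connection,
// and the parameter must be set before ConnectionStart.
static QUIC_TLS_SECRETS TlsSecrets;

static QUIC_STATUS EnableKeyLogging(const QUIC_API_TABLE* MsQuic, HQUIC Connection)
{
    return MsQuic->SetParam(
        Connection,
        QUIC_PARAM_CONN_TLS_SECRETS,
        sizeof(TlsSecrets),
        &TlsSecrets);
}
```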
+// +#define QUIC_PARAM_STREAM_ID 0x08000000 // QUIC_UINT62 +#define QUIC_PARAM_STREAM_0RTT_LENGTH 0x08000001 // uint64_t +#define QUIC_PARAM_STREAM_IDEAL_SEND_BUFFER_SIZE 0x08000002 // uint64_t - bytes +#define QUIC_PARAM_STREAM_PRIORITY 0x08000003 // uint16_t - 0 (low) to 0xFFFF (high) - 0x7FFF (default) +#define QUIC_PARAM_STREAM_STATISTICS 0X08000004 // QUIC_STREAM_STATISTICS +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES +#define QUIC_PARAM_STREAM_RELIABLE_OFFSET 0x08000005 // uint64_t +#endif + +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_SET_PARAM_FN)( + _When_(QUIC_PARAM_IS_GLOBAL(Param), _Reserved_) + _When_(!QUIC_PARAM_IS_GLOBAL(Param), _In_ _Pre_defensive_) + HQUIC Handle, + _In_ uint32_t Param, + _In_ uint32_t BufferLength, + _In_reads_bytes_(BufferLength) + const void* Buffer + ); + +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_GET_PARAM_FN)( + _When_(QUIC_PARAM_IS_GLOBAL(Param), _Reserved_) + _When_(!QUIC_PARAM_IS_GLOBAL(Param), _In_ _Pre_defensive_) + HQUIC Handle, + _In_ uint32_t Param, + _Inout_ _Pre_defensive_ uint32_t* BufferLength, + _Out_writes_bytes_opt_(*BufferLength) + void* Buffer + ); + +// +// Registration Context Interface. +// + +// +// Opens a new registration. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_REGISTRATION_OPEN_FN)( + _In_opt_ const QUIC_REGISTRATION_CONFIG* Config, + _Outptr_ _At_(*Registration, __drv_allocatesMem(Mem)) _Pre_defensive_ + HQUIC* Registration + ); + +// +// Closes the registration. This function synchronizes the cleanup of all +// child objects. It does this by blocking until all those child objects have +// been closed by the application. +// N.B. This function will deadlock if called in any MsQuic callbacks. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +void +(QUIC_API * QUIC_REGISTRATION_CLOSE_FN)( + _In_ _Pre_defensive_ __drv_freesMem(Mem) + HQUIC Registration + ); + +// +// Calls shutdown for all connections in this registration. Don't call on a +// MsQuic callback thread or it might deadlock. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +void +(QUIC_API * QUIC_REGISTRATION_SHUTDOWN_FN)( + _In_ _Pre_defensive_ HQUIC Registration, + _In_ QUIC_CONNECTION_SHUTDOWN_FLAGS Flags, + _In_ _Pre_defensive_ QUIC_UINT62 ErrorCode // Application defined error code + ); + +// +// Configuration Interface. +// + +// +// Opens a new configuration. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONFIGURATION_OPEN_FN)( + _In_ _Pre_defensive_ HQUIC Registration, + _In_reads_(AlpnBufferCount) _Pre_defensive_ + const QUIC_BUFFER* const AlpnBuffers, + _In_range_(>, 0) uint32_t AlpnBufferCount, + _In_reads_bytes_opt_(SettingsSize) + const QUIC_SETTINGS* Settings, + _In_ uint32_t SettingsSize, + _In_opt_ void* Context, + _Outptr_ _At_(*Configuration, __drv_allocatesMem(Mem)) _Pre_defensive_ + HQUIC* Configuration + ); + +// +// Closes an existing configuration. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +void +(QUIC_API * QUIC_CONFIGURATION_CLOSE_FN)( + _In_ _Pre_defensive_ __drv_freesMem(Mem) + HQUIC Configuration + ); + +// +// Loads the credentials based on the input configuration. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONFIGURATION_LOAD_CREDENTIAL_FN)( + _In_ _Pre_defensive_ HQUIC Configuration, + _In_ _Pre_defensive_ const QUIC_CREDENTIAL_CONFIG* CredConfig + ); + +// +// Listener Context Interface. 
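Taken together, the registration and configuration interfaces above imply a fixed startup order: open the library, open a registration, then open a configuration with at least one ALPN. A condensed sketch under stated assumptions (the app name and ALPN string are placeholders, and error unwinding is elided; a real caller closes the handles in reverse order and finishes with MsQuicClose):

```c
#include "msquic.h"

// Sketch: the canonical MsQuic bootstrap sequence.
static QUIC_STATUS Bootstrap(void)
{
    const QUIC_API_TABLE* MsQuic = NULL;
    HQUIC Registration = NULL;
    HQUIC Configuration = NULL;
    const QUIC_REGISTRATION_CONFIG RegConfig = { "example-app", QUIC_EXECUTION_PROFILE_LOW_LATENCY };
    const QUIC_BUFFER Alpn = { 4, (uint8_t*)"test" }; // placeholder ALPN

    QUIC_STATUS Status = MsQuicOpen2(&MsQuic);
    if (QUIC_FAILED(Status)) return Status;

    Status = MsQuic->RegistrationOpen(&RegConfig, &Registration);
    if (QUIC_FAILED(Status)) return Status;

    return MsQuic->ConfigurationOpen(
        Registration, &Alpn, 1, NULL, 0, NULL, &Configuration);
}
```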
+// + +typedef enum QUIC_LISTENER_EVENT_TYPE { + QUIC_LISTENER_EVENT_NEW_CONNECTION = 0, + QUIC_LISTENER_EVENT_STOP_COMPLETE = 1, +} QUIC_LISTENER_EVENT_TYPE; + +typedef struct QUIC_LISTENER_EVENT { + QUIC_LISTENER_EVENT_TYPE Type; + union { + struct { + const QUIC_NEW_CONNECTION_INFO* Info; + HQUIC Connection; + } NEW_CONNECTION; + struct { + BOOLEAN AppCloseInProgress : 1; + BOOLEAN RESERVED : 7; + } STOP_COMPLETE; + }; +} QUIC_LISTENER_EVENT; + +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +_Function_class_(QUIC_LISTENER_CALLBACK) +QUIC_STATUS +(QUIC_API QUIC_LISTENER_CALLBACK)( + _In_ HQUIC Listener, + _In_opt_ void* Context, + _Inout_ QUIC_LISTENER_EVENT* Event + ); + +typedef QUIC_LISTENER_CALLBACK *QUIC_LISTENER_CALLBACK_HANDLER; + +// +// Opens a new listener. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_LISTENER_OPEN_FN)( + _In_ _Pre_defensive_ HQUIC Registration, + _In_ _Pre_defensive_ QUIC_LISTENER_CALLBACK_HANDLER Handler, + _In_opt_ void* Context, + _Outptr_ _At_(*Listener, __drv_allocatesMem(Mem)) _Pre_defensive_ + HQUIC* Listener + ); + +// +// Closes an existing listener. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +void +(QUIC_API * QUIC_LISTENER_CLOSE_FN)( + _In_ _Pre_defensive_ __drv_freesMem(Mem) + HQUIC Listener + ); + +// +// Starts the listener processing incoming connections. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_LISTENER_START_FN)( + _In_ _Pre_defensive_ HQUIC Listener, + _In_reads_(AlpnBufferCount) _Pre_defensive_ + const QUIC_BUFFER* const AlpnBuffers, + _In_range_(>, 0) uint32_t AlpnBufferCount, + _In_opt_ const QUIC_ADDR* LocalAddress + ); + +// +// Asynchronously stops the listener from processing incoming connections. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +void +(QUIC_API * QUIC_LISTENER_STOP_FN)( + _In_ _Pre_defensive_ HQUIC Listener + ); + +// +// Connections +// + +typedef enum QUIC_CONNECTION_EVENT_TYPE { + QUIC_CONNECTION_EVENT_CONNECTED = 0, + QUIC_CONNECTION_EVENT_SHUTDOWN_INITIATED_BY_TRANSPORT = 1, // The transport started the shutdown process. + QUIC_CONNECTION_EVENT_SHUTDOWN_INITIATED_BY_PEER = 2, // The peer application started the shutdown process. + QUIC_CONNECTION_EVENT_SHUTDOWN_COMPLETE = 3, // Ready for the handle to be closed. + QUIC_CONNECTION_EVENT_LOCAL_ADDRESS_CHANGED = 4, + QUIC_CONNECTION_EVENT_PEER_ADDRESS_CHANGED = 5, + QUIC_CONNECTION_EVENT_PEER_STREAM_STARTED = 6, + QUIC_CONNECTION_EVENT_STREAMS_AVAILABLE = 7, + QUIC_CONNECTION_EVENT_PEER_NEEDS_STREAMS = 8, + QUIC_CONNECTION_EVENT_IDEAL_PROCESSOR_CHANGED = 9, + QUIC_CONNECTION_EVENT_DATAGRAM_STATE_CHANGED = 10, + QUIC_CONNECTION_EVENT_DATAGRAM_RECEIVED = 11, + QUIC_CONNECTION_EVENT_DATAGRAM_SEND_STATE_CHANGED = 12, + QUIC_CONNECTION_EVENT_RESUMED = 13, // Server-only; provides resumption data, if any. + QUIC_CONNECTION_EVENT_RESUMPTION_TICKET_RECEIVED = 14, // Client-only; provides ticket to persist, if any. + QUIC_CONNECTION_EVENT_PEER_CERTIFICATE_RECEIVED = 15, // Only with QUIC_CREDENTIAL_FLAG_INDICATE_CERTIFICATE_RECEIVED set +#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES + QUIC_CONNECTION_EVENT_RELIABLE_RESET_NEGOTIATED = 16, // Only indicated if QUIC_SETTINGS.ReliableResetEnabled is TRUE. + QUIC_CONNECTION_EVENT_ONE_WAY_DELAY_NEGOTIATED = 17, // Only indicated if QUIC_SETTINGS.OneWayDelayEnabled is TRUE. + QUIC_CONNECTION_EVENT_NETWORK_STATISTICS = 18, // Only indicated if QUIC_SETTINGS.EnableNetStatsEvent is TRUE. 
+#endif
+} QUIC_CONNECTION_EVENT_TYPE;
+
+typedef struct QUIC_CONNECTION_EVENT {
+    QUIC_CONNECTION_EVENT_TYPE Type;
+    union {
+        struct {
+            BOOLEAN SessionResumed;
+            _Field_range_(>, 0)
+            uint8_t NegotiatedAlpnLength;
+            _Field_size_(NegotiatedAlpnLength)
+            const uint8_t* NegotiatedAlpn;
+        } CONNECTED;
+        struct {
+            QUIC_STATUS Status;
+            QUIC_UINT62 ErrorCode; // Wire format error code.
+        } SHUTDOWN_INITIATED_BY_TRANSPORT;
+        struct {
+            QUIC_UINT62 ErrorCode;
+        } SHUTDOWN_INITIATED_BY_PEER;
+        struct {
+            BOOLEAN HandshakeCompleted : 1;
+            BOOLEAN PeerAcknowledgedShutdown : 1;
+            BOOLEAN AppCloseInProgress : 1;
+        } SHUTDOWN_COMPLETE;
+        struct {
+            const QUIC_ADDR* Address;
+        } LOCAL_ADDRESS_CHANGED;
+        struct {
+            const QUIC_ADDR* Address;
+        } PEER_ADDRESS_CHANGED;
+        struct {
+            HQUIC Stream;
+            QUIC_STREAM_OPEN_FLAGS Flags;
+        } PEER_STREAM_STARTED;
+        struct {
+            uint16_t BidirectionalCount;
+            uint16_t UnidirectionalCount;
+        } STREAMS_AVAILABLE;
+        struct {
+            BOOLEAN Bidirectional;
+        } PEER_NEEDS_STREAMS;
+        struct {
+            uint16_t IdealProcessor;
+            uint16_t PartitionIndex;
+        } IDEAL_PROCESSOR_CHANGED;
+        struct {
+            BOOLEAN SendEnabled;
+            uint16_t MaxSendLength;
+        } DATAGRAM_STATE_CHANGED;
+        struct {
+            const QUIC_BUFFER* Buffer;
+            QUIC_RECEIVE_FLAGS Flags;
+        } DATAGRAM_RECEIVED;
+        struct {
+            /* inout */ void* ClientContext;
+            QUIC_DATAGRAM_SEND_STATE State;
+        } DATAGRAM_SEND_STATE_CHANGED;
+        struct {
+            uint16_t ResumptionStateLength;
+            const uint8_t* ResumptionState;
+        } RESUMED;
+        struct {
+            _Field_range_(>, 0)
+            uint32_t ResumptionTicketLength;
+            _Field_size_(ResumptionTicketLength)
+            const uint8_t* ResumptionTicket;
+        } RESUMPTION_TICKET_RECEIVED;
+        struct {
+            QUIC_CERTIFICATE* Certificate; // Peer certificate (platform specific). Valid only during QUIC_CONNECTION_EVENT_PEER_CERTIFICATE_RECEIVED callback.
+            uint32_t DeferredErrorFlags; // Bit flag of errors (only valid with QUIC_CREDENTIAL_FLAG_DEFER_CERTIFICATE_VALIDATION) - Schannel only, zero otherwise.
+            QUIC_STATUS DeferredStatus; // Most severe error status (only valid with QUIC_CREDENTIAL_FLAG_DEFER_CERTIFICATE_VALIDATION)
+            QUIC_CERTIFICATE_CHAIN* Chain; // Peer certificate chain (platform specific). Valid only during QUIC_CONNECTION_EVENT_PEER_CERTIFICATE_RECEIVED callback.
+        } PEER_CERTIFICATE_RECEIVED;
+#ifdef QUIC_API_ENABLE_PREVIEW_FEATURES
+        struct {
+            BOOLEAN IsNegotiated;
+        } RELIABLE_RESET_NEGOTIATED;
+        struct {
+            BOOLEAN SendNegotiated; // TRUE if sending one-way delay timestamps is negotiated.
+            BOOLEAN ReceiveNegotiated; // TRUE if receiving one-way delay timestamps is negotiated.
+        } ONE_WAY_DELAY_NEGOTIATED;
+        struct {
+            uint32_t BytesInFlight; // Bytes that were sent on the wire, but not yet acked
+            uint64_t PostedBytes; // Total bytes queued, but not yet acked. These may contain sent bytes that may have potentially been lost too.
+            uint64_t IdealBytes; // Ideal number of bytes required to be available to avoid limiting throughput
+            uint64_t SmoothedRTT; // Smoothed RTT value
+            uint32_t CongestionWindow; // Congestion Window
+            uint64_t Bandwidth; // Estimated bandwidth
+        } NETWORK_STATISTICS;
+#endif
+    };
+} QUIC_CONNECTION_EVENT;
+
+typedef
+_IRQL_requires_max_(PASSIVE_LEVEL)
+_Function_class_(QUIC_CONNECTION_CALLBACK)
+QUIC_STATUS
+(QUIC_API QUIC_CONNECTION_CALLBACK)(
+    _In_ HQUIC Connection,
+    _In_opt_ void* Context,
+    _Inout_ QUIC_CONNECTION_EVENT* Event
+    );
+
+typedef QUIC_CONNECTION_CALLBACK *QUIC_CONNECTION_CALLBACK_HANDLER;
+
+//
+// Opens a new connection.
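The event union above is what every connection callback switches over. A minimal client-side sketch, showing only the two events every application has to handle; the callback name is hypothetical:

```c
#include "msquic.h"

// Sketch: minimal client connection callback. SHUTDOWN_COMPLETE is always
// the final event delivered for a connection.
static QUIC_STATUS QUIC_API ClientConnectionCallback(
    HQUIC Connection, void* Context, QUIC_CONNECTION_EVENT* Event)
{
    (void)Connection; (void)Context;
    switch (Event->Type) {
    case QUIC_CONNECTION_EVENT_CONNECTED:
        // Handshake finished; streams may be opened from here on.
        break;
    case QUIC_CONNECTION_EVENT_SHUTDOWN_COMPLETE:
        // Last event for this connection; the handle may now be closed
        // via ConnectionClose unless the app is already doing so.
        break;
    default:
        break;
    }
    return QUIC_STATUS_SUCCESS;
}
```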
+// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONNECTION_OPEN_FN)( + _In_ _Pre_defensive_ HQUIC Registration, + _In_ _Pre_defensive_ QUIC_CONNECTION_CALLBACK_HANDLER Handler, + _In_opt_ void* Context, + _Outptr_ _At_(*Connection, __drv_allocatesMem(Mem)) _Pre_defensive_ + HQUIC* Connection + ); + +// +// Closes an existing connection. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +void +(QUIC_API * QUIC_CONNECTION_CLOSE_FN)( + _In_ _Pre_defensive_ __drv_freesMem(Mem) + HQUIC Connection + ); + +// +// Starts the shutdown process on the connection. This immediately and silently +// shuts down any open streams; which will trigger callbacks for +// QUIC_CONNECTION_EVENT_STREAM_CLOSED events. Does nothing if already shutdown. +// Can be passed either a connection or stream handle. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +void +(QUIC_API * QUIC_CONNECTION_SHUTDOWN_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_ QUIC_CONNECTION_SHUTDOWN_FLAGS Flags, + _In_ _Pre_defensive_ QUIC_UINT62 ErrorCode // Application defined error code + ); + +// +// Uses the QUIC (client) handle to start a connection attempt to the +// remote server. Can be passed either a connection or stream handle. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONNECTION_START_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_ _Pre_defensive_ HQUIC Configuration, + _In_ QUIC_ADDRESS_FAMILY Family, + _In_reads_or_z_opt_(QUIC_MAX_SNI_LENGTH) + const char* ServerName, + _In_ uint16_t ServerPort // Host byte order + ); + +// +// Sets the (server-side) configuration handle for the connection. This must be +// called on an accepted connection in order to proceed with the QUIC handshake. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONNECTION_SET_CONFIGURATION_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_ _Pre_defensive_ HQUIC Configuration + ); + +// +// Uses the QUIC (server) handle to send a resumption ticket to the remote +// client, optionally with app-specific data useful during resumption. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONNECTION_SEND_RESUMPTION_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_ QUIC_SEND_RESUMPTION_FLAGS Flags, + _In_ uint16_t DataLength, + _In_reads_bytes_opt_(DataLength) + const uint8_t* ResumptionData + ); + +// +// Uses the QUIC (server) handle to complete resumption ticket validation. +// This must be called after server app handles ticket validation and then +// return QUIC_STATUS_PENDING. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONNECTION_COMP_RESUMPTION_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_ BOOLEAN Result + ); + +// +// Uses the QUIC (client) handle to complete certificate validation. +// This must be called after client app handles certificate validation +// and then return QUIC_STATUS_PENDING. The TlsAlert value is ignored if Result +// equals TRUE (recommend just pass QUIC_TLS_ALERT_CODE_SUCCESS). 
+// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_CONNECTION_COMP_CERT_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_ BOOLEAN Result, + _In_ QUIC_TLS_ALERT_CODES TlsAlert + ); + +// +// Streams +// + +typedef enum QUIC_STREAM_EVENT_TYPE { + QUIC_STREAM_EVENT_START_COMPLETE = 0, + QUIC_STREAM_EVENT_RECEIVE = 1, + QUIC_STREAM_EVENT_SEND_COMPLETE = 2, + QUIC_STREAM_EVENT_PEER_SEND_SHUTDOWN = 3, + QUIC_STREAM_EVENT_PEER_SEND_ABORTED = 4, + QUIC_STREAM_EVENT_PEER_RECEIVE_ABORTED = 5, + QUIC_STREAM_EVENT_SEND_SHUTDOWN_COMPLETE = 6, + QUIC_STREAM_EVENT_SHUTDOWN_COMPLETE = 7, + QUIC_STREAM_EVENT_IDEAL_SEND_BUFFER_SIZE = 8, + QUIC_STREAM_EVENT_PEER_ACCEPTED = 9, + QUIC_STREAM_EVENT_CANCEL_ON_LOSS = 10, +} QUIC_STREAM_EVENT_TYPE; + +typedef struct QUIC_STREAM_EVENT { + QUIC_STREAM_EVENT_TYPE Type; + union { + struct { + QUIC_STATUS Status; + QUIC_UINT62 ID; + BOOLEAN PeerAccepted : 1; + BOOLEAN RESERVED : 7; + } START_COMPLETE; + struct { + /* in */ uint64_t AbsoluteOffset; + /* inout */ uint64_t TotalBufferLength; + _Field_size_(BufferCount) + /* in */ const QUIC_BUFFER* Buffers; + _Field_range_(0, UINT32_MAX) + /* in */ uint32_t BufferCount; + /* in */ QUIC_RECEIVE_FLAGS Flags; + } RECEIVE; + struct { + BOOLEAN Canceled; + void* ClientContext; + } SEND_COMPLETE; + struct { + QUIC_UINT62 ErrorCode; + } PEER_SEND_ABORTED; + struct { + QUIC_UINT62 ErrorCode; + } PEER_RECEIVE_ABORTED; + struct { + BOOLEAN Graceful; + } SEND_SHUTDOWN_COMPLETE; + struct { + BOOLEAN ConnectionShutdown; + BOOLEAN AppCloseInProgress : 1; + BOOLEAN ConnectionShutdownByApp : 1; + BOOLEAN ConnectionClosedRemotely : 1; + BOOLEAN RESERVED : 5; + QUIC_UINT62 ConnectionErrorCode; + QUIC_STATUS ConnectionCloseStatus; + } SHUTDOWN_COMPLETE; + struct { + uint64_t ByteCount; + } IDEAL_SEND_BUFFER_SIZE; + struct { + /* out */ QUIC_UINT62 ErrorCode; + } CANCEL_ON_LOSS; + }; +} QUIC_STREAM_EVENT; + +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +_Function_class_(QUIC_STREAM_CALLBACK) +QUIC_STATUS +(QUIC_API QUIC_STREAM_CALLBACK)( + _In_ HQUIC Stream, + _In_opt_ void* Context, + _Inout_ QUIC_STREAM_EVENT* Event + ); + +typedef QUIC_STREAM_CALLBACK *QUIC_STREAM_CALLBACK_HANDLER; + +// +// Opens a stream on the given connection. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_STREAM_OPEN_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_ QUIC_STREAM_OPEN_FLAGS Flags, + _In_ _Pre_defensive_ QUIC_STREAM_CALLBACK_HANDLER Handler, + _In_opt_ void* Context, + _Outptr_ _At_(*Stream, __drv_allocatesMem(Mem)) _Pre_defensive_ + HQUIC* Stream + ); + +// +// Closes a stream handle. +// +typedef +_IRQL_requires_max_(PASSIVE_LEVEL) +void +(QUIC_API * QUIC_STREAM_CLOSE_FN)( + _In_ _Pre_defensive_ __drv_freesMem(Mem) + HQUIC Stream + ); + +// +// Starts processing the stream. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_STREAM_START_FN)( + _In_ _Pre_defensive_ HQUIC Stream, + _In_ QUIC_STREAM_START_FLAGS Flags + ); + +// +// Shuts the stream down as specified, and waits for graceful +// shutdowns to complete. Does nothing if already shut down. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_STREAM_SHUTDOWN_FN)( + _In_ _Pre_defensive_ HQUIC Stream, + _In_ QUIC_STREAM_SHUTDOWN_FLAGS Flags, + _In_ _Pre_defensive_ QUIC_UINT62 ErrorCode // Application defined error code + ); + +// +// Sends data on an open stream. 
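The stream lifecycle above (open, start, then send with flags) composes as in the following sketch, placed here before the send function's declaration. The helper name and payload are illustrative, and StreamCallback stands for an application-provided QUIC_STREAM_CALLBACK_HANDLER:

```c
#include "msquic.h"

// Sketch: open a bidirectional stream and send one buffer with FIN.
static QUIC_STATUS SendOnNewStream(
    const QUIC_API_TABLE* MsQuic, HQUIC Connection,
    QUIC_STREAM_CALLBACK_HANDLER StreamCallback)
{
    static uint8_t Data[] = "ping";                  // placeholder payload
    static QUIC_BUFFER SendBuffer = { sizeof(Data), Data };

    HQUIC Stream = NULL;
    QUIC_STATUS Status = MsQuic->StreamOpen(
        Connection, QUIC_STREAM_OPEN_FLAG_NONE, StreamCallback, NULL, &Stream);
    if (QUIC_FAILED(Status)) return Status;

    Status = MsQuic->StreamStart(Stream, QUIC_STREAM_START_FLAG_NONE);
    if (QUIC_FAILED(Status)) {
        MsQuic->StreamClose(Stream);
        return Status;
    }
    // The buffer must stay valid until QUIC_STREAM_EVENT_SEND_COMPLETE fires.
    return MsQuic->StreamSend(Stream, &SendBuffer, 1, QUIC_SEND_FLAG_FIN, &SendBuffer);
}
```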
+// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_STREAM_SEND_FN)( + _In_ _Pre_defensive_ HQUIC Stream, + _In_reads_(BufferCount) _Pre_defensive_ + const QUIC_BUFFER* const Buffers, + _In_ uint32_t BufferCount, + _In_ QUIC_SEND_FLAGS Flags, + _In_opt_ void* ClientSendContext + ); + +// +// Completes a previously pended receive callback. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +void +(QUIC_API * QUIC_STREAM_RECEIVE_COMPLETE_FN)( + _In_ _Pre_defensive_ HQUIC Stream, + _In_ uint64_t BufferLength + ); + +// +// Enables or disables stream receive callbacks. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_STREAM_RECEIVE_SET_ENABLED_FN)( + _In_ _Pre_defensive_ HQUIC Stream, + _In_ BOOLEAN IsEnabled + ); + +// +// Datagrams +// + +// +// Sends an unreliable datagram on the connection. Note, the total payload +// of the send must fit in a single QUIC packet. +// +typedef +_IRQL_requires_max_(DISPATCH_LEVEL) +QUIC_STATUS +(QUIC_API * QUIC_DATAGRAM_SEND_FN)( + _In_ _Pre_defensive_ HQUIC Connection, + _In_reads_(BufferCount) _Pre_defensive_ + const QUIC_BUFFER* const Buffers, + _In_ uint32_t BufferCount, + _In_ QUIC_SEND_FLAGS Flags, + _In_opt_ void* ClientSendContext + ); + +// +// Version 2 API Function Table. Returned from MsQuicOpenVersion when Version +// is 2. Also returned from MsQuicOpen2. +// +typedef struct QUIC_API_TABLE { + + QUIC_SET_CONTEXT_FN SetContext; + QUIC_GET_CONTEXT_FN GetContext; + QUIC_SET_CALLBACK_HANDLER_FN SetCallbackHandler; + + QUIC_SET_PARAM_FN SetParam; + QUIC_GET_PARAM_FN GetParam; + + QUIC_REGISTRATION_OPEN_FN RegistrationOpen; + QUIC_REGISTRATION_CLOSE_FN RegistrationClose; + QUIC_REGISTRATION_SHUTDOWN_FN RegistrationShutdown; + + QUIC_CONFIGURATION_OPEN_FN ConfigurationOpen; + QUIC_CONFIGURATION_CLOSE_FN ConfigurationClose; + QUIC_CONFIGURATION_LOAD_CREDENTIAL_FN + ConfigurationLoadCredential; + + QUIC_LISTENER_OPEN_FN ListenerOpen; + QUIC_LISTENER_CLOSE_FN ListenerClose; + QUIC_LISTENER_START_FN ListenerStart; + QUIC_LISTENER_STOP_FN ListenerStop; + + QUIC_CONNECTION_OPEN_FN ConnectionOpen; + QUIC_CONNECTION_CLOSE_FN ConnectionClose; + QUIC_CONNECTION_SHUTDOWN_FN ConnectionShutdown; + QUIC_CONNECTION_START_FN ConnectionStart; + QUIC_CONNECTION_SET_CONFIGURATION_FN + ConnectionSetConfiguration; + QUIC_CONNECTION_SEND_RESUMPTION_FN ConnectionSendResumptionTicket; + + QUIC_STREAM_OPEN_FN StreamOpen; + QUIC_STREAM_CLOSE_FN StreamClose; + QUIC_STREAM_START_FN StreamStart; + QUIC_STREAM_SHUTDOWN_FN StreamShutdown; + QUIC_STREAM_SEND_FN StreamSend; + QUIC_STREAM_RECEIVE_COMPLETE_FN StreamReceiveComplete; + QUIC_STREAM_RECEIVE_SET_ENABLED_FN StreamReceiveSetEnabled; + + QUIC_DATAGRAM_SEND_FN DatagramSend; + + QUIC_CONNECTION_COMP_RESUMPTION_FN ConnectionResumptionTicketValidationComplete; // Available from v2.2 + QUIC_CONNECTION_COMP_CERT_FN ConnectionCertificateValidationComplete; // Available from v2.2 + +} QUIC_API_TABLE; + +#define QUIC_API_VERSION_1 1 // Not supported any more +#define QUIC_API_VERSION_2 2 // Current latest + +#if defined(_KERNEL_MODE) && !defined(_WIN64) + +// +// 32 bit kernel mode is no longer supported, so shim behavior in 32 bit kernel +// mode +// +#define MsQuicClose(QuicApi) UNREFERENCED_PARAMETER((QuicApi)) +#define MsQuicOpenVersion(Version, QuicApi) QUIC_STATUS_NOT_SUPPORTED + +#else + +// +// Opens the API library and initializes it if this is the first call for the +// process. It returns API function table for the rest of the API's functions. 
+// MsQuicClose must be called when the app is done with the function table. +// +_IRQL_requires_max_(PASSIVE_LEVEL) +_Check_return_ +#if (__cplusplus >= 201703L || _MSVC_LANG >= 201703L) +[[nodiscard]] +#endif +QUIC_STATUS +QUIC_API +MsQuicOpenVersion( + _In_ uint32_t Version, + _Out_ _Pre_defensive_ const void** QuicApi + ); + +// +// Cleans up the function table returned from MsQuicOpenVersion and releases the +// reference on the API. +// +_IRQL_requires_max_(PASSIVE_LEVEL) +void +QUIC_API +MsQuicClose( + _In_ _Pre_defensive_ const void* QuicApi + ); + +#endif + +_IRQL_requires_max_(PASSIVE_LEVEL) +_Check_return_ +typedef +QUIC_STATUS +(QUIC_API *MsQuicOpenVersionFn)( + _In_ uint32_t Version, + _Out_ _Pre_defensive_ const void** QuicApi + ); + +_IRQL_requires_max_(PASSIVE_LEVEL) +typedef +void +(QUIC_API *MsQuicCloseFn)( + _In_ _Pre_defensive_ const void* QuicApi + ); + +#ifdef _KERNEL_MODE + +DECLSPEC_SELECTANY GUID MSQUIC_NPI_ID = { + 0xC43138E3, 0xCD13, 0x4CB1, { 0x9C, 0xAE, 0xE0, 0x05, 0xC8, 0x55, 0x7A, 0xBA } +}; // C43138E3-CD13-4CB1-9CAE-E005C8557ABA + +DECLSPEC_SELECTANY GUID MSQUIC_MODULE_ID = { + 0x698F7C72, 0xC2E6, 0x49CD, { 0x8C, 0x39, 0x98, 0x85, 0x1D, 0x50, 0x19, 0x01 } +}; // 698F7C72-C2E6-49CD-8C39-98851D501901 + +typedef struct MSQUIC_NMR_DISPATCH { + uint16_t Version; + uint16_t Reserved; + MsQuicOpenVersionFn OpenVersion; + MsQuicCloseFn Close; +} MSQUIC_NMR_DISPATCH; + +// +// Stores the internal NMR client state. It's meant to be opaque to the users. +// +typedef struct __MSQUIC_NMR_CLIENT { + NPI_CLIENT_CHARACTERISTICS NpiClientCharacteristics; + LONG BindingCount; + HANDLE NmrClientHandle; + NPI_MODULEID ModuleId; + KEVENT RegistrationCompleteEvent; + MSQUIC_NMR_DISPATCH* ProviderDispatch; + BOOLEAN Deleting; +} __MSQUIC_NMR_CLIENT; + +#define QUIC_GET_DISPATCH(h) (((__MSQUIC_NMR_CLIENT*)(h))->ProviderDispatch) + +static +NTSTATUS +__MsQuicClientAttachProvider( + _In_ HANDLE NmrBindingHandle, + _In_ void *ClientContext, + _In_ const NPI_REGISTRATION_INSTANCE *ProviderRegistrationInstance + ) +{ + UNREFERENCED_PARAMETER(ProviderRegistrationInstance); + + NTSTATUS Status; + __MSQUIC_NMR_CLIENT* Client = (__MSQUIC_NMR_CLIENT*)ClientContext; + void* ProviderContext; + + if (InterlockedIncrement(&Client->BindingCount) == 1) { + #pragma warning(suppress:6387) // _Param_(2) could be '0' - by design. + Status = + NmrClientAttachProvider( + NmrBindingHandle, + Client, + NULL, + &ProviderContext, + (const void**)&Client->ProviderDispatch); + if (NT_SUCCESS(Status)) { + KeSetEvent(&Client->RegistrationCompleteEvent, IO_NO_INCREMENT, FALSE); + } else { + InterlockedDecrement(&Client->BindingCount); + } + } else { + Status = STATUS_NOINTERFACE; + } + + return Status; +} + +static +NTSTATUS +__MsQuicClientDetachProvider( + _In_ void *ClientBindingContext + ) +{ + __MSQUIC_NMR_CLIENT* Client = (__MSQUIC_NMR_CLIENT*)ClientBindingContext; + if (InterlockedOr8((char*)&Client->Deleting, 1)) { + return STATUS_SUCCESS; + } else { + return STATUS_PENDING; + } +} + +_IRQL_requires_max_(PASSIVE_LEVEL) +__forceinline +void +MsQuicNmrClientDeregister( + _Inout_ HANDLE* ClientHandle + ) +{ + __MSQUIC_NMR_CLIENT* Client = (__MSQUIC_NMR_CLIENT*)(*ClientHandle); + + if (InterlockedOr8((char*)&Client->Deleting, 1)) { + // + // We are already in the middle of detaching the client. + // Complete it now. 
+ // + NmrClientDetachProviderComplete(Client->NmrClientHandle); + } + + if (Client->NmrClientHandle) { + if (NmrDeregisterClient(Client->NmrClientHandle) == STATUS_PENDING) { + // + // Wait for the deregistration to complete. + // + NmrWaitForClientDeregisterComplete(Client->NmrClientHandle); + } + Client->NmrClientHandle = NULL; + } + + ExFreePoolWithTag(Client, 'cNQM'); + *ClientHandle = NULL; +} + +_IRQL_requires_max_(PASSIVE_LEVEL) +__forceinline +NTSTATUS +MsQuicNmrClientRegister( + _Out_ HANDLE* ClientHandle, + _In_ GUID* ClientModuleId, + _In_ ULONG TimeoutMs // zero = no wait, non-zero = some wait + ) +{ + NPI_REGISTRATION_INSTANCE *ClientRegistrationInstance; + NTSTATUS Status = STATUS_SUCCESS; + + __MSQUIC_NMR_CLIENT* Client = + (__MSQUIC_NMR_CLIENT*)ExAllocatePool2(POOL_FLAG_NON_PAGED, sizeof(*Client), 'cNQM'); + if (Client == NULL) { + Status = STATUS_INSUFFICIENT_RESOURCES; + goto Exit; + } + + KeInitializeEvent(&Client->RegistrationCompleteEvent, SynchronizationEvent, FALSE); + + Client->ModuleId.Length = sizeof(Client->ModuleId); + Client->ModuleId.Type = MIT_GUID; + Client->ModuleId.Guid = *ClientModuleId; + + Client->NpiClientCharacteristics.Length = sizeof(Client->NpiClientCharacteristics); + Client->NpiClientCharacteristics.ClientAttachProvider = __MsQuicClientAttachProvider; + Client->NpiClientCharacteristics.ClientDetachProvider = __MsQuicClientDetachProvider; + + ClientRegistrationInstance = &Client->NpiClientCharacteristics.ClientRegistrationInstance; + ClientRegistrationInstance->Size = sizeof(*ClientRegistrationInstance); + ClientRegistrationInstance->Version = 0; + ClientRegistrationInstance->NpiId = &MSQUIC_NPI_ID; + ClientRegistrationInstance->ModuleId = &Client->ModuleId; + + Status = + NmrRegisterClient( + &Client->NpiClientCharacteristics, Client, &Client->NmrClientHandle); + if (!NT_SUCCESS(Status)) { + goto Exit; + } + + LARGE_INTEGER Timeout; + Timeout.QuadPart = UInt32x32To64(TimeoutMs, 10000); + Timeout.QuadPart = -Timeout.QuadPart; + + Status = + KeWaitForSingleObject( + &Client->RegistrationCompleteEvent, + Executive, KernelMode, FALSE, + &Timeout); + if (Status != STATUS_SUCCESS) { + Status = STATUS_UNSUCCESSFUL; + goto Exit; + } + + *ClientHandle = Client; + +Exit: + if (!NT_SUCCESS(Status) && Client != NULL) { + MsQuicNmrClientDeregister((HANDLE*)&Client); + } + + return Status; +} + +#endif + +// +// Version specific helpers that wrap MsQuicOpenVersion. +// + +#if defined(__cplusplus) + +_IRQL_requires_max_(PASSIVE_LEVEL) +_Check_return_ +#if (__cplusplus >= 201703L || _MSVC_LANG >= 201703L) +[[nodiscard]] +#endif +#ifdef WIN32 +__forceinline +#else +__attribute__((always_inline)) inline +#endif +QUIC_STATUS +MsQuicOpen2( + _Out_ _Pre_defensive_ const QUIC_API_TABLE** QuicApi + ) +{ + return MsQuicOpenVersion(QUIC_API_VERSION_2, (const void**)QuicApi); +} + +#else + +#define MsQuicOpen2(QuicApi) MsQuicOpenVersion(2, (const void**)QuicApi) + +#endif // defined(__cplusplus) + +#if defined(__cplusplus) +} +#endif + +#endif // _MSQUIC_ diff --git a/Networking/Sources/include/msquic_posix.h b/Networking/Sources/include/msquic_posix.h new file mode 100644 index 00000000..27393e12 --- /dev/null +++ b/Networking/Sources/include/msquic_posix.h @@ -0,0 +1,522 @@ +/*++ + + Copyright (c) Microsoft Corporation. + Licensed under the MIT License. + +Abstract: + + This file contains the platform specific definitions for MsQuic structures + and error codes. 
+
+Environment:
+
+    POSIX (Linux and macOS)
+
+--*/
+
+#ifdef _WIN32
+#pragma once
+#endif
+
+#ifndef _MSQUIC_POSIX_
+#define _MSQUIC_POSIX_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include "quic_sal_stub.h"
+
+#ifdef __cplusplus
+extern "C++" {
+template <size_t S> struct _ENUM_FLAG_INTEGER_FOR_SIZE;
+template <> struct _ENUM_FLAG_INTEGER_FOR_SIZE<1> {
+    typedef uint8_t type;
+};
+template <> struct _ENUM_FLAG_INTEGER_FOR_SIZE<2> {
+    typedef uint16_t type;
+};
+template <> struct _ENUM_FLAG_INTEGER_FOR_SIZE<4> {
+    typedef uint32_t type;
+};
+template <> struct _ENUM_FLAG_INTEGER_FOR_SIZE<8> {
+    typedef uint64_t type;
+};
+
+// used as an approximation of std::underlying_type<T>
+template <class T> struct _ENUM_FLAG_SIZED_INTEGER
+{
+    typedef typename _ENUM_FLAG_INTEGER_FOR_SIZE<sizeof(T)>::type type;
+};
+}
+
+#define DEFINE_ENUM_FLAG_OPERATORS(ENUMTYPE) \
+extern "C++" { \
+inline ENUMTYPE operator | (ENUMTYPE a, ENUMTYPE b) throw() { return ENUMTYPE(((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)a) | ((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)b)); } \
+inline ENUMTYPE &operator |= (ENUMTYPE &a, ENUMTYPE b) throw() { return (ENUMTYPE &)(((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type &)a) |= ((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)b)); } \
+inline ENUMTYPE operator & (ENUMTYPE a, ENUMTYPE b) throw() { return ENUMTYPE(((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)a) & ((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)b)); } \
+inline ENUMTYPE &operator &= (ENUMTYPE &a, ENUMTYPE b) throw() { return (ENUMTYPE &)(((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type &)a) &= ((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)b)); } \
+inline ENUMTYPE operator ~ (ENUMTYPE a) throw() { return ENUMTYPE(~((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)a)); } \
+inline ENUMTYPE operator ^ (ENUMTYPE a, ENUMTYPE b) throw() { return ENUMTYPE(((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)a) ^ ((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)b)); } \
+inline ENUMTYPE &operator ^= (ENUMTYPE &a, ENUMTYPE b) throw() { return (ENUMTYPE &)(((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type &)a) ^= ((_ENUM_FLAG_SIZED_INTEGER<ENUMTYPE>::type)b)); } \
+}
+#else
+#define DEFINE_ENUM_FLAG_OPERATORS(ENUMTYPE) // NOP, C allows these operators.
+#endif
+
+#define QUIC_API
+#define QUIC_MAIN_EXPORT
+#define QUIC_STATUS unsigned int
+#define QUIC_FAILED(X) ((int)(X) > 0)
+#define QUIC_SUCCEEDED(X) ((int)(X) <= 0)
+
+//
+// The type of an error code generated by the system is mostly 'int'. In most
+// situations, we use the value of a system-generated error code as the value
+// of QUIC_STATUS. In some situations, we use a custom value for QUIC_STATUS.
+// In order to ensure that custom values don't conflict with system-generated
+// values, the custom values are all kept outside the range of any possible
+// errno value. There are static asserts to ensure that QUIC_STATUS type is
+// large enough for this purpose.
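+// For example, QUIC_STATUS_TLS_ALERT(42) below works out to
+// ERROR_BASE + 256 + 42 = 200000298 = 0xBEBC32A, far above any errno value.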
+//
+
+#ifndef ESTRPIPE // undefined on macOS
+#define ESTRPIPE 86
+#endif // ESTRPIPE
+
+#ifndef ENOKEY // undefined on macOS
+#define ENOKEY 126
+#endif // ENOKEY
+
+#define ERROR_BASE 200000000 // 0xBEBC200
+#define TLS_ERROR_BASE 256 + ERROR_BASE // 0xBEBC300
+#define CERT_ERROR_BASE 512 + ERROR_BASE // 0xBEBC400
+
+#define QUIC_STATUS_SUCCESS ((QUIC_STATUS)0) // 0
+#define QUIC_STATUS_PENDING ((QUIC_STATUS)-2) // -2
+#define QUIC_STATUS_CONTINUE ((QUIC_STATUS)-1) // -1
+#define QUIC_STATUS_OUT_OF_MEMORY ((QUIC_STATUS)ENOMEM) // 12
+#define QUIC_STATUS_INVALID_PARAMETER ((QUIC_STATUS)EINVAL) // 22
+#define QUIC_STATUS_INVALID_STATE ((QUIC_STATUS)EPERM) // 1
+#define QUIC_STATUS_NOT_SUPPORTED ((QUIC_STATUS)EOPNOTSUPP) // 95 (102 on macOS)
+#define QUIC_STATUS_NOT_FOUND ((QUIC_STATUS)ENOENT) // 2
+#define QUIC_STATUS_BUFFER_TOO_SMALL ((QUIC_STATUS)EOVERFLOW) // 75 (84 on macOS)
+#define QUIC_STATUS_HANDSHAKE_FAILURE ((QUIC_STATUS)ECONNABORTED) // 103 (53 on macOS)
+#define QUIC_STATUS_ABORTED ((QUIC_STATUS)ECANCELED) // 125 (89 on macOS)
+#define QUIC_STATUS_ADDRESS_IN_USE ((QUIC_STATUS)EADDRINUSE) // 98 (48 on macOS)
+#define QUIC_STATUS_INVALID_ADDRESS ((QUIC_STATUS)EAFNOSUPPORT) // 97 (47 on macOS)
+#define QUIC_STATUS_CONNECTION_TIMEOUT ((QUIC_STATUS)ETIMEDOUT) // 110 (60 on macOS)
+#define QUIC_STATUS_CONNECTION_IDLE ((QUIC_STATUS)ETIME) // 62 (101 on macOS)
+#define QUIC_STATUS_INTERNAL_ERROR ((QUIC_STATUS)EIO) // 5
+#define QUIC_STATUS_CONNECTION_REFUSED ((QUIC_STATUS)ECONNREFUSED) // 111 (61 on macOS)
+#define QUIC_STATUS_PROTOCOL_ERROR ((QUIC_STATUS)EPROTO) // 71 (100 on macOS)
+#define QUIC_STATUS_VER_NEG_ERROR ((QUIC_STATUS)EPROTONOSUPPORT) // 93 (43 on macOS)
+#define QUIC_STATUS_UNREACHABLE ((QUIC_STATUS)EHOSTUNREACH) // 113 (65 on macOS)
+#define QUIC_STATUS_TLS_ERROR ((QUIC_STATUS)ENOKEY) // 126
+#define QUIC_STATUS_USER_CANCELED ((QUIC_STATUS)EOWNERDEAD) // 130 (105 on macOS)
+#define QUIC_STATUS_ALPN_NEG_FAILURE ((QUIC_STATUS)ENOPROTOOPT) // 92 (42 on macOS)
+#define QUIC_STATUS_STREAM_LIMIT_REACHED ((QUIC_STATUS)ESTRPIPE) // 86
+#define QUIC_STATUS_ALPN_IN_USE ((QUIC_STATUS)EPROTOTYPE) // 91 (41 on macOS)
+#define QUIC_STATUS_ADDRESS_NOT_AVAILABLE ((QUIC_STATUS)EADDRNOTAVAIL) // 99 (47 on macOS)
+
+#define QUIC_STATUS_TLS_ALERT(Alert) ((QUIC_STATUS)(0xff & Alert) + TLS_ERROR_BASE)
+
+#define QUIC_STATUS_CLOSE_NOTIFY QUIC_STATUS_TLS_ALERT(0) // 0xBEBC300 - Close notify
+#define QUIC_STATUS_BAD_CERTIFICATE QUIC_STATUS_TLS_ALERT(42) // 0xBEBC32A - Bad Certificate
+#define QUIC_STATUS_UNSUPPORTED_CERTIFICATE QUIC_STATUS_TLS_ALERT(43) // 0xBEBC32B - Unsupported Certificate
+#define QUIC_STATUS_REVOKED_CERTIFICATE QUIC_STATUS_TLS_ALERT(44) // 0xBEBC32C - Revoked Certificate
+#define QUIC_STATUS_EXPIRED_CERTIFICATE QUIC_STATUS_TLS_ALERT(45) // 0xBEBC32D - Expired Certificate
+#define QUIC_STATUS_UNKNOWN_CERTIFICATE QUIC_STATUS_TLS_ALERT(46) // 0xBEBC32E - Unknown Certificate
+#define QUIC_STATUS_REQUIRED_CERTIFICATE QUIC_STATUS_TLS_ALERT(116) // 0xBEBC374 - Required Certificate
+
+#define QUIC_STATUS_CERT_ERROR(Val) ((QUIC_STATUS)Val + CERT_ERROR_BASE)
+
+#define QUIC_STATUS_CERT_EXPIRED QUIC_STATUS_CERT_ERROR(1) // 0xBEBC401
+#define QUIC_STATUS_CERT_UNTRUSTED_ROOT QUIC_STATUS_CERT_ERROR(2) // 0xBEBC402
+#define QUIC_STATUS_CERT_NO_CERT QUIC_STATUS_CERT_ERROR(3) // 0xBEBC403
+
+typedef unsigned char BOOLEAN;
+typedef struct in_addr IN_ADDR;
+typedef struct in6_addr IN6_ADDR;
+typedef struct addrinfo ADDRINFO;
+typedef sa_family_t QUIC_ADDRESS_FAMILY;
+
+#define 
QUIC_ADDRESS_FAMILY_UNSPEC AF_UNSPEC +#define QUIC_ADDRESS_FAMILY_INET AF_INET +#define QUIC_ADDRESS_FAMILY_INET6 AF_INET6 + +typedef union QUIC_ADDR { + struct sockaddr Ip; + struct sockaddr_in Ipv4; + struct sockaddr_in6 Ipv6; +} QUIC_ADDR; + +#ifndef RTL_FIELD_SIZE +#define RTL_FIELD_SIZE(type, field) (sizeof(((type *)0)->field)) +#endif + +#define FIELD_OFFSET(type, field) offsetof(type, field) + +#define QUIC_ADDR_V4_PORT_OFFSET FIELD_OFFSET(struct sockaddr_in, sin_port) +#define QUIC_ADDR_V4_IP_OFFSET FIELD_OFFSET(struct sockaddr_in, sin_addr) + +#define QUIC_ADDR_V6_PORT_OFFSET FIELD_OFFSET(struct sockaddr_in6, sin6_port) +#define QUIC_ADDR_V6_IP_OFFSET FIELD_OFFSET(struct sockaddr_in6, sin6_addr) + +#ifndef FALSE +#define FALSE 0 +#define TRUE 1 +#endif + +#define INITCODE +#define PAGEDX +#define QUIC_CACHEALIGN + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(CX_PLATFORM_DARWIN) +#define QUIC_LOCALHOST_FOR_AF(Af) ("localhost") +#else +#define QUIC_LOCALHOST_FOR_AF(Af) ((Af == QUIC_ADDRESS_FAMILY_INET) ? "localhost" : "ip6-localhost") +#endif + +#define QUIC_CERTIFICATE_FLAG_IGNORE_REVOCATION 0x00000080 +#define QUIC_CERTIFICATE_FLAG_IGNORE_UNKNOWN_CA 0x00000100 +#define QUIC_CERTIFICATE_FLAG_IGNORE_WRONG_USAGE 0x00000200 +#define QUIC_CERTIFICATE_FLAG_IGNORE_CERTIFICATE_CN_INVALID 0x00001000 // bad common name in X509 Cert. +#define QUIC_CERTIFICATE_FLAG_IGNORE_CERTIFICATE_DATE_INVALID 0x00002000 // expired X509 Cert. +#define QUIC_CERTIFICATE_FLAG_IGNORE_WEAK_SIGNATURE 0x00010000 + +#if defined(__clang__) +#define QUIC_NO_SANITIZE(X) __attribute__((no_sanitize(X))) +#else +#define QUIC_NO_SANITIZE(X) +#endif + +// +// Helpers for Windows string functions. +// + +#define _strnicmp strncasecmp +#define sprintf_s(dst, dst_len, format, ...) snprintf(dst, dst_len, format, __VA_ARGS__) +#define _vsnprintf_s(dst, dst_len, flag, format, ...) 
vsnprintf(dst, dst_len, format, __VA_ARGS__) + +// +// IP Address Abstraction Helpers +// + +inline +BOOLEAN +QuicAddrFamilyIsValid( + _In_ QUIC_ADDRESS_FAMILY Family + ) +{ + return + Family == QUIC_ADDRESS_FAMILY_UNSPEC || + Family == QUIC_ADDRESS_FAMILY_INET || + Family == QUIC_ADDRESS_FAMILY_INET6; +} + +inline +BOOLEAN +QuicAddrIsValid( + _In_ const QUIC_ADDR* const Addr + ) +{ + return QuicAddrFamilyIsValid(Addr->Ip.sa_family); +} + +inline +BOOLEAN +QuicAddrCompareIp( + _In_ const QUIC_ADDR* const Addr1, + _In_ const QUIC_ADDR* const Addr2 + ) +{ + if (QUIC_ADDRESS_FAMILY_INET == Addr1->Ip.sa_family) { + return memcmp(&Addr1->Ipv4.sin_addr, &Addr2->Ipv4.sin_addr, sizeof(IN_ADDR)) == 0; + } else { + return memcmp(&Addr1->Ipv6.sin6_addr, &Addr2->Ipv6.sin6_addr, sizeof(IN6_ADDR)) == 0; + } +} + +inline +BOOLEAN +QuicAddrCompare( + _In_ const QUIC_ADDR* const Addr1, + _In_ const QUIC_ADDR* const Addr2 + ) +{ + if (Addr1->Ip.sa_family != Addr2->Ip.sa_family || + Addr1->Ipv4.sin_port != Addr2->Ipv4.sin_port) { + return FALSE; + } + + if (QUIC_ADDRESS_FAMILY_INET == Addr1->Ip.sa_family) { + return memcmp(&Addr1->Ipv4.sin_addr, &Addr2->Ipv4.sin_addr, sizeof(IN_ADDR)) == 0; + } else { + return memcmp(&Addr1->Ipv6.sin6_addr, &Addr2->Ipv6.sin6_addr, sizeof(IN6_ADDR)) == 0; + } +} + +inline +QUIC_ADDRESS_FAMILY +QuicAddrGetFamily( + _In_ const QUIC_ADDR* const Addr + ) +{ + return Addr->Ip.sa_family; +} + +inline +void +QuicAddrSetFamily( + _In_ QUIC_ADDR* Addr, + _In_ QUIC_ADDRESS_FAMILY Family + ) +{ + Addr->Ip.sa_family = Family; +} + +inline +uint16_t +QuicAddrGetPort( + _In_ const QUIC_ADDR* const Addr + ) +{ + if (QUIC_ADDRESS_FAMILY_INET == Addr->Ip.sa_family) { + return ntohs(Addr->Ipv4.sin_port); + } else { + return ntohs(Addr->Ipv6.sin6_port); + } +} + +inline +void +QuicAddrSetPort( + _Out_ QUIC_ADDR* Addr, + _In_ uint16_t Port + ) +{ + if (QUIC_ADDRESS_FAMILY_INET == Addr->Ip.sa_family) { + Addr->Ipv4.sin_port = htons(Port); + } else { + Addr->Ipv6.sin6_port = htons(Port); + } +} + +// +// Test only API to increment the IP address value. +// +inline +void +QuicAddrIncrement( + _Inout_ QUIC_ADDR* Addr + ) +{ + if (Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET) { + ((uint8_t*)&Addr->Ipv4.sin_addr)[3]++; + } else { + ((uint8_t*)&Addr->Ipv6.sin6_addr)[15]++; + } +} + +inline +void +QuicAddrSetToLoopback( + _Inout_ QUIC_ADDR* Addr + ) +{ + if (Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET) { + Addr->Ipv4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + } else { + Addr->Ipv6.sin6_addr = in6addr_loopback; + } +} + +inline +uint32_t +QUIC_NO_SANITIZE("unsigned-integer-overflow") +QuicAddrHash( + _In_ const QUIC_ADDR* Addr + ) +{ + uint32_t Hash = 5387; // A random prime number. 
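+    // Each UPDATE_HASH step below computes Hash = Hash * 31 + (byte);
+    // the shift form ((Hash << 5) - Hash) is just 32 * Hash - Hash.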
+#define UPDATE_HASH(byte) Hash = ((Hash << 5) - Hash) + (byte) + if (Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET) { + UPDATE_HASH(Addr->Ipv4.sin_port & 0xFF); + UPDATE_HASH(Addr->Ipv4.sin_port >> 8); + for (uint8_t i = 0; i < sizeof(Addr->Ipv4.sin_addr); ++i) { + UPDATE_HASH(((uint8_t*)&Addr->Ipv4.sin_addr)[i]); + } + } else { + UPDATE_HASH(Addr->Ipv6.sin6_port & 0xFF); + UPDATE_HASH(Addr->Ipv6.sin6_port >> 8); + for (uint8_t i = 0; i < sizeof(Addr->Ipv6.sin6_addr); ++i) { + UPDATE_HASH(((uint8_t*)&Addr->Ipv6.sin6_addr)[i]); + } + } + return Hash; +} + +inline +BOOLEAN +QuicAddrIsWildCard( + _In_ const QUIC_ADDR* const Addr + ) +{ + if (Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_UNSPEC) { + return TRUE; + } else if (Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET) { + const IN_ADDR ZeroAddr = {0}; + return memcmp(&Addr->Ipv4.sin_addr.s_addr, &ZeroAddr, sizeof(IN_ADDR)) == 0; + } else { + const IN6_ADDR ZeroAddr = {0}; + return memcmp(&Addr->Ipv6.sin6_addr, &ZeroAddr, sizeof(IN6_ADDR)) == 0; + } +} + +inline +BOOLEAN +QuicAddr4FromString( + _In_z_ const char* AddrStr, + _Out_ QUIC_ADDR* Addr + ) +{ + if (AddrStr[0] == '[') { + return FALSE; + } + + const char* PortStart = strchr(AddrStr, ':'); + if (PortStart != NULL) { + if (strchr(PortStart+1, ':') != NULL) { + return FALSE; + } + + char TmpAddrStr[16]; + size_t AddrLength = PortStart - AddrStr; + if (AddrLength >= sizeof(TmpAddrStr)) { + return FALSE; + } + memcpy(TmpAddrStr, AddrStr, AddrLength); + TmpAddrStr[AddrLength] = '\0'; + + if (inet_pton(AF_INET, TmpAddrStr, &Addr->Ipv4.sin_addr) != 1) { + return FALSE; + } + Addr->Ipv4.sin_port = htons(atoi(PortStart+1)); + } else { + if (inet_pton(AF_INET, AddrStr, &Addr->Ipv4.sin_addr) != 1) { + return FALSE; + } + } + Addr->Ip.sa_family = QUIC_ADDRESS_FAMILY_INET; + return TRUE; +} + +inline +BOOLEAN +QuicAddr6FromString( + _In_z_ const char* AddrStr, + _Out_ QUIC_ADDR* Addr + ) +{ + if (AddrStr[0] == '[') { + const char* BracketEnd = strchr(AddrStr, ']'); + if (BracketEnd == NULL || *(BracketEnd+1) != ':') { + return FALSE; + } + + char TmpAddrStr[64]; + size_t AddrLength = BracketEnd - AddrStr - 1; + if (AddrLength >= sizeof(TmpAddrStr)) { + return FALSE; + } + memcpy(TmpAddrStr, AddrStr + 1, AddrLength); + TmpAddrStr[AddrLength] = '\0'; + + if (inet_pton(AF_INET6, TmpAddrStr, &Addr->Ipv6.sin6_addr) != 1) { + return FALSE; + } + Addr->Ipv6.sin6_port = htons(atoi(BracketEnd+2)); + } else { + if (inet_pton(AF_INET6, AddrStr, &Addr->Ipv6.sin6_addr) != 1) { + return FALSE; + } + } + Addr->Ip.sa_family = QUIC_ADDRESS_FAMILY_INET6; + return TRUE; +} + +inline +BOOLEAN +QuicAddrFromString( + _In_z_ const char* AddrStr, + _In_ uint16_t Port, // Host byte order + _Out_ QUIC_ADDR* Addr + ) +{ + Addr->Ipv4.sin_port = htons(Port); + return + QuicAddr4FromString(AddrStr, Addr) || + QuicAddr6FromString(AddrStr, Addr); +} + +// +// Represents an IP address and (optionally) port number as a string. +// +typedef struct QUIC_ADDR_STR { + char Address[64]; +} QUIC_ADDR_STR; + +inline +BOOLEAN +QuicAddrToString( + _In_ const QUIC_ADDR* Addr, + _Out_ QUIC_ADDR_STR* AddrStr + ) +{ + size_t AvailSpace = sizeof(AddrStr->Address); + char* Address = AddrStr->Address; + if (Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET6 && Addr->Ipv6.sin6_port != 0) { + Address[0] = '['; + Address++; + AvailSpace--; + } + if (inet_ntop( + Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET ? AF_INET : AF_INET6, + Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET ? 
(void*)&Addr->Ipv4.sin_addr : (void*)&Addr->Ipv6.sin6_addr, + Address, + AvailSpace) == NULL) { + return FALSE; + } + if (Addr->Ipv4.sin_port != 0) { + Address += strlen(Address); + if (Addr->Ip.sa_family == QUIC_ADDRESS_FAMILY_INET6) { + Address[0] = ']'; + Address++; + } + AvailSpace = sizeof(AddrStr->Address) - (Address - AddrStr->Address); + snprintf(Address, AvailSpace, ":%hu", ntohs(Addr->Ipv4.sin_port)); + } + return TRUE; +} + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/Networking/Sources/include/quic_sal_stub.h b/Networking/Sources/include/quic_sal_stub.h new file mode 100644 index 00000000..07f9c0c3 --- /dev/null +++ b/Networking/Sources/include/quic_sal_stub.h @@ -0,0 +1,282 @@ +/*++ + + Copyright (c) Microsoft Corporation. + Licensed under the MIT License. + + --*/ + +#ifndef _SAL_STUB_H +#define _SAL_STUB_H + +#pragma once + +// +// Necessary when SAL isn't supported to tell compiler it's not necessary. +// +#define INIT_NO_SAL(X) = X + +#ifndef _Must_inspect_result_ +#define _Must_inspect_result_ +#endif + +#ifndef _Pre_defensive_ +#define _Pre_defensive_ +#endif + +#ifndef _Ret_notnull_ +#define _Ret_notnull_ +#endif + +#ifndef _IRQL_requires_max_ +#define _IRQL_requires_max_(...) +#endif + +#ifndef _Function_class_ +#define _Function_class_(...) +#endif + +#ifndef _In_ +#define _In_ +#endif + +#ifndef _In_opt_ +#define _In_opt_ +#endif + +#ifndef _In_opt_z_ +#define _In_opt_z_ +#endif + +#ifndef _Inout_ +#define _Inout_ +#endif + +#ifndef _Inout_opt_ +#define _Inout_opt_ +#endif + +#ifndef _In_z_ +#define _In_z_ +#endif + +#ifndef _Out_ +#define _Out_ +#endif + +#ifndef _Out_range_ +#define _Out_range_(...) +#endif + +#ifndef _Field_size_bytes_ +#define _Field_size_bytes_(...) +#endif + +#ifndef _Field_size_bytes_opt_ +#define _Field_size_bytes_opt_(...) +#endif + +#ifndef _In_reads_ +#define _In_reads_(...) +#endif + +#ifndef _In_reads_bytes_ +#define _In_reads_bytes_(...) +#endif + +#ifndef _In_reads_z_ +#define _In_reads_z_(...) +#endif + +#ifndef _In_reads_opt_z_ +#define _In_reads_opt_z_(...) +#endif + +#ifndef _In_reads_or_z_opt_ +#define _In_reads_or_z_opt_(...) +#endif + +#ifndef _Out_writes_bytes_opt_ +#define _Out_writes_bytes_opt_(...) +#endif + +#ifndef _Null_terminated_ +#define _Null_terminated_ +#endif + +#ifndef _NullNull_terminated_ +#define _NullNull_terminated_ +#endif + +#ifndef _Out_writes_bytes_ +#define _Out_writes_bytes_(...) +#endif + +#ifndef _Field_size_ +#define _Field_size_(...) +#endif + +#ifndef _Success_ +#define _Success_(...) +#endif + +#ifndef _Field_range_ +#define _Field_range_(...) +#endif + +#ifndef _In_reads_bytes_opt_ +#define _In_reads_bytes_opt_(...) +#endif + +#ifndef _Out_writes_bytes_to_opt_ +#define _Out_writes_bytes_to_opt_(...) +#endif + +#ifndef _Deref_pre_opt_count_ +#define _Deref_pre_opt_count_(...) +#endif + +#ifndef _Deref_post_opt_count_ +#define _Deref_post_opt_count_(...) +#endif + +#ifndef _Outptr_result_buffer_ +#define _Outptr_result_buffer_(...) +#endif + +#ifndef _Outptr_result_buffer_maybenull_ +#define _Outptr_result_buffer_maybenull_(...) +#endif + +#ifndef _Inout_updates_bytes_ +#define _Inout_updates_bytes_(...) +#endif + +#ifndef _Inout_updates_bytes_opt_ +#define _Inout_updates_bytes_opt_(...) +#endif + +#ifndef _Inout_updates_ +#define _Inout_updates_(...) 
+#endif + +#ifndef _Out_opt_ +#define _Out_opt_ +#endif + +#ifndef _Outptr_ +#define _Outptr_ +#endif + +#ifndef _Ret_maybenull_ +#define _Ret_maybenull_ +#endif + +#ifndef _Must_inspect_result_ +#define _Must_inspect_result_ +#endif + +#ifndef _Post_invalid_ +#define _Post_invalid_ +#endif + +#ifndef _Post_writable_byte_size_ +#define _Post_writable_byte_size_(...) +#endif + +#ifndef __drv_allocatesMem +#define __drv_allocatesMem(...) +#endif + +#ifndef __drv_freesMem +#define __drv_freesMem(...) +#endif + +#ifndef __drv_aliasesMem +#define __drv_aliasesMem +#endif + +#ifndef _Frees_ptr_ +#define _Frees_ptr_ +#endif + +#ifndef _Frees_ptr_opt_ +#define _Frees_ptr_opt_ +#endif + +#ifndef _In_range_ +#define _In_range_(...) +#endif + +#ifndef _When_ +#define _When_(...) +#endif + +#ifndef _Post_equal_to_ +#define _Post_equal_to_(...) +#endif + +#ifndef _Deref_in_range_ +#define _Deref_in_range_(...) +#endif + +#ifndef _Deref_out_range_ +#define _Deref_out_range_(...) +#endif + +#ifndef _Out_writes_all_ +#define _Out_writes_all_(...) +#endif + +#ifndef _Out_writes_to_ +#define _Out_writes_to_(...) +#endif + +#ifndef _Out_writes_ +#define _Out_writes_(...) +#endif + +#ifndef _Field_z_ +#define _Field_z_ +#endif + +#ifndef __analysis_assume +#define __analysis_assume(expr) +#endif + +#ifndef _Out_writes_bytes_all_ +#define _Out_writes_bytes_all_(...) +#endif + +#ifndef _Analysis_assume_ +#define _Analysis_assume_(expr) +#endif + +#ifndef _Ret_range_ +#define _Ret_range_(...) +#endif + +#ifndef _Ret_writes_bytes_ +#define _Ret_writes_bytes_(...) +#endif + +#ifndef _Printf_format_string_ +#define _Printf_format_string_ +#endif + +#ifndef _Interlocked_operand_ +#define _Interlocked_operand_ +#endif + +#ifndef _In_reads_opt_ +#define _In_reads_opt_(...) +#endif + +#ifndef _At_ +#define _At_(...) 
+#endif + +#ifndef _Check_return_ +#define _Check_return_ +#endif + +#endif // _SAL_STUB_H diff --git a/Networking/Sources/module.modulemap b/Networking/Sources/module.modulemap index e9debb2b..088f479c 100644 --- a/Networking/Sources/module.modulemap +++ b/Networking/Sources/module.modulemap @@ -1,4 +1,4 @@ module msquic { - header "msquic/src/inc/msquic.h" + header "include/msquic.h" link "msquic" } diff --git a/Networking/Sources/msquic b/Networking/Sources/msquic deleted file mode 160000 index 2f1a7bec..00000000 --- a/Networking/Sources/msquic +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 2f1a7bec97b4b3a25bebed6da3c00bfa6f58e66f diff --git a/Node/Package.resolved b/Node/Package.resolved index e69f222d..ed891dc2 100644 --- a/Node/Package.resolved +++ b/Node/Package.resolved @@ -175,10 +175,10 @@ { "identity" : "swift-numerics", "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-numerics.git", + "location" : "https://github.com/apple/swift-numerics", "state" : { - "revision" : "0a5bc04095a675662cf24757cc0640aa2204253b", - "version" : "1.0.2" + "branch" : "main", + "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83" } }, { diff --git a/Node/Sources/Node/Genesis.swift b/Node/Sources/Node/Genesis.swift index a8c1738b..97a1694a 100644 --- a/Node/Sources/Node/Genesis.swift +++ b/Node/Sources/Node/Genesis.swift @@ -13,35 +13,7 @@ extension Genesis { fatalError("TODO: not implemented") case .dev: let config = ProtocolConfigRef.dev - var devKeys = [ValidatorKey]() - - var state = State.dummy(config: config) - - for i in 0 ..< config.value.totalNumberOfValidators { - let keySet = try DevKeyStore.getDevKey(seed: UInt32(i)) - devKeys.append(ValidatorKey( - bandersnatch: keySet.bandersnatch.data, - ed25519: keySet.ed25519.data, - bls: Data144(), // TODO: figure out BLS pub key size - metadata: Data128() - )) - } - state.safroleState.nextValidators = try ConfigFixedSizeArray(config: config, array: devKeys) - state.validatorQueue = try ConfigFixedSizeArray(config: config, array: devKeys) - state.currentValidators = try ConfigFixedSizeArray(config: config, array: devKeys) - - var epochKeys = [BandersnatchPublicKey]() - for i in 0 ..< config.value.epochLength { - epochKeys.append(devKeys[i % config.value.totalNumberOfValidators].bandersnatch) - } - state.safroleState.ticketsOrKeys = try .right(ConfigFixedSizeArray(config: config, array: epochKeys)) - - let ctx = try Bandersnatch.RingContext(size: UInt(config.value.totalNumberOfValidators)) - let commitment = try Bandersnatch.RingCommitment( - ring: devKeys.map { try Bandersnatch.PublicKey(data: $0.bandersnatch) }, - ctx: ctx - ) - state.safroleState.ticketsVerifier = commitment.data + let state = try State.devGenesis(config: config) return (StateRef(state), config) } diff --git a/Node/Sources/Node/Node.swift b/Node/Sources/Node/Node.swift index 1eb0d9d0..ff1532ef 100644 --- a/Node/Sources/Node/Node.swift +++ b/Node/Sources/Node/Node.swift @@ -19,12 +19,17 @@ public class Node { public let blockchain: Blockchain public let rpcServer: Server public let timeProvider: TimeProvider + public let dataProvider: BlockchainDataProvider - public init(genesis: Genesis, config: Config, eventBus: EventBus) async throws { + public init( + config: Config, + genesis: Genesis, + eventBus: EventBus + ) async throws { logger.debug("Initializing node") let (genesisState, protocolConfig) = try await genesis.load() - let dataProvider = await InMemoryDataProvider(genesis: genesisState) + dataProvider = try await 
BlockchainDataProvider(InMemoryDataProvider(genesis: genesisState)) timeProvider = SystemTimeProvider(slotPeriodSeconds: UInt32(protocolConfig.value.slotPeriodSeconds)) blockchain = try await Blockchain( config: protocolConfig, diff --git a/Node/Sources/Node/ValidatorNode.swift b/Node/Sources/Node/ValidatorNode.swift index 37c7cdff..7307a55f 100644 --- a/Node/Sources/Node/ValidatorNode.swift +++ b/Node/Sources/Node/ValidatorNode.swift @@ -4,22 +4,20 @@ import TracingUtils import Utils public class ValidatorNode: Node { - private var validator: Validator! + private var validator: ValidatorService! public required init( genesis: Genesis, config: Config, eventBus: EventBus, keystore: KeyStore ) async throws { - try await super.init(genesis: genesis, config: config, eventBus: eventBus) + try await super.init(config: config, genesis: genesis, eventBus: eventBus) - let scheduler = DispatchQueueScheduler( - timeProvider: timeProvider, - queue: DispatchQueue(label: "boka.validator.scheduler", attributes: .concurrent) - ) - validator = await Validator( + let scheduler = DispatchQueueScheduler(timeProvider: timeProvider) + validator = await ValidatorService( blockchain: blockchain, keystore: keystore, eventBus: eventBus, - scheduler: scheduler + scheduler: scheduler, + dataProvider: dataProvider ) let genesisState = try await blockchain.getState(hash: Data32()) diff --git a/PolkaVM/Package.resolved b/PolkaVM/Package.resolved index ad0eba61..12ae8fdb 100644 --- a/PolkaVM/Package.resolved +++ b/PolkaVM/Package.resolved @@ -1,5 +1,5 @@ { - "originHash" : "6d0c24ff032ea00dd46c16efbe5df39e0991db5de764148317e7f8327a3e3a9e", + "originHash" : "9e424d7bcd3f17dac238113a9cadb3e3629fd0d79c9ae55474a71e3340fff9e8", "pins" : [ { "identity" : "blake2.swift", @@ -55,6 +55,15 @@ "version" : "2.5.0" } }, + { + "identity" : "swift-numerics", + "kind" : "remoteSourceControl", + "location" : "https://github.com/apple/swift-numerics", + "state" : { + "branch" : "main", + "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83" + } + }, { "identity" : "swift-service-context", "kind" : "remoteSourceControl", diff --git a/PolkaVM/Sources/PolkaVM/Engine.swift b/PolkaVM/Sources/PolkaVM/Engine.swift index eb850fb8..fe3e65ee 100644 --- a/PolkaVM/Sources/PolkaVM/Engine.swift +++ b/PolkaVM/Sources/PolkaVM/Engine.swift @@ -1,5 +1,6 @@ import Foundation import TracingUtils +import Utils private let logger = Logger(label: "Engine") @@ -15,7 +16,7 @@ public class Engine { public func execute(program: ProgramCode, state: VMState) -> ExitReason { let context = ExecutionContext(state: state, config: config) while true { - guard state.getGas() > 0 else { + guard state.getGas() > GasInt(0) else { return .outOfGas } if case let .exit(reason) = step(program: program, context: context) { diff --git a/PolkaVM/Sources/PolkaVM/Instruction.swift b/PolkaVM/Sources/PolkaVM/Instruction.swift index 84f9bdab..64623f9b 100644 --- a/PolkaVM/Sources/PolkaVM/Instruction.swift +++ b/PolkaVM/Sources/PolkaVM/Instruction.swift @@ -1,5 +1,6 @@ import Foundation import TracingUtils +import Utils private let logger = Logger(label: "Instruction") @@ -8,7 +9,7 @@ public protocol Instruction { init(data: Data) throws - func gasCost() -> UInt64 + func gasCost() -> Gas func updatePC(context: ExecutionContext, skip: UInt32) -> ExecOutcome // protected method @@ -48,8 +49,8 @@ extension Instruction { } } - public func gasCost() -> UInt64 { - 1 + public func gasCost() -> Gas { + Gas(1) } public func updatePC(context: ExecutionContext, skip: UInt32) -> ExecOutcome { diff 
--git a/PolkaVM/Sources/PolkaVM/VMState.swift b/PolkaVM/Sources/PolkaVM/VMState.swift index bc3e86ef..6d529f73 100644 --- a/PolkaVM/Sources/PolkaVM/VMState.swift +++ b/PolkaVM/Sources/PolkaVM/VMState.swift @@ -1,4 +1,5 @@ import Foundation +import Utils public class VMState { public let program: ProgramCode @@ -6,32 +7,32 @@ public class VMState { public private(set) var pc: UInt32 private var registers: Registers - private var gas: Int64 + private var gas: GasInt private var memory: Memory - public init(program: ProgramCode, pc: UInt32, registers: Registers, gas: UInt64, memory: Memory) { + public init(program: ProgramCode, pc: UInt32, registers: Registers, gas: Gas, memory: Memory) { self.program = program self.pc = pc self.registers = registers - self.gas = Int64(gas) + self.gas = GasInt(gas) self.memory = memory } /// Initialize from a standard program blob - public init(standardProgramBlob blob: Data, pc: UInt32, gas: UInt64, argumentData: Data?) throws { + public init(standardProgramBlob blob: Data, pc: UInt32, gas: Gas, argumentData: Data?) throws { let program = try StandardProgram(blob: blob, argumentData: argumentData) self.program = program.code registers = program.initialRegisters memory = program.initialMemory self.pc = pc - self.gas = Int64(gas) + self.gas = GasInt(gas) } public func getRegisters() -> Registers { registers } - public func getGas() -> Int64 { + public func getGas() -> GasInt { gas } @@ -67,9 +68,8 @@ public class VMState { try memory.sbrk(increment) } - public func consumeGas(_ amount: UInt64) { - // TODO: use saturating subtraction - gas -= Int64(amount) + public func consumeGas(_ amount: Gas) { + gas -= GasInt(amount) } public func increasePC(_ amount: UInt32) { diff --git a/PolkaVM/Sources/PolkaVM/invokePVM.swift b/PolkaVM/Sources/PolkaVM/invokePVM.swift index e9028356..7b56ff86 100644 --- a/PolkaVM/Sources/PolkaVM/invokePVM.swift +++ b/PolkaVM/Sources/PolkaVM/invokePVM.swift @@ -1,12 +1,18 @@ import Foundation import TracingUtils +import Utils private let logger = Logger(label: "invokePVM") /// common PVM program-argument invocation function -public func invokePVM(config: PvmConfig, blob: Data, pc: UInt32, gas: UInt64, argumentData: Data?, - ctx: any InvocationContext) -> (ExitReason, VMState?, UInt64?, Data?) -{ +public func invokePVM( + config: PvmConfig, + blob: Data, + pc: UInt32, + gas: Gas, + argumentData: Data?, + ctx: any InvocationContext +) -> (ExitReason, VMState?, Gas?, Data?) { do { let state = try VMState(standardProgramBlob: blob, pc: pc, gas: gas, argumentData: argumentData) let engine = Engine(config: config, invocationContext: ctx) @@ -16,10 +22,9 @@ public func invokePVM(config: PvmConfig, blob: Data, pc: UInt32, gas: UInt64, ar case .outOfGas: return (.outOfGas, state, nil, nil) case .halt: - let (reg10, reg11) = state.readRegister(Registers.Index(raw: 10), Registers.Index(raw: 11)) - // TODO: check if this is correct - let output = try? state.readMemory(address: reg10, length: Int(reg11 - reg10)) - return (.halt, state, UInt64(state.getGas()), output ?? Data()) + let (addr, len) = state.readRegister(Registers.Index(raw: 10), Registers.Index(raw: 11)) + let output = try? state.readMemory(address: addr, length: Int(len)) + return (.halt, state, Gas(state.getGas()), output ?? 
Data())
        default:
            return (.panic(.trap), state, nil, nil)
        }
diff --git a/README.md b/README.md
index 50bd3eb2..ed0f9618 100644
--- a/README.md
+++ b/README.md
@@ -4,9 +4,9 @@ JAM built with Swift
 
 ## Development Environment
 
-- SwiftLint: `brew install swiftlint`
-- SwiftFormat: `brew install swiftformat`
-- CMake (required by msquic): `brew install cmake`
+- Install tools and deps
+  - macOS: `brew install swiftlint swiftformat rocksdb`
+  - Linux: `apt-get install librocksdb-dev libzstd-dev libbz2-dev liblz4-dev`
 - Precommit hooks: `make githooks`
 - Pull submodules: `git submodule update --init --recursive`
 - Setup deps: `make deps`
diff --git a/RPC/Package.resolved b/RPC/Package.resolved
index 62c82c81..cb3923e3 100644
--- a/RPC/Package.resolved
+++ b/RPC/Package.resolved
@@ -175,10 +175,10 @@
     {
       "identity" : "swift-numerics",
       "kind" : "remoteSourceControl",
-      "location" : "https://github.com/apple/swift-numerics.git",
+      "location" : "https://github.com/apple/swift-numerics",
       "state" : {
-        "revision" : "0a5bc04095a675662cf24757cc0640aa2204253b",
-        "version" : "1.0.2"
+        "branch" : "main",
+        "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83"
       }
     },
     {
diff --git a/RPC/Sources/RPC/DataSource/Blockchain+DataSource.swift b/RPC/Sources/RPC/DataSource/Blockchain+DataSource.swift
index 77df1cd1..53f85ad1 100644
--- a/RPC/Sources/RPC/DataSource/Blockchain+DataSource.swift
+++ b/RPC/Sources/RPC/DataSource/Blockchain+DataSource.swift
@@ -1,4 +1,16 @@
 import Blockchain
 import Utils
 
-extension Blockchain: DataSource {}
+extension Blockchain: DataSource {
+    public func getBestBlock() async throws -> BlockRef {
+        try await dataProvider.getBlock(hash: dataProvider.bestHead)
+    }
+
+    public func getBlock(hash: Data32) async throws -> BlockRef? {
+        try await dataProvider.getBlock(hash: hash)
+    }
+
+    public func getState(hash: Data32) async throws -> StateRef? {
+        try await dataProvider.getState(hash: hash)
+    }
+}
diff --git a/Utils/Package.resolved b/Utils/Package.resolved
index ae3aaf18..75638cbe 100644
--- a/Utils/Package.resolved
+++ b/Utils/Package.resolved
@@ -1,5 +1,5 @@
 {
-  "originHash" : "c2eb25061f92db026235ca72d3db8b7b3e6923914ef9705646c538bf0a85b3c6",
+  "originHash" : "d6c46e06c62a99990ec7081e211b89a840b494c327f7339c2580791428261b14",
   "pins" : [
     {
       "identity" : "blake2.swift",
@@ -55,6 +55,15 @@
         "version" : "2.5.0"
       }
     },
+    {
+      "identity" : "swift-numerics",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-numerics",
+      "state" : {
+        "branch" : "main",
+        "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83"
+      }
+    },
     {
       "identity" : "swift-service-context",
       "kind" : "remoteSourceControl",
diff --git a/Utils/Package.swift b/Utils/Package.swift
index 64169362..795f590a 100644
--- a/Utils/Package.swift
+++ b/Utils/Package.swift
@@ -22,6 +22,7 @@ let package = Package(
         .package(url: "https://github.com/apple/swift-crypto.git", "1.0.0" ..< "4.0.0"),
         .package(url: "https://github.com/apple/swift-testing.git", branch: "0.10.0"),
         .package(url: "https://github.com/apple/swift-atomics.git", from: "1.2.0"),
+        .package(url: "https://github.com/apple/swift-numerics.git", branch: "main"),
     ],
     targets: [
         // Targets are the basic building blocks of a package, defining a module or a test suite.
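To make the reworked `Blockchain: DataSource` conformance above concrete, here is a minimal usage sketch; it is illustrative only, assuming an already-constructed `Blockchain` and reusing the zero-hash genesis convention seen in `ValidatorNode`:

```swift
import Blockchain
import Utils

// Sketch: exercising the DataSource methods Blockchain now implements.
func logChainHead(blockchain: Blockchain) async throws {
    // Best block, resolved through dataProvider.bestHead.
    let best = try await blockchain.getBestBlock()

    // The zero hash addresses the genesis state, as in ValidatorNode.
    let genesisState = try await blockchain.getState(hash: Data32())

    print("best block: \(best), genesis state present: \(genesisState != nil)")
}
```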
@@ -34,6 +35,7 @@ let package = Package( .product(name: "Blake2", package: "Blake2.swift"), .product(name: "Crypto", package: "swift-crypto"), .product(name: "Atomics", package: "swift-atomics"), + .product(name: "Numerics", package: "swift-numerics"), "blst", "bandersnatch_vrfs", "erasure_coding", diff --git a/Utils/Sources/Utils/AtomicArray.swift b/Utils/Sources/Utils/AtomicArray.swift index e6dff5ff..6b6e3a48 100644 --- a/Utils/Sources/Utils/AtomicArray.swift +++ b/Utils/Sources/Utils/AtomicArray.swift @@ -26,129 +26,127 @@ public struct AtomicArray: RangeReplaceableCollection { } public func index(after i: Int) -> Int { - return array.index(after: i) + array.index(after: i) } - fileprivate func _read(_ block: () throws -> R) rethrows -> R { - var result: R! + fileprivate func read(_ block: () throws -> R) rethrows -> R { try queue.sync { - result = try block() + try block() } - return result } - fileprivate func _write(_ block: () throws -> R) rethrows -> R { - var result: R! + fileprivate func write(_ block: () throws -> R) rethrows -> R { try queue.sync(flags: .barrier) { - result = try block() + try block() } - return result } public mutating func append(_ newElement: AtomicArray.Element) { - _write { + write { array.append(newElement) } } public mutating func append(contentsOf newElements: S) where S: Sequence, AtomicArray.Element == S.Element { - _write { + write { array.append(contentsOf: newElements) } } public func filter(_ isIncluded: (AtomicArray.Element) throws -> Bool) rethrows -> AtomicArray { - return try _read { + try read { let subArray = try array.filter(isIncluded) return AtomicArray(subArray) } } public mutating func insert(_ newElement: AtomicArray.Element, at i: AtomicArray.Index) { - _write { + write { array.insert(newElement, at: i) } } - public mutating func insert(contentsOf newElements: S, at i: AtomicArray.Index) where S: Collection, AtomicArray.Element == S.Element { - _write { + public mutating func insert(contentsOf newElements: S, at i: AtomicArray.Index) where S: Collection, + AtomicArray.Element == S.Element + { + write { array.insert(contentsOf: newElements, at: i) } } @discardableResult public mutating func popLast() -> AtomicArray.Element? { - return _write { + write { array.popLast() } } @discardableResult public mutating func remove(at i: AtomicArray.Index) -> AtomicArray.Element { - return _write { + write { array.remove(at: i) } } public mutating func removeAll() { - _write { + write { array.removeAll() } } public mutating func removeAll(keepingCapacity keepCapacity: Bool) { - _write { + write { array.removeAll(keepingCapacity: keepCapacity) } } public mutating func removeAll(where shouldBeRemoved: (AtomicArray.Element) throws -> Bool) rethrows { - try _write { + try write { try array.removeAll(where: shouldBeRemoved) } } @discardableResult public mutating func removeFirst() -> AtomicArray.Element { - return _write { + write { array.removeFirst() } } public mutating func removeFirst(_ k: Int) { - _write { + write { array.removeFirst(k) } } @discardableResult public mutating func removeLast() -> AtomicArray.Element { - return _write { + write { array.removeLast() } } public mutating func removeLast(_ k: Int) { - _write { + write { array.removeLast(k) } } public func forEach(_ body: (Element) throws -> Void) rethrows { - try _read { + try read { try array.forEach(body) } } public mutating func removeFirstIfExist(where shouldBeRemoved: (AtomicArray.Element) throws -> Bool) { - _write { + write { guard let index = try? 
array.firstIndex(where: shouldBeRemoved) else { return } array.remove(at: index) } } public mutating func removeSubrange(_ bounds: Range) { - _write { + write { array.removeSubrange(bounds) } } @@ -156,98 +154,98 @@ public struct AtomicArray: RangeReplaceableCollection { public mutating func replaceSubrange(_ subrange: R, with newElements: C) where C: Collection, R: RangeExpression, T == C.Element, AtomicArray.Index == R.Bound { - _write { + write { array.replaceSubrange(subrange, with: newElements) } } public mutating func reserveCapacity(_ n: Int) { - _write { + write { array.reserveCapacity(n) } } public var count: Int { - return _read { + read { array.count } } public var isEmpty: Bool { - return _read { + read { array.isEmpty } } public var first: AtomicArray.Element? { - return _read { + read { array.first } } public func getArray() -> [T] { - return _read { + read { array } } public mutating func setArray(_ newArray: [T]) { - _write { + write { array = newArray } } public mutating func performRead(_ closure: ([T]) -> Void) { - _read { + read { closure(array) } } public mutating func performWrite(_ closure: ([T]) -> ([T])) { - _write { + write { array = closure(array) } } public subscript(bounds: Range) -> AtomicArray.SubSequence { - return _read { + read { AtomicArray(array[bounds]) } } public subscript(bounds: AtomicArray.Index) -> AtomicArray.Element { get { - return _read { + read { array[bounds] } } set(value) { - _write { + write { array[bounds] = value } } } public static func + (lhs: Other, rhs: AtomicArray) -> AtomicArray where Other: Sequence, AtomicArray.Element == Other.Element { - return AtomicArray(lhs + rhs.getArray()) + AtomicArray(lhs + rhs.getArray()) } public static func + (lhs: AtomicArray, rhs: Other) -> AtomicArray where Other: Sequence, AtomicArray.Element == Other.Element { - return AtomicArray(lhs.getArray() + rhs) + AtomicArray(lhs.getArray() + rhs) } public static func + (lhs: AtomicArray, rhs: Other) -> AtomicArray where Other: RangeReplaceableCollection, AtomicArray.Element == Other.Element { - return AtomicArray(lhs.getArray() + rhs) + AtomicArray(lhs.getArray() + rhs) } public static func + (lhs: AtomicArray, rhs: AtomicArray) -> AtomicArray { - return AtomicArray(lhs.getArray() + rhs.getArray()) + AtomicArray(lhs.getArray() + rhs.getArray()) } public static func += (lhs: inout AtomicArray, rhs: Other) where Other: Sequence, AtomicArray.Element == Other.Element { - lhs._write { + lhs.write { lhs.array += rhs } } @@ -255,7 +253,7 @@ public struct AtomicArray: RangeReplaceableCollection { extension AtomicArray: CustomStringConvertible { public var description: String { - return _read { + read { "\(array)" } } @@ -263,19 +261,19 @@ extension AtomicArray: CustomStringConvertible { extension AtomicArray where Element: Equatable { public func split(separator: Element, maxSplits: Int, omittingEmptySubsequences: Bool) -> [ArraySlice] { - return _read { + read { array.split(separator: separator, maxSplits: maxSplits, omittingEmptySubsequences: omittingEmptySubsequences) } } public func firstIndex(of element: Element) -> Int? { - return _read { + read { array.firstIndex(of: element) } } public func lastIndex(of element: Element) -> Int? 
{ - return _read { + read { array.lastIndex(of: element) } } @@ -283,34 +281,36 @@ extension AtomicArray where Element: Equatable { public func starts(with possiblePrefix: PossiblePrefix) -> Bool where PossiblePrefix: Sequence, Element == PossiblePrefix.Element { - return _read { + read { array.starts(with: possiblePrefix) } } - public func elementsEqual(_ other: OtherSequence) -> Bool where OtherSequence: Sequence, Element == OtherSequence.Element { - return _read { + public func elementsEqual(_ other: OtherSequence) -> Bool where OtherSequence: Sequence, + Element == OtherSequence.Element + { + read { array.elementsEqual(other) } } public func contains(_ element: Element) -> Bool { - return _read { + read { array.contains(element) } } public static func != (lhs: AtomicArray, rhs: AtomicArray) -> Bool { - return lhs._read { - rhs._read { + lhs.read { + rhs.read { lhs.array != rhs.array } } } public static func == (lhs: AtomicArray, rhs: AtomicArray) -> Bool { - return lhs._read { - rhs._read { + lhs.read { + rhs.read { lhs.array == rhs.array } } diff --git a/Utils/Sources/Utils/AtomicDictionary.swift b/Utils/Sources/Utils/AtomicDictionary.swift index c3e4c2d6..5bb4c4d5 100644 --- a/Utils/Sources/Utils/AtomicDictionary.swift +++ b/Utils/Sources/Utils/AtomicDictionary.swift @@ -12,129 +12,125 @@ public struct AtomicDictionary { dictionary = elements } - private func _read(_ block: () throws -> R) rethrows -> R { - var result: R! + private func read(_ block: () throws -> R) rethrows -> R { try queue.sync { - result = try block() + try block() } - return result } - private func _write(_ block: () throws -> R) rethrows -> R { - var result: R! + private func write(_ block: () throws -> R) rethrows -> R { try queue.sync(flags: .barrier) { - result = try block() + try block() } - return result } public subscript(key: Key) -> Value? { get { - return _read { + read { dictionary[key] } } set { - _write { + write { dictionary[key] = newValue } } } public mutating func set(value: Value, forKey key: Key) { - _write { + write { dictionary[key] = value } } public func value(forKey key: Key) -> Value? { - return _read { + read { dictionary[key] } } public var count: Int { - return _read { + read { dictionary.count } } public var isEmpty: Bool { - return _read { + read { dictionary.isEmpty } } public var keys: [Key] { - return _read { + read { Array(dictionary.keys) } } public var values: [Value] { - return _read { + read { Array(dictionary.values) } } public func contains(key: Key) -> Bool { - return _read { + read { dictionary.keys.contains(key) } } public mutating func removeValue(forKey key: Key) -> Value? { - return _write { + write { dictionary.removeValue(forKey: key) } } public mutating func removeAll() { - _write { + write { dictionary.removeAll() } } public mutating func updateValue(_ value: Value, forKey key: Key) -> Value? 
{ - return _write { + write { dictionary.updateValue(value, forKey: key) } } public func forEach(_ body: ((key: Key, value: Value)) throws -> Void) rethrows { - try _read { + try read { try dictionary.forEach(body) } } public func filter(_ isIncluded: ((key: Key, value: Value)) throws -> Bool) rethrows -> AtomicDictionary { - return try _read { + try read { let filtered = try dictionary.filter(isIncluded) return AtomicDictionary(filtered) } } public mutating func merge(_ other: [Key: Value], uniquingKeysWith combine: (Value, Value) throws -> Value) rethrows { - try _write { + try write { try dictionary.merge(other, uniquingKeysWith: combine) } } public mutating func merge(_ other: AtomicDictionary, uniquingKeysWith combine: (Value, Value) throws -> Value) rethrows { - try _write { + try write { try dictionary.merge(other.dictionary, uniquingKeysWith: combine) } } public func mapValues(_ transform: (Value) throws -> T) rethrows -> AtomicDictionary { - return try _read { + try read { let mapped = try dictionary.mapValues(transform) return AtomicDictionary(mapped) } } public func compactMapValues(_ transform: (Value) throws -> T?) rethrows -> AtomicDictionary { - return try _read { + try read { let compactMapped = try dictionary.compactMapValues(transform) return AtomicDictionary(compactMapped) } @@ -144,8 +140,8 @@ public struct AtomicDictionary { // Equatable conformance extension AtomicDictionary: Equatable where Value: Equatable { public static func == (lhs: AtomicDictionary, rhs: AtomicDictionary) -> Bool { - return lhs._read { - rhs._read { + lhs.read { + rhs.read { lhs.dictionary == rhs.dictionary } } diff --git a/Utils/Sources/Utils/Crypto/Bandersnatch.swift b/Utils/Sources/Utils/Crypto/Bandersnatch.swift index 20443301..0cc18fe2 100644 --- a/Utils/Sources/Utils/Crypto/Bandersnatch.swift +++ b/Utils/Sources/Utils/Crypto/Bandersnatch.swift @@ -1,5 +1,8 @@ import bandersnatch_vrfs import Foundation +import TracingUtils + +private let logger = Logger(label: "Bandersnatch") private func _call( data: [Data], @@ -115,6 +118,8 @@ public enum Bandersnatch: KeyType { /// /// Used for ticket claiming during block production. public func ietfVRFSign(vrfInputData: Data, auxData: Data = Data()) throws -> Data96 { + logger.trace("ietfVRFSign", metadata: ["vrfInputData": "\(vrfInputData.toHexString())", "auxData": "\(auxData.toHexString())"]) + var output = Data(repeating: 0, count: 96) try call(vrfInputData, auxData, out: &output) { ptrs, out_buf in @@ -135,6 +140,8 @@ public enum Bandersnatch: KeyType { } public func getOutput(vrfInputData: Data) throws -> Data32 { + logger.trace("getOutput", metadata: ["vrfInputData": "\(vrfInputData.toHexString())"]) + var output = Data(repeating: 0, count: 32) try call(vrfInputData, out: &output) { ptrs, out_buf in @@ -221,6 +228,15 @@ public enum Bandersnatch: KeyType { public func ietfVRFVerify( vrfInputData: Data, auxData: Data = Data(), signature: Data96 ) throws(Error) -> Data32 { + logger.trace( + "ietfVRFVerify", + metadata: [ + "vrfInputData": "\(vrfInputData.toHexString())", + "auxData": "\(auxData.toHexString())", + "signature": "\(signature.data.toHexString())", + ] + ) + var output = Data(repeating: 0, count: 32) try call(vrfInputData, auxData, signature.data, out: &output) { ptrs, out_buf in @@ -263,23 +279,25 @@ public enum Bandersnatch: KeyType { public final class Prover { private let secret: SecretKey - private let ring: [PublicKey] + private let ring: [PublicKey?] private let ringPtrs: [OpaquePointer?] 
private let proverIdx: UInt private let ctx: RingContext - public init(sercret: SecretKey, ring: [PublicKey], proverIdx: UInt, ctx: RingContext) { + public init(sercret: SecretKey, ring: [PublicKey?], proverIdx: UInt, ctx: RingContext) { secret = sercret self.ring = ring self.proverIdx = proverIdx self.ctx = ctx - ringPtrs = ring.map(\.ptr) + ringPtrs = ring.map { $0?.ptr } } /// Anonymous VRF signature. /// /// Used for tickets submission. public func ringVRFSign(vrfInputData: Data, auxData: Data = Data()) throws(Error) -> Data784 { + logger.trace("ringVRFSign", metadata: ["vrfInputData": "\(vrfInputData.toHexString())", "auxData": "\(auxData.toHexString())"]) + var output = Data(repeating: 0, count: 784) try call(vrfInputData, auxData, out: &output) { ptrs, out_buf in @@ -310,8 +328,8 @@ public enum Bandersnatch: KeyType { fileprivate let ptr: OpaquePointer public let data: Data144 - public init(ring: [PublicKey], ctx: RingContext) throws(Error) { - let ringPtrs = ring.map { $0.ptr as OpaquePointer? } + public init(ring: [PublicKey?], ctx: RingContext) throws(Error) { + let ringPtrs = ring.map { $0?.ptr as OpaquePointer? } var ptr: OpaquePointer! try call { _ in @@ -370,6 +388,15 @@ public enum Bandersnatch: KeyType { /// /// On success returns the VRF output hash. public func ringVRFVerify(vrfInputData: Data, auxData: Data = Data(), signature: Data784) throws(Error) -> Data32 { + logger.trace( + "ringVRFVerify", + metadata: [ + "vrfInputData": "\(vrfInputData.toHexString())", + "auxData": "\(auxData.toHexString())", + "signature": "\(signature.data.toHexString())", + ] + ) + var output = Data(repeating: 0, count: 32) try call(vrfInputData, auxData, signature.data, out: &output) { ptrs, out_buf in diff --git a/Utils/Sources/Utils/EventBus/EventBus.swift b/Utils/Sources/Utils/EventBus/EventBus.swift index 5154f0fc..ba05d8c5 100644 --- a/Utils/Sources/Utils/EventBus/EventBus.swift +++ b/Utils/Sources/Utils/EventBus/EventBus.swift @@ -66,24 +66,30 @@ public actor EventBus: Sendable { } } - public func publish(_ event: some Event) { + public nonisolated func publish(_ event: some Event) { + Task { + await publish(event) + } + } + + public func publish(_ event: some Event) async { let key = ObjectIdentifier(type(of: event)) - if let eventHandlers = handlers[key] { - let eventMiddleware = eventMiddleware - let handlerMiddleware = handlerMiddleware - Task { - do { - try await eventMiddleware.handle(event) { event in - for handler in eventHandlers { - try await handlerMiddleware.handle(event) { evt in - try await handler.handle(evt) - } - } + let eventHandlers = handlers[key] + let eventMiddleware = eventMiddleware + let handlerMiddleware = handlerMiddleware + do { + try await eventMiddleware.handle(event) { event in + guard let eventHandlers else { + return + } + for handler in eventHandlers { + try await handlerMiddleware.handle(event) { evt in + try await handler.handle(evt) } - } catch { - logger.warning("Unhandled error for event: \(event) with error: \(error)") } } + } catch { + logger.warning("Unhandled error for event: \(event) with error: \(error)") } } } diff --git a/Utils/Sources/Utils/Extensions/Array+Utils.swift b/Utils/Sources/Utils/Extensions/Array+Utils.swift index 7c1d06d9..ffa63ea9 100644 --- a/Utils/Sources/Utils/Extensions/Array+Utils.swift +++ b/Utils/Sources/Utils/Extensions/Array+Utils.swift @@ -49,7 +49,7 @@ extension Array { var iter = randomness.makeIterator() // TODO: confirm this is matching to the defs in GP for i in stride(from: count - 1, through: 1, by: -1) { - 
let j = Int(iter.next() ?? 0 % UInt32(i + 1)) + let j = Int((iter.next() ?? 0) % UInt32(i + 1)) guard i != j else { continue } diff --git a/Utils/Sources/Utils/FixedSizeData.swift b/Utils/Sources/Utils/FixedSizeData.swift index 9a554366..126c0908 100644 --- a/Utils/Sources/Utils/FixedSizeData.swift +++ b/Utils/Sources/Utils/FixedSizeData.swift @@ -30,13 +30,15 @@ extension FixedSizeData: Codable { } } -extension FixedSizeData: CustomStringConvertible, CustomDebugStringConvertible { +extension FixedSizeData: CustomStringConvertible { public var description: String { - "0x\(data.map { String(format: "%02x", $0) }.joined())" - } - - public var debugDescription: String { - description + if T.value > 32 { + let prefix = data.prefix(8).map { String(format: "%02x", $0) }.joined() + let suffix = data.suffix(8).map { String(format: "%02x", $0) }.joined() + return "0x\(prefix)...\(suffix)" + } else { + return "0x\(data.map { String(format: "%02x", $0) }.joined())" + } } } @@ -82,10 +84,17 @@ extension FixedSizeData: EncodedSize { extension FixedSizeData { public static func random() -> Self { - var data = Data(repeating: 0, count: T.value) + var data = Data(count: T.value) + var generator = SystemRandomNumberGenerator() + data.withUnsafeMutableBytes { ptr in - ptr.baseAddress!.withMemoryRebound(to: UInt8.self, capacity: T.value) { ptr in - arc4random_buf(ptr, T.value) + for i in stride(from: 0, to: T.value, by: 8) { + let randomValue = generator.next() + let bytesToCopy = min(8, T.value - i) + withUnsafeBytes(of: randomValue) { randomBytes in + UnsafeMutableRawBufferPointer(rebasing: ptr[i ..< (i + bytesToCopy)]) + .copyMemory(from: UnsafeRawBufferPointer(rebasing: randomBytes[.. UInt64 } + +public protocol ReadGas { + associatedtype TConfig + static func read(config: TConfig) -> Gas +} diff --git a/Utils/Sources/Utils/Ref.swift b/Utils/Sources/Utils/Ref.swift index ba814840..db2362a9 100644 --- a/Utils/Sources/Utils/Ref.swift +++ b/Utils/Sources/Utils/Ref.swift @@ -6,6 +6,12 @@ open class Ref: @unchecked Sendable, AtomicReference { public required init(_ value: T) { self.value = value } + + public func mutate(fn: (inout T) throws -> Void) rethrows -> Self { + var config = value + try fn(&config) + return Self(config) + } } public final class RefMut { diff --git a/Utils/Sources/Utils/SaturatingNumber.swift b/Utils/Sources/Utils/SaturatingNumber.swift new file mode 100644 index 00000000..8d0e0db8 --- /dev/null +++ b/Utils/Sources/Utils/SaturatingNumber.swift @@ -0,0 +1,147 @@ +import Codec +import Numerics + +public struct SaturatingNumber: Sendable { + public private(set) var value: T + + public static var max: SaturatingNumber { + SaturatingNumber(T.max) + } + + public static var min: SaturatingNumber { + SaturatingNumber(T.min) + } + + public init(_ value: T) { + self.value = value + } + + // Initializer for converting from other integer types to `T` with saturation + public init(_ value: some FixedWidthInteger & BinaryInteger) { + self.value = T(clamping: value) + } + + public static func + (lhs: SaturatingNumber, rhs: SaturatingNumber) -> SaturatingNumber { + SaturatingNumber(lhs.value.addingWithSaturation(rhs.value)) + } + + public static func - (lhs: SaturatingNumber, rhs: SaturatingNumber) -> SaturatingNumber { + SaturatingNumber(lhs.value.subtractingWithSaturation(rhs.value)) + } + + public static func * (lhs: SaturatingNumber, rhs: SaturatingNumber) -> SaturatingNumber { + SaturatingNumber(lhs.value.multipliedWithSaturation(by: rhs.value)) + } + + public static func / (lhs: 
SaturatingNumber, rhs: SaturatingNumber) -> SaturatingNumber { + SaturatingNumber(lhs.value / rhs.value) + } + + public static func % (lhs: SaturatingNumber, rhs: SaturatingNumber) -> SaturatingNumber { + SaturatingNumber(lhs.value % rhs.value) + } + + public static prefix func - (lhs: SaturatingNumber) -> SaturatingNumber { + SaturatingNumber(lhs.value.negatedWithSaturation()) + } + + public static func += (lhs: inout SaturatingNumber, rhs: SaturatingNumber) { + lhs.value = lhs.value.addingWithSaturation(rhs.value) + } + + public static func -= (lhs: inout SaturatingNumber, rhs: SaturatingNumber) { + lhs.value = lhs.value.subtractingWithSaturation(rhs.value) + } + + public static func *= (lhs: inout SaturatingNumber, rhs: SaturatingNumber) { + lhs.value = lhs.value.multipliedWithSaturation(by: rhs.value) + } + + public static func /= (lhs: inout SaturatingNumber, rhs: SaturatingNumber) { + lhs.value = lhs.value / rhs.value + } + + public static func %= (lhs: inout SaturatingNumber, rhs: SaturatingNumber) { + lhs.value = lhs.value % rhs.value + } + + // With other types + + public static func + (lhs: SaturatingNumber, rhs: T) -> SaturatingNumber { + SaturatingNumber(lhs.value.addingWithSaturation(rhs)) + } + + public static func - (lhs: SaturatingNumber, rhs: T) -> SaturatingNumber { + SaturatingNumber(lhs.value.subtractingWithSaturation(rhs)) + } + + public static func * (lhs: SaturatingNumber, rhs: T) -> SaturatingNumber { + SaturatingNumber(lhs.value.multipliedWithSaturation(by: rhs)) + } + + public static func / (lhs: SaturatingNumber, rhs: T) -> SaturatingNumber { + SaturatingNumber(lhs.value / rhs) + } + + public static func % (lhs: SaturatingNumber, rhs: T) -> SaturatingNumber { + SaturatingNumber(lhs.value % rhs) + } + + public static func += (lhs: inout SaturatingNumber, rhs: T) { + lhs.value = lhs.value.addingWithSaturation(rhs) + } + + public static func -= (lhs: inout SaturatingNumber, rhs: T) { + lhs.value = lhs.value.subtractingWithSaturation(rhs) + } + + public static func *= (lhs: inout SaturatingNumber, rhs: T) { + lhs.value = lhs.value.multipliedWithSaturation(by: rhs) + } + + public static func /= (lhs: inout SaturatingNumber, rhs: T) { + lhs.value = lhs.value / rhs + } + + public static func %= (lhs: inout SaturatingNumber, rhs: T) { + lhs.value = lhs.value % rhs + } +} + +extension SaturatingNumber: Comparable, Equatable { + public static func < (lhs: SaturatingNumber, rhs: SaturatingNumber) -> Bool { + lhs.value < rhs.value + } + + public static func == (lhs: SaturatingNumber, rhs: SaturatingNumber) -> Bool { + lhs.value == rhs.value + } +} + +extension SaturatingNumber: CustomStringConvertible { + public var description: String { + "\(value)" + } +} + +extension SaturatingNumber: Codable where T: Codable { + public init(from decoder: Decoder) throws { + let container = try decoder.singleValueContainer() + value = try container.decode(T.self) + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.singleValueContainer() + try container.encode(value) + } +} + +extension SaturatingNumber: EncodedSize { + public var encodedSize: Int { + MemoryLayout.size + } + + public static var encodeedSizeHint: Int? 
{ + MemoryLayout.size + } +} diff --git a/Utils/Sources/Utils/SortedArray.swift b/Utils/Sources/Utils/SortedContainer/SortedArray.swift similarity index 57% rename from Utils/Sources/Utils/SortedArray.swift rename to Utils/Sources/Utils/SortedContainer/SortedArray.swift index 3e36a1ea..e053d0f0 100644 --- a/Utils/Sources/Utils/SortedArray.swift +++ b/Utils/Sources/Utils/SortedContainer/SortedArray.swift @@ -1,20 +1,17 @@ -public enum SortedArrayError: Swift.Error { - case invalidData -} - -public struct SortedArray { +// TODO: add tests +public struct SortedArray: SortedContainer { public private(set) var array: [T] - public init(unsorted: [T]) { + public init(_ unsorted: [T]) { array = unsorted array.sort() } - public init(sorted: [T]) throws(SortedArrayError) { + public init(sorted: [T]) throws(SortedContainerError) { array = sorted guard array.isSorted() else { - throw SortedArrayError.invalidData + throw SortedContainerError.invalidData } } @@ -22,32 +19,11 @@ public struct SortedArray { array = sortedUnchecked } - /// Use binary search to find the index of the first element equal to or greater than the given element. - public func insertIndex(_ element: T, begin: Int = 0, end: Int? = nil) -> Int { - var low = begin - var high = end ?? array.count - while low < high { - let mid = (low + high) / 2 - if array[mid] < element { - low = mid + 1 - } else { - high = mid - } - } - return low - } - public mutating func insert(_ element: T) { array.insert(element, at: insertIndex(element)) } - public mutating func append(contentsOf newElements: some Collection) { - for element in newElements { - insert(element) - } - } - - public mutating func append(contentsOf other: SortedArray) { + public mutating func append(contentsOf other: some SortedContainer) { var begin = 0 for element in other.array { let idx = insertIndex(element, begin: begin) @@ -67,10 +43,6 @@ public struct SortedArray { public mutating func remove(where predicate: (T) throws -> Bool) rethrows { try array.removeAll(where: predicate) } - - public var count: Int { - array.count - } } extension SortedArray: Encodable where T: Encodable { diff --git a/Utils/Sources/Utils/SortedContainer/SortedContainer.swift b/Utils/Sources/Utils/SortedContainer/SortedContainer.swift new file mode 100644 index 00000000..8a6a61f5 --- /dev/null +++ b/Utils/Sources/Utils/SortedContainer/SortedContainer.swift @@ -0,0 +1,43 @@ +public enum SortedContainerError: Swift.Error { + case invalidData +} + +public protocol SortedContainer: Equatable { + associatedtype T: Comparable + + var array: [T] { get } + + mutating func insert(_ element: T) +} + +extension SortedContainer { + /// Use binary search to find the index of the first element equal to or greater than the given element. + public func insertIndex(_ element: T, begin: Int = 0, end: Int? = nil) -> Int { + var low = begin + var high = end ?? 
array.count + while low < high { + let mid = (low + high) / 2 + if array[mid] < element { + low = mid + 1 + } else { + high = mid + } + } + return low + } + + public func contains(_ element: T) -> Bool { + let idx = insertIndex(element) + return idx < array.count && array[idx] == element + } + + public mutating func append(contentsOf newElements: some Collection<T>) { + for element in newElements { + insert(element) + } + } + + public var count: Int { + array.count + } +} diff --git a/Utils/Sources/Utils/SortedContainer/SortedUniqueArray.swift b/Utils/Sources/Utils/SortedContainer/SortedUniqueArray.swift new file mode 100644 index 00000000..3e15cda2 --- /dev/null +++ b/Utils/Sources/Utils/SortedContainer/SortedUniqueArray.swift @@ -0,0 +1,77 @@ +// TODO: add tests +public struct SortedUniqueArray<T: Comparable>: SortedContainer { + public private(set) var array: [T] + + public init(_ unchecked: [T]) { + array = unchecked + array.sort() + + // Deduplicate high-to-low; stride is empty for fewer than two elements, unlike (1 ..< count).reversed(), which traps when count == 0. + for i in stride(from: array.count - 1, through: 1, by: -1) where array[i] == array[i - 1] { + array.remove(at: i) + } + } + + public init(sorted: [T]) throws(SortedContainerError) { + array = sorted + + guard array.isSortedAndUnique() else { + throw SortedContainerError.invalidData + } + } + + public init(sortedUnchecked: [T] = []) { + array = sortedUnchecked + } + + public mutating func insert(_ element: T) { + let index = insertIndex(element) + if index < array.count, array[index] == element { + return + } + array.insert(element, at: index) + } + + public mutating func append(contentsOf other: some SortedContainer<T>) { + var begin = 0 + for element in other.array { + let idx = insertIndex(element, begin: begin) + if idx >= array.count || array[idx] != element { + array.insert(element, at: idx) + begin = idx + 1 + } else { + // Stay on the match so later duplicates in `other` are still detected. + begin = idx + } + } + } + + public mutating func remove(at index: Int) { + array.remove(at: index) + } + + public mutating func removeAll() { + array.removeAll() + } + + public mutating func remove(where predicate: (T) throws -> Bool) rethrows { + try array.removeAll(where: predicate) + } +} + +extension SortedUniqueArray: Encodable where T: Encodable { + public func encode(to encoder: Encoder) throws { + var container = encoder.singleValueContainer() + try container.encode(array) + } +} + +extension SortedUniqueArray: Decodable where T: Decodable { + public init(from decoder: Decoder) throws { + let container = try decoder.singleValueContainer() + let array = try container.decode([T].self) + try self.init(sorted: array) + } +} + +extension SortedUniqueArray: Sendable where T: Sendable {} diff --git a/Utils/Sources/Utils/primitives.swift b/Utils/Sources/Utils/primitives.swift new file mode 100644 index 00000000..62dc7c86 --- /dev/null +++ b/Utils/Sources/Utils/primitives.swift @@ -0,0 +1,15 @@ +public typealias Gas = SaturatingNumber +public typealias GasInt = SaturatingNumber +public typealias Balance = SaturatingNumber + +extension Gas { + public init(_ gasInt: GasInt) { + self = .init(gasInt.value) + } +} + +extension GasInt { + public init(_ gas: Gas) { + self = .init(gas.value) + } +} diff --git a/Utils/Sources/bandersnatch/Cargo.lock b/Utils/Sources/bandersnatch/Cargo.lock index e25781fb..6358e635 100644 --- a/Utils/Sources/bandersnatch/Cargo.lock +++ b/Utils/Sources/bandersnatch/Cargo.lock @@ -14,6 +14,55 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "anstream" +version = "0.6.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +dependencies = [ + "anstyle", + "anstyle-parse", +
"anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "anstyle-parse" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +dependencies = [ + "anstyle", + "windows-sys", +] + [[package]] name = "ark-bls12-381" version = "0.4.0" @@ -47,7 +96,7 @@ dependencies = [ [[package]] name = "ark-ec-vrfs" version = "0.1.0" -source = "git+https://github.com/davxy/ark-ec-vrfs?rev=8cef209#8cef20972fd808780f0462c28162b3030a195460" +source = "git+https://github.com/davxy/ark-ec-vrfs?rev=ea35e22#ea35e224f79c6f920e3816ae2e3cfdb02e2e8448" dependencies = [ "ark-bls12-381", "ark-ec", @@ -168,10 +217,18 @@ dependencies = [ ] [[package]] -name = "arrayref" -version = "0.3.8" +name = "ark-transcript" +version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "563084372d89271122bd743ef0a608179726f5fad0566008ba55bd0f756489b8" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "digest", + "rand_core", + "sha3", +] [[package]] name = "arrayvec" @@ -179,17 +236,6 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.3.0" @@ -205,12 +251,6 @@ dependencies = [ "hex-literal", ] -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.6.0" @@ -226,17 +266,6 @@ dependencies = [ "digest", ] -[[package]] -name = "blake2b_simd" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" -dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", -] - [[package]] name = "block-buffer" version = "0.10.4" @@ -254,9 +283,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "cbindgen" -version = "0.24.5" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b922faaf31122819ec80c4047cc684c6979a087366c069611e33649bf98e18d" +checksum = "3fce8dd7fcfcbf3a0a87d8f515194b49d6135acab73e18bd380d1d93bb1a15eb" dependencies = [ "clap", "heck", @@ -266,7 +295,7 @@ dependencies = [ "quote", 
"serde", "serde_json", - "syn 1.0.109", + "syn 2.0.72", "tempfile", "toml", ] @@ -279,52 +308,52 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "3.2.25" +version = "4.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" dependencies = [ - "atty", - "bitflags 1.3.2", + "anstream", + "anstyle", "clap_lex", - "indexmap", "strsim", - "termcolor", - "textwrap", ] [[package]] name = "clap_lex" -version = "0.2.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" + +[[package]] +name = "colorchoice" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/davxy/ring-proof?branch=extended#6524d3a17b2ddadcfca2f2934d3aed7bb129fa9f" +source = "git+https://github.com/davxy/ring-proof?rev=86e3ce0#86e3ce052c1598a4c4bfac246fb5af6bfa5b8a94" dependencies = [ "ark-ec", "ark-ff", "ark-poly", "ark-serialize", "ark-std", - "blake2b_simd", "fflonk", "getrandom_or_panic", - "merlin", - "rand_chacha", "rayon", ] -[[package]] -name = "constant_time_eq" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" - [[package]] name = "cpufeatures" version = "0.2.12" @@ -397,6 +426,12 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "errno" version = "0.3.9" @@ -458,12 +493,6 @@ dependencies = [ "rand_core", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.13.2" @@ -474,19 +503,16 @@ dependencies = [ ] [[package]] -name = "heck" -version = "0.4.1" +name = "hashbrown" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" [[package]] -name = "hermit-abi" -version = "0.1.19" +name = "heck" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hex-literal" @@ -496,14 +522,20 @@ checksum = 
"6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "indexmap" -version = "1.9.3" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ - "autocfg", - "hashbrown 0.12.3", + "equivalent", + "hashbrown 0.15.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" version = "0.10.5" @@ -598,12 +630,6 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" -[[package]] -name = "os_str_bytes" -version = "6.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" - [[package]] name = "paste" version = "1.0.15" @@ -687,18 +713,18 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/davxy/ring-proof?branch=extended#6524d3a17b2ddadcfca2f2934d3aed7bb129fa9f" +source = "git+https://github.com/davxy/ring-proof?rev=86e3ce0#86e3ce052c1598a4c4bfac246fb5af6bfa5b8a94" dependencies = [ "ark-ec", "ark-ff", "ark-poly", "ark-serialize", "ark-std", + "ark-transcript", "arrayvec", "blake2", "common", "fflonk", - "merlin", "rayon", ] @@ -717,7 +743,7 @@ version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.6.0", + "bitflags", "errno", "libc", "linux-raw-sys", @@ -768,6 +794,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "sha2" version = "0.10.8" @@ -779,11 +814,21 @@ dependencies = [ "digest", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" @@ -826,27 +871,37 @@ dependencies = [ ] [[package]] -name = "termcolor" -version = "1.4.1" +name = "toml" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ - "winapi-util", + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", ] [[package]] -name = "textwrap" -version = "0.16.1" +name = "toml_datetime" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" +checksum = 
"0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] -name = "toml" -version = "0.5.11" +name = "toml_edit" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ + "indexmap", "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] @@ -861,6 +916,12 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + [[package]] name = "version_check" version = "0.9.5" @@ -873,37 +934,6 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - [[package]] name = "windows-sys" version = "0.52.0" @@ -977,6 +1007,15 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "winnow" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +dependencies = [ + "memchr", +] + [[package]] name = "zerocopy" version = "0.7.35" diff --git a/Utils/Sources/bandersnatch/Cargo.toml b/Utils/Sources/bandersnatch/Cargo.toml index 597d8b51..f13ad1da 100644 --- a/Utils/Sources/bandersnatch/Cargo.toml +++ b/Utils/Sources/bandersnatch/Cargo.toml @@ -7,14 +7,13 @@ edition = "2021" crate-type = ["staticlib"] [build-dependencies] -cbindgen = "0.24.0" +cbindgen = "0.27.0" [dependencies] -ark-ec-vrfs = { git = "https://github.com/davxy/ark-ec-vrfs", rev = "8cef209", features = [ +ark-ec-vrfs = { git = "https://github.com/davxy/ark-ec-vrfs", rev = "ea35e22", features = [ "bandersnatch", "ring", "parallel", - "test-vectors", ] } [dev-dependencies] diff --git a/Utils/Sources/bandersnatch/src/bandersnatch_vrfs.rs b/Utils/Sources/bandersnatch/src/bandersnatch_vrfs.rs index 5696f754..61bd3a62 100644 --- a/Utils/Sources/bandersnatch/src/bandersnatch_vrfs.rs +++ b/Utils/Sources/bandersnatch/src/bandersnatch_vrfs.rs @@ -64,7 +64,17 @@ pub fn ring_vrf_sign( let 
output = secret.output(input); // Backend currently requires the wrapped type (plain affine points) - let pts: Vec<_> = unsafe { ring.iter().map(|pk| (*(*pk)).0).collect() }; + let pts: Vec<_> = unsafe { + ring.iter() + .map(|pk| { + if pk.is_null() { + ctx.padding_point() + } else { + (*(*pk)).0 + } + }) + .collect() + }; // Proof construction let prover_key = ctx.prover_key(&pts); diff --git a/Utils/Sources/bandersnatch/src/ffi.rs b/Utils/Sources/bandersnatch/src/ffi.rs index 14003ea5..1432894b 100644 --- a/Utils/Sources/bandersnatch/src/ffi.rs +++ b/Utils/Sources/bandersnatch/src/ffi.rs @@ -245,7 +245,18 @@ pub extern "C" fn ring_commitment_new_from_ring( let ring_slice: &[*const Public] = unsafe { std::slice::from_raw_parts(ring, ring_len) }; let ctx: &RingContext = unsafe { &*ctx }; // Backend currently requires the wrapped type (plain affine points) - let pts: Vec<_> = unsafe { ring_slice.iter().map(|pk| (*(*pk)).0).collect() }; + let pts: Vec<_> = unsafe { + ring_slice + .iter() + .map(|pk| { + if pk.is_null() { + ctx.padding_point() + } else { + (*(*pk)).0 + } + }) + .collect() + }; let verifier_key = ctx.verifier_key(&pts); let commitment = verifier_key.commitment(); unsafe { @@ -389,72 +400,3 @@ pub extern "C" fn verifier_ietf_vrf_verify( Err(_) => 3, } } - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn test_ring_vrf_sign() { - let ring_data = vec![ - hex!("5e465beb01dbafe160ce8216047f2155dd0569f058afd52dcea601025a8d161d"), - hex!("3d5e5a51aab2b048f8686ecd79712a80e3265a114cc73f14bdb2a59233fb66d0"), - hex!("aa2b95f7572875b0d0f186552ae745ba8222fc0b5bd456554bfe51c68938f8bc"), - hex!("7f6190116d118d643a98878e294ccf62b509e214299931aad8ff9764181a4e33"), - hex!("48e5fcdce10e0b64ec4eebd0d9211c7bac2f27ce54bca6f7776ff6fee86ab3e3"), - hex!("f16e5352840afb47e206b5c89f560f2611835855cf2e6ebad1acc9520a72591d"), - ]; - - let signature = hex!("b342bf8f6fa69c745daad2e99c92929b1da2b840f67e5e8015ac22dd1076343ea95c5bb4b69c197bfdc1b7d2f484fe455fb19bba7e8d17fcaf309ba5814bf54f3a74d75b408da8d3b99bf07f7cde373e4fd757061b1c99e0aac4847f1e393e892b566c14a7f8643a5d976ced0a18d12e32c660d59c66c271332138269cb0fe9c2462d5b3c1a6e9f5ed330ff0d70f64218010ff337b0b69b531f916c67ec564097cd842306df1b4b44534c95ff4efb73b17a14476057fdf8678683b251dc78b0b94712179345c794b6bd99aa54b564933651aee88c93b648e91a613c87bc3f445fff571452241e03e7d03151600a6ee259051a23086b408adec7c112dd94bd8123cf0bed88fddac46b7f891f34c29f13bf883771725aa234d398b13c39fd2a871894f1b1e2dbc7fffbc9c65c49d1e9fd5ee0da133bef363d4ebebe63de2b50328b5d7e020303499d55c07cae617091e33a1ee72ba1b65f940852e93e2905fdf577adcf62be9c74ebda9af59d3f11bece8996773f392a2b35693a45a5a042d88a3dc816b689fe596762d4ea7c6024da713304f56dc928be6e8048c651766952b6c40d0f48afc067ca7cbd77763a2d4f11e88e16033b3343f39bf519fe734db8a139d148ccead4331817d46cf469befa64ae153b5923869144dfa669da36171c20e1f757ed5231fa5a08827d83f7b478ddfb44c9bceb5c6c920b8761ff1e3edb03de48fb55884351f0ac5a7a1805b9b6c49c0529deb97e994deaf2dfd008825e8704cdc04b621f316b505fde26ab71b31af7becbc1154f9979e43e135d35720b93b367bedbe6c6182bb6ed99051f28a3ad6d348ba5b178e3ea0ec0bb4a03fe36604a9eeb609857f8334d3b4b34867361ed2ff9163acd9a27fa20303abe9fc29f2d6c921a8ee779f7f77d940b48bc4fce70a58eed83a206fb7db4c1c7ebe7658603495bb40f6a581dd9e235ba0583165b1569052f8fb4a3e604f2dd74ad84531c6b96723c867b06b6fdd1c4ba150cf9080aa6bbf44cc29041090973d56913b9dc755960371568ef1cf03f127fe8eca209db5d18829f5bfb5826f98833e3f42472b47fad995a9a8bb0e41a1df45ead20285a8"); - - let ring = ring_data - .iter() - .map(|data| { - let mut ptr: *mut 
Public = std::ptr::null_mut(); - public_new_from_data(data.as_ptr(), data.len(), &mut ptr); - ptr - }) - .collect::<Vec<_>>(); - let vrf_input_data = hex!("6a616d5f7469636b65745f7365616cbb30a42c1e62f0afda5f0a4e8a562f7a13a24cea00ee81917b86b89e801314aa01"); - let aux_data = vec![]; - - let mut ctx_ptr: *mut RingContext = std::ptr::null_mut(); - assert_eq!(ring_context_new(6, &mut ctx_ptr), 0); - - let mut commitment_ptr: *mut RingCommitment = std::ptr::null_mut(); - assert_eq!( - ring_commitment_new_from_ring( - ring.as_ptr() as *const *const Public, - ring.len(), - ctx_ptr, - &mut commitment_ptr - ), - 0 - ); - - let mut output = [0u8; 32]; - - assert_eq!( - verifier_ring_vrf_verify( - ctx_ptr, - commitment_ptr, - vrf_input_data.as_ptr(), - vrf_input_data.len(), - aux_data.as_ptr(), - aux_data.len(), - signature.as_ptr(), - signature.len(), - output.as_mut_ptr(), - output.len() - ), - 0 - ); - - ring_commitment_free(commitment_ptr); - ring_context_free(ctx_ptr); - for ptr in ring.iter() { - public_free(*ptr); - } - } -} diff --git a/Utils/Tests/UtilsTests/AtomicDictionaryTests.swift b/Utils/Tests/UtilsTests/AtomicDictionaryTests.swift index 119c313f..cb9137f7 100644 --- a/Utils/Tests/UtilsTests/AtomicDictionaryTests.swift +++ b/Utils/Tests/UtilsTests/AtomicDictionaryTests.swift @@ -80,6 +80,7 @@ struct AtomicDictionaryTests { @Test func forEach() throws { let dict = AtomicDictionary(["one": 1, "two": 2, "three": 3]) var sum = 0 + // swiftformat:disable:next all dict.forEach { _, value in sum += value } diff --git a/Utils/Tests/UtilsTests/Crypto/BandersnatchTest.swift b/Utils/Tests/UtilsTests/Crypto/BandersnatchTest.swift index 95fd9bbb..aa4bec50 100644 --- a/Utils/Tests/UtilsTests/Crypto/BandersnatchTest.swift +++ b/Utils/Tests/UtilsTests/Crypto/BandersnatchTest.swift @@ -4,39 +4,28 @@ import Testing @testable import Utils @Suite struct BandersnatchTests { - @Test func ringVrfVerifyWorks() throws { - let ringHexStrings = [ - "5e465beb01dbafe160ce8216047f2155dd0569f058afd52dcea601025a8d161d", - "3d5e5a51aab2b048f8686ecd79712a80e3265a114cc73f14bdb2a59233fb66d0", - "aa2b95f7572875b0d0f186552ae745ba8222fc0b5bd456554bfe51c68938f8bc", - "7f6190116d118d643a98878e294ccf62b509e214299931aad8ff9764181a4e33", - "48e5fcdce10e0b64ec4eebd0d9211c7bac2f27ce54bca6f7776ff6fee86ab3e3", - "f16e5352840afb47e206b5c89f560f2611835855cf2e6ebad1acc9520a72591d", - ] + @Test func ringSignAndVerify() throws { + var seed = Data(repeating: 0x12, count: 32) + var keys = [Bandersnatch.SecretKey]() - let ringData = try ringHexStrings.compactMap { try Bandersnatch.PublicKey(data: Data32(Data(fromHexString: $0)!)!) } + for i in 0 ..< 10 { + seed[0] = UInt8(i) + let secret = try Bandersnatch.SecretKey(from: Data32(seed)!) + keys.append(secret) + } - var vrfInputData = Data("jam_ticket_seal".utf8) - let eta2Hex = "bb30a42c1e62f0afda5f0a4e8a562f7a13a24cea00ee81917b86b89e801314aa" - let eta2Bytes = Data(fromHexString: eta2Hex)!
- vrfInputData.append(eta2Bytes) - vrfInputData.append(1) - - let auxData = Data() - - let signatureHex = - // swiftlint:disable:next line_length - "b342bf8f6fa69c745daad2e99c92929b1da2b840f67e5e8015ac22dd1076343ea95c5bb4b69c197bfdc1b7d2f484fe455fb19bba7e8d17fcaf309ba5814bf54f3a74d75b408da8d3b99bf07f7cde373e4fd757061b1c99e0aac4847f1e393e892b566c14a7f8643a5d976ced0a18d12e32c660d59c66c271332138269cb0fe9c2462d5b3c1a6e9f5ed330ff0d70f64218010ff337b0b69b531f916c67ec564097cd842306df1b4b44534c95ff4efb73b17a14476057fdf8678683b251dc78b0b94712179345c794b6bd99aa54b564933651aee88c93b648e91a613c87bc3f445fff571452241e03e7d03151600a6ee259051a23086b408adec7c112dd94bd8123cf0bed88fddac46b7f891f34c29f13bf883771725aa234d398b13c39fd2a871894f1b1e2dbc7fffbc9c65c49d1e9fd5ee0da133bef363d4ebebe63de2b50328b5d7e020303499d55c07cae617091e33a1ee72ba1b65f940852e93e2905fdf577adcf62be9c74ebda9af59d3f11bece8996773f392a2b35693a45a5a042d88a3dc816b689fe596762d4ea7c6024da713304f56dc928be6e8048c651766952b6c40d0f48afc067ca7cbd77763a2d4f11e88e16033b3343f39bf519fe734db8a139d148ccead4331817d46cf469befa64ae153b5923869144dfa669da36171c20e1f757ed5231fa5a08827d83f7b478ddfb44c9bceb5c6c920b8761ff1e3edb03de48fb55884351f0ac5a7a1805b9b6c49c0529deb97e994deaf2dfd008825e8704cdc04b621f316b505fde26ab71b31af7becbc1154f9979e43e135d35720b93b367bedbe6c6182bb6ed99051f28a3ad6d348ba5b178e3ea0ec0bb4a03fe36604a9eeb609857f8334d3b4b34867361ed2ff9163acd9a27fa20303abe9fc29f2d6c921a8ee779f7f77d940b48bc4fce70a58eed83a206fb7db4c1c7ebe7658603495bb40f6a581dd9e235ba0583165b1569052f8fb4a3e604f2dd74ad84531c6b96723c867b06b6fdd1c4ba150cf9080aa6bbf44cc29041090973d56913b9dc755960371568ef1cf03f127fe8eca209db5d18829f5bfb5826f98833e3f42472b47fad995a9a8bb0e41a1df45ead20285a8" - let signatureBytes = Data(fromHexString: signatureHex)! - - // verifier - let ctx = try Bandersnatch.RingContext(size: 6) - let commitment = try Bandersnatch.RingCommitment(ring: ringData, ctx: ctx) + let ctx = try Bandersnatch.RingContext(size: UInt(keys.count)) + let commitment = try Bandersnatch.RingCommitment(ring: keys.map(\.publicKey), ctx: ctx) let verifier = Bandersnatch.Verifier(ctx: ctx, commitment: commitment) - let outputHashData = try verifier.ringVRFVerify( - vrfInputData: vrfInputData, auxData: auxData, signature: Data784(signatureBytes)! 
- ) - #expect(outputHashData != nil) + + for (i, key) in keys.enumerated() { + let prover = Bandersnatch.Prover(sercret: key, ring: keys.map(\.publicKey), proverIdx: UInt(i), ctx: ctx) + let vrfInputData = Data(repeating: UInt8(i), count: 32) + let sig = try prover.ringVRFSign(vrfInputData: vrfInputData) + let output = try verifier.ringVRFVerify(vrfInputData: vrfInputData, signature: sig) + let vrfOutput = try keys[i].getOutput(vrfInputData: vrfInputData) + #expect(output == vrfOutput) + } } } diff --git a/Utils/Tests/UtilsTests/SaturatingNumberTests.swift b/Utils/Tests/UtilsTests/SaturatingNumberTests.swift new file mode 100644 index 00000000..176cd1d7 --- /dev/null +++ b/Utils/Tests/UtilsTests/SaturatingNumberTests.swift @@ -0,0 +1,109 @@ +import Foundation +import Testing + +@testable import Utils + +struct SaturatingNumberTests { + @Test func testAdditionWithNoOverflow() { + let gas1 = Gas(100) + let gas2 = Gas(200) + let result = gas1 + gas2 + + #expect(result == Gas(300)) + } + + @Test func testAdditionWithOverflow() { + let maxGas = Gas.max + let result = maxGas + 1 + + #expect(result == Gas.max) + } + + @Test func testSubtractionWithNoOverflow() { + let gas1 = Gas(100) + let gas2 = Gas(200) + let result = gas1 - gas2 + + #expect(result == Gas(-100)) + } + + @Test func testSubtractionWithOverflow() { + let minGas = Gas.min + let result = minGas - 1 + + #expect(result == Gas.min) + } + + @Test func testMultiplicationWithNoOverflow() { + let gas1 = Gas(100) + let gas2 = Gas(200) + let result = gas1 * gas2 + + #expect(result == Gas(20000)) + } + + @Test func testMultiplicationWithOverflow() { + let maxGas = Gas.max + let result = maxGas * 2 + + #expect(result == Gas.max) + } + + @Test func testNegation() { + let gas1 = Gas(100) + let result = -gas1 + + #expect(result == Gas(-100)) + } + + @Test func testAdditionWithOtherType() { + let gas1 = Gas(100) + let result = gas1 + 1 + + #expect(result == Gas(101)) + } + + @Test func testSubtractionWithOtherType() { + let gas1 = Gas(100) + let result = gas1 - 1 + + #expect(result == Gas(99)) + } + + @Test func testMultiplicationWithOtherType() { + let gas1 = Gas(100) + let result = gas1 * 2 + + #expect(result == Gas(200)) + } + + @Test func testComparison() { + let gas1 = Gas(100) + let gas2 = Gas(200) + let gas3 = Gas(300) + + #expect(gas1 < gas2) + #expect(gas1 <= gas2) + #expect(gas2 > gas1) + #expect(gas2 >= gas1) + #expect(gas1 == gas1) + #expect(gas1 != gas2) + #expect(gas1 + gas2 == gas3) + } + + @Test func testDivision() { + let gas1 = Gas(100) + let gas2 = Gas(200) + let result = gas2 / gas1 + + #expect(result == Gas(2)) + } + + @Test func testModulo() { + let gas1 = Gas(100) + let gas2 = Gas(200) + + #expect(gas2 % gas1 == Gas(0)) + #expect(gas1 % gas2 == Gas(100)) + } +} diff --git a/Utils/Tests/UtilsTests/SortedArrayTests.swift b/Utils/Tests/UtilsTests/SortedArrayTests.swift index f5f58854..48c9d93f 100644 --- a/Utils/Tests/UtilsTests/SortedArrayTests.swift +++ b/Utils/Tests/UtilsTests/SortedArrayTests.swift @@ -8,7 +8,7 @@ struct SortedArrayTests { @Test func initWithUnsortedArray() { let unsorted = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5] - let sorted = SortedArray(unsorted: unsorted) + let sorted = SortedArray(unsorted) #expect(sorted.array == [1, 1, 2, 3, 3, 4, 5, 5, 5, 6, 9]) } @@ -20,7 +20,7 @@ struct SortedArrayTests { @Test func initWithSortedArrayThrowsOnUnsortedInput() { - #expect(throws: SortedArrayError.invalidData) { + #expect(throws: SortedContainerError.invalidData) { try SortedArray(sorted: [3, 1, 4, 1, 5]) } } @@ -33,42 
+33,42 @@ struct SortedArrayTests { @Test func insertElement() { - var sorted = SortedArray(unsorted: [1, 3, 5]) + var sorted = SortedArray([1, 3, 5]) sorted.insert(4) #expect(sorted.array == [1, 3, 4, 5]) } @Test func appendContentsOfCollection() { - var sorted = SortedArray(unsorted: [1, 3, 5]) + var sorted = SortedArray([1, 3, 5]) sorted.append(contentsOf: [2, 4, 6]) #expect(sorted.array == [1, 2, 3, 4, 5, 6]) } @Test func appendContentsOfSortedArray() { - var sorted1 = SortedArray(unsorted: [1, 3, 5]) - let sorted2 = SortedArray(unsorted: [2, 4, 6]) + var sorted1 = SortedArray([1, 3, 5]) + let sorted2 = SortedArray([2, 4, 6]) sorted1.append(contentsOf: sorted2) #expect(sorted1.array == [1, 2, 3, 4, 5, 6]) } @Test func removeAtIndex() { - var sorted = SortedArray(unsorted: [1, 2, 3, 4, 5]) + var sorted = SortedArray([1, 2, 3, 4, 5]) sorted.remove(at: 2) #expect(sorted.array == [1, 2, 4, 5]) } @Test func countProperty() { - let sorted = SortedArray(unsorted: [1, 2, 3, 4, 5]) + let sorted = SortedArray([1, 2, 3, 4, 5]) #expect(sorted.count == 5) } @Test func encodingAndDecoding() throws { - let original = SortedArray(unsorted: [3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5]) + let original = SortedArray([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5]) let encoded = try JamEncoder.encode(original) let decoder = JamDecoder(data: encoded) @@ -79,7 +79,7 @@ struct SortedArrayTests { @Test func insertIndex() { - let sorted = SortedArray(unsorted: [1, 3, 5, 7, 9]) + let sorted = SortedArray([1, 3, 5, 7, 9]) #expect(sorted.insertIndex(0) == 0) #expect(sorted.insertIndex(2) == 1) #expect(sorted.insertIndex(4) == 2) @@ -90,7 +90,7 @@ struct SortedArrayTests { @Test func insertIndexWithRange() { - let sorted = SortedArray(unsorted: [1, 3, 5, 7, 9]) + let sorted = SortedArray([1, 3, 5, 7, 9]) #expect(sorted.insertIndex(5, begin: 1, end: 3) == 2) } } diff --git a/boka.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/boka.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved index 259c54bb..dadbcffe 100644 --- a/boka.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved +++ b/boka.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved @@ -1,5 +1,5 @@ { - "originHash" : "51d7cf31cef9c84c0ac3503a94c9c24781f891ea76cbf8976dcee15c03505428", + "originHash" : "64d6d2bd3a54574173837ac745942b523c8e42e36237f566a2706ce1d1c2b949", "pins" : [ { "identity" : "async-http-client", @@ -202,10 +202,10 @@ { "identity" : "swift-numerics", "kind" : "remoteSourceControl", - "location" : "https://github.com/apple/swift-numerics.git", + "location" : "https://github.com/apple/swift-numerics", "state" : { - "revision" : "0a5bc04095a675662cf24757cc0640aa2204253b", - "version" : "1.0.2" + "branch" : "main", + "revision" : "e30276bff2ff5ed80566fbdca49f50aa160b0e83" } }, { diff --git a/scripts/external-libs.sh b/scripts/external-libs.sh new file mode 100755 index 00000000..28797207 --- /dev/null +++ b/scripts/external-libs.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -e + +release=v6 + +os=$(uname -s) +arch=$(uname -m) + +if [ "$os" = "Darwin" ]; then + filename="macos-build.tar.gz" +elif [ "$os" = "Linux" ]; then + filename="linux-build.tar.gz" +else + echo "Unsupported OS: $os" >&2 + exit 1 +fi + +mkdir -p .lib +cd .lib + +curl -L -o "$filename" "https://github.com/AcalaNetwork/boka-external-libs/releases/download/$release/$filename" + +tar -xvf "$filename" + +rm "$filename" + +mv build/$os-$arch/* .
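For context, a minimal usage sketch of the SortedContainer API introduced above (illustrative only, not part of the patch; it assumes only the types and members added in this change, plus the duplicate-handling fix in SortedUniqueArray.append(contentsOf:)):

import Utils

// Illustrative only: exercises SortedArray / SortedUniqueArray as introduced in this diff.
var a = SortedArray([3, 1, 2])           // sorts on init -> [1, 2, 3]
a.insert(2)                              // duplicates are kept -> [1, 2, 2, 3]

var u = SortedUniqueArray([3, 1, 2, 2])  // sorts and deduplicates -> [1, 2, 3]
u.insert(2)                              // no-op: the element is already present
u.append(contentsOf: a)                  // merge keeps the result sorted and unique
assert(u.array == [1, 2, 3])
assert(u.contains(2) && !u.contains(4))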
diff --git a/scripts/msquic.sh b/scripts/msquic.sh deleted file mode 100755 index 99ef3cf6..00000000 --- a/scripts/msquic.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash - -set -e - - -# Setup quic C module -CWD=$(pwd) - -mkdir -p .lib - -system=$(uname -s) - -arch=$(uname -m) - -cd Networking/Sources/msquic || { echo "Submodule directory not found"; exit 1; } - -rm -rf build - -mkdir build && cd build - -if [ $system = "Darwin" ]; then - cmake -DENABLE_LOGGING=OFF -DCMAKE_OSX_ARCHITECTURES=$arch -DCMAKE_C_FLAGS="-Wno-invalid-unevaluated-string" -DQUIC_BUILD_SHARED=off .. -fi - -if [ $system = "Linux" ]; then - cmake -G 'Unix Makefiles' -DENABLE_LOGGING=OFF -DCMAKE_OSX_ARCHITECTURES=$arch -DCMAKE_C_FLAGS="-Wno-invalid-unevaluated-string" -DQUIC_BUILD_SHARED=off .. -fi - -cmake --build . || { echo "Build msquic library failed"; exit 1; } - -cp bin/Release/libmsquic.a ${CWD}/.lib - -echo "Setup msquic successfully." diff --git a/scripts/rocksdb.sh b/scripts/rocksdb.sh deleted file mode 100755 index 2eccffa7..00000000 --- a/scripts/rocksdb.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -e - - -# Setup rocksdb -CWD=$(pwd) - -mkdir -p .lib - -cd Database/Sources/rocksdb || { echo "directory not found"; exit 1; } - -make static_lib - -cp librocksdb.a ${CWD}/.lib - -echo "Setup rocksdb successfully."
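And a companion sketch (again illustrative only, not part of the patch) of the saturating semantics that SaturatingNumberTests pins down; Gas is the typealias added in Utils/Sources/Utils/primitives.swift:

import Utils

// Illustrative only: SaturatingNumber clamps at the bounds instead of trapping on overflow.
let spent = Gas.max
assert(spent + 1 == Gas.max)    // addition saturates at the upper bound
assert(Gas.min - 1 == Gas.min)  // subtraction saturates at the lower bound
assert(Gas.max * 2 == Gas.max)  // multiplication saturates as well
assert(-Gas(100) == Gas(-100))  // negation goes through negatedWithSaturation()

var budget = Gas(100)
budget -= 30                    // mixed-type operators accept the raw integer directly
assert(budget == Gas(70))       // division and modulo remain plain (non-saturating) integer ops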