Create a specific calculateRootHash method for BonsaiInMemoryWorldState to improve performance (hyperledger#4568)

* Parallelize some steps in the BonsaiPersistedWorldState.calculateRootHash method

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>

* Add synchronized to the storage flat database remove and update methods

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>

* Fix the org.rocksdb.RocksDBException: unknown WriteBatch tag error

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>

* Create a specific calculateRootHash for the BonsaiInMemoryWorldState class

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>

* Fix nullPointerException on Collections.synchronizedSet

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>

* Use parallelStream() instead of the CompletableFuture API

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>

* Modify CHANGELOG.md

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>

* Spotless formatting and synchronizedSet initialization

Signed-off-by: garyschulte <garyschulte@gmail.com>

Signed-off-by: Ameziane H <ameziane.hamlat@consensys.net>
Signed-off-by: garyschulte <garyschulte@gmail.com>
Co-authored-by: garyschulte <garyschulte@gmail.com>
Co-authored-by: Sally MacFarlane <macfarla.github@gmail.com>
Signed-off-by: Sally MacFarlane <macfarla.github@gmail.com>
3 people committed Jan 10, 2023
1 parent ca4de3c commit 74beb9b
Showing 7 changed files with 208 additions and 94 deletions.
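
The commit message sketches the overall approach: fan per-account work out with Java parallel streams (replacing an earlier CompletableFuture-based attempt) and make every collection the parallel workers touch thread-safe. The following is a minimal sketch of that pattern, not Besu source; the names ParallelUpdateSketch and persist are invented stand-ins.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ParallelUpdateSketch {
  // Thread-safe map, mirroring the updater collections changed below.
  private final Map<String, Integer> updates = new ConcurrentHashMap<>();

  // Stand-in for the flat-database update methods this commit marks
  // synchronized: the shared write batch underneath is not thread-safe.
  private synchronized void persist(final String key, final int value) {
    System.out.println(key + " -> " + value);
  }

  public void applyAll() {
    // parallelStream() schedules entries on the common fork-join pool;
    // equivalent CompletableFuture code would need explicit task
    // creation, collection, and joining.
    updates.entrySet().parallelStream()
        .forEach(entry -> persist(entry.getKey(), entry.getValue()));
  }

  public static void main(final String[] args) {
    final ParallelUpdateSketch sketch = new ParallelUpdateSketch();
    sketch.updates.put("account1", 1);
    sketch.updates.put("account2", 2);
    sketch.applyAll();
  }
}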
CHANGELOG.md (1 addition, 0 deletions)
@@ -37,6 +37,7 @@
- Upgrade RocksDB database version from 6.29.5 to 7.6.0 [#4517](https://github.com/hyperledger/besu/pull/4517)
- Avoid connecting to self when using static-nodes [#4521](https://github.com/hyperledger/besu/pull/4521)
- EVM performance has increased 20%-100% depending on the particulars of the contract. [#4540](https://github.com/hyperledger/besu/pull/4540)
+- Improve calculateRootHash method performance during block processing [#4568](https://github.com/hyperledger/besu/pull/4568)

### Bug Fixes
- Corrects emission of BlockAdded events when rewinding during a re-org. Fix for [#4495](https://github.com/hyperledger/besu/issues/4495)
BonsaiInMemoryWorldState.java
@@ -16,8 +16,16 @@

package org.hyperledger.besu.ethereum.bonsai;

+import org.hyperledger.besu.datatypes.Address;
 import org.hyperledger.besu.datatypes.Hash;
 import org.hyperledger.besu.ethereum.core.BlockHeader;
+import org.hyperledger.besu.ethereum.trie.StoredMerklePatriciaTrie;
+
+import java.util.Map;
+import java.util.function.Function;
+
+import org.apache.tuweni.bytes.Bytes;
+import org.apache.tuweni.units.bigints.UInt256;

public class BonsaiInMemoryWorldState extends BonsaiPersistedWorldState {

@@ -38,12 +46,90 @@ public Hash rootHash() {
}

  public Hash rootHash(final BonsaiWorldStateUpdater localUpdater) {
-    final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = worldStateStorage.updater();
-    try {
-      final Hash calculatedRootHash = calculateRootHash(updater, localUpdater);
-      return Hash.wrap(calculatedRootHash);
-    } finally {
-      updater.rollback();
-    }
+    final Hash calculatedRootHash = calculateRootHash(localUpdater);
+    return Hash.wrap(calculatedRootHash);
  }

+  protected Hash calculateRootHash(final BonsaiWorldStateUpdater worldStateUpdater) {
+
+    // second update account storage state. This must be done before updating the accounts so
+    // that we can get the storage state hash
+    worldStateUpdater.getStorageToUpdate().entrySet().parallelStream()
+        .forEach(
+            addressMapEntry -> {
+              updateAccountStorage(worldStateUpdater, addressMapEntry);
+            });
+
+    // for manicured tries and composting, trim and compost here
+
+    // next walk the account trie
+    final StoredMerklePatriciaTrie<Bytes, Bytes> accountTrie =
+        new StoredMerklePatriciaTrie<>(
+            this::getAccountStateTrieNode,
+            worldStateRootHash,
+            Function.identity(),
+            Function.identity());
+
+    // for manicured tries and composting, collect branches here (not implemented)
+
+    // now add the accounts
+    for (final Map.Entry<Address, BonsaiValue<BonsaiAccount>> accountUpdate :
+        worldStateUpdater.getAccountsToUpdate().entrySet()) {
+      final Bytes accountKey = accountUpdate.getKey();
+      final BonsaiValue<BonsaiAccount> bonsaiValue = accountUpdate.getValue();
+      final BonsaiAccount updatedAccount = bonsaiValue.getUpdated();
+      if (updatedAccount == null) {
+        final Hash addressHash = Hash.hash(accountKey);
+        accountTrie.remove(addressHash);
+      } else {
+        final Hash addressHash = updatedAccount.getAddressHash();
+        final Bytes accountValue = updatedAccount.serializeAccount();
+        accountTrie.put(addressHash, accountValue);
+      }
+    }
+
+    // TODO write to a cache and then generate a layer update from that and the
+    // DB tx updates. Right now it is just DB updates.
+    return Hash.wrap(accountTrie.getRootHash());
+  }
+
+  private void updateAccountStorage(
+      final BonsaiWorldStateUpdater worldStateUpdater,
+      final Map.Entry<Address, Map<Hash, BonsaiValue<UInt256>>> storageAccountUpdate) {
+    final Address updatedAddress = storageAccountUpdate.getKey();
+    final Hash updatedAddressHash = Hash.hash(updatedAddress);
+    if (worldStateUpdater.getAccountsToUpdate().containsKey(updatedAddress)) {
+      final BonsaiValue<BonsaiAccount> accountValue =
+          worldStateUpdater.getAccountsToUpdate().get(updatedAddress);
+      final BonsaiAccount accountOriginal = accountValue.getPrior();
+      final Hash storageRoot =
+          (accountOriginal == null) ? Hash.EMPTY_TRIE_HASH : accountOriginal.getStorageRoot();
+      final StoredMerklePatriciaTrie<Bytes, Bytes> storageTrie =
+          new StoredMerklePatriciaTrie<>(
+              (location, key) -> getStorageTrieNode(updatedAddressHash, location, key),
+              storageRoot,
+              Function.identity(),
+              Function.identity());
+
+      // for manicured tries and composting, collect branches here (not implemented)
+
+      for (final Map.Entry<Hash, BonsaiValue<UInt256>> storageUpdate :
+          storageAccountUpdate.getValue().entrySet()) {
+        final Hash keyHash = storageUpdate.getKey();
+        final UInt256 updatedStorage = storageUpdate.getValue().getUpdated();
+        if (updatedStorage == null || updatedStorage.equals(UInt256.ZERO)) {
+          storageTrie.remove(keyHash);
+        } else {
+          storageTrie.put(keyHash, BonsaiWorldView.encodeTrieValue(updatedStorage));
+        }
+      }
+
+      final BonsaiAccount accountUpdated = accountValue.getUpdated();
+      if (accountUpdated != null) {
+        final Hash newStorageRoot = Hash.wrap(storageTrie.getRootHash());
+        accountUpdated.setStorageRoot(newStorageRoot);
+      }
+    }
+  }

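Note which part of the new in-memory calculateRootHash runs in parallel: only the storage loop, because each account owns an independent storage trie whose root can be computed without touching any other account, while the single shared account trie is still filled in an ordinary sequential loop. A simplified illustration of that split, with a hypothetical storageRoot function standing in for real trie hashing:

import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;

public class RootHashSketch {
  // account -> (slot -> value), analogous to getStorageToUpdate()
  static final Map<String, Map<String, String>> storageToUpdate = new ConcurrentHashMap<>();

  // Stand-in for hashing one account's independent storage trie.
  static int storageRoot(final Map<String, String> slots) {
    return new TreeMap<>(slots).hashCode();
  }

  public static void main(final String[] args) {
    storageToUpdate.put("accountA", Map.of("slot1", "v1"));
    storageToUpdate.put("accountB", Map.of("slot2", "v2"));

    // Parallel phase: per-account storage roots are independent.
    final Map<String, Integer> storageRoots = new ConcurrentHashMap<>();
    storageToUpdate.entrySet().parallelStream()
        .forEach(e -> storageRoots.put(e.getKey(), storageRoot(e.getValue())));

    // Sequential phase: one shared structure (the account trie in Besu,
    // a sorted map here) is folded on a single thread.
    final Map<String, Integer> accountTrie = new TreeMap<>(storageRoots);
    System.out.println("root = " + accountTrie.hashCode());
  }
}

The in-memory variant also drops the BonsaiUpdater parameter entirely: it never writes trie nodes or account state to a database transaction, so the updater-and-rollback dance of the old rootHash(localUpdater) is no longer needed.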
BonsaiPersistedWorldState.java
@@ -104,43 +104,76 @@ public BonsaiWorldStateKeyValueStorage getWorldStateStorage() {
  protected Hash calculateRootHash(
      final BonsaiWorldStateKeyValueStorage.BonsaiUpdater stateUpdater,
      final BonsaiWorldStateUpdater worldStateUpdater) {
-    // first clear storage
-    for (final Address address : worldStateUpdater.getStorageToClear()) {
-      // because we are clearing persisted values we need the account root as persisted
-      final BonsaiAccount oldAccount =
-          worldStateStorage
-              .getAccount(Hash.hash(address))
-              .map(bytes -> fromRLP(BonsaiPersistedWorldState.this, address, bytes, true))
-              .orElse(null);
-      if (oldAccount == null) {
-        // This is when an account is both created and deleted within the scope of the same
-        // block. A not-uncommon DeFi bot pattern.
-        continue;
-      }
-      final Hash addressHash = Hash.hash(address);
-      final StoredMerklePatriciaTrie<Bytes, Bytes> storageTrie =
-          new StoredMerklePatriciaTrie<>(
-              (location, key) -> getStorageTrieNode(addressHash, location, key),
-              oldAccount.getStorageRoot(),
-              Function.identity(),
-              Function.identity());
-      Map<Bytes32, Bytes> entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256);
-      while (!entriesToDelete.isEmpty()) {
-        entriesToDelete
-            .keySet()
-            .forEach(
-                k -> stateUpdater.removeStorageValueBySlotHash(Hash.hash(address), Hash.wrap(k)));
-        if (entriesToDelete.size() == 256) {
-          entriesToDelete.keySet().forEach(storageTrie::remove);
-          entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256);
-        } else {
-          break;
-        }
-      }
-    }
+    clearStorage(stateUpdater, worldStateUpdater);
+
+    // This must be done before updating the accounts so
+    // that we can get the storage state hash
+    updateAccountStorageState(stateUpdater, worldStateUpdater);
+
+    // Third update the code. This has the side effect of ensuring a code hash is calculated.
+    updateCode(stateUpdater, worldStateUpdater);
+
+    // next walk the account trie
+    final StoredMerklePatriciaTrie<Bytes, Bytes> accountTrie =
+        new StoredMerklePatriciaTrie<>(
+            this::getAccountStateTrieNode,
+            worldStateRootHash,
+            Function.identity(),
+            Function.identity());
+
+    // for manicured tries and composting, collect branches here (not implemented)
+
+    addTheAccounts(stateUpdater, worldStateUpdater, accountTrie);
+
+    // TODO write to a cache and then generate a layer update from that and the
+    // DB tx updates. Right now it is just DB updates.
+    accountTrie.commit(
+        (location, hash, value) ->
+            writeTrieNode(stateUpdater.getTrieBranchStorageTransaction(), location, value));
+    final Bytes32 rootHash = accountTrie.getRootHash();
+    return Hash.wrap(rootHash);
+  }
+
+  private static void addTheAccounts(
+      final BonsaiWorldStateKeyValueStorage.BonsaiUpdater stateUpdater,
+      final BonsaiWorldStateUpdater worldStateUpdater,
+      final StoredMerklePatriciaTrie<Bytes, Bytes> accountTrie) {
+    for (final Map.Entry<Address, BonsaiValue<BonsaiAccount>> accountUpdate :
+        worldStateUpdater.getAccountsToUpdate().entrySet()) {
+      final Bytes accountKey = accountUpdate.getKey();
+      final BonsaiValue<BonsaiAccount> bonsaiValue = accountUpdate.getValue();
+      final BonsaiAccount updatedAccount = bonsaiValue.getUpdated();
+      if (updatedAccount == null) {
+        final Hash addressHash = Hash.hash(accountKey);
+        accountTrie.remove(addressHash);
+        stateUpdater.removeAccountInfoState(addressHash);
+      } else {
+        final Hash addressHash = updatedAccount.getAddressHash();
+        final Bytes accountValue = updatedAccount.serializeAccount();
+        stateUpdater.putAccountInfoState(Hash.hash(accountKey), accountValue);
+        accountTrie.put(addressHash, accountValue);
+      }
+    }
+  }
+
+  private static void updateCode(
+      final BonsaiWorldStateKeyValueStorage.BonsaiUpdater stateUpdater,
+      final BonsaiWorldStateUpdater worldStateUpdater) {
+    for (final Map.Entry<Address, BonsaiValue<Bytes>> codeUpdate :
+        worldStateUpdater.getCodeToUpdate().entrySet()) {
+      final Bytes updatedCode = codeUpdate.getValue().getUpdated();
+      final Hash accountHash = Hash.hash(codeUpdate.getKey());
+      if (updatedCode == null || updatedCode.size() == 0) {
+        stateUpdater.removeCode(accountHash);
+      } else {
+        stateUpdater.putCode(accountHash, null, updatedCode);
+      }
+    }
+  }

   // second update account storage state. This must be done before updating the accounts so
   // that we can get the storage state hash
+  private void updateAccountStorageState(
+      final BonsaiWorldStateKeyValueStorage.BonsaiUpdater stateUpdater,
+      final BonsaiWorldStateUpdater worldStateUpdater) {
     for (final Map.Entry<Address, Map<Hash, BonsaiValue<UInt256>>> storageAccountUpdate :
         worldStateUpdater.getStorageToUpdate().entrySet()) {
       final Address updatedAddress = storageAccountUpdate.getKey();
@@ -184,54 +217,44 @@
         }
         // for manicured tries and composting, trim and compost here
       }
+  }

-    // Third update the code. This has the side effect of ensuring a code hash is calculated.
-    for (final Map.Entry<Address, BonsaiValue<Bytes>> codeUpdate :
-        worldStateUpdater.getCodeToUpdate().entrySet()) {
-      final Bytes updatedCode = codeUpdate.getValue().getUpdated();
-      final Hash accountHash = Hash.hash(codeUpdate.getKey());
-      if (updatedCode == null || updatedCode.size() == 0) {
-        stateUpdater.removeCode(accountHash);
-      } else {
-        stateUpdater.putCode(accountHash, null, updatedCode);
-      }
-    }
-
-    // next walk the account trie
-    final StoredMerklePatriciaTrie<Bytes, Bytes> accountTrie =
-        new StoredMerklePatriciaTrie<>(
-            this::getAccountStateTrieNode,
-            worldStateRootHash,
-            Function.identity(),
-            Function.identity());
-
-    // for manicured tries and composting, collect branches here (not implemented)
-
-    // now add the accounts
-    for (final Map.Entry<Address, BonsaiValue<BonsaiAccount>> accountUpdate :
-        worldStateUpdater.getAccountsToUpdate().entrySet()) {
-      final Bytes accountKey = accountUpdate.getKey();
-      final BonsaiValue<BonsaiAccount> bonsaiValue = accountUpdate.getValue();
-      final BonsaiAccount updatedAccount = bonsaiValue.getUpdated();
-      if (updatedAccount == null) {
-        final Hash addressHash = Hash.hash(accountKey);
-        accountTrie.remove(addressHash);
-        stateUpdater.removeAccountInfoState(addressHash);
-      } else {
-        final Hash addressHash = updatedAccount.getAddressHash();
-        final Bytes accountValue = updatedAccount.serializeAccount();
-        stateUpdater.putAccountInfoState(Hash.hash(accountKey), accountValue);
-        accountTrie.put(addressHash, accountValue);
-      }
-    }
-
-    // TODO write to a cache and then generate a layer update from that and the
-    // DB tx updates. Right now it is just DB updates.
-    accountTrie.commit(
-        (location, hash, value) ->
-            writeTrieNode(stateUpdater.getTrieBranchStorageTransaction(), location, value));
-    final Bytes32 rootHash = accountTrie.getRootHash();
-    return Hash.wrap(rootHash);
+  private void clearStorage(
+      final BonsaiWorldStateKeyValueStorage.BonsaiUpdater stateUpdater,
+      final BonsaiWorldStateUpdater worldStateUpdater) {
+    for (final Address address : worldStateUpdater.getStorageToClear()) {
+      // because we are clearing persisted values we need the account root as persisted
+      final BonsaiAccount oldAccount =
+          worldStateStorage
+              .getAccount(Hash.hash(address))
+              .map(bytes -> fromRLP(BonsaiPersistedWorldState.this, address, bytes, true))
+              .orElse(null);
+      if (oldAccount == null) {
+        // This is when an account is both created and deleted within the scope of the same
+        // block. A not-uncommon DeFi bot pattern.
+        continue;
+      }
+      final Hash addressHash = Hash.hash(address);
+      final StoredMerklePatriciaTrie<Bytes, Bytes> storageTrie =
+          new StoredMerklePatriciaTrie<>(
+              (location, key) -> getStorageTrieNode(addressHash, location, key),
+              oldAccount.getStorageRoot(),
+              Function.identity(),
+              Function.identity());
+      Map<Bytes32, Bytes> entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256);
+      while (!entriesToDelete.isEmpty()) {
+        entriesToDelete
+            .keySet()
+            .forEach(
+                k -> stateUpdater.removeStorageValueBySlotHash(Hash.hash(address), Hash.wrap(k)));
+        entriesToDelete.keySet().forEach(storageTrie::remove);
+        if (entriesToDelete.size() == 256) {
+          entriesToDelete = storageTrie.entriesFrom(Bytes32.ZERO, 256);
+        } else {
+          break;
+        }
+      }
+    }
   }

@Override
@@ -342,7 +365,7 @@ public Account get(final Address address) {
.orElse(null);
}

-  private Optional<Bytes> getAccountStateTrieNode(final Bytes location, final Bytes32 nodeHash) {
+  protected Optional<Bytes> getAccountStateTrieNode(final Bytes location, final Bytes32 nodeHash) {
return worldStateStorage.getAccountStateTrieNode(location, nodeHash);
}

@@ -351,7 +374,7 @@ private void writeTrieNode(
tx.put(location.toArrayUnsafe(), value.toArrayUnsafe());
}

-  private Optional<Bytes> getStorageTrieNode(
+  protected Optional<Bytes> getStorageTrieNode(
final Hash accountHash, final Bytes location, final Bytes32 nodeHash) {
return worldStateStorage.getAccountStorageTrieNode(accountHash, location, nodeHash);
}
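The last two hunks widen getAccountStateTrieNode and getStorageTrieNode from private to protected, which is what lets BonsaiInMemoryWorldState build tries over the same stored nodes without duplicating storage access. A minimal sketch of that hook pattern under invented names (NodeSource, InMemoryNodeSource), assuming a subclass that computes but never commits:

import java.util.Map;
import java.util.Optional;

class NodeSource {
  private final Map<String, String> db = Map.of("root", "node-bytes");

  // protected rather than private, so subclasses can reuse the lookup.
  protected Optional<String> getTrieNode(final String key) {
    return Optional.ofNullable(db.get(key));
  }

  public String rootHash() {
    // the persisted variant also commits nodes back to storage
    return getTrieNode("root").orElseThrow() + " (committed)";
  }
}

class InMemoryNodeSource extends NodeSource {
  @Override
  public String rootHash() {
    // same node lookups, no commit step
    return getTrieNode("root").orElseThrow();
  }
}

public class VisibilitySketch {
  public static void main(final String[] args) {
    System.out.println(new NodeSource().rootHash());
    System.out.println(new InMemoryNodeSource().rootHash());
  }
}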
BonsaiWorldStateKeyValueStorage.java
@@ -378,15 +378,16 @@ public BonsaiUpdater putAccountStorageTrieNode(
}

@Override
-    public BonsaiUpdater putStorageValueBySlotHash(
+    public synchronized BonsaiUpdater putStorageValueBySlotHash(
final Hash accountHash, final Hash slotHash, final Bytes storage) {
storageStorageTransaction.put(
Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(), storage.toArrayUnsafe());
return this;
}

@Override
-    public void removeStorageValueBySlotHash(final Hash accountHash, final Hash slotHash) {
+    public synchronized void removeStorageValueBySlotHash(
+        final Hash accountHash, final Hash slotHash) {
storageStorageTransaction.remove(Bytes.concatenate(accountHash, slotHash).toArrayUnsafe());
}

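Marking these two updater methods synchronized serializes all writes into the underlying RocksDB transaction. That matches the commit-message bullet about org.rocksdb.RocksDBException: unknown WriteBatch tag, an error plausibly produced by parallel-stream workers appending to one WriteBatch concurrently, since RocksDB write batches are not thread-safe. A schematic of the fix with a stand-in batch class (FakeBatch) rather than the real RocksDB API:

import java.util.ArrayList;
import java.util.List;
import java.util.stream.IntStream;

// Stand-in for a non-thread-safe write batch.
class FakeBatch {
  private final List<String> ops = new ArrayList<>(); // not thread-safe

  void put(final String key, final String value) {
    ops.add("put " + key + "=" + value);
  }

  int size() {
    return ops.size();
  }
}

public class SynchronizedUpdaterSketch {
  private final FakeBatch batch = new FakeBatch();

  // synchronized: many parallel workers, one shared batch.
  public synchronized void putStorageValue(final String key, final String value) {
    batch.put(key, value);
  }

  public static void main(final String[] args) {
    final SynchronizedUpdaterSketch updater = new SynchronizedUpdaterSketch();
    IntStream.range(0, 1000)
        .parallel()
        .forEach(i -> updater.putStorageValue("slot" + i, "v" + i));
    // Always 1000 with synchronized; without it, lost or corrupted
    // appends are possible.
    System.out.println(updater.batch.size());
  }
}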
BonsaiWorldStateUpdater.java
@@ -28,6 +28,7 @@
import org.hyperledger.besu.evm.worldstate.WrappedEvmAccount;

import java.util.Collection;
+import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
@@ -47,9 +48,10 @@
public class BonsaiWorldStateUpdater extends AbstractWorldUpdater<BonsaiWorldView, BonsaiAccount>
implements BonsaiWorldView {

-  private final Map<Address, BonsaiValue<BonsaiAccount>> accountsToUpdate = new HashMap<>();
-  private final Map<Address, BonsaiValue<Bytes>> codeToUpdate = new HashMap<>();
-  private final Set<Address> storageToClear = new HashSet<>();
+  private final Map<Address, BonsaiValue<BonsaiAccount>> accountsToUpdate =
+      new ConcurrentHashMap<>();
+  private final Map<Address, BonsaiValue<Bytes>> codeToUpdate = new ConcurrentHashMap<>();
+  private final Set<Address> storageToClear = Collections.synchronizedSet(new HashSet<>());

// storage sub mapped by _hashed_ key. This is because in self_destruct calls we need to
// enumerate the old storage and delete it. Those are trie stored by hashed key by spec and the
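The updater's bookkeeping maps become ConcurrentHashMaps and the clear-set becomes a synchronized set, all initialized at field declaration; per the commit message, eager initialization of the synchronized set is also what resolved the earlier NullPointerException. One caveat with Collections.synchronizedSet, sketched below: individual operations are safe, but iteration still requires locking the set manually.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SynchronizedSetSketch {
  // Initialized at declaration, so it can never be observed as null.
  private final Set<String> storageToClear =
      Collections.synchronizedSet(new HashSet<>());

  public void demo() {
    storageToClear.add("0xabc"); // individual operations are synchronized

    // Iteration is not atomic; the javadoc requires locking the set.
    synchronized (storageToClear) {
      for (final String address : storageToClear) {
        System.out.println("clearing " + address);
      }
    }
  }

  public static void main(final String[] args) {
    new SynchronizedSetSketch().demo();
  }
}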