Commit 74162ed

spotless
Signed-off-by: garyschulte <garyschulte@gmail.com>
garyschulte committed Jul 13, 2023
1 parent b0503b5 commit 74162ed
Showing 38 changed files with 608 additions and 519 deletions.
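
Every hunk below is a mechanical re-wrap of an over-long line, which is what a spotless formatting pass produces; spotless in Java projects typically delegates to google-java-format (Google Java Style, 100-column limit), which is consistent with the wrapping seen in these hunks. The following is a minimal illustrative sketch, not part of this commit: the SpotlessStyleDemo class name is made up for the example, and it assumes the com.google.googlejavaformat:google-java-format dependency is on the classpath.

import com.google.googlejavaformat.java.Formatter;
import com.google.googlejavaformat.java.FormatterException;

// Illustrative only: SpotlessStyleDemo is a hypothetical class, not part of this commit.
public class SpotlessStyleDemo {
  public static void main(String[] args) throws FormatterException {
    // One of the over-long statements from this diff, wrapped in a tiny class so it parses.
    String before =
        "class Demo {\n"
            + "  void f() {\n"
            + "    composedWorldStateTransaction.put(CODE_STORAGE, accountHash.toArrayUnsafe(), code.toArrayUnsafe());\n"
            + "  }\n"
            + "}\n";
    // google-java-format enforces the 100-column limit and wraps the argument list onto a
    // continuation line, producing the same shape as the post-commit lines shown below.
    System.out.println(new Formatter().formatSource(before));
  }
}
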
@@ -29,7 +8,8 @@
public class BftContextBuilder {

public static BftContext setupContextWithValidators(final Collection<Address> validators) {
final BftContext bftContext = mock(BftContext.class, withSettings().strictness(Strictness.LENIENT));
final BftContext bftContext =
mock(BftContext.class, withSettings().strictness(Strictness.LENIENT));
final ValidatorProvider mockValidatorProvider =
mock(ValidatorProvider.class, withSettings().strictness(Strictness.LENIENT));
final BftBlockInterface mockBftBlockInterface =
@@ -36,7 +36,6 @@
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;
import org.hyperledger.besu.util.Subscribers;

import java.nio.charset.StandardCharsets;
@@ -84,7 +83,8 @@ public BonsaiWorldStateKeyValueStorage(
final StorageProvider provider, final ObservableMetricsSystem metricsSystem) {
this.composedWorldStateStorage =
provider.getStorageBySegmentIdentifiers(
List.of(ACCOUNT_INFO_STATE, CODE_STORAGE, ACCOUNT_STORAGE_STORAGE, TRIE_BRANCH_STORAGE));
List.of(
ACCOUNT_INFO_STATE, CODE_STORAGE, ACCOUNT_STORAGE_STORAGE, TRIE_BRANCH_STORAGE));
this.trieLogStorage =
provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
this.metricsSystem = metricsSystem;
@@ -199,15 +199,20 @@ public Optional<byte[]> getTrieLog(final Hash blockHash) {
}

public Optional<Bytes> getStateTrieNode(final Bytes location) {
return composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, location.toArrayUnsafe()).map(Bytes::wrap);
return composedWorldStateStorage
.get(TRIE_BRANCH_STORAGE, location.toArrayUnsafe())
.map(Bytes::wrap);
}

public Optional<Bytes> getWorldStateRootHash() {
return composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).map(Bytes::wrap);
}

public Optional<Hash> getWorldStateBlockHash() {
return composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY).map(Bytes32::wrap).map(Hash::wrap);
return composedWorldStateStorage
.get(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY)
.map(Bytes32::wrap)
.map(Hash::wrap);
}

public Optional<Bytes> getStorageValueByStorageSlotKey(
@@ -249,7 +254,8 @@ public Map<Bytes32, Bytes> streamFlatAccounts(
public Map<Bytes32, Bytes> streamFlatStorages(
final Hash accountHash, final Bytes startKeyHash, final Bytes32 endKeyHash, final long max) {
return getFlatDbReaderStrategy()
.streamStorageFlatDatabase(composedWorldStateStorage, accountHash, startKeyHash, endKeyHash, max);
.streamStorageFlatDatabase(
composedWorldStateStorage, accountHash, startKeyHash, endKeyHash, max);
}

@Override
@@ -267,15 +273,19 @@ public boolean isWorldStateAvailable(final Bytes32 rootHash, final Hash blockHas
}

public void upgradeToFullFlatDbMode() {
final SegmentedKeyValueStorageTransaction transaction = composedWorldStateStorage.startTransaction();
transaction.put(TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe());
final SegmentedKeyValueStorageTransaction transaction =
composedWorldStateStorage.startTransaction();
transaction.put(
TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe());
transaction.commit();
loadFlatDbStrategy(); // force reload of flat db reader strategy
}

public void downgradeToPartialFlatDbMode() {
final SegmentedKeyValueStorageTransaction transaction = composedWorldStateStorage.startTransaction();
transaction.put(TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.PARTIAL.getVersion().toArrayUnsafe());
final SegmentedKeyValueStorageTransaction transaction =
composedWorldStateStorage.startTransaction();
transaction.put(
TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.PARTIAL.getVersion().toArrayUnsafe());
transaction.commit();
loadFlatDbStrategy(); // force reload of flat db reader strategy
}
@@ -304,8 +314,7 @@ public void clearFlatDatabase() {
@Override
public BonsaiUpdater updater() {
return new Updater(
composedWorldStateStorage.startTransaction(),
trieLogStorage.startTransaction());
composedWorldStateStorage.startTransaction(), trieLogStorage.startTransaction());
}

@Override
@@ -365,7 +374,8 @@ public BonsaiUpdater putCode(final Hash accountHash, final Bytes32 codeHash, fin
// Don't save empty values
return this;
}
composedWorldStateTransaction.put(CODE_STORAGE, accountHash.toArrayUnsafe(), code.toArrayUnsafe());
composedWorldStateTransaction.put(
CODE_STORAGE, accountHash.toArrayUnsafe(), code.toArrayUnsafe());
return this;
}

@@ -381,16 +391,20 @@ public BonsaiUpdater putAccountInfoState(final Hash accountHash, final Bytes acc
// Don't save empty values
return this;
}
composedWorldStateTransaction.put(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe(), accountValue.toArrayUnsafe());
composedWorldStateTransaction.put(
ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe(), accountValue.toArrayUnsafe());
return this;
}

@Override
public WorldStateStorage.Updater saveWorldState(
final Bytes blockHash, final Bytes32 nodeHash, final Bytes node) {
composedWorldStateTransaction.put(TRIE_BRANCH_STORAGE, Bytes.EMPTY.toArrayUnsafe(), node.toArrayUnsafe());
composedWorldStateTransaction.put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, nodeHash.toArrayUnsafe());
composedWorldStateTransaction.put(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY, blockHash.toArrayUnsafe());
composedWorldStateTransaction.put(
TRIE_BRANCH_STORAGE, Bytes.EMPTY.toArrayUnsafe(), node.toArrayUnsafe());
composedWorldStateTransaction.put(
TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, nodeHash.toArrayUnsafe());
composedWorldStateTransaction.put(
TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY, blockHash.toArrayUnsafe());
return this;
}

@@ -401,7 +415,8 @@ public BonsaiUpdater putAccountStateTrieNode(
// Don't save empty nodes
return this;
}
composedWorldStateTransaction.put(TRIE_BRANCH_STORAGE, location.toArrayUnsafe(), node.toArrayUnsafe());
composedWorldStateTransaction.put(
TRIE_BRANCH_STORAGE, location.toArrayUnsafe(), node.toArrayUnsafe());
return this;
}

@@ -418,23 +433,28 @@ public synchronized BonsaiUpdater putAccountStorageTrieNode(
// Don't save empty nodes
return this;
}
composedWorldStateTransaction.put(TRIE_BRANCH_STORAGE,
Bytes.concatenate(accountHash, location).toArrayUnsafe(), node.toArrayUnsafe());
composedWorldStateTransaction.put(
TRIE_BRANCH_STORAGE,
Bytes.concatenate(accountHash, location).toArrayUnsafe(),
node.toArrayUnsafe());
return this;
}

@Override
public synchronized BonsaiUpdater putStorageValueBySlotHash(
final Hash accountHash, final Hash slotHash, final Bytes storage) {
composedWorldStateTransaction.put(ACCOUNT_STORAGE_STORAGE,
Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(), storage.toArrayUnsafe());
composedWorldStateTransaction.put(
ACCOUNT_STORAGE_STORAGE,
Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(),
storage.toArrayUnsafe());
return this;
}

@Override
public synchronized void removeStorageValueBySlotHash(
final Hash accountHash, final Hash slotHash) {
composedWorldStateTransaction.remove(ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, slotHash).toArrayUnsafe());
composedWorldStateTransaction.remove(
ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, slotHash).toArrayUnsafe());
}

@Override
@@ -38,11 +38,7 @@ public BonsaiWorldStateLayerStorage(
final KeyValueStorage trieLogStorage,
final BonsaiWorldStateKeyValueStorage parent,
final ObservableMetricsSystem metricsSystem) {
super(
parent,
composedWorldStateStorage,
trieLogStorage,
metricsSystem);
super(parent, composedWorldStateStorage, trieLogStorage, metricsSystem);
}

@Override
@@ -26,7 +26,6 @@
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

import java.util.Map;
@@ -119,15 +118,13 @@ public Optional<Bytes> getCode(
}
}

public void clearAll(
final SegmentedKeyValueStorage storage) {
public void clearAll(final SegmentedKeyValueStorage storage) {
storage.clear(ACCOUNT_INFO_STATE);
storage.clear(ACCOUNT_STORAGE_STORAGE);
storage.clear(CODE_STORAGE);
}

public void resetOnResync(
final SegmentedKeyValueStorage storage) {
public void resetOnResync(final SegmentedKeyValueStorage storage) {
storage.clear(ACCOUNT_INFO_STATE);
storage.clear(ACCOUNT_STORAGE_STORAGE);
}
@@ -159,7 +156,9 @@ public Map<Bytes32, Bytes> streamStorageFlatDatabase(
final long max) {
final Stream<Pair<Bytes32, Bytes>> pairStream =
storage
.streamFromKey(ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe())
.streamFromKey(
ACCOUNT_STORAGE_STORAGE,
Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe())
.takeWhile(pair -> Bytes.wrap(pair.getKey()).slice(0, Hash.SIZE).equals(accountHash))
.limit(max)
.map(
@@ -20,12 +20,10 @@

import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

import java.util.Optional;
@@ -83,7 +81,9 @@ public Optional<Bytes> getStorageValueByStorageSlotKey(
getStorageValueCounter.inc();
final Optional<Bytes> storageFound =
storage
.get(ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
.get(
ACCOUNT_STORAGE_STORAGE,
Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
.map(Bytes::wrap);
if (storageFound.isPresent()) {
getStorageValueFlatDatabaseCounter.inc();
@@ -95,8 +95,7 @@ public Optional<Bytes> getStorageValueByStorageSlotKey(
}

@Override
public void resetOnResync(
final SegmentedKeyValueStorage storage) {
public void resetOnResync(final SegmentedKeyValueStorage storage) {
// NOOP
// no need to reset anything in full mode
}
@@ -26,7 +26,6 @@
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

import java.util.Optional;
@@ -87,7 +86,8 @@ public Optional<Bytes> getAccount(
final Hash accountHash,
final SegmentedKeyValueStorage storage) {
getAccountCounter.inc();
Optional<Bytes> response = storage.get(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe()).map(Bytes::wrap);
Optional<Bytes> response =
storage.get(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe()).map(Bytes::wrap);
if (response.isEmpty()) {
// after a snapsync/fastsync we only have the trie branches.
final Optional<Bytes> worldStateRootHash = worldStateRootHashSupplier.get();
@@ -121,7 +121,9 @@ public Optional<Bytes> getStorageValueByStorageSlotKey(
getStorageValueCounter.inc();
Optional<Bytes> response =
storage
.get(ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
.get(
ACCOUNT_STORAGE_STORAGE,
Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
.map(Bytes::wrap);
if (response.isEmpty()) {
final Optional<Hash> storageRoot = storageRootSupplier.get();
@@ -186,7 +186,11 @@ private Hash calculateRootHash(
bonsaiUpdater -> {
accountTrie.commit(
(location, hash, value) ->
writeTrieNode(ACCOUNT_INFO_STATE, bonsaiUpdater.getWorldStateTransaction(), location, value));
writeTrieNode(
ACCOUNT_INFO_STATE,
bonsaiUpdater.getWorldStateTransaction(),
location,
value));
});
final Bytes32 rootHash = accountTrie.getRootHash();
return Hash.wrap(rootHash);
@@ -462,7 +466,8 @@ public void rollback() {
new SegmentedKeyValueStorageTransaction() {

@Override
public void put(final SegmentIdentifier segmentIdentifier, final byte[] key, final byte[] value) {
public void put(
final SegmentIdentifier segmentIdentifier, final byte[] key, final byte[] value) {
// no-op
}

@@ -485,8 +490,7 @@ public void rollback() {
@Override
public Hash frontierRootHash() {
return calculateRootHash(
Optional.of(
new BonsaiWorldStateKeyValueStorage.Updater(noOpSegmentedTx, noOpTx)),
Optional.of(new BonsaiWorldStateKeyValueStorage.Updater(noOpSegmentedTx, noOpTx)),
accumulator.copy());
}

@@ -511,8 +515,11 @@ protected Optional<Bytes> getAccountStateTrieNode(final Bytes location, final By
return worldStateStorage.getAccountStateTrieNode(location, nodeHash);
}

private void writeTrieNode(final SegmentIdentifier segmentId,
final SegmentedKeyValueStorageTransaction tx, final Bytes location, final Bytes value) {
private void writeTrieNode(
final SegmentIdentifier segmentId,
final SegmentedKeyValueStorageTransaction tx,
final Bytes location,
final Bytes value) {
tx.put(segmentId, location.toArrayUnsafe(), value.toArrayUnsafe());
}

@@ -23,7 +23,6 @@
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;

import java.io.Closeable;
import java.util.List;
@@ -45,10 +45,12 @@ public class KeyValueStorageProvider implements StorageProvider {
public static final boolean SEGMENT_ISOLATION_SUPPORTED = true;
public static final boolean SNAPSHOT_ISOLATION_UNSUPPORTED = false;

protected final Function<List<SegmentIdentifier>, SegmentedKeyValueStorage> segmentedStorageCreator;
protected final Function<List<SegmentIdentifier>, SegmentedKeyValueStorage>
segmentedStorageCreator;
private final KeyValueStorage worldStatePreimageStorage;
private final boolean isWorldStateIterable;
protected final Map<List<SegmentIdentifier>, SegmentedKeyValueStorage> storageInstances = new HashMap<>();
protected final Map<List<SegmentIdentifier>, SegmentedKeyValueStorage> storageInstances =
new HashMap<>();
private final ObservableMetricsSystem metricsSystem;

public KeyValueStorageProvider(
@@ -94,11 +96,13 @@ public WorldStatePreimageStorage createWorldStatePreimageStorage() {

@Override
public KeyValueStorage getStorageBySegmentIdentifier(final SegmentIdentifier segment) {
return new SegmentedKeyValueStorageAdapter(segment, storageInstances.computeIfAbsent(List.of(segment), segmentedStorageCreator));
return new SegmentedKeyValueStorageAdapter(
segment, storageInstances.computeIfAbsent(List.of(segment), segmentedStorageCreator));
}

@Override
public SegmentedKeyValueStorage getStorageBySegmentIdentifiers(final List<SegmentIdentifier> segments) {
public SegmentedKeyValueStorage getStorageBySegmentIdentifiers(
final List<SegmentIdentifier> segments) {
return segmentedStorageCreator.apply(segments);
}

@@ -118,9 +122,10 @@ public void close() throws IOException {
} catch (final IOException e) {
LOG.atWarn()
.setMessage("Failed to close storage instance {}")
.addArgument(storage.getKey().stream()
.map(SegmentIdentifier::getName)
.collect(Collectors.joining(",")))
.addArgument(
storage.getKey().stream()
.map(SegmentIdentifier::getName)
.collect(Collectors.joining(",")))
.setCause(e)
.log();
}
@@ -61,7 +61,8 @@ public KeyValueStorageProvider build() {
new LimitedInMemoryKeyValueStorage(DEFAULT_WORLD_STATE_PRE_IMAGE_CACHE_SIZE);

// this tickles init needed for isSegmentIsolationSupported
storageFactory.create(List.of(KeyValueSegmentIdentifier.BLOCKCHAIN), commonConfiguration, metricsSystem);
storageFactory.create(
List.of(KeyValueSegmentIdentifier.BLOCKCHAIN), commonConfiguration, metricsSystem);

return new KeyValueStorageProvider(
segments -> storageFactory.create(segments, commonConfiguration, metricsSystem),