
Commit 9db3b89: lint fixes
oliverbundalo committed Feb 20, 2024
1 parent 45320fb commit 9db3b89
Showing 10 changed files with 68 additions and 35 deletions.
16 changes: 13 additions & 3 deletions blockchain/storage/leveldb/leveldb_perf_test.go
@@ -47,6 +47,7 @@ func randStringBytes(n int) string {
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}

return string(b)
}

@@ -128,6 +129,7 @@ func dbSize(t *testing.T, path string) int64 {
if err != nil {
t.Fail()
}

if info != nil && !info.IsDir() && strings.Contains(info.Name(), ".ldb") {
size += info.Size()
}
@@ -145,8 +147,10 @@ func updateBlock(t *testing.T, num uint64, b *types.FullBlock) *types.FullBlock
t.Helper()

var addr types.Address

b.Block.Header.Number = num
b.Block.Header.ParentHash = types.StringToHash(randStringBytes(12))

for i := range b.Block.Transactions {
addr = types.StringToAddress(randStringBytes(8))
b.Block.Transactions[i].SetTo(&addr)
@@ -170,6 +174,7 @@ func prepareBatch(t *testing.T, s storage.Storage, b *types.FullBlock) *storage.
// GidLid 'sorted'
batchWriter.PutHeadHash(b.Block.Header.Hash)
batchWriter.PutHeadNumber(b.Block.Number())

for _, tx := range b.Block.Transactions {
batchWriter.PutTxLookup(tx.Hash(), b.Block.Hash())
}
@@ -187,24 +192,27 @@ func TestWriteBlockPerf(t *testing.T) {
s, _, path := openStorage(t, "/tmp/leveldbV1-test")
defer s.Close()

count := 10000
var watchTime int

count := 10000
b := createBlock(t)
var watchTime int

for i := 1; i <= count; i++ {
updateBlock(t, uint64(i), b)
batchWriter := prepareBatch(t, s, b)

watch := stopwatch.Start()

if err := batchWriter.WriteBatch(); err != nil {
require.NoError(t, err)
}

watch.Stop()
watchTime = watchTime + int(watch.Milliseconds())
}

time.Sleep(time.Second)

size := dbSize(t, path)
t.Logf("\tdb size %d MB", size/(1024*1024))
t.Logf("\ttotal WriteBatch %d ms", watchTime)
@@ -214,8 +222,9 @@ func TestReadBlockPerf(t *testing.T) {
s, _, _ := openStorage(t, "/tmp/leveldbV1-test")
defer s.Close()

count := 1000
var watchTime int

count := 1000
for i := 1; i <= count; i++ {
n := uint64(1 + rand.Intn(10000))

@@ -224,6 +233,7 @@ func TestReadBlockPerf(t *testing.T) {
_, err2 := s.ReadBody(h)
_, err3 := s.ReadHeader(h)
_, err4 := s.ReadReceipts(h)

watch.Stop()
watchTime = watchTime + int(watch.Milliseconds())

File renamed without changes.
@@ -1,7 +1,7 @@
package leveldb

import (
"github.com/0xPolygon/polygon-edge/blockchain/storageV2"
"github.com/0xPolygon/polygon-edge/blockchain/storagev2"
"github.com/hashicorp/go-hclog"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -14,28 +14,29 @@ type levelDB struct {

// DB key = k + mapper
var tableMapper = map[uint8][]byte{
storageV2.BODY: []byte("b"), // DB key = block number + mapper
storageV2.CANONICAL: []byte("c"), // DB key = block number + mapper
storageV2.DIFFICULTY: []byte("d"), // DB key = block number + mapper
storageV2.HEADER: []byte("h"), // DB key = block number + mapper
storageV2.RECEIPTS: []byte("r"), // DB key = block number + mapper
storageV2.FORK: []byte("0000000f"), // DB key = empty + mapper
storageV2.HEAD_HASH: []byte("0000000h"), // DB key = empty + mapper
storageV2.HEAD_NUMBER: []byte("0000000n"), // DB key = empty + mapper
storageV2.BLOCK_LOOKUP: {}, // DB key = block hash + mapper, value = block number
storageV2.TX_LOOKUP: {}, // DB key = tx hash + mapper, value = block number
storagev2.BODY: []byte("b"), // DB key = block number + mapper
storagev2.CANONICAL: []byte("c"), // DB key = block number + mapper
storagev2.DIFFICULTY: []byte("d"), // DB key = block number + mapper
storagev2.HEADER: []byte("h"), // DB key = block number + mapper
storagev2.RECEIPTS: []byte("r"), // DB key = block number + mapper
storagev2.FORK: []byte("0000000f"), // DB key = empty + mapper
storagev2.HEAD_HASH: []byte("0000000h"), // DB key = empty + mapper
storagev2.HEAD_NUMBER: []byte("0000000n"), // DB key = empty + mapper
storagev2.BLOCK_LOOKUP: {}, // DB key = block hash + mapper, value = block number
storagev2.TX_LOOKUP: {}, // DB key = tx hash + mapper, value = block number
}

// NewLevelDBStorage creates the new storage reference with leveldb default options
func NewLevelDBStorage(path string, logger hclog.Logger) (*storageV2.Storage, error) {
var ldbs [2]storageV2.Database
func NewLevelDBStorage(path string, logger hclog.Logger) (*storagev2.Storage, error) {
var ldbs [2]storagev2.Database

// Open LevelDB storage
// Set default options
options := &opt.Options{
BlockCacheCapacity: 64 * opt.MiB,
WriteBuffer: 128 * opt.MiB, // Two of these are used internally
}

db, err := openLevelDBStorage(path, options)
if err != nil {
return nil, err
@@ -48,14 +49,16 @@ func NewLevelDBStorage(path string, logger hclog.Logger) (*storageV2.Storage, er
WriteBuffer: opt.DefaultWriteBuffer,
}
path = path + "/gidlid"

gidlid, err := openLevelDBStorage(path, options)
if err != nil {
return nil, err
}

ldbs[0] = &levelDB{db}
ldbs[1] = &levelDB{gidlid}
return storageV2.Open(logger.Named("leveldb"), ldbs)

return storagev2.Open(logger.Named("leveldb"), ldbs)
}

func openLevelDBStorage(path string, options *opt.Options) (*leveldb.DB, error) {
@@ -71,6 +74,7 @@ func openLevelDBStorage(path string, options *opt.Options) (*leveldb.DB, error)
func (l *levelDB) Get(t uint8, k []byte) ([]byte, error) {
mc := tableMapper[t]
k = append(k, mc...)

data, err := l.db.Get(k, nil)
if err != nil {
return nil, err
@@ -85,6 +89,6 @@ func (l *levelDB) Close() error {
}

// NewBatch creates batch for database write operations
func (l *levelDB) NewBatch() storageV2.Batch {
func (l *levelDB) NewBatch() storagev2.Batch {
return newBatchLevelDB(l.db)
}
@@ -10,7 +10,7 @@ import (
"time"

"github.com/0xPolygon/polygon-edge/blockchain"
"github.com/0xPolygon/polygon-edge/blockchain/storageV2"
"github.com/0xPolygon/polygon-edge/blockchain/storagev2"
"github.com/0xPolygon/polygon-edge/types"
"github.com/bradhe/stopwatch"
"github.com/hashicorp/go-hclog"
@@ -48,6 +48,7 @@ func randStringBytes(n int) string {
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}

return string(b)
}

@@ -101,7 +102,7 @@ func createBlock(t *testing.T) *types.FullBlock {
return b
}

func openStorage(t *testing.T, p string) (*storageV2.Storage, func(), string) {
func openStorage(t *testing.T, p string) (*storagev2.Storage, func(), string) {
t.Helper()

s, err := NewLevelDBStorage(p, hclog.NewNullLogger())
@@ -129,6 +130,7 @@ func dbSize(t *testing.T, path string) int64 {
if err != nil {
t.Fail()
}

if info != nil && !info.IsDir() && strings.Contains(info.Name(), ".ldb") {
size += info.Size()
}
@@ -146,8 +148,10 @@ func updateBlock(t *testing.T, num uint64, b *types.FullBlock) *types.FullBlock
t.Helper()

var addr types.Address

b.Block.Header.Number = num
b.Block.Header.ParentHash = types.StringToHash(randStringBytes(12))

for i := range b.Block.Transactions {
addr = types.StringToAddress(randStringBytes(8))
b.Block.Transactions[i].SetTo(&addr)
@@ -163,7 +167,7 @@ func updateBlock(t *testing.T, num uint64, b *types.FullBlock) *types.FullBlock
return b
}

func prepareBatch(t *testing.T, s *storageV2.Storage, b *types.FullBlock) *storageV2.Writer {
func prepareBatch(t *testing.T, s *storagev2.Storage, b *types.FullBlock) *storagev2.Writer {
t.Helper()

batchWriter := s.NewWriter()
@@ -172,6 +176,7 @@ func prepareBatch(t *testing.T, s *storageV2.Storage, b *types.FullBlock) *stora
batchWriter.PutHeadHash(b.Block.Header.Hash)
batchWriter.PutHeadNumber(b.Block.Number())
batchWriter.PutBlockLookup(b.Block.Hash(), b.Block.Number())

for _, tx := range b.Block.Transactions {
batchWriter.PutTxLookup(tx.Hash(), b.Block.Number())
}
@@ -189,24 +194,27 @@ func TestWriteBlockPerf(t *testing.T) {
s, _, path := openStorage(t, "/tmp/leveldbV2-test")
defer s.Close()

count := 10000
var watchTime int

count := 10000
b := createBlock(t)
var watchTime int

for i := 1; i <= count; i++ {
updateBlock(t, uint64(i), b)
batchWriter := prepareBatch(t, s, b)

watch := stopwatch.Start()

if err := batchWriter.WriteBatch(); err != nil {
require.NoError(t, err)
}

watch.Stop()
watchTime = watchTime + int(watch.Milliseconds())
}

time.Sleep(time.Second)

size := dbSize(t, path)
t.Logf("\tdb size %d MB", size/(1024*1024))
t.Logf("\ttotal WriteBatch %d ms", watchTime)
@@ -216,8 +224,9 @@ func TestReadBlockPerf(t *testing.T) {
s, _, _ := openStorage(t, "/tmp/leveldbV2-test")
defer s.Close()

count := 1000
var watchTime int

count := 1000
for i := 1; i <= count; i++ {
n := uint64(1 + rand.Intn(10000))

@@ -227,12 +236,14 @@ func TestReadBlockPerf(t *testing.T) {
_, err3 := s.ReadHeader(n)
_, err4 := s.ReadReceipts(n)
b, err5 := s.ReadBlockLookup(h)

watch.Stop()
watchTime = watchTime + int(watch.Milliseconds())

if err1 != nil || err2 != nil || err3 != nil || err4 != nil || err5 != nil {
t.Logf("\terror")
}

assert.Equal(t, n, b)
}
t.Logf("\ttotal read %d ms", watchTime)
@@ -12,13 +12,13 @@ import (
"time"

"github.com/0xPolygon/polygon-edge/blockchain"
"github.com/0xPolygon/polygon-edge/blockchain/storageV2"
"github.com/0xPolygon/polygon-edge/blockchain/storagev2"
"github.com/0xPolygon/polygon-edge/types"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
)

func newStorage(t *testing.T) (*storageV2.Storage, func()) {
func newStorage(t *testing.T) (*storagev2.Storage, func()) {
t.Helper()

path, err := os.MkdirTemp("/tmp", "minimal_storage")
@@ -45,7 +45,7 @@ func newStorage(t *testing.T) (*storageV2.Storage, func()) {
}

func TestStorage(t *testing.T) {
storageV2.TestStorage(t, newStorage)
storagev2.TestStorage(t, newStorage)
}

func generateTxs(t *testing.T, startNonce, count int, from types.Address, to *types.Address) []*types.Transaction {
@@ -128,7 +128,7 @@ func generateBlock(t *testing.T, num uint64) *types.FullBlock {
return b
}

func newStorageP(t *testing.T) (*storageV2.Storage, func(), string) {
func newStorageP(t *testing.T) (*storagev2.Storage, func(), string) {
t.Helper()

p, err := os.MkdirTemp("", "leveldbV2-test")
@@ -191,6 +191,7 @@ func dirSize(t *testing.T, path string) int64 {
if err != nil {
t.Fail()
}

if !info.IsDir() {
size += info.Size()
}
@@ -1,4 +1,5 @@
package storageV2
//nolint:stylecheck
package storagev2

import (
"github.com/hashicorp/go-hclog"
@@ -50,8 +51,7 @@ const (
GIDLID_INDEX = uint8(1)
)

// Empty key
var EMPTY = []byte{}
var EMPTY = []byte{} // Empty key

func Open(logger hclog.Logger, db [2]Database) (*Storage, error) {
return &Storage{logger: logger, db: db}, nil
@@ -68,15 +68,18 @@ func (s *Storage) Close() error {
s.db[i] = nil
}
}

return nil
}

func (s *Storage) NewWriter() *Writer {
var batch [2]Batch
batch[0] = s.db[0].NewBatch()

if s.db[1] != nil {
batch[1] = s.db[1].NewBatch()
}

return &Writer{batch: batch}
}

@@ -1,5 +1,5 @@
//nolint:stylecheck
package storageV2
package storagev2

import (
"errors"
@@ -1,4 +1,4 @@
package storageV2
package storagev2

import (
"math/big"