// Copyright (c) 2018-2019, The Decred developers
// Copyright (c) 2017, Jonathan Chappelow
// See LICENSE for details.
package main
import (
"context"
"database/sql"
"fmt"
"math"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
"sync"
"time"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/rpcclient"
"github.com/decred/dcrdata/v4/api"
"github.com/decred/dcrdata/v4/api/insight"
"github.com/decred/dcrdata/v4/blockdata"
"github.com/decred/dcrdata/v4/db/agendadb"
"github.com/decred/dcrdata/v4/db/dbtypes"
"github.com/decred/dcrdata/v4/db/dcrpg"
"github.com/decred/dcrdata/v4/db/dcrsqlite"
"github.com/decred/dcrdata/v4/exchanges"
"github.com/decred/dcrdata/v4/explorer"
exptypes "github.com/decred/dcrdata/v4/explorer/types"
"github.com/decred/dcrdata/v4/mempool"
m "github.com/decred/dcrdata/v4/middleware"
notify "github.com/decred/dcrdata/v4/notification"
"github.com/decred/dcrdata/v4/pubsub"
pstypes "github.com/decred/dcrdata/v4/pubsub/types"
"github.com/decred/dcrdata/v4/rpcutils"
"github.com/decred/dcrdata/v4/semver"
"github.com/decred/dcrdata/v4/txhelpers"
"github.com/decred/dcrdata/v4/version"
"github.com/go-chi/chi"
"github.com/google/gops/agent"
)
func main() {
// Create a context that is cancelled when a shutdown request is received
// via requestShutdown.
ctx := withShutdownCancel(context.Background())
// Listen for both interrupt signals and shutdown requests.
go shutdownListener()
if err := _main(ctx); err != nil {
if logRotator != nil {
log.Error(err)
}
os.Exit(1)
}
os.Exit(0)
}
// _main does all the work. Deferred functions do not run after os.Exit(), so
// main wraps this function, which returns an error.
func _main(ctx context.Context) error {
// Parse the configuration file and set up the logger.
cfg, err := loadConfig()
if err != nil {
fmt.Printf("Failed to load dcrdata config: %s\n", err.Error())
return err
}
defer func() {
if logRotator != nil {
logRotator.Close()
}
}()
if cfg.CPUProfile != "" {
var f *os.File
f, err = os.Create(cfg.CPUProfile)
if err != nil {
return err
}
if err = pprof.StartCPUProfile(f); err != nil {
return err
}
defer pprof.StopCPUProfile()
}
if cfg.UseGops {
// Start gops diagnostic agent, with shutdown cleanup.
if err = agent.Listen(agent.Options{}); err != nil {
return err
}
defer agent.Close()
}
// Display app version.
log.Infof("%s version %v (Go version %s)", version.AppName,
version.Version(), runtime.Version())
// PostgreSQL backend is enabled via FullMode config option (--pg switch).
usePG := cfg.FullMode
if usePG {
log.Info(`Running in full-functionality mode with PostgreSQL backend enabled.`)
} else {
log.Info(`Running in "Lite" mode with only SQLite backend and limited functionality.`)
}
// Setup the notification handlers.
notify.MakeNtfnChans(usePG)
// Connect to dcrd RPC server using a websocket.
ntfnHandlers, collectionQueue := notify.MakeNodeNtfnHandlers()
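// collectionQueue orders processing of block-connected notifications; the
// synchronous handlers registered on it later via SetSynchronousHandlers
// run in sequence for each connected block.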
dcrdClient, nodeVer, err := connectNodeRPC(cfg, ntfnHandlers)
if err != nil || dcrdClient == nil {
return fmt.Errorf("Connection to dcrd failed: %v", err)
}
defer func() {
// The individual handlers' loops should close the notification channels
// on quit, but do it here too to be sure.
notify.CloseNtfnChans()
if dcrdClient != nil {
log.Infof("Closing connection to dcrd.")
dcrdClient.Shutdown()
}
log.Infof("Bye!")
time.Sleep(250 * time.Millisecond)
}()
// Display connected network (e.g. mainnet, testnet, simnet).
curnet, err := dcrdClient.GetCurrentNet()
if err != nil {
return fmt.Errorf("Unable to get current network from dcrd: %v", err)
}
log.Infof("Connected to dcrd (JSON-RPC API v%s) on %v",
nodeVer.String(), curnet.String())
if curnet != activeNet.Net {
log.Criticalf("Network of connected node, %s, does not match expected "+
"network, %s.", activeNet.Net, curnet)
return fmt.Errorf("expected network %s, got %s", activeNet.Net, curnet)
}
// SQLite output
dbPath := filepath.Join(cfg.DataDir, cfg.DBFileName)
dbInfo := dcrsqlite.DBInfo{FileName: dbPath}
baseDB, cleanupDB, err := dcrsqlite.InitWiredDB(&dbInfo,
notify.NtfnChans.UpdateStatusDBHeight, dcrdClient, activeChain, cfg.DataDir)
defer cleanupDB()
if err != nil {
return fmt.Errorf("Unable to initialize SQLite database: %v", err)
}
log.Infof("SQLite DB successfully opened: %s", cfg.DBFileName)
defer baseDB.Close()
if err = baseDB.ReportHeights(); err != nil {
return fmt.Errorf("Possible SQLite corruption: %v", err)
}
// Auxiliary DB (currently PostgreSQL)
var auxDB *dcrpg.ChainDBRPC
var newPGIndexes, updateAllAddresses, updateAllVotes bool
if usePG {
pgHost, pgPort := cfg.PGHost, ""
if !strings.HasPrefix(pgHost, "/") {
pgHost, pgPort, err = net.SplitHostPort(cfg.PGHost)
if err != nil {
return fmt.Errorf("SplitHostPort failed: %v", err)
}
}
dbi := dcrpg.DBInfo{
Host: pgHost,
Port: pgPort,
User: cfg.PGUser,
Pass: cfg.PGPass,
DBName: cfg.PGDBName,
QueryTimeout: cfg.PGQueryTimeout,
}
// If using {netname} then replace it with netName(activeNet).
dbi.DBName = strings.Replace(dbi.DBName, "{netname}", netName(activeNet), -1)
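// As an illustration, a configured DBName of "dcrdata_{netname}" would
// become "dcrdata_mainnet" when running on mainnet (hypothetical name).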
chainDB, err := dcrpg.NewChainDBWithCancel(ctx, &dbi, activeChain,
baseDB.GetStakeDB(), !cfg.NoDevPrefetch)
if chainDB != nil {
defer chainDB.Close()
}
if err != nil {
return err
}
auxDB, err = dcrpg.NewChainDBRPC(chainDB, dcrdClient)
if err != nil {
return err
}
if err = auxDB.VersionCheck(dcrdClient); err != nil {
return err
}
var idxExists bool
idxExists, err = auxDB.ExistsIndexVinOnVins()
if !idxExists || err != nil {
newPGIndexes = true
log.Infof("Indexes not found. Forcing new index creation.")
}
idxExists, err = auxDB.ExistsIndexAddressesVoutIDAddress()
if !idxExists || err != nil {
updateAllAddresses = true
}
}
// Heights gets the current height of each DB, the minimum of the DB heights
// (dbHeight), and the chain server height.
Heights := func() (dbHeight, nodeHeight, baseDBHeight, auxDBHeight int64, err error) {
_, nodeHeight, err = dcrdClient.GetBestBlock()
if err != nil {
err = fmt.Errorf("unable to get block from node: %v", err)
return
}
baseDBHeight, err = baseDB.GetHeight()
if err != nil {
if err != sql.ErrNoRows {
log.Errorf("baseDB.GetHeight failed: %v", err)
return
}
// err == sql.ErrNoRows is not an error
err = nil
log.Infof("baseDB block summary table is empty.")
}
log.Debugf("baseDB height: %d", baseDBHeight)
dbHeight = baseDBHeight
if usePG {
auxDBHeight, err = auxDB.GetHeight()
if err != nil {
if err != sql.ErrNoRows {
log.Errorf("auxDB.GetHeight failed: %v", err)
return
}
// err == sql.ErrNoRows is not an error
err = nil
log.Infof("auxDB block summary table is empty.")
}
log.Debugf("auxDB height: %d", auxDBHeight)
if baseDBHeight > auxDBHeight {
dbHeight = auxDBHeight
}
}
return
}
// Optionally purge best blocks according to config.
if cfg.PurgeNBestBlocks > 0 {
// The number of blocks to purge for each DB is computed so that the DBs
// will end on the same height.
_, _, baseDBHeight, auxDBHeight, err := Heights()
if err != nil {
return fmt.Errorf("Heights failed: %v", err)
}
// Determine the largest DB height.
maxHeight := baseDBHeight
if usePG && auxDBHeight > maxHeight {
maxHeight = auxDBHeight
}
// The final best block after purge.
purgeToBlock := maxHeight - int64(cfg.PurgeNBestBlocks)
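// As an illustration (hypothetical heights): with baseDB at 300000, auxDB
// at 299998, and PurgeNBestBlocks = 8, maxHeight is 300000, purgeToBlock
// is 299992, and the purges below leave both DBs at height 299992.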
// Purge from SQLite, using either the "blocks above" or "N best main
// chain" approach.
var heightDB, nRemovedSummary int64
if cfg.FastSQLitePurge {
log.Infof("Purging SQLite data for the blocks above %d...",
purgeToBlock)
nRemovedSummary, _, err = baseDB.PurgeBlocksAboveHeight(purgeToBlock)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("failed to purge to block %d from SQLite: %v",
purgeToBlock, err)
}
heightDB = purgeToBlock
} else {
// Purge NBase blocks from base DB.
NBase := baseDBHeight - purgeToBlock
log.Infof("Purging SQLite data for the %d best blocks...", NBase)
nRemovedSummary, _, heightDB, _, err = baseDB.PurgeBestBlocks(NBase)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("failed to purge %d blocks from SQLite: %v",
NBase, err)
}
}
// The number of rows removed from the summary table and stake table may
// differ if the DB was corrupted, but it is not important to log the
// counts for each table separately.
log.Infof("Sucessfully purged data for %d blocks from SQLite "+
"(new height = %d).", nRemovedSummary, heightDB)
if usePG {
// Purge NAux blocks from auxiliary DB.
NAux := auxDBHeight - purgeToBlock
log.Infof("Purging PostgreSQL data for the %d best blocks...", NAux)
s, heightDB, err := auxDB.PurgeBestBlocks(NAux)
if err != nil && err != sql.ErrNoRows {
return fmt.Errorf("Failed to purge %d blocks from PostgreSQL: %v",
NAux, err)
}
if s != nil {
log.Infof("Sucessfully purged data for %d blocks from PostgreSQL "+
"(new height = %d):\n%v", s.Blocks, heightDB, s)
} // otherwise likely err == sql.ErrNoRows
}
}
// When in lite mode, baseDB should get blocks without having to coordinate
// with auxDB. Setting fetchToHeight to a large number allows this.
var fetchToHeight = int64(math.MaxInt32)
if usePG {
// Get the last block added to the aux DB.
var heightDB uint64
heightDB, err = auxDB.HeightDB()
lastBlockPG := int64(heightDB)
// If the tables are empty, heightDB will still be zero, so we check for
// sql.ErrNoRows to recognize this case and set lastBlockPG to -1.
if err != nil {
if err != sql.ErrNoRows {
return fmt.Errorf("Unable to get height from PostgreSQL DB: %v", err)
}
// lastBlockPG of 0 implies genesis is already processed.
lastBlockPG = -1
}
// Allow WiredDB/stakedb to catch up to the auxDB, but after
// fetchToHeight, WiredDB must receive block signals from auxDB, and
// stakedb must send connect signals to auxDB.
fetchToHeight = lastBlockPG + 1
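// As an illustration (hypothetical heights): with auxDB's best block at
// 150000, fetchToHeight is 150001, so WiredDB/stakedb fetch blocks up to
// 150000 on their own and receive 150001 onward via auxDB's sync.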
// Aux DB height and stakedb height must be equal. StakeDatabase will
// catch up automatically if it is behind, but we must rewind it here if
// it is ahead of auxDB. For auxDB to receive notification from
// StakeDatabase when the required blocks are connected, the
// StakeDatabase must be at the same height or lower than auxDB.
stakedbHeight := int64(baseDB.GetStakeDB().Height())
if stakedbHeight > int64(heightDB) {
// Have baseDB rewind its StakeDatabase. The StakeDatabase can only be
// rewound to a height of zero at the lowest, even when lastBlockPG is
// -1, hence we rewind to heightDB.
log.Infof("Rewinding StakeDatabase from block %d to %d.",
stakedbHeight, heightDB)
stakedbHeight, err = baseDB.RewindStakeDB(ctx, int64(heightDB))
if err != nil {
return fmt.Errorf("RewindStakeDB failed: %v", err)
}
// Verify that the StakeDatabase is at the intended height.
if stakedbHeight != int64(heightDB) {
return fmt.Errorf("failed to rewind stakedb: got %d, expecting %d",
stakedbHeight, heightDB)
}
}
// TODO: just use getblockchaininfo to see if it is still syncing and what
// height the network's best block is at.
blockHash, nodeHeight, err := dcrdClient.GetBestBlock()
if err != nil {
return fmt.Errorf("Unable to get block from node: %v", err)
}
blockHeader, err := dcrdClient.GetBlockHeader(blockHash)
if err != nil {
return fmt.Errorf("unable to fetch the best block's header from the node: %v", err)
}
// bestBlockAge is the time since the dcrd best block was mined.
bestBlockAge := time.Since(blockHeader.Timestamp).Minutes()
// Since mining a block takes approximately activeChain.TargetTimePerBlock,
// the expected height of the node's best block can be estimated from the
// best block's age. Both quantities are expressed in minutes so the units
// cancel.
expectedHeight := int64(bestBlockAge/activeChain.TargetTimePerBlock.Minutes()) + nodeHeight
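// As an illustration: with a best block mined 25 minutes ago and a
// 5-minute target block time (Decred mainnet), expectedHeight is
// nodeHeight + 5.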
// Estimate how far auxDB is behind the node.
blocksBehind := expectedHeight - lastBlockPG
if blocksBehind < 0 {
return fmt.Errorf("Node is still syncing. Node height = %d, "+
"DB height = %d", expectedHeight, heightDB)
}
// (TODO@chappjc) The following overrides of updateAllAddresses and
// newPGIndexes are COMMENTED OUT until the utxoStore/utxoCache can be
// pre-charged. This is because without indexes and without a
// well-populated utxo cache, querying the value and corresponding
// address for a UTXO is far too slow. In an initial sync, the utxo
// cache is effective as it gets filled from the start of the chain, but
// when starting at an arbitrary block the cache is empty. But the
// queries to look up the data on a cache miss rely on the indexes for
// acceptable performance.
//
// if blocksBehind > 7500 {
// log.Warnf("Setting PSQL sync to rebuild address table after large "+
// "import (%d blocks).", blocksBehind)
// updateAllAddresses = true
// if blocksBehind > 40000 {
// log.Warnf("Setting PSQL sync to drop indexes prior to bulk data "+
// "import (%d blocks).", blocksBehind)
// newPGIndexes = true
// }
// }
// PG gets winning tickets out of baseDB's pool info cache, so it must
// be big enough to hold the needed blocks' info, and charged with the
// data from disk. The cache is updated on each block connect.
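// As an illustration: a DB 1000 blocks behind the node yields a cache
// capacity of 1200 blocks of ticket pool data.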
tpcSize := int(blocksBehind) + 200
log.Debugf("Setting ticket pool cache capacity to %d blocks", tpcSize)
err = baseDB.GetStakeDB().SetPoolCacheCapacity(tpcSize)
if err != nil {
return err
}
// Charge stakedb pool info cache, including previous PG blocks, up to
// best in sqlite.
if err = baseDB.ChargePoolInfoCache(int64(heightDB) - 2); err != nil {
return fmt.Errorf("Failed to charge pool info cache: %v", err)
}
}
// Set the path to the AgendaDB file.
agendadb.SetDbPath(filepath.Join(cfg.DataDir, cfg.AgendaDBFileName))
// AgendaDB upgrade check
if err = agendadb.CheckForUpdates(dcrdClient); err != nil {
return fmt.Errorf("agendadb upgrade failed: %v", err)
}
// Block data collector. Needs a StakeDatabase too.
collector := blockdata.NewCollector(dcrdClient, activeChain, baseDB.GetStakeDB())
if collector == nil {
return fmt.Errorf("Failed to create block data collector")
}
// Build a slice of each required saver type for each data source.
var blockDataSavers []blockdata.BlockDataSaver
var mempoolSavers []mempool.MempoolDataSaver
if usePG {
blockDataSavers = append(blockDataSavers, auxDB)
}
blockDataSavers = append(blockDataSavers, baseDB)
mempoolSavers = append(mempoolSavers, baseDB.MPC)
// Allow Ctrl-C to halt startup here.
if shutdownRequested(ctx) {
return nil
}
// WaitGroup for monitoring goroutines
var wg sync.WaitGroup
// ExchangeBot
var xcBot *exchanges.ExchangeBot
if cfg.EnableExchangeBot {
var botCfg exchanges.ExchangeBotConfig
if cfg.DisabledExchanges != "" {
botCfg.Disabled = strings.Split(cfg.DisabledExchanges, ",")
}
botCfg.BtcIndex = cfg.ExchangeCurrency
xcBot, err = exchanges.NewExchangeBot(&botCfg)
if err != nil {
log.Errorf("Could not create exchange monitor. Exchange info will be disabled: %v", err)
} else {
var xcList, prepend string
for k := range xcBot.Exchanges {
xcList += prepend + k
prepend = ", "
}
log.Infof("ExchangeBot monitoring %s", xcList)
wg.Add(1)
go xcBot.Start(ctx, &wg)
}
}
// Create the explorer system.
explore := explorer.New(baseDB, auxDB, cfg.UseRealIP, version.Version(),
!cfg.NoDevPrefetch, "views", xcBot) // TODO: allow views config
if explore == nil {
return fmt.Errorf("failed to create new explorer (templates missing?)")
}
explore.UseSIGToReloadTemplates()
defer explore.StopWebsocketHub()
blockDataSavers = append(blockDataSavers, explore)
mempoolSavers = append(mempoolSavers, explore)
// Create the pub sub hub.
psHub := pubsub.NewPubSubHub(baseDB, auxDB)
if psHub == nil {
return fmt.Errorf("failed to create new PubSubHub")
}
defer psHub.StopWebsocketHub()
blockDataSavers = append(blockDataSavers, psHub)
mempoolSavers = append(mempoolSavers, psHub) // individual transactions are from the mempool monitor
// Prepare for sync by setting up the channels for status/progress updates
// (barLoad) or full explorer page updates (latestBlockHash).
// barLoad is used to send sync status updates to websocket clients (e.g.
// browsers with the status page opened) via the goroutines launched by
// BeginSyncStatusUpdates.
var barLoad chan *dbtypes.ProgressBarLoad
// latestBlockHash communicates the hash of the block most recently processed
// during synchronization. This is done if all of the explorer pages (not
// just the status page) are to be served during sync.
var latestBlockHash chan *chainhash.Hash
// Display the blockchain syncing status page if the number of blocks behind
// the node's best block height is more than the set limit. The sync status
// page should also be displayed when updateAllAddresses and newPGIndexes
// are true, indicating maintenance or an initial sync.
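// For example, with SyncStatusLimit set to 20 (a hypothetical setting),
// the status page is served whenever the DBs trail the node by more than
// 20 blocks.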
dbHeight, nodeHeight, _, _, err := Heights()
if err != nil {
return fmt.Errorf("Heights failed: %v", err)
}
blocksBehind := nodeHeight - dbHeight
log.Debugf("dbHeight: %d / blocksBehind: %d", dbHeight, blocksBehind)
displaySyncStatusPage := blocksBehind > int64(cfg.SyncStatusLimit) || // over limit
updateAllAddresses || newPGIndexes // maintenance or initial sync
// Initiate the sync status monitor and the coordinating goroutines if the
// sync status is activated, otherwise coordinate updating the full set of
// explorer pages.
if displaySyncStatusPage {
// Start goroutines that keep the shared progress bar data updated,
// and signal the websocket hub to send progress updates to clients.
barLoad = make(chan *dbtypes.ProgressBarLoad, 2)
explore.BeginSyncStatusUpdates(barLoad)
} else {
// Start a goroutine to update the explorer pages when the DB sync
// functions send a new block hash on the following channel.
latestBlockHash = make(chan *chainhash.Hash, 2)
// The BlockConnected handler should not be started until after sync.
go func() {
// Keep receiving updates until the channel is closed, or a nil Hash
// pointer is received.
for hash := range latestBlockHash {
if hash == nil {
return
}
// Fetch the blockdata by block hash.
d, msgBlock, err := collector.CollectHash(hash)
if err != nil {
log.Warnf("failed to fetch blockdata for (%s) hash. error: %v",
hash.String(), err)
continue
}
// Store the blockdata for the explorer pages.
if err = explore.Store(d, msgBlock); err != nil {
log.Warnf("failed to store (%s) hash's blockdata for the explorer pages error: %v",
hash.String(), err)
}
}
}()
// Before starting the DB sync, trigger the explorer to display data for
// the current best block.
// Retrieve the hash of the best block that all DBs have in common.
latestDBBlockHash, err := dcrdClient.GetBlockHash(dbHeight)
if err != nil {
return fmt.Errorf("failed to fetch the block at height (%d): %v",
dbHeight, err)
}
// Signal to load this block's data into the explorer. Future signals
// will come from the sync methods of either baseDB or auxDB.
latestBlockHash <- latestDBBlockHash
}
// Create the Insight socket.io server, and add it to block savers if in
// full/pg mode. Since insightSocketServer is registered with the URL
// router before the sync starts, this setup cannot be moved into the
// initiateHandlersAndCollectBlocks function.
var insightSocketServer *insight.SocketServer
if usePG {
insightSocketServer, err = insight.NewSocketServer(notify.NtfnChans.InsightNewTxChan, activeChain)
if err != nil {
return fmt.Errorf("Could not create Insight socket.io server: %v", err)
}
blockDataSavers = append(blockDataSavers, insightSocketServer)
}
// Start dcrdata's JSON web API.
app := api.NewContext(dcrdClient, activeChain, baseDB, auxDB, cfg.IndentJSON, xcBot)
// Start the notification handler for keeping /status up-to-date.
wg.Add(1)
go app.StatusNtfnHandler(ctx, &wg)
// Initial setting of DBHeight. Subsequently, Store() will send this.
if dbHeight >= 0 {
// Do not send 4294967295 = uint32(-1) if there are no blocks.
notify.NtfnChans.UpdateStatusDBHeight <- uint32(dbHeight)
}
// Configure the URL path to http handler router for the API.
apiMux := api.NewAPIRouter(app, cfg.UseRealIP)
// File downloads piggy-back on the API.
fileMux := api.NewFileRouter(app, cfg.UseRealIP)
// Configure the explorer web pages router.
webMux := chi.NewRouter()
webMux.With(explore.SyncStatusPageIntercept).Group(func(r chi.Router) {
r.Get("/", explore.Home)
r.Get("/nexthome", explore.NextHome)
})
webMux.Get("/ws", explore.RootWebsocket)
webMux.Get("/ps", psHub.WebSocketHandler)
webMux.Get("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./public/images/favicon.ico")
})
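// The static asset routes below are served with client-side caching,
// presumably via a Cache-Control max-age header that the FileServer
// helper derives from cfg.CacheControlMaxAge.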
cacheControlMaxAge := int64(cfg.CacheControlMaxAge)
FileServer(webMux, "/js", http.Dir("./public/js"), cacheControlMaxAge)
FileServer(webMux, "/css", http.Dir("./public/css"), cacheControlMaxAge)
FileServer(webMux, "/fonts", http.Dir("./public/fonts"), cacheControlMaxAge)
FileServer(webMux, "/images", http.Dir("./public/images"), cacheControlMaxAge)
FileServer(webMux, "/dist", http.Dir("./public/dist"), cacheControlMaxAge)
// SyncStatusAPIIntercept returns a JSON response if the sync status page is
// enabled (not the full explorer while syncing).
webMux.With(explore.SyncStatusAPIIntercept).Group(func(r chi.Router) {
// Mount dcrdata's REST API.
r.Mount("/api", apiMux.Mux)
// Set up and mount the Insight API.
if usePG {
insightApp := insight.NewInsightContext(dcrdClient, auxDB,
activeChain, baseDB, cfg.IndentJSON)
insightMux := insight.NewInsightApiRouter(insightApp, cfg.UseRealIP)
r.Mount("/insight/api", insightMux.Mux)
if insightSocketServer != nil {
r.Get("/insight/socket.io/", insightSocketServer.ServeHTTP)
}
}
})
// HTTP Error 503 StatusServiceUnavailable for file requests before sync.
webMux.With(explore.SyncStatusFileIntercept).Group(func(r chi.Router) {
r.Mount("/download", fileMux.Mux)
})
webMux.With(explore.SyncStatusPageIntercept).Group(func(r chi.Router) {
r.NotFound(explore.NotFound)
r.Mount("/explorer", explore.Mux)
r.Get("/days", explore.DayBlocksListing)
r.Get("/weeks", explore.WeekBlocksListing)
r.Get("/months", explore.MonthBlocksListing)
r.Get("/years", explore.YearBlocksListing)
r.Get("/blocks", explore.Blocks)
r.Get("/ticketpricewindows", explore.StakeDiffWindows)
r.Get("/side", explore.SideChains)
r.Get("/rejects", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/disapproved", http.StatusPermanentRedirect)
})
r.Get("/disapproved", explore.DisapprovedBlocks)
r.Get("/mempool", explore.Mempool)
r.Get("/parameters", explore.ParametersPage)
r.With(explore.BlockHashPathOrIndexCtx).Get("/block/{blockhash}", explore.Block)
r.With(explorer.TransactionHashCtx).Get("/tx/{txid}", explore.TxPage)
r.With(explorer.TransactionHashCtx, explorer.TransactionIoIndexCtx).Get("/tx/{txid}/{inout}/{inoutid}", explore.TxPage)
r.With(explorer.AddressPathCtx).Get("/address/{address}", explore.AddressPage)
r.With(explorer.AddressPathCtx).Get("/addresstable/{address}", explore.AddressTable)
r.Get("/agendas", explore.AgendasPage)
r.With(explorer.AgendaPathCtx).Get("/agenda/{agendaid}", explore.AgendaPage)
r.Get("/decodetx", explore.DecodeTxPage)
r.Get("/search", explore.Search)
r.Get("/charts", explore.Charts)
r.Get("/ticketpool", explore.Ticketpool)
r.Get("/stats", explore.StatsPage)
r.Get("/statistics", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/stats", http.StatusPermanentRedirect)
})
// HTTP profiler
if cfg.HTTPProfile {
profPath := cfg.HTTPProfPath
log.Warnf("Starting the HTTP profiler on path %s.", profPath)
// http pprof uses http.DefaultServeMux
http.Handle("/", http.RedirectHandler(profPath+"/debug/pprof/", http.StatusSeeOther))
r.Mount(profPath, http.StripPrefix(profPath, http.DefaultServeMux))
}
})
// Start the web server.
if err = listenAndServeProto(cfg.APIListen, cfg.APIProto, webMux); err != nil {
log.Criticalf("listenAndServeProto: %v", err)
requestShutdown()
}
log.Infof("Starting blockchain sync...")
explore.SetDBsSyncing(true)
psHub.SetReady(false)
// If in lite mode, baseDB will need to handle the sync progress bar or
// explorer page updates, otherwise it is auxDB's responsibility.
var baseProgressChan chan *dbtypes.ProgressBarLoad
var baseHashChan chan *chainhash.Hash
if !usePG {
baseProgressChan = barLoad
baseHashChan = latestBlockHash
}
// Coordinate the sync of both sqlite and auxiliary DBs with the network.
// This closure captures the RPC client and the shutdown context.
getSyncd := func(updateAddys, updateVotes, newPGInds bool,
fetchHeightInBaseDB int64) (int64, int64, error) {
// Simultaneously synchronize the ChainDB (PostgreSQL) and the
// block/stake info DB (sqlite). Results are returned over channels:
sqliteSyncRes := make(chan dbtypes.SyncResult)
pgSyncRes := make(chan dbtypes.SyncResult)
// Synchronization between DBs via rpcutils.BlockGate
smartClient := rpcutils.NewBlockGate(dcrdClient, 10)
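// The BlockGate lets a single routine (auxDB's sync) fetch each block
// while making it available to waiting consumers (stakedb); the 10 is
// presumably the number of recent blocks kept on hand.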
// stakedb (in baseDB) connects blocks *after* ChainDB retrieves them,
// but it has to get a notification channel first to receive them. The
// BlockGate will provide this for blocks after fetchHeightInBaseDB. In
// full mode, baseDB will be configured not to send progress updates or
// chain data to the explorer pages since auxDB will do it.
baseDB.SyncDBAsync(ctx, sqliteSyncRes, smartClient, fetchHeightInBaseDB,
baseHashChan, baseProgressChan)
// Now that stakedb is either catching up or waiting for a block, start
// the auxDB sync, which is the master block getter, retrieving and
// making available blocks to the baseDB. In return, baseDB maintains a
// StakeDatabase at the best block's height. For a detailed description
// on how the DBs' synchronization is coordinated, see the documents in
// db/dcrpg/sync.go.
go auxDB.SyncChainDBAsync(ctx, pgSyncRes, smartClient,
updateAddys, updateVotes, newPGInds, latestBlockHash, barLoad)
// Wait for the results from both of these DBs.
return waitForSync(ctx, sqliteSyncRes, pgSyncRes, usePG)
}
baseDBHeight, auxDBHeight, err := getSyncd(updateAllAddresses,
updateAllVotes, newPGIndexes, fetchToHeight)
if err != nil {
requestShutdown()
return err
}
if usePG {
// After sync and indexing, must use upsert statement, which checks for
// duplicate entries and updates instead of erroring. SyncChainDB should
// set this on successful sync, but do it again anyway.
auxDB.EnableDuplicateCheckOnInsert(true)
}
// The sync routines may have lengthy tasks, such as table indexing, that
// follow the main sync loop. Before enabling the chain monitors, again ensure
// the DBs are at the node's best block.
ensureSync := func() error {
updateAllAddresses, updateAllVotes, newPGIndexes = false, false, false
_, height, err := dcrdClient.GetBestBlock()
if err != nil {
return fmt.Errorf("unable to get block from node: %v", err)
}
for baseDBHeight < height {
fetchToHeight = auxDBHeight + 1
baseDBHeight, auxDBHeight, err = getSyncd(updateAllAddresses, updateAllVotes,
newPGIndexes, fetchToHeight)
if err != nil {
requestShutdown()
return err
}
_, height, err = dcrdClient.GetBestBlock()
if err != nil {
return fmt.Errorf("unable to get block from node: %v", err)
}
}
// Update the node height for the status API endpoint.
select {
case notify.NtfnChans.UpdateStatusNodeHeight <- uint32(height):
default:
log.Errorf("Failed to update node height with API status. Is StatusNtfnHandler started?")
}
// WiredDB.resyncDB is responsible for updating DB status via
// notify.NtfnChans.UpdateStatusDBHeight.
return nil
}
if err = ensureSync(); err != nil {
return err
}
// Exit immediately after the sync completes if SyncAndQuit is true, since
// all that was needed was a successfully completed blockchain sync.
if cfg.SyncAndQuit {
log.Infof("All ready, at height %d. Quitting.", baseDBHeight)
return nil
}
log.Info("Mainchain sync complete.")
// Ensure all side chains known by dcrd are also present in the base DB
// and import them if they are not already there.
if cfg.ImportSideChains {
log.Info("Primary DB -> Now retrieving side chain blocks from dcrd...")
err := baseDB.ImportSideChains(collector)
if err != nil {
log.Errorf("Primary DB -> Error importing side chains: %v", err)
}
}
// Ensure all side chains known by dcrd are also present in the auxiliary DB
// and import them if they are not already there.
if usePG && cfg.ImportSideChains {
// First identify the side chain blocks that are missing from the DB.
log.Info("Aux DB -> Retrieving side chain blocks from dcrd...")
sideChainBlocksToStore, nSideChainBlocks, err := auxDB.MissingSideChainBlocks()
if err != nil {
return fmt.Errorf("Aux DB -> Unable to determine missing side chain blocks: %v", err)
}
nSideChains := len(sideChainBlocksToStore)
// Importing side chain blocks involves only the aux (postgres) DBs
// since dcrsqlite does not track side chain blocks, and stakedb only
// supports mainchain. TODO: Get stakedb to work with side chain blocks
// to get ticket pool info.
// Collect and store data for each side chain.
log.Infof("Aux DB -> Importing %d new block(s) from %d known side chains...",
nSideChainBlocks, nSideChains)
// While in batch sync, disable recomputing the project fund balance and
// clearing the address balance and counts cache.
auxDB.InBatchSync = true
var sideChainsStored, sideChainBlocksStored int
for _, sideChain := range sideChainBlocksToStore {
// Process this side chain only if there are blocks in it that need
// to be stored.
if len(sideChain.Hashes) == 0 {
continue
}
sideChainsStored++
// Collect and store data for each block in this side chain.
for _, hash := range sideChain.Hashes {
// Validate the block hash.
blockHash, err := chainhash.NewHashFromStr(hash)
if err != nil {
log.Errorf("Aux DB -> Invalid block hash %s: %v.", hash, err)
continue
}
// Collect block data.
blockData, msgBlock, err := collector.CollectHash(blockHash)
if err != nil {
// Do not quit if unable to collect side chain block data.
log.Errorf("Aux DB -> Unable to collect data for side chain block %s: %v.",
hash, err)
continue
}
// Get the chainwork
chainWork, err := rpcutils.GetChainWork(auxDB.Client, blockHash)
if err != nil {
log.Errorf("GetChainWork failed (%s): %v", blockHash, err)
continue
}
// PostgreSQL / aux DB
log.Debugf("Aux DB -> Importing block %s (height %d) into aux DB.",
blockHash, msgBlock.Header.Height)
// Stake invalidation is always handled by the subsequent block, so
// add the block as valid. These are all side chain blocks.
isValid, isMainchain := true, false
// Existing DB records might be for mainchain and/or valid
// blocks, so these imported blocks should not update data in
// rows that would conflict with the various table constraints
// and unique indexes.
updateExistingRecords := false
// Store data in the aux (dcrpg) DB.
_, _, _, err = auxDB.StoreBlock(msgBlock, blockData.WinningTickets,
isValid, isMainchain, updateExistingRecords, true, true, chainWork)
if err != nil {
// If data collection succeeded, but storage fails, bail out
// to diagnose the DB trouble.
return fmt.Errorf("Aux DB -> ChainDBRPC.StoreBlock failed: %v", err)
}
sideChainBlocksStored++
}
}
auxDB.InBatchSync = false
log.Infof("Successfully added %d blocks from %d side chains into dcrpg DB.",
sideChainBlocksStored, sideChainsStored)
// That may have taken a while, check again for new blocks from network.
if err = ensureSync(); err != nil {
return err
}
}
log.Infof("All ready, at height %d.", baseDBHeight)
explore.SetDBsSyncing(false)
psHub.SetReady(true)
// Enable new blocks being stored into the base DB's cache.
baseDB.EnableCache()
// Stop displaying the sync status page now that the DB sync has completed.
if barLoad != nil {
close(barLoad)
}
// Signal that newly synced blocks should no longer be sent to the explorer.
// Monitors that fetch the latest updates from dcrd will be launched next.
if latestBlockHash != nil {
close(latestBlockHash)
}
// Monitors for new blocks, transactions, and reorgs should not run before
// blockchain syncing and DB indexing completes. If started before then, the
// DBs will not be prepared to process the notified events. For example, if
// dcrd notifies of block 200000 while dcrdata has only reached 1000 in
// batch synchronization, trying to process that block will be impossible as
// the entire chain before it is not yet processed. Similarly, if we have
// already registered for notifications with dcrd but the monitors below are
// not started, notifications will fill up the channels, only to be
// processed after sync. This is also incorrect since dcrd might notify of a
// new block 200000, but the batch sync will process that block on its own,
// causing this to be a duplicate block by the time the monitors begin
// pulling data out of the full channels.
// The following configures and starts handlers that monitor for new blocks,
// changes in the mempool, and handle chain reorg. It also initiates data
// collection for the explorer.
// Blockchain monitor for the collector
addrMap := make(map[string]txhelpers.TxAction) // for support of watched addresses
// On reorg, only update web UI since dcrsqlite's own reorg handler will
// deal with patching up the block info database.
reorgBlockDataSavers := []blockdata.BlockDataSaver{explore}
wsChainMonitor := blockdata.NewChainMonitor(ctx, collector, blockDataSavers,
reorgBlockDataSavers, &wg, addrMap, notify.NtfnChans.ConnectChan,
notify.NtfnChans.RecvTxBlockChan, notify.NtfnChans.ReorgChanBlockData)
// Blockchain monitor for the stake DB
sdbChainMonitor := baseDB.NewStakeDBChainMonitor(ctx, &wg,
notify.NtfnChans.ConnectChanStakeDB, notify.NtfnChans.ReorgChanStakeDB)
// Blockchain monitor for the wired sqlite DB
WiredDBChainMonitor := baseDB.NewChainMonitor(ctx, collector, &wg,
notify.NtfnChans.ConnectChanWiredDB, notify.NtfnChans.ReorgChanWiredDB)
var auxDBChainMonitor *dcrpg.ChainMonitor
if usePG {
// Blockchain monitor for the aux (PG) DB
auxDBChainMonitor = auxDB.NewChainMonitor(ctx, &wg,
notify.NtfnChans.ConnectChanDcrpgDB, notify.NtfnChans.ReorgChanDcrpgDB)
if auxDBChainMonitor == nil {
return fmt.Errorf("Failed to enable dcrpg ChainMonitor. *ChainDB is nil.")
}
}
// Setup the synchronous handler functions called by the collectionQueue via
// OnBlockConnected.
collectionQueue.SetSynchronousHandlers([]func(*chainhash.Hash) error{
sdbChainMonitor.BlockConnectedSync, // 1. Stake DB for pool info
wsChainMonitor.BlockConnectedSync, // 2. blockdata for regular block data collection and storage
})
// Initial data summary for the web UI. stakedb must be at the same height, so
// we do this before starting the monitors.
blockData, msgBlock, err := collector.Collect()
if err != nil {
return fmt.Errorf("Block data collection for initial summary failed: %v",
err.Error())
}
if err = explore.Store(blockData, msgBlock); err != nil {
return fmt.Errorf("Failed to store initial block data for explorer pages: %v", err.Error())
}
if err = psHub.Store(blockData, msgBlock); err != nil {
return fmt.Errorf("Failed to store initial block data with the PubSubHub: %v", err.Error())
}
// Register for notifications from dcrd. This also sets the daemon RPC
// client used by other functions in the notify/notification package (i.e.
// common ancestor identification in signalReorg).
cerr := notify.RegisterNodeNtfnHandlers(dcrdClient)