rpcserver.go
package main
import (
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"net"
"strings"
"time"
"sync"
"sync/atomic"
"github.com/btcsuite/fastsha256"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing"
"github.com/lightningnetwork/lnd/zpay32"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/chaincfg"
"github.com/roasbeef/btcd/chaincfg/chainhash"
"github.com/roasbeef/btcd/txscript"
"github.com/roasbeef/btcd/wire"
"github.com/roasbeef/btcutil"
"github.com/roasbeef/btcwallet/waddrmgr"
"golang.org/x/net/context"
)
var (
defaultAccount uint32 = waddrmgr.DefaultAccountNum
)
// rpcServer is a gRPC, RPC front end to the lnd daemon.
// TODO(roasbeef): pagination support for the list-style calls
type rpcServer struct {
started int32 // To be used atomically.
shutdown int32 // To be used atomically.
server *server
wg sync.WaitGroup
quit chan struct{}
}
// A compile time check to ensure that rpcServer fully implements the
// LightningServer gRPC service.
var _ lnrpc.LightningServer = (*rpcServer)(nil)
// newRPCServer creates and returns a new instance of the rpcServer.
func newRPCServer(s *server) *rpcServer {
return &rpcServer{server: s, quit: make(chan struct{}, 1)}
}
// Start launches any helper goroutines required for the rpcServer
// to function.
func (r *rpcServer) Start() error {
if atomic.AddInt32(&r.started, 1) != 1 {
return nil
}
return nil
}
// Stop signals any active goroutines for a graceful closure.
func (r *rpcServer) Stop() error {
if atomic.AddInt32(&r.shutdown, 1) != 1 {
return nil
}
close(r.quit)
return nil
}
// addrPairsToOutputs converts a map describing a set of outputs to be created
// into the outputs themselves. The passed map pairs an address with a desired
// output value amount. Each address is converted to its corresponding pkScript
// to be used within the constructed output(s).
func addrPairsToOutputs(addrPairs map[string]int64) ([]*wire.TxOut, error) {
outputs := make([]*wire.TxOut, 0, len(addrPairs))
for addr, amt := range addrPairs {
addr, err := btcutil.DecodeAddress(addr, activeNetParams.Params)
if err != nil {
return nil, err
}
pkscript, err := txscript.PayToAddrScript(addr)
if err != nil {
return nil, err
}
outputs = append(outputs, wire.NewTxOut(amt, pkscript))
}
return outputs, nil
}
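// Illustrative sketch (not part of the original file): building the payment
// map consumed by addrPairsToOutputs. The address and amount below are
// placeholders; DecodeAddress will only succeed for a valid address on the
// active network.
func exampleAddrPairsToOutputs() ([]*wire.TxOut, error) {
	payments := map[string]int64{
		// placeholder address -> amount in satoshis
		"<address-for-active-network>": 50000,
	}
	return addrPairsToOutputs(payments)
}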
// sendCoinsOnChain makes an on-chain transaction in order to send coins to one or
// more addresses specified in the passed payment map. The payment map maps an
// address to a specified output value to be sent to that address.
func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64) (*chainhash.Hash, error) {
outputs, err := addrPairsToOutputs(paymentMap)
if err != nil {
return nil, err
}
return r.server.lnwallet.SendOutputs(outputs)
}
// SendCoins executes a request to send coins to a particular address. Unlike
// SendMany, this RPC call only allows creating a single output at a time.
func (r *rpcServer) SendCoins(ctx context.Context,
in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {
rpcsLog.Infof("[sendcoins] addr=%v, amt=%v", in.Addr, btcutil.Amount(in.Amount))
paymentMap := map[string]int64{in.Addr: in.Amount}
txid, err := r.sendCoinsOnChain(paymentMap)
if err != nil {
return nil, err
}
rpcsLog.Infof("[sendcoins] spend generated txid: %v", txid.String())
return &lnrpc.SendCoinsResponse{Txid: txid.String()}, nil
}
// SendMany handles a request for a transaction that creates multiple specified
// outputs in parallel.
func (r *rpcServer) SendMany(ctx context.Context,
in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {
txid, err := r.sendCoinsOnChain(in.AddrToAmount)
if err != nil {
return nil, err
}
rpcsLog.Infof("[sendmany] spend generated txid: %v", txid.String())
return &lnrpc.SendManyResponse{Txid: txid.String()}, nil
}
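// Illustrative sketch (not part of the original file): driving SendMany
// through the same in-process handler. Addresses and amounts are placeholders.
func exampleSendMany(r *rpcServer) (string, error) {
	req := &lnrpc.SendManyRequest{
		AddrToAmount: map[string]int64{
			"<first-address>":  100000,
			"<second-address>": 250000,
		},
	}
	resp, err := r.SendMany(context.Background(), req)
	if err != nil {
		return "", err
	}
	return resp.Txid, nil
}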
// NewAddress creates a new address under control of the local wallet.
func (r *rpcServer) NewAddress(ctx context.Context,
in *lnrpc.NewAddressRequest) (*lnrpc.NewAddressResponse, error) {
// Translate the gRPC proto address type to the wallet controller's
// available address types.
var addrType lnwallet.AddressType
switch in.Type {
case lnrpc.NewAddressRequest_WITNESS_PUBKEY_HASH:
addrType = lnwallet.WitnessPubKey
case lnrpc.NewAddressRequest_NESTED_PUBKEY_HASH:
addrType = lnwallet.NestedWitnessPubKey
case lnrpc.NewAddressRequest_PUBKEY_HASH:
addrType = lnwallet.PubKeyHash
}
addr, err := r.server.lnwallet.NewAddress(addrType, false)
if err != nil {
return nil, err
}
rpcsLog.Infof("[newaddress] addr=%v", addr.String())
return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
}
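// Illustrative sketch (not part of the original file): requesting a native
// witness (p2wkh) address through the handler above.
func exampleNewWitnessAddress(r *rpcServer) (string, error) {
	req := &lnrpc.NewAddressRequest{
		Type: lnrpc.NewAddressRequest_WITNESS_PUBKEY_HASH,
	}
	resp, err := r.NewAddress(context.Background(), req)
	if err != nil {
		return "", err
	}
	return resp.Address, nil
}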
// NewWitnessAddress returns a new native witness address under the control of
// the local wallet.
func (r *rpcServer) NewWitnessAddress(ctx context.Context,
in *lnrpc.NewWitnessAddressRequest) (*lnrpc.NewAddressResponse, error) {
addr, err := r.server.lnwallet.NewAddress(lnwallet.WitnessPubKey, false)
if err != nil {
return nil, err
}
rpcsLog.Infof("[newaddress] addr=%v", addr.String())
return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
}
// ConnectPeer attempts to establish a connection to a remote peer.
func (r *rpcServer) ConnectPeer(ctx context.Context,
in *lnrpc.ConnectPeerRequest) (*lnrpc.ConnectPeerResponse, error) {
if in.Addr == nil {
return nil, fmt.Errorf("need: lnc pubkeyhash@hostname")
}
pubkeyHex, err := hex.DecodeString(in.Addr.Pubkey)
if err != nil {
return nil, err
}
pubkey, err := btcec.ParsePubKey(pubkeyHex, btcec.S256())
if err != nil {
return nil, err
}
host, err := net.ResolveTCPAddr("tcp", in.Addr.Host)
if err != nil {
return nil, err
}
peerAddr := &lnwire.NetAddress{
IdentityKey: pubkey,
Address: host,
ChainNet: activeNetParams.Net,
}
if err := r.server.ConnectToPeer(peerAddr, in.Perm); err != nil {
rpcsLog.Errorf("(connectpeer): error connecting to peer: %v", err)
return nil, err
}
rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())
return &lnrpc.ConnectPeerResponse{}, nil
}
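// Hypothetical helper (not part of the original file): splitting the
// "pubkey@host" string a user supplies into the two fields ConnectPeer reads
// from in.Addr. The lnrpc.LightningAddress type name is an assumption based
// on the in.Addr.Pubkey and in.Addr.Host accesses above.
func parseLightningAddr(target string) (*lnrpc.LightningAddress, error) {
	parts := strings.SplitN(target, "@", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("target must be of the form pubkey@host")
	}
	return &lnrpc.LightningAddress{
		Pubkey: parts[0],
		Host:   parts[1],
	}, nil
}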
// OpenChannel attempts to open a singly funded channel specified in the
// request to a remote peer.
func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
updateStream lnrpc.Lightning_OpenChannelServer) error {
rpcsLog.Tracef("[openchannel] request to peerid(%v) "+
"allocation(us=%v, them=%v) numconfs=%v", in.TargetPeerId,
in.LocalFundingAmount, in.PushSat, in.NumConfs)
localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
remoteInitialBalance := btcutil.Amount(in.PushSat)
// Ensure that the initial balance of the remote party (if pushing
// satoshis) does not exceed the amount the local party has requested
// for funding.
if remoteInitialBalance >= localFundingAmt {
return fmt.Errorf("amount pushed to remote peer for initial " +
"state must be below the local funding amount")
}
const minChannelSize = btcutil.Amount(6000)
// Restrict the size of the channel we'll actually open. Atm, we
// require the amount to be above 6k satoshis as we currently hard-code
// a 5k satoshi fee in several areas. As a result, 6k sat is the min
// channel size that allows us to safely sit above the dust threshold
// after fees are applied.
// TODO(roasbeef): remove after dynamic fees are in
if localFundingAmt < minChannelSize {
return fmt.Errorf("channel is too small, the minimum channel "+
"size is: %v (6k sat)", minChannelSize)
}
var (
nodepubKey *btcec.PublicKey
nodepubKeyBytes []byte
err error
)
// If the node key is set, then we'll parse the raw bytes into a pubkey
// object so we can easily manipulate it. If this isn't set, then we
// expect the TargetPeerId to be set accordingly.
if len(in.NodePubkey) != 0 {
nodepubKey, err = btcec.ParsePubKey(in.NodePubkey, btcec.S256())
if err != nil {
return err
}
nodepubKeyBytes = nodepubKey.SerializeCompressed()
}
// Instruct the server to trigger the necessary events to attempt to
// open a new channel. A stream is returned in place; this stream will
// be used to consume updates of the state of the pending channel.
updateChan, errChan := r.server.OpenChannel(in.TargetPeerId,
nodepubKey, localFundingAmt, remoteInitialBalance, in.NumConfs)
var outpoint wire.OutPoint
out:
for {
select {
case err := <-errChan:
rpcsLog.Errorf("unable to open channel to "+
"identityPub(%x) nor peerID(%v): %v",
nodepubKeyBytes, in.TargetPeerId, err)
return err
case fundingUpdate := <-updateChan:
rpcsLog.Tracef("[openchannel] sending update: %v",
fundingUpdate)
if err := updateStream.Send(fundingUpdate); err != nil {
return err
}
// If a final channel open update is being sent, then
// we can break out of our recv loop as we no longer
// need to process any further updates.
switch update := fundingUpdate.Update.(type) {
case *lnrpc.OpenStatusUpdate_ChanOpen:
chanPoint := update.ChanOpen.ChannelPoint
h, _ := chainhash.NewHash(chanPoint.FundingTxid)
outpoint = wire.OutPoint{
Hash: *h,
Index: chanPoint.OutputIndex,
}
break out
}
case <-r.quit:
return nil
}
}
rpcsLog.Tracef("[openchannel] success peerid(%v), ChannelPoint(%v)",
in.TargetPeerId, outpoint)
return nil
}
// OpenChannelSync is a synchronous version of the OpenChannel RPC call. This
// call is meant to be consumed by clients to the REST proxy. As with all other
// sync calls, all byte slices are instead to be populated as hex encoded
// strings.
func (r *rpcServer) OpenChannelSync(ctx context.Context,
in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) {
rpcsLog.Tracef("[openchannel] request to peerid(%v) "+
"allocation(us=%v, them=%v) numconfs=%v", in.TargetPeerId,
in.LocalFundingAmount, in.PushSat, in.NumConfs)
// Creation of channels before the wallet syncs up is currently
// disallowed.
isSynced, err := r.server.lnwallet.IsSynced()
if err != nil {
return nil, err
}
if !isSynced {
return nil, errors.New("channels cannot be created before the " +
"wallet is fully synced")
}
// Decode the provided target node's public key, parsing it into a pub
// key object. For all sync calls, byte slices are expected to be
// encoded as hex strings.
keyBytes, err := hex.DecodeString(in.NodePubkeyString)
if err != nil {
return nil, err
}
nodepubKey, err := btcec.ParsePubKey(keyBytes, btcec.S256())
if err != nil {
return nil, err
}
localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
remoteInitialBalance := btcutil.Amount(in.PushSat)
// Ensure that the initial balance of the remote party (if pushing
// satoshis) does not exceed the amount the local party has requested
// for funding.
if remoteInitialBalance >= localFundingAmt {
return nil, fmt.Errorf("amount pushed to remote peer for " +
"initial state must be below the local funding amount")
}
updateChan, errChan := r.server.OpenChannel(in.TargetPeerId,
nodepubKey, localFundingAmt, remoteInitialBalance, in.NumConfs)
select {
// If an error occurs, then immediately return the error to the client.
case err := <-errChan:
rpcsLog.Errorf("unable to open channel to "+
"identityPub(%x) nor peerID(%v): %v",
nodepubKey, in.TargetPeerId, err)
return nil, err
// Otherwise, wait for the first channel update. The first update sent
// is when the funding transaction is broadcast to the network.
case fundingUpdate := <-updateChan:
rpcsLog.Tracef("[openchannel] sending update: %v",
fundingUpdate)
// Parse out the txid of the pending funding transaction. The
// sync client can use this to poll against the list of
// PendingChannels.
openUpdate := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
chanUpdate := openUpdate.ChanPending
return &lnrpc.ChannelPoint{
FundingTxid: chanUpdate.Txid,
}, nil
case <-r.quit:
return nil, nil
}
}
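// Illustrative sketch (not part of the original file): a minimal synchronous
// channel open against the handler above. The pubkey string is a placeholder
// for a hex-encoded 33-byte compressed key; amounts are in satoshis.
func exampleOpenChannelSync(r *rpcServer) (*lnrpc.ChannelPoint, error) {
	req := &lnrpc.OpenChannelRequest{
		NodePubkeyString:   "<hex-compressed-pubkey>",
		LocalFundingAmount: 100000,
		PushSat:            10000,
		NumConfs:           1,
	}
	return r.OpenChannelSync(context.Background(), req)
}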
// CloseChannel attempts to close an active channel identified by its channel
// point. The actions of this method can additionally be augmented to attempt
// a force close after a timeout period in the case of an inactive peer.
func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
updateStream lnrpc.Lightning_CloseChannelServer) error {
force := in.Force
index := in.ChannelPoint.OutputIndex
txid, err := chainhash.NewHash(in.ChannelPoint.FundingTxid)
if err != nil {
rpcsLog.Errorf("[closechannel] invalid txid: %v", err)
return err
}
chanPoint := wire.NewOutPoint(txid, index)
rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v)",
chanPoint)
var (
updateChan chan *lnrpc.CloseStatusUpdate
errChan chan error
)
// If a force closure was requested, then we'll handle all the details
// around the creation and broadcast of the unilateral closure
// transaction here rather than going to the switch as we don't require
// interaction from the peer.
if force {
// As the first part of the force closure, we first fetch the
// channel from the database, then execute a direct force
// closure broadcasting our current commitment transaction.
// TODO(roasbeef): d/c peer if connected?
// * otherwise safety not guaranteed
channel, err := r.fetchActiveChannel(*chanPoint)
if err != nil {
return err
}
closingTxid, err := r.forceCloseChan(channel)
if err != nil {
rpcsLog.Errorf("unable to force close transaction: %v", err)
// If the transaction we broadcast is detected as a double spend,
// then this indicates that the remote party has broadcast their
// commitment transaction, but we didn't notice.
if strings.Contains(err.Error(), "fully-spent") ||
strings.Contains(err.Error(), "double spend") {
// In this case, we'll clean up the channel
// state.
// TODO(roasbeef): check close summary to see
// if we need to sweep any HTLC's
if err := channel.DeleteState(); err != nil {
return err
}
return fmt.Errorf("channel has been closed by remote party")
}
return err
}
updateChan = make(chan *lnrpc.CloseStatusUpdate)
errChan = make(chan error)
go func() {
// With the transaction broadcast, we send our first
// update to the client.
updateChan <- &lnrpc.CloseStatusUpdate{
Update: &lnrpc.CloseStatusUpdate_ClosePending{
ClosePending: &lnrpc.PendingUpdate{
Txid: closingTxid[:],
},
},
}
// Next, we enter the second phase, waiting for the
// channel to be confirmed before we finalize the force
// closure.
notifier := r.server.chainNotifier
confNtfn, err := notifier.RegisterConfirmationsNtfn(closingTxid, 1)
if err != nil {
errChan <- err
return
}
select {
case txConf, ok := <-confNtfn.Confirmed:
if !ok {
return
}
// As the channel has been closed, we can now
// delete its state from the database.
rpcsLog.Infof("ChannelPoint(%v) is now "+
"closed at height %v", chanPoint,
txConf.BlockHeight)
if err := channel.DeleteState(); err != nil {
errChan <- err
return
}
case <-r.quit:
return
}
// Respond to the local subsystem which requested the
// channel closure.
updateChan <- &lnrpc.CloseStatusUpdate{
Update: &lnrpc.CloseStatusUpdate_ChanClose{
ChanClose: &lnrpc.ChannelCloseUpdate{
ClosingTxid: closingTxid[:],
Success: true,
},
},
}
// Finally, signal to the breachArbiter that it no
// longer needs to watch the channel as it's been
// closed.
r.server.breachArbiter.settledContracts <- chanPoint
}()
} else {
// Otherwise, the caller has requested a regular interactive
// cooperative channel closure. So we'll forward the request to
// the htlc switch which will handle the negotiation and
// broadcast details.
updateChan, errChan = r.server.htlcSwitch.CloseLink(chanPoint,
CloseRegular)
}
out:
for {
select {
case err := <-errChan:
rpcsLog.Errorf("[closechannel] unable to close "+
"ChannelPoint(%v): %v", chanPoint, err)
return err
case closingUpdate := <-updateChan:
rpcsLog.Tracef("[closechannel] sending update: %v",
closingUpdate)
if err := updateStream.Send(closingUpdate); err != nil {
return err
}
// If a final channel closing update is being sent,
// then we can break out of our dispatch loop as we no
// longer need to process any further updates.
switch closeUpdate := closingUpdate.Update.(type) {
case *lnrpc.CloseStatusUpdate_ChanClose:
h, _ := chainhash.NewHash(closeUpdate.ChanClose.ClosingTxid)
rpcsLog.Infof("[closechannel] close completed: "+
"txid(%v)", h)
break out
}
case <-r.quit:
return nil
}
}
return nil
}
// fetchActiveChannel attempts to locate a channel identified by its channel
// point from the database's set of all currently opened channels.
func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) (*lnwallet.LightningChannel, error) {
dbChannels, err := r.server.chanDB.FetchAllChannels()
if err != nil {
return nil, err
}
// With the channels fetched, attempt to locate the target channel
// according to its channel point.
var dbChan *channeldb.OpenChannel
for _, dbChannel := range dbChannels {
if *dbChannel.ChanID == chanPoint {
dbChan = dbChannel
break
}
}
// If the channel cannot be located, then we exit with an error to the
// caller.
if dbChan == nil {
return nil, fmt.Errorf("unable to find channel")
}
// Otherwise, we create a fully populated channel state machine which
// uses the db channel as backing storage.
return lnwallet.NewLightningChannel(r.server.lnwallet.Signer, nil,
dbChan)
}
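// Illustrative sketch (not part of the original file): assembling the
// wire.OutPoint key that fetchActiveChannel expects from a funding txid
// string and an output index.
func exampleChanPointLookup(r *rpcServer, txidStr string,
	index uint32) (*lnwallet.LightningChannel, error) {

	txid, err := chainhash.NewHashFromStr(txidStr)
	if err != nil {
		return nil, err
	}
	return r.fetchActiveChannel(wire.OutPoint{Hash: *txid, Index: index})
}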
// forceCloseChan executes a unilateral close of the target channel by
// broadcasting the current commitment state directly on-chain. Once the
// commitment transaction has been broadcast, a struct describing the final
// state of the channel is sent to the utxoNursery in order to ultimately sweep
// the immature outputs.
func (r *rpcServer) forceCloseChan(channel *lnwallet.LightningChannel) (*chainhash.Hash, error) {
// Execute a unilateral close shutting down all further channel
// operation.
closeSummary, err := channel.ForceClose()
if err != nil {
return nil, err
}
closeTx := closeSummary.CloseTx
txid := closeTx.TxHash()
// With the close transaction in hand, broadcast the transaction to the
// network, thereby entering the post-channel resolution state.
rpcsLog.Infof("Broadcasting force close transaction, ChannelPoint(%v): %v",
channel.ChannelPoint(), newLogClosure(func() string {
return spew.Sdump(closeTx)
}))
if err := r.server.lnwallet.PublishTransaction(closeTx); err != nil {
return nil, err
}
// Send the closed channel summary over to the utxoNursery in order to
// have its outputs swept back into the wallet once they're mature.
r.server.utxoNursery.incubateOutputs(closeSummary)
return &txid, nil
}
// GetInfo serves a request to the "getinfo" RPC call. This call returns
// general information concerning the lightning node including its LN ID,
// identity address, and information concerning the number of open+pending
// channels.
func (r *rpcServer) GetInfo(ctx context.Context,
in *lnrpc.GetInfoRequest) (*lnrpc.GetInfoResponse, error) {
var activeChannels uint32
serverPeers := r.server.Peers()
for _, serverPeer := range serverPeers {
activeChannels += uint32(len(serverPeer.ChannelSnapshots()))
}
pendingChannels, err := r.server.fundingMgr.NumPendingChannels()
if err != nil {
return nil, err
}
idPub := r.server.identityPriv.PubKey().SerializeCompressed()
bestHash, bestHeight, err := r.server.bio.GetBestBlock()
if err != nil {
return nil, err
}
isSynced, err := r.server.lnwallet.IsSynced()
if err != nil {
return nil, err
}
// TODO(roasbeef): add synced height n stuff
return &lnrpc.GetInfoResponse{
IdentityPubkey: hex.EncodeToString(idPub),
NumPendingChannels: pendingChannels,
NumActiveChannels: activeChannels,
NumPeers: uint32(len(serverPeers)),
BlockHeight: uint32(bestHeight),
BlockHash: bestHash.String(),
SyncedToChain: isSynced,
Testnet: activeNetParams.Params == &chaincfg.TestNet3Params,
}, nil
}
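// Illustrative sketch (not part of the original file): reading a few fields
// off the GetInfo response, e.g. for a simple health check.
func exampleGetInfo(r *rpcServer) error {
	info, err := r.GetInfo(context.Background(), &lnrpc.GetInfoRequest{})
	if err != nil {
		return err
	}
	rpcsLog.Infof("node %v: %v active channels, synced=%v",
		info.IdentityPubkey, info.NumActiveChannels, info.SyncedToChain)
	return nil
}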
// ListPeers returns a verbose listing of all currently active peers.
func (r *rpcServer) ListPeers(ctx context.Context,
in *lnrpc.ListPeersRequest) (*lnrpc.ListPeersResponse, error) {
rpcsLog.Tracef("[listpeers] request")
serverPeers := r.server.Peers()
resp := &lnrpc.ListPeersResponse{
Peers: make([]*lnrpc.Peer, 0, len(serverPeers)),
}
for _, serverPeer := range serverPeers {
// TODO(roasbeef): add a snapshot method which grabs peer read mtx
var (
satSent int64
satRecv int64
)
// In order to display the total number of outbound (sent) and inbound
// (recv'd) satoshis that have been transported through this peer, we'll
// sum up the sent/recv'd values for each of the active channels we have
// with the peer.
chans := serverPeer.ChannelSnapshots()
for _, c := range chans {
satSent += int64(c.TotalSatoshisSent)
satRecv += int64(c.TotalSatoshisReceived)
}
nodePub := serverPeer.addr.IdentityKey.SerializeCompressed()
peer := &lnrpc.Peer{
PubKey: hex.EncodeToString(nodePub),
PeerId: serverPeer.id,
Address: serverPeer.conn.RemoteAddr().String(),
Inbound: serverPeer.inbound,
BytesRecv: atomic.LoadUint64(&serverPeer.bytesReceived),
BytesSent: atomic.LoadUint64(&serverPeer.bytesSent),
SatSent: satSent,
SatRecv: satRecv,
PingTime: serverPeer.PingTime(),
}
resp.Peers = append(resp.Peers, peer)
}
rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers)
return resp, nil
}
// WalletBalance returns the sum of all confirmed unspent outputs under the
// control of the wallet. This method can be modified by having the request
// specify that only witness outputs should be factored into the final
// output sum.
// TODO(roasbeef): split into total and confirmed/unconfirmed
// TODO(roasbeef): add async hooks into wallet balance changes
func (r *rpcServer) WalletBalance(ctx context.Context,
in *lnrpc.WalletBalanceRequest) (*lnrpc.WalletBalanceResponse, error) {
balance, err := r.server.lnwallet.ConfirmedBalance(1, in.WitnessOnly)
if err != nil {
return nil, err
}
rpcsLog.Debugf("[walletbalance] balance=%v", balance)
return &lnrpc.WalletBalanceResponse{
Balance: balance.ToBTC(),
}, nil
}
// ChannelBalance returns the total available channel flow across all open
// channels in satoshis.
func (r *rpcServer) ChannelBalance(ctx context.Context,
in *lnrpc.ChannelBalanceRequest) (*lnrpc.ChannelBalanceResponse, error) {
channels, err := r.server.chanDB.FetchAllChannels()
if err != nil {
return nil, err
}
var balance btcutil.Amount
for _, channel := range channels {
balance += channel.OurBalance
}
return &lnrpc.ChannelBalanceResponse{Balance: int64(balance)}, nil
}
// PendingChannels returns a list of all the channels that are currently
// considered "pending". A channel is pending if it has finished the funding
// workflow and is waiting for confirmations for the funding txn, or is in the
// process of closure, either initiated cooperatively or non-cooperatively.
func (r *rpcServer) PendingChannels(ctx context.Context,
in *lnrpc.PendingChannelRequest) (*lnrpc.PendingChannelResponse, error) {
both := in.Status == lnrpc.ChannelStatus_ALL
includeOpen := (in.Status == lnrpc.ChannelStatus_OPENING) || both
includeClose := (in.Status == lnrpc.ChannelStatus_CLOSING) || both
rpcsLog.Debugf("[pendingchannels] %v", in.Status)
var pendingChannels []*lnrpc.PendingChannelResponse_PendingChannel
if includeOpen {
pendingOpenChans, err := r.server.fundingMgr.PendingChannels()
if err != nil {
return nil, err
}
for _, pendingOpen := range pendingOpenChans {
// TODO(roasbeef): add confirmation progress
pub := pendingOpen.identityPub.SerializeCompressed()
pendingChan := &lnrpc.PendingChannelResponse_PendingChannel{
IdentityKey: hex.EncodeToString(pub),
ChannelPoint: pendingOpen.channelPoint.String(),
Capacity: int64(pendingOpen.capacity),
LocalBalance: int64(pendingOpen.localBalance),
RemoteBalance: int64(pendingOpen.remoteBalance),
Status: lnrpc.ChannelStatus_OPENING,
}
pendingChannels = append(pendingChannels, pendingChan)
}
}
if includeClose {
}
return &lnrpc.PendingChannelResponse{
PendingChannels: pendingChannels,
}, nil
}
// ListChannels returns a description of all direct active, open channels the
// node knows of.
// TODO(roasbeef): add 'online' bit to response
func (r *rpcServer) ListChannels(ctx context.Context,
in *lnrpc.ListChannelsRequest) (*lnrpc.ListChannelsResponse, error) {
resp := &lnrpc.ListChannelsResponse{}
graph := r.server.chanDB.ChannelGraph()
dbChannels, err := r.server.chanDB.FetchAllChannels()
if err != nil {
return nil, err
}
rpcsLog.Infof("[listchannels] fetched %v channels from DB",
len(dbChannels))
for _, dbChannel := range dbChannels {
if dbChannel.IsPending {
continue
}
nodePub := dbChannel.IdentityPub.SerializeCompressed()
nodeID := hex.EncodeToString(nodePub)
chanPoint := dbChannel.ChanID
// With the channel point known, retrieve the network channel
// ID from the database.
var chanID uint64
chanID, _ = graph.ChannelID(chanPoint)
channel := &lnrpc.ActiveChannel{
RemotePubkey: nodeID,
ChannelPoint: chanPoint.String(),
ChanId: chanID,
Capacity: int64(dbChannel.Capacity),
LocalBalance: int64(dbChannel.OurBalance),
RemoteBalance: int64(dbChannel.TheirBalance),
TotalSatoshisSent: int64(dbChannel.TotalSatoshisSent),
TotalSatoshisReceived: int64(dbChannel.TotalSatoshisReceived),
NumUpdates: dbChannel.NumUpdates,
PendingHtlcs: make([]*lnrpc.HTLC, len(dbChannel.Htlcs)),
}
for i, htlc := range dbChannel.Htlcs {
channel.PendingHtlcs[i] = &lnrpc.HTLC{
Incoming: htlc.Incoming,
Amount: int64(htlc.Amt),
HashLock: htlc.RHash[:],
ExpirationHeight: htlc.RefundTimeout,
RevocationDelay: htlc.RevocationDelay,
}
}
resp.Channels = append(resp.Channels, channel)
}
return resp, nil
}
// savePayment saves a successfully completed payment to the database for
// historical record keeping.
func (r *rpcServer) savePayment(route *routing.Route, amount btcutil.Amount,
rHash []byte) error {
paymentPath := make([][33]byte, len(route.Hops))
for i, hop := range route.Hops {
hopPub := hop.Channel.Node.PubKey.SerializeCompressed()
copy(paymentPath[i][:], hopPub)
}
payment := &channeldb.OutgoingPayment{
Invoice: channeldb.Invoice{
Terms: channeldb.ContractTerm{
Value: btcutil.Amount(amount),
},
CreationDate: time.Now(),
},
Path: paymentPath,
Fee: route.TotalFees,
TimeLockLength: route.TotalTimeLock,
}
copy(payment.PaymentHash[:], rHash)
return r.server.chanDB.AddPayment(payment)
}
// SendPayment dispatches a bi-directional streaming RPC for sending payments
// through the Lightning Network. A single RPC invocation creates a persistent
// bi-directional stream allowing clients to rapidly send payments through the
// Lightning Network with a single persistent connection.
func (r *rpcServer) SendPayment(paymentStream lnrpc.Lightning_SendPaymentServer) error {
errChan := make(chan error, 1)
payChan := make(chan *lnrpc.SendRequest)
// Launch a new goroutine to handle reading new payment requests from
// the client. This way we can handle errors independently of blocking
// and waiting for the next payment request to come through.
go func() {
for {
select {
case <-r.quit:
errChan <- nil
return
default:
// Receive the next pending payment within the
// stream sent by the client. If we read the
// EOF sentinel, then the client has closed the
// stream, and we can exit normally.
nextPayment, err := paymentStream.Recv()
if err == io.EOF {
errChan <- nil
return
} else if err != nil {
errChan <- err
return
}
// If the payment request field isn't blank,
// then the details of the invoice are encoded
// entirely within the encoded payReq. So we'll
// attempt to decode it, populating the
// nextPayment accordingly.
if nextPayment.PaymentRequest != "" {
payReq, err := zpay32.Decode(nextPayment.PaymentRequest)
if err != nil {
errChan <- err
return
}
// TODO(roasbeef): eliminate necessary
// encode/decode
nextPayment.Dest = payReq.Destination.SerializeCompressed()
nextPayment.Amt = int64(payReq.Amount)
nextPayment.PaymentHash = payReq.PaymentHash[:]
}
payChan <- nextPayment
}
}
}()
for {
select {
case err := <-errChan:
return err
case nextPayment := <-payChan:
// Parse the details of the payment which include the
// pubkey of the destination and the payment amount.
dest := nextPayment.Dest
amt := btcutil.Amount(nextPayment.Amt)
destNode, err := btcec.ParsePubKey(dest, btcec.S256())
if err != nil {
return err
}
// If we're in debug HTLC mode, then all outgoing HTLCs
// will pay to the same debug rHash. Otherwise, we pay
// to the rHash specified within the RPC request.
var rHash [32]byte
if cfg.DebugHTLC && len(nextPayment.PaymentHash) == 0 {
rHash = debugHash
} else {
copy(rHash[:], nextPayment.PaymentHash)
}
// We launch a new goroutine to execute the current
// payment so we can continue to serve requests while
// this payment is being dispatched.
//
// TODO(roasbeef): semaphore to limit num outstanding
// goroutines.
go func() {
// Construct a payment request to send to the
// channel router. If the payment is
// successful, then the route chosen will be
// returned. Otherwise, we'll get a non-nil
// error.
payment := &routing.LightningPayment{
Target: destNode,
Amount: amt,
PaymentHash: rHash,
}
preImage, route, err := r.server.chanRouter.SendPayment(payment)
if err != nil {
errChan <- err
return
}
// Save the completed payment to the database
// for record keeping purposes.
if err := r.savePayment(route, amt, rHash[:]); err != nil {
errChan <- err
return
}
err = paymentStream.Send(&lnrpc.SendResponse{
PaymentPreimage: preImage[:],
PaymentRoute: marshalRoute(route),
})
if err != nil {
errChan <- err
return
}
}()
}