Eliminate TX trickle bypass, sort TX invs for privacy and priority.
Previously Bitcoin would send 1/4 of transactions out to all peers
instantly. This causes high overhead because it makes >80% of
INVs size 1. Doing so harms privacy, because it limits the
amount of source obscurity a transaction can receive.

These randomized broadcasts also disobeyed transaction dependencies
and required use of the orphan pool. Because the orphan pool is
so small this leads to poor propagation for dependent transactions.

When the bypass wasn't in effect, transactions were sent in the
order they were received. This avoided creating orphans but
undermined privacy fairly significantly.

This commit:
Eliminates the bypass. The bypass is replaced by halving the
 average delay for outbound peers.

Sorts candidate transactions for INV by their topological
 depth then by their feerate (then hash); removing the
 information leakage and providing priority service to
 higher fee transactions.

Limits the number of transactions sent in a single INV to
 7 tx/sec (and twice that for outbound); this limits the
 harm of low-fee transaction floods and gives faster relay
 service to higher fee transactions. The 7 sounds lower
 than it really is because received advertisements need
 not be sent, and because the aggregate rate is multiplied
 by the number of peers.
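
As a rough illustration (not part of the commit), the per-peer budget these numbers imply works out as follows; the constants mirror the ones added to net_processing.h below, and the peer counts are hypothetical:

// Illustrative arithmetic only; constants mirror net_processing.h below.
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;                            // average seconds between trickles
static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;  // 35 items per trickled INV

// Inbound peer:  at most 35 tx per ~5 s average interval         -> ~7 tx/sec
// Outbound peer: same cap, interval shifted right (5 >> 1 = 2 s) -> roughly double that
// Across, say, 8 outbound plus dozens of inbound peers, the aggregate
// advertisement rate is many times the per-peer cap, which is why the "7"
// understates the effective relay capacity.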

 Coming from btc@f2d3ba73860e875972738d1da1507124d0971ae5
furszy committed Jan 18, 2021
1 parent 6ebfd17 commit 23c9f3e
Showing 5 changed files with 59 additions and 23 deletions.
56 changes: 36 additions & 20 deletions src/net_processing.cpp
@@ -1940,6 +1940,29 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman, std::atomic<bool>& interruptMsgProc
    return fMoreWork;
}

class CompareInvMempoolOrder
{
    CTxMemPool *mp;
public:
    CompareInvMempoolOrder(CTxMemPool *mempool)
    {
        mp = mempool;
    }

    bool operator()(const CInv &a, const CInv &b)
    {
        if (a.type != MSG_TX && b.type != MSG_TX) {
            return false;
        } else {
            if (a.type != MSG_TX) {
                return true;
            } else if (b.type != MSG_TX) {
                return false;
            }
            return mp->CompareDepthAndScore(a.hash, b.hash);
        }
    }
};

bool SendMessages(CNode* pto, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
@@ -2067,38 +2090,31 @@ bool SendMessages(CNode* pto, CConnman& connman, std::atomic<bool>& interruptMsgProc
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
    fSendTrickle = true;
    pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
    // Use half the delay for outbound peers, as there is less privacy concern for them.
    pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
}
LOCK(pto->cs_inventory);
vInv.reserve(pto->vInventoryToSend.size());
if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
    // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
    CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
    std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
}
vInv.reserve(std::min<size_t>(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size()));
vInvWait.reserve(pto->vInventoryToSend.size());
for (const CInv& inv : pto->vInventoryToSend) {
    if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
        continue;

    // trickle out tx inv to protect privacy
    if (inv.type == MSG_TX && !fSendTrickle) {
        // 1/4 of tx invs blast to all immediately
        static uint256 hashSalt;
        if (hashSalt.IsNull())
            hashSalt = GetRandHash();
        uint256 hashRand = inv.hash ^ hashSalt;
        hashRand = Hash(BEGIN(hashRand), END(hashRand));
        bool fTrickleWait = ((hashRand & 3) != 0);

        if (fTrickleWait) {
            vInvWait.push_back(inv);
            continue;
        }
    // No reason to drain out at many times the network's capacity,
    // especially since we have many peers and some will draw much shorter delays.
    if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
        vInvWait.push_back(inv);
        continue;
    }

    pto->filterInventoryKnown.insert(inv.hash);

    vInv.push_back(inv);
    if (vInv.size() >= 1000) {
        connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
        vInv.clear();
    }
}
pto->vInventoryToSend = vInvWait;
}
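
Since the rendered diff above interleaves removed and added lines, the following is a minimal sketch (not part of the commit) of the resulting per-trickle flow, assuming the CompareInvMempoolOrder comparator and INVENTORY_BROADCAST_MAX constant introduced in this change; locking and the message-making helpers are as in the surrounding code:

// Simplified sketch of the post-commit behaviour.
std::vector<CInv> vInv;
std::vector<CInv> vInvWait;
{
    LOCK(pto->cs_inventory);
    if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
        // Order announcements by unconfirmed-ancestor count, then feerate, then
        // hash, so the send order reveals nothing about when each tx arrived.
        CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
        std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
    }
    for (const CInv& inv : pto->vInventoryToSend) {
        if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
            continue; // the peer already knows this transaction
        // Respect the per-trickle cap; everything beyond it waits for the next round.
        if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
            vInvWait.push_back(inv);
            continue;
        }
        pto->filterInventoryKnown.insert(inv.hash);
        vInv.push_back(inv);
    }
    pto->vInventoryToSend = vInvWait;
}
if (!vInv.empty())
    connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
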
7 changes: 7 additions & 0 deletions src/net_processing.h
@@ -21,6 +21,13 @@ static const unsigned int DEFAULT_BLOCK_SPAM_FILTER_MAX_SIZE = 100;
/** Default for -blockspamfiltermaxavg, maximum average size of an index occurrence in the block spam filter */
static const unsigned int DEFAULT_BLOCK_SPAM_FILTER_MAX_AVG = 10;

/** Average delay between trickled inventory transmissions in seconds.
* Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
/** Maximum number of inventory items to send per transmission.
* Limits the impact of low-fee transaction floods. */
static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;

/** Register with a network node to receive its signals */
void RegisterNodeSignals(CNodeSignals& nodeSignals);
/** Unregister a network node */
15 changes: 15 additions & 0 deletions src/txmempool.cpp
@@ -815,6 +815,21 @@ void CTxMemPool::checkNullifiers() const
}
}

bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb)
{
    LOCK(cs);
    indexed_transaction_set::const_iterator i = mapTx.find(hasha);
    if (i == mapTx.end()) return false;
    indexed_transaction_set::const_iterator j = mapTx.find(hashb);
    if (j == mapTx.end()) return true;
    uint64_t counta = i->GetCountWithAncestors();
    uint64_t countb = j->GetCountWithAncestors();
    if (counta == countb) {
        return CompareTxMemPoolEntryByScore()(*i, *j);
    }
    return counta < countb;
}

void CTxMemPool::queryHashes(std::vector<uint256>& vtxid)
{
    vtxid.clear();
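
The ordering that CTxMemPool::CompareDepthAndScore produces can be summarized with a small hypothetical example; 'parent', 'child' and 'unknown' are invented txids, the first two assumed to be in the mempool with child spending parent's output:

// Hypothetical illustration of the comparator's behaviour.
assert(mempool.CompareDepthAndScore(parent, child));    // fewer unconfirmed ancestors sorts first
assert(!mempool.CompareDepthAndScore(child, parent));
assert(!mempool.CompareDepthAndScore(unknown, parent)); // a hash missing from the mempool sorts last
assert(mempool.CompareDepthAndScore(parent, unknown));
// At equal ancestor counts, CompareTxMemPoolEntryByScore breaks the tie,
// so higher-feerate transactions are announced first (then by hash).
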
1 change: 1 addition & 0 deletions src/txmempool.h
@@ -531,6 +531,7 @@ class CTxMemPool
    void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight, std::list<CTransactionRef>& conflicts, bool fCurrentEstimate = true);
    void clear();
    void _clear(); // lock-free
    bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb);
    void queryHashes(std::vector<uint256>& vtxid);
    void getTransactions(std::set<uint256>& setTxid);
    bool isSpent(const COutPoint& outpoint);
3 changes: 0 additions & 3 deletions src/validation.h
@@ -107,9 +107,6 @@ static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111;
static const unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 24 * 60;
/** Average delay between peer address broadcasts in seconds. */
static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
/** Average delay between trickled inventory broadcasts in seconds.
* Blocks, whitelisted receivers, and a random 25% of transactions bypass this. */
static const unsigned int AVG_INVENTORY_BROADCAST_INTERVAL = 5;
/** Default multiplier used in the computation for shielded txes min fee */
static const unsigned int DEFAULT_SHIELDEDTXFEE_K = 100;

