diff --git a/src/coins.cpp b/src/coins.cpp
index f5f2e2b80a..1b95149396 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -111,14 +111,9 @@ void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool
 bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout) {
     CCoinsMap::iterator it = FetchCoin(outpoint);
-
-    if (it == cacheCoins.end()) {
-        return false;
-    }
-
+    if (it == cacheCoins.end()) return false;
     cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
     snapshotHash.SubtractUTXO(snapshot::UTXO(outpoint, it->second.coin));
-
     if (moveout) {
         *moveout = std::move(it->second.coin);
     }
diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp
index cc47dfb53d..02363e073e 100644
--- a/src/consensus/tx_verify.cpp
+++ b/src/consensus/tx_verify.cpp
@@ -123,9 +123,8 @@ unsigned int GetLegacySigOpCount(const CTransaction& tx)
 
 unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
 {
-    if (tx.IsCoinBase()) {
+    if (tx.IsCoinBase())
         return 0;
-    }
 
     unsigned int nSigOps = 0;
     for (unsigned int i = 0; i < tx.vin.size(); i++)
@@ -133,9 +132,8 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& in
         const Coin& coin = inputs.AccessCoin(tx.vin[i].prevout);
         assert(!coin.IsSpent());
         const CTxOut &prevout = coin.out;
-        if (prevout.scriptPubKey.IsPayToScriptHash()) {
+        if (prevout.scriptPubKey.IsPayToScriptHash())
            nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
-        }
     }
     return nSigOps;
 }
@@ -144,9 +142,8 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i
 {
     int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;
 
-    if (tx.IsCoinBase()) {
+    if (tx.IsCoinBase())
        return nSigOps;
-    }
 
    if (flags & SCRIPT_VERIFY_P2SH) {
        nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
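
Note (editor's aside, not part of the patch): GetTransactionSigOpCost above weights legacy and P2SH sigops by WITNESS_SCALE_FACTOR (4) so that they are commensurable with witness sigops, which are counted unscaled. A minimal sketch of that arithmetic, with hypothetical counts:

    #include <cstdint>

    // Illustrative only: a tx with 2 legacy sigops, 1 P2SH sigop and
    // 3 witness sigops has a total cost of 2*4 + 1*4 + 3 = 15.
    int64_t SigOpCostSketch(int64_t legacy, int64_t p2sh, int64_t witness) {
        const int64_t kWitnessScaleFactor = 4; // WITNESS_SCALE_FACTOR in consensus code
        return legacy * kWitnessScaleFactor + p2sh * kWitnessScaleFactor + witness;
    }
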
diff --git a/src/init.cpp b/src/init.cpp
index 9660e9fd17..f2ffe6c257 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1672,14 +1672,8 @@ bool AppInitMain()
             }
         }
 
-        if (
-            !CVerifyDB().VerifyDB(
-                chainparams,
-                pcoinsdbview.get(),
-                gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
-                gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS)
-            )
-        ) {
+        if (!CVerifyDB().VerifyDB(chainparams, pcoinsdbview.get(), gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
+                      gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) {
             strLoadError = _("Corrupted block database detected");
             break;
         }
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 57310f1969..81056c04a3 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -253,16 +253,16 @@ void CTxMemPool::UpdateChildrenForRemoval(txiter it)
 
 void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants)
 {
-    // For each entry, walk back all ancestors and decrement size associated
-    // with this transaction
+    // For each entry, walk back all ancestors and decrement size associated with this
+    // transaction
     const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
     if (updateDescendants) {
         // updateDescendants should be true whenever we're not recursively
         // removing a tx and all its descendants, eg when a transaction is
         // confirmed in a block.
-        // Here we only update statistics and not data in mapLinks (which we
-        // need to preserve until we're finished with all operations that need
-        // to traverse the mempool).
+        // Here we only update statistics and not data in mapLinks (which
+        // we need to preserve until we're finished with all operations that
+        // need to traverse the mempool).
         for (txiter removeIt : entriesToRemove) {
             setEntries setDescendants;
             CalculateDescendants(removeIt, setDescendants);
@@ -576,16 +576,12 @@ void CTxMemPool::removeForBlock(const std::vector<CTransactionRef>& vtx, unsigne
         uint256 hash = (*ptx)->GetHash();
         indexed_transaction_set::iterator i = mapTx.find(hash);
-        if (i != mapTx.end()) {
+        if (i != mapTx.end())
             entries.push_back(&*i);
-        }
     }
-    // Before the txs in the new block have been removed from the mempool,
-    // update policy estimates
-    if (minerPolicyEstimator) {
-        minerPolicyEstimator->processBlock(nBlockHeight, entries);
-    }
+    // Before the txs in the new block have been removed from the mempool, update policy estimates
+    if (minerPolicyEstimator) {minerPolicyEstimator->processBlock(nBlockHeight, entries);}
 
     for (
         auto ptx = disconnectpool.GetQueuedTx().get<insertion_order>().rbegin();
diff --git a/src/txmempool.h b/src/txmempool.h
index f4d773a7bc..7af07e7a59 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -765,9 +765,7 @@ struct DisconnectedBlockTransactions {
     // Estimate the overhead of queuedTx to be 6 pointers + an allocation, as
     // no exact formula for boost::multi_index_contained is implemented.
     size_t DynamicMemoryUsage() const {
-        return memusage::MallocUsage(
-            sizeof(CTransactionRef) + 6 * sizeof(void*)
-        ) * queuedTx.size() + cachedInnerUsage;
+        return memusage::MallocUsage(sizeof(CTransactionRef) + 6 * sizeof(void*)) * queuedTx.size() + cachedInnerUsage;
     }
 
     const indexed_disconnected_transactions &GetQueuedTx() const {
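
Note (editor's aside, not part of the patch): DynamicMemoryUsage above approximates each queuedTx node as the CTransactionRef payload plus an assumed six pointers of boost::multi_index overhead, passed through memusage::MallocUsage to account for allocator rounding. A hedged sketch of the shape of that estimate, assuming 8-byte pointers and a 16-byte shared_ptr:

    #include <cstddef>

    // MallocUsageSketch stands in for memusage::MallocUsage; the real
    // function rounds an allocation up to the allocator's chunk size
    // (illustrative rounding only).
    size_t MallocUsageSketch(size_t alloc) {
        return alloc == 0 ? 0 : ((alloc + 31) >> 4) << 4;
    }

    size_t QueuedTxUsageSketch(size_t entries, size_t cachedInnerUsage) {
        const size_t kEntry = 16 /* shared_ptr */ + 6 * sizeof(void*);
        return MallocUsageSketch(kEntry) * entries + cachedInnerUsage;
    }
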
diff --git a/src/validation.cpp b/src/validation.cpp
index ab09b6af4c..cc8ea390a3 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -491,13 +491,12 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f
 {
     AssertLockHeld(cs_main);
     std::vector<uint256> vHashUpdate;
-
-    // disconnectpool's insertion_order index sorts the entries from oldest to
-    // newest, but the oldest entry will be the last tx from the latest mined
-    // block that was disconnected.
-    // Iterate disconnectpool in reverse, so that we add transactions back to
-    // the mempool starting with the earliest transaction that had been
-    // previously seen in a block.
+    // disconnectpool's insertion_order index sorts the entries from
+    // oldest to newest, but the oldest entry will be the last tx from the
+    // latest mined block that was disconnected.
+    // Iterate disconnectpool in reverse, so that we add transactions
+    // back to the mempool starting with the earliest transaction that had
+    // been previously seen in a block.
     auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
     while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
         // ignore validation errors in resurrected transactions
@@ -513,9 +512,7 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f
         }
         ++it;
     }
-
     disconnectpool.queuedTx.clear();
-
     // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
     // no in-mempool children, which is generally not true when adding
     // previously-confirmed transactions back to the mempool.
@@ -645,22 +642,24 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
             if (!setConflicts.count(ptxConflicting->GetHash()))
             {
                 // Allow opt-out of transaction replacement by setting
-                // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all
-                // inputs.
+                // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
                 //
                 // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
                 // non-replaceable transactions. All inputs rather than just one
-                // is for the sake of multi-party protocols, where we don't want
-                // a single party to be able to disable replacement.
+                // is for the sake of multi-party protocols, where we don't
+                // want a single party to be able to disable replacement.
                 //
                 // The opt-out ignores descendants as anyone relying on
                 // first-seen mempool behavior should be checking all
                 // unconfirmed ancestors anyway; doing otherwise is hopelessly
                 // insecure.
                 bool fReplacementOptOut = true;
-                if (fEnableReplacement) {
-                    for (const CTxIn &_txin : ptxConflicting->vin) {
-                        if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) {
+                if (fEnableReplacement)
+                {
+                    for (const CTxIn &_txin : ptxConflicting->vin)
+                    {
+                        if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
+                        {
                             fReplacementOptOut = false;
                             break;
                         }
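
Note (editor's aside, not part of the patch): the opt-out test above works because BIP125 reserves the two highest nSequence values. A transaction signals replaceability iff at least one input has nSequence <= 0xfffffffd; a sketch of the constants involved:

    #include <cstdint>

    // Constants as defined by BIP125 and Bitcoin's policy code.
    static const uint32_t SEQUENCE_FINAL = 0xffffffff;
    static const uint32_t MAX_BIP125_RBF_SEQUENCE = SEQUENCE_FINAL - 2; // 0xfffffffd

    // A tx opts out of replacement only if *every* input uses one of the two
    // reserved values (0xfffffffe or 0xffffffff), mirroring the loop above.
    bool SignalsRBF(const uint32_t* sequences, int n) {
        for (int i = 0; i < n; ++i)
            if (sequences[i] <= MAX_BIP125_RBF_SEQUENCE) return true;
        return false;
    }
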
@@ -1519,84 +1518,78 @@ void InitScriptExecutionCache() {
  */
 bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
 {
-    if (tx.IsCoinBase()) {
-        return true;
-    }
-
-    if (pvChecks) {
-        pvChecks->reserve(tx.vin.size());
-    }
-
-    // Skip script verification when connecting blocks under the assumevalid
-    // block. Assuming the assumevalid block is valid this is safe because block
-    // merkle hashes are still computed and checked, Of course, if an assumed
-    // valid block is invalid due to false scriptSigs this optimization would
-    // allow an invalid chain to be accepted.
-    if (!fScriptChecks) {
-        return true;
-    }
-
-    // First check if script executions have been cached with the same flags.
-    // Note that this assumes that the inputs provided are correct (ie that the
-    // transaction hash which is in tx's prevouts properly commits to the
-    // scriptPubKey in the inputs view of that transaction).
-    uint256 hashCacheEntry;
-    // We only use the first 19 bytes of nonce to avoid a second SHA round -
-    // giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64)
-    static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache");
-    CSHA256()
-        .Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32)
-        .Write(tx.GetWitnessHash().begin(), 32)
-        .Write((unsigned char*)&flags, sizeof(flags))
-        .Finalize(hashCacheEntry.begin());
-    AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
-    if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
-        return true;
-    }
+    if (!tx.IsCoinBase())
+    {
+        if (pvChecks)
+            pvChecks->reserve(tx.vin.size());
+
+        // Skip script verification when connecting blocks under the
+        // assumevalid block. Assuming the assumevalid block is valid this
+        // is safe because block merkle hashes are still computed and checked.
+        // Of course, if an assumed valid block is invalid due to false scriptSigs
+        // this optimization would allow an invalid chain to be accepted.
+        if (fScriptChecks) {
+            // First check if script executions have been cached with the same
+            // flags. Note that this assumes that the inputs provided are
+            // correct (ie that the transaction hash which is in tx's prevouts
+            // properly commits to the scriptPubKey in the inputs view of that
+            // transaction).
+            uint256 hashCacheEntry;
+            // We only use the first 19 bytes of nonce to avoid a second SHA
+            // round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64)
+            static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache");
+            CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
+            AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
+            if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
+                return true;
+            }
 
-    for (unsigned int i = 0; i < tx.vin.size(); i++) {
-        const COutPoint &prevout = tx.vin[i].prevout;
-        const Coin& coin = inputs.AccessCoin(prevout);
-        assert(!coin.IsSpent());
-
-        // We very carefully only pass in things to CScriptCheck which
-        // are clearly committed to by tx' witness hash. This provides
-        // a sanity check that our caching is not introducing consensus
-        // failures through additional data in, eg, the coins being
-        // spent being checked as a part of CScriptCheck.
-
-        // Verify signature
-        CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
-        if (pvChecks) {
-            pvChecks->push_back(CScriptCheck());
-            check.swap(pvChecks->back());
-        } else if (!check()) {
-            if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
-                // Check whether the failure was caused by a non-mandatory
-                // script verification check, such as non-standard DER encodings
-                // or non-null dummy arguments; if so, don't trigger DoS
-                // protection to avoid splitting the network between upgraded
-                // and non-upgraded nodes.
-                CScriptCheck check2(coin.out, tx, i,
-                        flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
-                if (check2()) {
-                    return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
+            for (unsigned int i = 0; i < tx.vin.size(); i++) {
+                const COutPoint &prevout = tx.vin[i].prevout;
+                const Coin& coin = inputs.AccessCoin(prevout);
+                assert(!coin.IsSpent());
+
+                // We very carefully only pass in things to CScriptCheck which
+                // are clearly committed to by tx' witness hash. This provides
+                // a sanity check that our caching is not introducing consensus
+                // failures through additional data in, eg, the coins being
+                // spent being checked as a part of CScriptCheck.
+
+                // Verify signature
+                CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
+                if (pvChecks) {
+                    pvChecks->push_back(CScriptCheck());
+                    check.swap(pvChecks->back());
+                } else if (!check()) {
+                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
+                        // Check whether the failure was caused by a
+                        // non-mandatory script verification check, such as
+                        // non-standard DER encodings or non-null dummy
+                        // arguments; if so, don't trigger DoS protection to
+                        // avoid splitting the network between upgraded and
+                        // non-upgraded nodes.
+                        CScriptCheck check2(coin.out, tx, i,
+                                flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
+                        if (check2())
+                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
+                    }
+                    // Failures of other flags indicate a transaction that is
+                    // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
+                    // such nodes as they are not following the protocol. That
+                    // said during an upgrade careful thought should be taken
+                    // as to the correct behavior - we may want to continue
+                    // peering with non-upgraded nodes even after soft-fork
+                    // super-majority signaling has occurred.
+                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                 }
             }
-                // Failures of other flags indicate a transaction that is invalid in
-                // new blocks, e.g. an invalid P2SH. We DoS ban such nodes as they
-                // are not following the protocol. That said during an upgrade
-                // careful thought should be taken as to the correct behavior - we
-                // may want to continue peering with non-upgraded nodes even after
-                // soft-fork super-majority signaling has occurred.
-                return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
-        }
-    }
 
-    if (cacheFullScriptStore && !pvChecks) {
-        // We executed all of the provided scripts, and were told to
-        // cache the result. Do so now.
-        scriptExecutionCache.insert(hashCacheEntry);
+            if (cacheFullScriptStore && !pvChecks) {
+                // We executed all of the provided scripts, and were told to
+                // cache the result. Do so now.
+                scriptExecutionCache.insert(hashCacheEntry);
+            }
+        }
     }
 
     return true;
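
Note (editor's aside, not part of the patch): the cache key above is sized so the whole preimage fits in a single SHA-256 compression round: 19 nonce bytes + 32 bytes of witness hash + 4 bytes of flags = 55 bytes, and 55 + 1 padding byte + 8 length bytes = 64, exactly one block. A worked check of that arithmetic, assuming 4-byte script flags:

    #include <cassert>
    #include <cstddef>

    int main() {
        const size_t kNonceBytes = 19;        // prefix of a random nonce
        const size_t kWitnessHashBytes = 32;  // tx.GetWitnessHash()
        const size_t kFlagBytes = 4;          // sizeof(unsigned int) assumed
        const size_t preimage = kNonceBytes + kWitnessHashBytes + kFlagBytes;
        assert(preimage == 55);
        // SHA-256 appends 0x80 then an 8-byte length: 55 + 1 + 8 == 64.
        assert(preimage + 1 + 8 == 64);
        // 19 bytes of nonce still leaves at least 128 bits of entropy.
        assert(kNonceBytes * 8 >= 128);
        return 0;
    }
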
@@ -1693,15 +1686,12 @@ int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
 {
     bool fClean = true;
 
-    if (view.HaveCoin(out)) {
-        fClean = false; // Overwriting transaction output
-    }
+    if (view.HaveCoin(out)) fClean = false; // overwriting transaction output
 
     if (undo.nHeight == 0) {
-        // Missing undo metadata (height and coinbase). Older versions included
-        // this information only in undo records for the last spend of a
-        // transactions' outputs. This implies that it must be present for some
-        // other output of the same tx.
+        // Missing undo metadata (height and coinbase). Older versions included this
+        // information only in undo records for the last spend of a transaction's
+        // outputs. This implies that it must be present for some other output of the same tx.
         const Coin& alternate = AccessByTxid(view, out.hash);
         if (alternate.IsSpent()) {
             // Adding output for transaction without known metadata
@@ -1712,10 +1702,10 @@ int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
         }
     }
 
-    // The potential_overwrite parameter to AddCoin is only allowed to be false
-    // if we know for sure that the coin did not already exist in the cache. As
-    // we have queried for that above using HaveCoin, we don't need to guess.
-    // When fClean is false, a coin already existed and it is an overwrite.
+    // The potential_overwrite parameter to AddCoin is only allowed to be false if we know for
+    // sure that the coin did not already exist in the cache. As we have queried for that above
+    // using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and
+    // it is an overwrite.
     view.AddCoin(out, std::move(undo), !fClean);
 
     return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
@@ -1975,73 +1965,51 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
     // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
     // re-enforce that rule here (at least until we make it impossible for
     // GetAdjustedTime() to go backward).
-    if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
-        return error(
-            "%s: Consensus::CheckBlock: %s", __func__,
-            FormatStateMessage(state)
-        );
-    }
+    if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
+        return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
 
     // verify that the view's current state corresponds to the previous block
-    uint256 hashPrevBlock =
-        pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
+    uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
     assert(hashPrevBlock == view.GetBestBlock());
 
-    // Here we do not skip in case of genesis because we want to be able to
-    // spend its coinbase
+    // Here we do not skip in case of genesis because we want to be able to spend its coinbase
     nBlocksTotal++;
 
     bool fScriptChecks = true;
     if (!hashAssumeValid.IsNull()) {
-        // We've been configured with the hash of a block which has been
-        // externally verified to have a valid history. A suitable default value
-        // is included with the software and updated from time to time. Because
-        // validity relative to a piece of software is an objective fact these
-        // defaults can be easily reviewed. This setting doesn't force the
-        // selection of any particular chain but makes validating some faster by
-        // effectively caching the result of part of the verification.
+        // We've been configured with the hash of a block which has been externally verified to have a valid history.
+        // A suitable default value is included with the software and updated from time to time. Because validity
+        // relative to a piece of software is an objective fact these defaults can be easily reviewed.
+        // This setting doesn't force the selection of any particular chain but makes validating some faster by
+        // effectively caching the result of part of the verification.
         BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid);
         if (it != mapBlockIndex.end()) {
             if (it->second->GetAncestor(pindex->nHeight) == pindex &&
                 pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
                 pindexBestHeader->nChainWork >= nMinimumChainWork) {
-                // This block is a member of the assumed verified chain and an
-                // ancestor of the best header. The equivalent time check
-                // discourages hash power from extorting the network via DOS
-                // attack into accepting an invalid block through telling users
-                // they must manually set assumevalid. Requiring a software
-                // change or burying the invalid block, regardless of the
-                // setting, makes it hard to hide the implication of the demand.
-                // This also avoids having release candidates that are hardly
-                // doing any signature verification at all in testing without
-                // having to artificially set the default assumed verified block
-                // further back. The test against nMinimumChainWork prevents the
-                // skipping when denied access to any chain at least as good as
-                // the expected chain.
-                fScriptChecks = (GetBlockProofEquivalentTime(
-                    *pindexBestHeader, *pindex, *pindexBestHeader,
-                    chainparams.GetConsensus()
-                ) <= 60 * 60 * 24 * 7 * 2);
+                // This block is a member of the assumed verified chain and an ancestor of the best header.
+                // The equivalent time check discourages hash power from extorting the network via DOS attack
+                // into accepting an invalid block through telling users they must manually set assumevalid.
+                // Requiring a software change or burying the invalid block, regardless of the setting, makes
+                // it hard to hide the implication of the demand. This also avoids having release candidates
+                // that are hardly doing any signature verification at all in testing without having to
+                // artificially set the default assumed verified block further back.
+                // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
+                // least as good as the expected chain.
+                fScriptChecks = (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
             }
         }
     }
 
     int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
-    LogPrint(
-        BCLog::BENCH, "    - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
-        MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
-        nTimeCheck * MILLI / nBlocksTotal
-    );
+    LogPrint(BCLog::BENCH, "    - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
 
     // Here there was a check for BIP30 before BIP34 but we consider those already active
 
     // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
     int nLockTimeFlags = 0;
-    if (VersionBitsState(
-        pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV,
-        versionbitscache
-    ) == THRESHOLD_ACTIVE) {
+    if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
         nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
     }
 
@@ -2049,17 +2017,11 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
     unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
 
     int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
-    LogPrint(
-        BCLog::BENCH, "    - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
-        MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
-        nTimeForks * MILLI / nBlocksTotal
-    );
+    LogPrint(BCLog::BENCH, "    - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
 
     CBlockUndo blockundo;
 
-    CCheckQueueControl<CScriptCheck> control(
-        fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : nullptr
-    );
+    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : nullptr);
 
     std::vector<int> prevheights;
     CAmount nFees = 0;
@@ -2117,10 +2079,9 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
             //   * p2sh (when P2SH enabled in flags and excludes coinbase)
             //   * witness (when witness enabled in flags and excludes coinbase)
             nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
-            if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
+            if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
                 return state.DoS(100, error("ConnectBlock(): too many sigops"), REJECT_INVALID, "bad-blk-sigops");
-            }
 
             nFees += txfee;
             if (!MoneyRange(nFees)) {
@@ -2147,41 +2108,33 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
     LogPrint(BCLog::BENCH, "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
 
     bool isGenesisBlock = block.GetHash() == chainparams.GetConsensus().hashGenesisBlock;
-    if (!isGenesisBlock) {
+    if(!isGenesisBlock) {
         CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
-        if (block.vtx[0]->GetValueOut() > blockReward) {
-            return state.DoS(
-                100, error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
-                           block.vtx[0]->GetValueOut(), blockReward), REJECT_INVALID, "bad-cb-amount"
-            );
-        }
+        if (block.vtx[0]->GetValueOut() > blockReward)
+            return state.DoS(100,
+                             error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
+                                   block.vtx[0]->GetValueOut(), blockReward),
+                             REJECT_INVALID, "bad-cb-amount");
     }
 
-    if (!control.Wait()) {
-        return state.DoS(
-            100, error("%s: CheckQueue failed", __func__), REJECT_INVALID,
-            "block-validation-failed"
-        );
-    }
+    if (!control.Wait())
+        return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
 
     int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
     LogPrint(BCLog::BENCH, "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
 
-    if (fJustCheck) {
+    if (fJustCheck)
         return true;
-    }
 
-    if (!isGenesisBlock && !WriteUndoDataForBlock(blockundo, state, pindex, chainparams)) {
+    if (!isGenesisBlock && !WriteUndoDataForBlock(blockundo, state, pindex, chainparams))
         return false;
-    }
 
     if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
         pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
         setDirtyBlockIndex.insert(pindex);
     }
 
-    if (!WriteTxIndexDataForBlock(block, state, pindex)) {
+    if (!WriteTxIndexDataForBlock(block, state, pindex))
         return false;
-    }
 
     assert(pindex->phashBlock);
     // add this block to the view's block chain
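
Note (editor's aside, not part of the patch): the coinbase check above allows the miner at most the block subsidy plus the fees collected while connecting the block's transactions. A hedged sketch of that invariant, with a hypothetical flat subsidy standing in for the chain's real schedule:

    #include <cstdint>

    using CAmount = int64_t;

    // GetBlockSubsidySketch is an assumed placeholder for GetBlockSubsidy.
    CAmount GetBlockSubsidySketch(int height) { (void)height; return 50 * 100000000LL; }

    // Mirrors the "bad-cb-amount" rule: coinbase outputs must not exceed
    // nFees + subsidy for this height.
    bool CoinbaseWithinLimit(CAmount coinbaseValueOut, CAmount nFees, int height) {
        const CAmount blockReward = nFees + GetBlockSubsidySketch(height);
        return coinbaseValueOut <= blockReward;
    }
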
@@ -2365,14 +2318,12 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
         for (int i = 0; i < 100 && pindex != nullptr; i++)
         {
             int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
-            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0) {
+            if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
                 ++nUpgraded;
-            }
             pindex = pindex->pprev;
         }
-        if (nUpgraded > 0) {
+        if (nUpgraded > 0)
             warningMessages.push_back(strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded));
-        }
         if (nUpgraded > 100/2)
         {
             std::string strWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
@@ -2385,9 +2336,8 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
       log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
       DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime()),
       GuessVerificationProgress(chainParams.TxData(), pindexNew), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
-    if (!warningMessages.empty()) {
+    if (!warningMessages.empty())
         LogPrintf(" warning='%s'", boost::algorithm::join(warningMessages, ", "));
-    }
     LogPrintf("\n");
 }
 
@@ -2406,34 +2356,27 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha
 {
     CBlockIndex *pindexDelete = chainActive.Tip();
     assert(pindexDelete);
-
     // Read block from disk.
     std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
     CBlock& block = *pblock;
-
-    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus())) {
+    if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
         return AbortNode(state, "Failed to read block");
-    }
-
     // Apply the block atomically to the chain state.
     int64_t nStart = GetTimeMicros();
     {
         CCoinsViewCache view(pcoinsTip.get());
         assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
-        if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) {
+        if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
             return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
-        }
         bool flushed = view.Flush();
         assert(flushed);
     }
-
     LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
 
     // Write the chain state to disk, if necessary.
-    if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED)) {
+    if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED))
         return false;
-    }
 
     if (disconnectpool) {
         disconnectpool->addForBlock(block.vtx);
@@ -3391,7 +3334,6 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
     // Check that all transactions are finalized
     CTransactionRef prevTx = nullptr;
     const auto &fin_state = *esperanza::FinalizationState::GetState(pindexPrev);
-
     for (const auto& tx : block.vtx) {
         if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
             return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
@@ -3789,9 +3731,8 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr
-    if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr) {
+    if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr)
         return true;
-    }
 
     // Verify blocks in the best chain
-    if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height()) {
+    if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height())
         nCheckDepth = chainActive.Height();
-    }
-
     nCheckLevel = std::max(0, std::min(4, nCheckLevel));
     LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
     CCoinsViewCache coins(coinsview);
LogPrintf("VerifyDB(): block verification stopping at height %d (pruning, no data)\n", pindex->nHeight); @@ -4230,16 +4166,12 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, } CBlock block; // check level 0: read from disk - if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) { + if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); - } - // check level 1: verify block validity - if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus())) { + if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus())) return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state)); - } - // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; @@ -4264,14 +4196,11 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, nGoodTransactions += block.vtx.size(); } } - - if (ShutdownRequested()) { + if (ShutdownRequested()) return true; - } } - if (pindexFailure) { + if (pindexFailure) return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions); - } // check level 4: try reconnecting blocks if (nCheckLevel >= 4) { @@ -4281,12 +4210,10 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))), false); pindex = chainActive.Next(pindex); CBlock block; - if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) { + if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); - } - if (!g_chainstate.ConnectBlock(block, state, pindex, coins, chainparams)) { + if (!g_chainstate.ConnectBlock(block, state, pindex, coins, chainparams)) return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); - } } } diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index af71b43811..2be047eb70 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -99,7 +99,7 @@ def sign_tx(self, tx, spend_tx, n): if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend tx.vin[0].scriptSig = CScript() return - sighash, err = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) + (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): @@ -121,7 +121,7 @@ def find_spend(self, prevout): def set_block_snapshot_meta(self, block, spend=None): - block_height = self.block_heights[block.sha256] # TODO UNIT-E Here is where the thing crashes... 
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index af71b43811..2be047eb70 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -99,7 +99,7 @@ def sign_tx(self, tx, spend_tx, n):
         if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
             tx.vin[0].scriptSig = CScript()
             return
-        sighash, err = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
+        (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
         tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
 
     def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
@@ -121,7 +121,7 @@ def find_spend(self, prevout):
 
     def set_block_snapshot_meta(self, block, spend=None):
-        block_height = self.block_heights[block.sha256] # TODO UNIT-E Here is where the thing crashes...
+        block_height = self.block_heights[block.sha256]
         inputs = []
         outputs = []
         for tx_idx, tx in enumerate(block.vtx):
@@ -206,11 +206,11 @@ def get_spendable_output():
             return PreviousSpendableOutput(tx, 0, self.block_heights[block.sha256])
 
         # returns a test case that asserts that the current tip was accepted
-        def accepted(test_name=""):
+        def accepted(test_name = ""):
             return TestInstance([[self.tip, True]], test_name=test_name)
 
         # returns a test case that asserts that the current tip was rejected
-        def rejected(reject=None, test_name=""):
+        def rejected(reject = None, test_name = ""):
             if reject is None:
                 return TestInstance([[self.tip, False]], test_name=test_name)
             else:
@@ -379,7 +379,7 @@ def comp_snapshot_hash(block_number):
         yield rejected()
         comp_snapshot_hash(6)
 
-        yield TestInstance([[b12, True, b13.sha256]])  # New tip should be b13.
+        yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
         comp_snapshot_hash(13)
 
         # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
@@ -397,7 +397,7 @@ def comp_snapshot_hash(block_number):
 
         # Test that a block with too many checksigs is rejected
-        too_many_checksigs = CScript([OP_CHECKSIG] * MAX_BLOCK_SIGOPS)
+        too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
         block(16, spend=out[6], script=too_many_checksigs)
         yield rejected(RejectResult(16, b'bad-blk-sigops'))
         comp_snapshot_hash(15)
@@ -578,7 +578,7 @@ def comp_snapshot_hash(block_number):
         save_spendable_output()
         comp_snapshot_hash(35)
 
-        too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * MAX_BLOCK_SIGOPS)
+        too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
         block(36, spend=out[11], script=too_many_checksigs)
         yield rejected(RejectResult(16, b'bad-blk-sigops'))
         comp_snapshot_hash(35)
@@ -641,15 +641,15 @@ def comp_snapshot_hash(block_number):
         # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
         tx_new = None
         tx_last = tx
-        total_size = len(b39.serialize())
-        while total_size < MAX_BLOCK_BASE_SIZE:
+        total_size=len(b39.serialize())
+        while(total_size < MAX_BLOCK_BASE_SIZE):
             tx_new = create_tx(tx_last, 1, 1, p2sh_script)
             tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
             tx_new.rehash()
             total_size += len(tx_new.serialize())
             if total_size >= MAX_BLOCK_BASE_SIZE:
                 break
-            b39.vtx.append(tx_new)  # add tx to block
+            b39.vtx.append(tx_new) # add tx to block
             tx_last = tx_new
             b39_outputs += 1
 
@@ -852,7 +852,7 @@ def comp_snapshot_hash(block_number):
         # tip(43)
         block(53, spend=out[14])
-        yield rejected()  # rejected since b44 is at same height
+        yield rejected() # rejected since b44 is at same height
         save_spendable_output()
         comp_snapshot_hash(44)
@@ -911,7 +911,7 @@ def comp_snapshot_hash(block_number):
         tip(55)
         b56 = copy.deepcopy(b57)
         self.blocks[56] = b56
-        assert_equal(len(b56.vtx), 3)
+        assert_equal(len(b56.vtx),3)
         assert_equal(b56.hash, b57.hash)
         b56 = update_block(56, [tx1], del_refs=False)
         yield rejected(RejectResult(16, b'bad-txns-duplicate'))
@@ -933,7 +933,7 @@ def comp_snapshot_hash(block_number):
         b56p2 = copy.deepcopy(b57p2)
         self.blocks["b56p2"] = b56p2
         assert_equal(b56p2.hash, b57p2.hash)
-        assert_equal(len(b56p2.vtx), 6)
+        assert_equal(len(b56p2.vtx),6)
         b56p2 = update_block("b56p2", [tx3, tx4], del_refs=False)
         yield rejected(RejectResult(16, b'bad-txns-duplicate'))
         comp_snapshot_hash(55)
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index 1ad0b6c11f..11c9297a33 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -82,7 +82,6 @@
         b25times.append(b22times)
     relative_locktimes.append(b25times)
 
-
 def all_rlt_txs(txarray):
     txs = []
     for b31 in range(2):
@@ -110,8 +109,8 @@ def send_generic_input_tx(self, node, coinbases):
         return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
 
     def create_transaction(self, node, txid, to_address, amount):
-        inputs = [{"txid": txid, "vout": 0}]
-        outputs = {to_address: amount}
+        inputs = [{ "txid" : txid, "vout" : 0}]
+        outputs = { to_address : amount }
         rawtx = node.createrawtransaction(inputs, outputs)
         tx = CTransaction()
         f = BytesIO(hex_str_to_bytes(rawtx))
@@ -126,7 +125,7 @@ def sign_transaction(self, node, unsignedtx):
         tx.deserialize(f)
         return tx
 
-    def generate_blocks(self, number, version, test_blocks=[]):
+    def generate_blocks(self, number, version, test_blocks = []):
         for i in range(number):
             block = self.create_test_block([], version)
             test_blocks.append([block, True])
@@ -135,7 +134,7 @@
             self.tipheight += 1
         return test_blocks
 
-    def create_test_block(self, txs, version=536870912):
+    def create_test_block(self, txs, version = 536870912):
         coinbase = create_coinbase(self.tipheight + 1, self.tip_snapshot_meta.hash)
         block = create_block(self.tip, coinbase, self.last_block_time + 600)
         block.nVersion = version
@@ -159,7 +158,7 @@ def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
             for b22 in range(2):
                 b18txs = []
                 for b18 in range(2):
-                    tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
+                    tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) 
                     i += 1
                     tx.nVersion = txversion
                     tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
@@ -187,11 +186,11 @@ def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta =
             for b22 in range(2):
                 b18txs = []
                 for b18 in range(2):
-                    tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
+                    tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) 
                     i += 1
-                    if varyOP_CSV:  # if varying OP_CSV, nSequence is fixed
+                    if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
                         tx.vin[0].nSequence = base_relative_locktime + locktime_delta
-                    else:  # vary nSequence instead, OP_CSV is fixed
+                    else: # vary nSequence instead, OP_CSV is fixed
                         tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                     tx.nVersion = txversion
                     signtx = self.sign_transaction(self.nodes[0], tx)
@@ -206,12 +205,12 @@ def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta =
         return txs
 
     def get_tests(self):
-        long_past_time = int(time.time()) - 600 * 1000  # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
-        self.nodes[0].setmocktime(long_past_time - 100)  # enough so that the generated blocks will still all be before long_past_time
+        long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
+        self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
         self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
-        self.nodes[0].setmocktime(0)  # set time back to present so yielded blocks aren't in the future as we advance last_block_time
-        self.tipheight = 82  # height of the next block to build
+        self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
+        self.tipheight = 82 # height of the next block to build
         self.last_block_time = long_past_time
         self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
         self.nodeaddress = self.nodes[0].getnewaddress()
@@ -224,21 +223,21 @@ def get_tests(self):
 
         # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
         # using a variety of bits to simulate multiple parallel softforks
-        test_blocks = self.generate_blocks(50, 536870913)  # 0x20000001 (signalling ready)
-        test_blocks = self.generate_blocks(20, 4, test_blocks)  # 0x00000004 (signalling not)
-        test_blocks = self.generate_blocks(50, 536871169, test_blocks)  # 0x20000101 (signalling ready)
-        test_blocks = self.generate_blocks(24, 536936448, test_blocks)  # 0x20010000 (signalling not)
-        yield TestInstance(test_blocks, sync_every_block=False)  # 2
+        test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
+        test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
+        test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
+        yield TestInstance(test_blocks, sync_every_block=False) # 2
         # Failed to advance past STARTED, height = 287
         assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
 
         # 108 out of 144 signal bit 0 to achieve lock-in
         # using a variety of bits to simulate multiple parallel softforks
-        test_blocks = self.generate_blocks(58, 536870913)  # 0x20000001 (signalling ready)
-        test_blocks = self.generate_blocks(26, 4, test_blocks)  # 0x00000004 (signalling not)
-        test_blocks = self.generate_blocks(50, 536871169, test_blocks)  # 0x20000101 (signalling ready)
-        test_blocks = self.generate_blocks(10, 536936448, test_blocks)  # 0x20010000 (signalling not)
-        yield TestInstance(test_blocks, sync_every_block=False)  # 3
+        test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
+        test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
+        test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
+        test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
+        yield TestInstance(test_blocks, sync_every_block=False) # 3
         # Advanced from STARTED to LOCKED_IN, height = 431
         assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
@@ -326,7 +325,7 @@ def get_tests(self):
         ### Version 1 txs ###
         success_txs = []
         # add BIP113 tx and -1 CSV tx
-        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
+        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
         bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
         success_txs.append(bip113signed1)
         success_txs.append(bip112tx_special_v1)
@@ -338,14 +337,14 @@ def get_tests(self):
         # try BIP 112 with seq=9 txs
         success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
         success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
-        yield TestInstance([[self.create_test_block(success_txs), True]])  # 6
+        yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         ### Version 2 txs ###
         success_txs = []
         # add BIP113 tx and -1 CSV tx
-        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
+        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
         bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
         success_txs.append(bip113signed2)
         success_txs.append(bip112tx_special_v2)
@@ -357,7 +356,7 @@ def get_tests(self):
         # try BIP 112 with seq=9 txs
         success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
         success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
-        yield TestInstance([[self.create_test_block(success_txs), True]])  # 7
+        yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
@@ -372,20 +371,20 @@ def get_tests(self):
         #################################
         ### BIP 113 ###
         # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
-        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
+        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
         bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
-        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5  # = MTP of prior block (not <) but < time put on current block
+        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
         bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
         for bip113tx in [bip113signed1, bip113signed2]:
-            yield TestInstance([[self.create_test_block([bip113tx]), False]])  # 9,10
+            yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
             self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
         # BIP 113 tests should now pass if the locktime is < MTP
-        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
+        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
         bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
-        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1  # < MTP of prior block
+        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
         bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
         for bip113tx in [bip113signed1, bip113signed2]:
-            yield TestInstance([[self.create_test_block([bip113tx]), True]])  # 11,12
+            yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
@@ -398,7 +397,7 @@ def get_tests(self):
         # All still pass
         success_txs = []
         success_txs.extend(all_rlt_txs(bip68txs_v1))
-        yield TestInstance([[self.create_test_block(success_txs), True]])  # 14
+        yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
@@ -409,7 +408,7 @@ def get_tests(self):
             for b22 in range(2):
                 for b18 in range(2):
                     bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
-        yield TestInstance([[self.create_test_block(bip68success_txs), True]])  # 15
+        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
         # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
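
Note (editor's aside, not part of the patch): the height/time arithmetic in the comment above follows BIP68. Bit 22 of nSequence selects time-based locks, whose value counts units of 512 seconds, so a lock of 10 units needs 10 * 512 = 5120 seconds; with 600-second blocks, 8 blocks give 4800 seconds (fails) while 9 give 5400 (passes). A sketch of the encoding, using BIP68's published constants:

    #include <cstdint>

    // Constants as specified by BIP68.
    static const uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG = 1 << 22; // time vs height based
    static const uint32_t SEQUENCE_LOCKTIME_MASK = 0x0000ffff;   // 16-bit lock value

    // True once a time-based relative lock encoded in nSequence is satisfied.
    bool TimeLockSatisfied(uint32_t nSequence, int64_t secondsSincePrevout) {
        if (!(nSequence & SEQUENCE_LOCKTIME_TYPE_FLAG)) return false; // height-based
        int64_t required = (int64_t)(nSequence & SEQUENCE_LOCKTIME_MASK) << 9; // * 512
        return secondsSincePrevout >= required;
    }
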
@@ -425,7 +424,7 @@ def get_tests(self):
             for b18 in range(2):
                 bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
         for tx in bip68heighttxs:
-            yield TestInstance([[self.create_test_block([tx]), False]])  # 20 - 23
+            yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
             self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         # Advance one block to 581
@@ -434,11 +433,11 @@ def get_tests(self):
 
         # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
         bip68success_txs.extend(bip68timetxs)
-        yield TestInstance([[self.create_test_block(bip68success_txs), True]])  # 25
+        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
         for tx in bip68heighttxs:
-            yield TestInstance([[self.create_test_block([tx]), False]])  # 26 - 29
+            yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
             self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         # Advance one block to 582
@@ -447,7 +446,7 @@ def get_tests(self):
 
         # All BIP 68 txs should pass
         bip68success_txs.extend(bip68heighttxs)
-        yield TestInstance([[self.create_test_block(bip68success_txs), True]])  # 31
+        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
@@ -455,7 +454,7 @@ def get_tests(self):
         ### BIP 112 ###
         ### Version 1 txs ###
         # -1 OP_CSV tx should fail
-        yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]])  # 32
+        yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
         # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
         success_txs = []
@@ -464,7 +463,7 @@ def get_tests(self):
                 for b18 in range(2):
                     success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
                     success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
-        yield TestInstance([[self.create_test_block(success_txs), True]])  # 33
+        yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
@@ -479,12 +478,12 @@ def get_tests(self):
                     fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
 
         for tx in fail_txs:
-            yield TestInstance([[self.create_test_block([tx]), False]])  # 34 - 81
+            yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
             self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         ### Version 2 txs ###
         # -1 OP_CSV tx should fail
-        yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]])  # 82
+        yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
@@ -495,21 +494,21 @@ def get_tests(self):
                     success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
                     success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
 
-        yield TestInstance([[self.create_test_block(success_txs), True]])  # 83
+        yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
         # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
         fail_txs = []
-        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))  # 16/16 of vary_nSequence_9
+        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
         for b25 in range(2):
             for b22 in range(2):
                 for b18 in range(2):
-                    fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18])  # 16/16 of vary_OP_CSV_9
+                    fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
 
         for tx in fail_txs:
-            yield TestInstance([[self.create_test_block([tx]), False]])  # 84 - 107
+            yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
             self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
@@ -517,28 +516,28 @@ def get_tests(self):
         for b25 in range(2):
             for b22 in range(2):
                 for b18 in range(2):
-                    fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18])  # 8/16 of vary_nSequence
+                    fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
 
         for tx in fail_txs:
-            yield TestInstance([[self.create_test_block([tx]), False]])  # 108-115
+            yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
             self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         # If sequencelock types mismatch, tx should fail
         fail_txs = []
         for b25 in range(2):
             for b18 in range(2):
-                fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18])  # 12/16 of vary_nSequence
-                fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18])  # 12/16 of vary_OP_CSV
+                fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
+                fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
 
         for tx in fail_txs:
-            yield TestInstance([[self.create_test_block([tx]), False]])  # 116-123
+            yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
             self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
         # Remaining txs should pass, just test masking works properly
         success_txs = []
         for b25 in range(2):
             for b18 in range(2):
-                success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18])  # 16/16 of vary_nSequence
-                success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18])  # 16/16 of vary_OP_CSV
-        yield TestInstance([[self.create_test_block(success_txs), True]])  # 124
+                success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
+                success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
+        yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
@@ -550,7 +549,7 @@ def get_tests(self):
             tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
             signtx = self.sign_transaction(self.nodes[0], tx)
             time_txs.append(signtx)
-        yield TestInstance([[self.create_test_block(time_txs), True]])  # 125
+        yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
         self.tip_snapshot_meta = get_tip_snapshot_meta(self.nodes[0])
 
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 4abcf51109..18b93da6f9 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -70,19 +70,19 @@ def fail_mine(self, node, txid, sign, redeem_script=""):
         sync_blocks(self.nodes)
 
     def run_test(self):
-        self.nodes[0].generate(161)  # block 161
+        self.nodes[0].generate(161) #block 161
 
         self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
         txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
         # UNIT-E TODO: check that blocks are mines using segwit (was getblocktemplate)
 
-        self.nodes[0].generate(1)  # block 162
+        self.nodes[0].generate(1) #block 162
 
         balance_presetup = self.nodes[0].getbalance()
         self.pubkey = []
-        p2sh_ids = []  # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
-        wit_ids = []  # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
+        p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
+        wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
         for i in range(3):
             newaddress = self.nodes[i].getnewaddress()
             self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
@@ -97,17 +97,17 @@ def run_test(self):
             assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
             p2sh_ids.append([])
             wit_ids.append([])
-            for _ in range(2):
+            for v in range(2):
                 p2sh_ids[i].append([])
                 wit_ids[i].append([])
 
-        for _ in range(5):
+        for i in range(5):
             for n in range(3):
                 for v in range(2):
                     wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
                     p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
 
-        self.nodes[0].generate(1)  # block 163
+        self.nodes[0].generate(1) #block 163
         sync_blocks(self.nodes)
 
         # Make sure all nodes recognize the transactions as theirs
@@ -115,7 +115,7 @@ def run_test(self):
         assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
         assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
 
-        self.nodes[0].generate(260)  # block 423
+        self.nodes[0].generate(260) #block 423
         sync_blocks(self.nodes)
 
         self.log.info("Verify default node can't accept any witness format txs before fork")
@@ -155,7 +155,7 @@ def run_test(self):
         self.log.info("Verify previous witness txs skipped for mining can now be mined")
         assert_equal(len(self.nodes[2].getrawmempool()), 4)
-        block = self.nodes[2].generate(1)  # block 432 (first block with new rules; 432 = 144 * 3)
+        block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
         sync_blocks(self.nodes)
         assert_equal(len(self.nodes[2].getrawmempool()), 0)
         segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index a0a9ba261e..9e3a2480e3 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -11,7 +11,6 @@
 MAX_ANCESTORS = 25
 MAX_DESCENDANTS = 25
 
-
 class MempoolPackagesTest(UnitETestFramework):
     def set_test_params(self):
         self.num_nodes = 2
@@ -29,7 +28,7 @@ def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
         signedtx = node.signrawtransaction(rawtx)
         txid = node.sendrawtransaction(signedtx['hex'])
         fulltx = node.getrawtransaction(txid, 1)
-        assert(len(fulltx['vout']) == num_outputs)  # make sure we didn't generate a change output
+        assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
         return (txid, send_value)
 
     def run_test(self):
@@ -113,15 +112,10 @@ def run_test(self):
         descendant_fees = 0
         for x in reversed(chain):
             descendant_fees += mempool[x]['fee']
-            assert_equal(
-                mempool[x]['descendantfees'], descendant_fees * UNIT + 1000
-            )
+            assert_equal(mempool[x]['descendantfees'], descendant_fees * UNIT + 1000)
 
         # Adding one more transaction on to the chain should fail.
-        assert_raises_rpc_error(
-            -26, "too-long-mempool-chain", self.chain_transaction,
-            self.nodes[0], txid, vout, value, fee, 1
-        )
+        assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
 
         # Check that prioritising a tx before it's added to the mempool works
         # First clear the mempool by mining a block.
@@ -141,15 +135,9 @@ def run_test(self):
         descendant_fees = 0
         for x in reversed(chain):
             descendant_fees += mempool[x]['fee']
-            if x == chain[-1]:
-                assert_equal(
-                    mempool[x]['modifiedfee'],
-                    mempool[x]['fee'] + satoshi_round(0.00002)
-                )
-                assert_equal(
-                    mempool[x]['descendantfees'],
-                    descendant_fees * UNIT + 2000
-                )
+            if (x == chain[-1]):
+                assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
+                assert_equal(mempool[x]['descendantfees'], descendant_fees * UNIT + 2000)
 
         # TODO: check that node1's mempool is as expected
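
Note (editor's aside, not part of the patch): the assertions above rely on a priority delta applied to the tail of the chain propagating into every ancestor's reported descendantfees: a 1000-base-unit delta raises each entry's total by 1000, and two such deltas by 2000. A hedged sketch of the expected values for a simple parent-to-child chain:

    #include <cstdint>
    #include <vector>

    using CAmount = int64_t;

    // Expected 'descendantfees' for each tx in a linear chain, where `delta`
    // has been added to the final descendant via priority adjustment
    // (illustrative only; the real bookkeeping lives in CTxMemPoolEntry).
    std::vector<CAmount> ExpectedDescendantFees(const std::vector<CAmount>& fees, CAmount delta) {
        std::vector<CAmount> out(fees.size());
        CAmount running = 0;
        for (size_t i = fees.size(); i-- > 0;) {
            running += fees[i];       // fees of this tx and everything after it
            out[i] = running + delta; // the tail's delta counts for every ancestor
        }
        return out;
    }
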