This repository has been archived by the owner on Aug 2, 2022. It is now read-only.

2.0.1 security omnibus - develop #8524

Merged: 3 commits, Jan 28, 2020

36 changes: 29 additions & 7 deletions libraries/chain/controller.cpp
@@ -114,6 +114,7 @@ struct building_block {
const vector<digest_type>& new_protocol_feature_activations )
:_pending_block_header_state( prev.next( when, num_prev_blocks_to_confirm ) )
,_new_protocol_feature_activations( new_protocol_feature_activations )
,_trx_mroot_or_receipt_digests( digests_t{} )
{}

pending_block_header_state _pending_block_header_state;
@@ -122,8 +123,8 @@ struct building_block {
size_t _num_new_protocol_features_that_have_activated = 0;
deque<transaction_metadata_ptr> _pending_trx_metas;
deque<transaction_receipt> _pending_trx_receipts; // boost deque in 1.71 with 1024 elements performs better
deque<digest_type> _pending_trx_receipt_digests;
deque<digest_type> _action_receipt_digests;
static_variant<checksum256_type, digests_t> _trx_mroot_or_receipt_digests;
digests_t _action_receipt_digests;
};

struct assembled_block {
@@ -1094,7 +1095,8 @@ struct controller_impl {
auto& bb = pending->_block_stage.get<building_block>();
auto orig_trx_receipts_size = bb._pending_trx_receipts.size();
auto orig_trx_metas_size = bb._pending_trx_metas.size();
auto orig_trx_receipt_digests_size = bb._pending_trx_receipt_digests.size();
auto orig_trx_receipt_digests_size = bb._trx_mroot_or_receipt_digests.contains<digests_t>() ?
bb._trx_mroot_or_receipt_digests.get<digests_t>().size() : 0;
auto orig_action_receipt_digests_size = bb._action_receipt_digests.size();

std::function<void()> callback = [this,
@@ -1106,7 +1108,8 @@
auto& bb = pending->_block_stage.get<building_block>();
bb._pending_trx_receipts.resize(orig_trx_receipts_size);
bb._pending_trx_metas.resize(orig_trx_metas_size);
bb._pending_trx_receipt_digests.resize(orig_trx_receipt_digests_size);
if( bb._trx_mroot_or_receipt_digests.contains<digests_t>() )
bb._trx_mroot_or_receipt_digests.get<digests_t>().resize(orig_trx_receipt_digests_size);
bb._action_receipt_digests.resize(orig_action_receipt_digests_size);
};

@@ -1319,7 +1322,7 @@ struct controller_impl {

// Only subjective OR soft OR hard failure logic below:

if( gtrx.sender != account_name() && !failure_is_subjective(*trace->except)) {
if( gtrx.sender != account_name() && !(explicit_billed_cpu_time ? failure_is_subjective(*trace->except) : scheduled_failure_is_subjective(*trace->except))) {
// Attempt error handling for the generated transaction.

auto error_trace = apply_onerror( gtrx, deadline, trx_context.pseudo_start,
@@ -1394,7 +1397,9 @@ struct controller_impl {
r.cpu_usage_us = cpu_usage_us;
r.net_usage_words = net_usage_words;
r.status = status;
pending->_block_stage.get<building_block>()._pending_trx_receipt_digests.emplace_back( r.digest() );
auto& bb = pending->_block_stage.get<building_block>();
if( bb._trx_mroot_or_receipt_digests.contains<digests_t>() )
bb._trx_mroot_or_receipt_digests.get<digests_t>().emplace_back( r.digest() );
return r;
}

@@ -1689,7 +1694,9 @@ struct controller_impl {

// Create (unsigned) block:
auto block_ptr = std::make_shared<signed_block>( pbhs.make_block_header(
merkle( std::move( pending->_block_stage.get<building_block>()._pending_trx_receipt_digests ) ),
bb._trx_mroot_or_receipt_digests.contains<checksum256_type>() ?
bb._trx_mroot_or_receipt_digests.get<checksum256_type>() :
merkle( std::move( bb._trx_mroot_or_receipt_digests.get<digests_t>() ) ),
merkle( std::move( pending->_block_stage.get<building_block>()._action_receipt_digests ) ),
bb._new_pending_producer_schedule,
std::move( bb._new_protocol_feature_activations ),
Expand Down Expand Up @@ -1841,6 +1848,9 @@ struct controller_impl {
auto producer_block_id = b->id();
start_block( b->timestamp, b->confirmed, new_protocol_feature_activations, s, producer_block_id);

// validated in create_block_state_future()
pending->_block_stage.get<building_block>()._trx_mroot_or_receipt_digests = b->transaction_mroot;

const bool existing_trxs_metas = !bsp->trxs_metas().empty();
const bool pub_keys_recovered = bsp->is_pub_keys_recovered();
const bool skip_auth_checks = self.skip_auth_check();
Expand Down Expand Up @@ -1948,6 +1958,11 @@ struct controller_impl {

return async_thread_pool( thread_pool.get_executor(), [b, prev, control=this]() {
const bool skip_validate_signee = false;

auto trx_mroot = calculate_trx_merkle( b->transactions );
EOS_ASSERT( b->transaction_mroot == trx_mroot, block_validate_exception,
"invalid block transaction merkle root ${b} != ${c}", ("b", b->transaction_mroot)("c", trx_mroot) );

return std::make_shared<block_state>(
*prev,
move( b ),
@@ -2141,6 +2156,13 @@ struct controller_impl {
return applied_trxs;
}

static checksum256_type calculate_trx_merkle( const deque<transaction_receipt>& trxs ) {
deque<digest_type> trx_digests;
for( const auto& a : trxs )
trx_digests.emplace_back( a.digest() );
return merkle( move( trx_digests ) );
}

void update_producers_authority() {
const auto& producers = pending->get_pending_block_header_state().active_schedule.producers;

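The controller.cpp changes above replace the unconditional _pending_trx_receipt_digests deque with a static_variant that holds either the pending receipt digests (blocks being produced or built speculatively) or a block's precomputed transaction_mroot (blocks received from a peer, whose root is now verified in create_block_state_future()). Below is a minimal standalone sketch of that pattern using std::variant and placeholder types; it is illustrative only, not the PR's code.

#include <deque>
#include <string>
#include <utility>
#include <variant>

using digest  = std::string;            // placeholder for digest_type
using digests = std::deque<digest>;     // placeholder for digests_t

// Placeholder for merkle(); just folds the digests so the sketch compiles.
digest merkle_of( digests d ) {
   digest out;
   for( auto& x : d ) out += x;
   return out;
}

struct building_block_sketch {
   // Starts as an empty digest list, like _trx_mroot_or_receipt_digests( digests_t{} ).
   std::variant<digest, digests> trx_mroot_or_receipt_digests = digests{};

   // Producing path: append a receipt digest only while digests are being accumulated.
   void add_receipt_digest( digest d ) {
      if( auto* list = std::get_if<digests>( &trx_mroot_or_receipt_digests ) )
         list->emplace_back( std::move( d ) );
   }

   // Validation path: keep the block's already-verified transaction_mroot as-is.
   void use_precomputed_root( digest mroot ) {
      trx_mroot_or_receipt_digests = std::move( mroot );
   }

   // Block assembly: reuse the precomputed root, or compute it once from the digests.
   digest transaction_mroot() {
      if( auto* root = std::get_if<digest>( &trx_mroot_or_receipt_digests ) )
         return *root;
      return merkle_of( std::move( std::get<digests>( trx_mroot_or_receipt_digests ) ) );
   }
};

When the block header is assembled, the stored root is reused if present, so the transaction merkle root is never computed twice for a block that was already validated.
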
1 change: 1 addition & 0 deletions libraries/chain/include/eosio/chain/types.hpp
@@ -258,6 +258,7 @@ namespace eosio { namespace chain {
using int128_t = __int128;
using uint128_t = unsigned __int128;
using bytes = vector<char>;
using digests_t = deque<digest_type>;

struct sha256_less {
bool operator()( const fc::sha256& lhs, const fc::sha256& rhs ) const {
7 changes: 7 additions & 0 deletions plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
@@ -17,6 +17,13 @@ namespace eosio {
block_id_type head_id;
};

// Longest domain name is 253 characters according to Wikipedia.
// Addresses include ":port" where max port is 65535, which adds 6 chars.
// We also add our own extensions of "[:trx|:blk] - xxxxxxx", which adds 14 chars, total = 273.
// Allow for future extensions as well, hence 384.
constexpr size_t max_p2p_address_length = 253 + 6;
constexpr size_t max_handshake_str_length = 384;

struct handshake_message {
uint16_t network_version = 0; ///< incremental value above a computed base
chain_id_type chain_id; ///< used to identify chain
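The sizing comment above works out as follows: a maximal DNS name (253 chars) plus a ":port" suffix (":65535", 6 chars) gives max_p2p_address_length = 259, and the "[:trx|:blk] - xxxxxxx" extension (14 chars) brings a handshake-embedded address to 273, which 384 rounds up with headroom. A small compile-time check of that arithmetic, with illustrative names (not part of the PR):

#include <cstddef>

constexpr std::size_t max_hostname_len  = 253;  // longest DNS name
constexpr std::size_t port_suffix_len   = 6;    // ":65535"
constexpr std::size_t type_suffix_len   = 14;   // "[:trx|:blk] - xxxxxxx" style extension

constexpr std::size_t p2p_address_len   = max_hostname_len + port_suffix_len;  // 259
constexpr std::size_t handshake_min_len = p2p_address_len + type_suffix_len;   // 273

static_assert( p2p_address_len   == 259, "matches max_p2p_address_length = 253 + 6" );
static_assert( handshake_min_len == 273, "matches the comment's total" );
static_assert( handshake_min_len <= 384, "max_handshake_str_length leaves headroom" );
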
40 changes: 32 additions & 8 deletions plugins/net_plugin/net_plugin.cpp
Expand Up @@ -825,7 +825,7 @@ namespace eosio {
last_handshake_recv(),
last_handshake_sent()
{
fc_ilog( logger, "accepted network connection" );
fc_dlog( logger, "new connection object created" );
}

void connection::update_endpoints() {
@@ -852,13 +852,13 @@
peer_add.substr( colon2 + 1 ) : peer_add.substr( colon2 + 1, end - (colon2 + 1) );

if( type.empty() ) {
fc_ilog( logger, "Setting connection type for: ${peer} to both transactions and blocks", ("peer", peer_add) );
fc_dlog( logger, "Setting connection type for: ${peer} to both transactions and blocks", ("peer", peer_add) );
connection_type = both;
} else if( type == "trx" ) {
fc_ilog( logger, "Setting connection type for: ${peer} to transactions only", ("peer", peer_add) );
fc_dlog( logger, "Setting connection type for: ${peer} to transactions only", ("peer", peer_add) );
connection_type = transactions_only;
} else if( type == "blk" ) {
fc_ilog( logger, "Setting connection type for: ${peer} to blocks only", ("peer", peer_add) );
fc_dlog( logger, "Setting connection type for: ${peer} to blocks only", ("peer", peer_add) );
connection_type = blocks_only;
} else {
fc_wlog( logger, "Unknown connection type: ${t}", ("t", type) );
@@ -2136,6 +2136,7 @@ namespace eosio {
c->connect( resolver, endpoints );
} else {
fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) );
c->connecting = false;
++c->consecutive_immediate_connection_close;
}
} ) );
@@ -2207,10 +2208,10 @@ namespace eosio {

} else {
if( from_addr >= max_nodes_per_host ) {
fc_elog( logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}",
fc_dlog( logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}",
("n", from_addr + 1)( "ra", paddr_str )( "l", max_nodes_per_host ));
} else {
fc_elog( logger, "Error max_client_count ${m} exceeded", ("m", max_client_count));
fc_dlog( logger, "max_client_count ${m} exceeded", ("m", max_client_count));
}
// new_connection never added to connections and start_session not called, lifetime will end
boost::system::error_code ec;
@@ -2471,10 +2472,21 @@ namespace eosio {
if (msg.p2p_address.empty()) {
fc_wlog( logger, "Handshake message validation: p2p_address is null string" );
valid = false;
} else if( msg.p2p_address.length() > max_handshake_str_length ) {
// see max_handshake_str_length comment in protocol.hpp
fc_wlog( logger, "Handshake message validation: p2p_address to large: ${p}", ("p", msg.p2p_address.substr(0, max_handshake_str_length) + "...") );
valid = false;
}
if (msg.os.empty()) {
fc_wlog( logger, "Handshake message validation: os field is null string" );
valid = false;
} else if( msg.os.length() > max_handshake_str_length ) {
fc_wlog( logger, "Handshake message validation: os field to large: ${p}", ("p", msg.os.substr(0, max_handshake_str_length) + "...") );
valid = false;
}
if( msg.agent.length() > max_handshake_str_length ) {
fc_wlog( logger, "Handshake message validation: agent field to large: ${p}", ("p", msg.agent.substr(0, max_handshake_str_length) + "...") );
valid = false;
}
if ((msg.sig != chain::signature_type() || msg.token != sha256()) && (msg.token != fc::sha256::hash(msg.time))) {
fc_wlog( logger, "Handshake message validation: token field invalid" );
@@ -3018,7 +3030,7 @@ namespace eosio {
std::unique_lock<std::shared_mutex> g( connections_mtx );
auto it = (from ? connections.find(from) : connections.begin());
if (it == connections.end()) it = connections.begin();
size_t num_rm = 0;
size_t num_rm = 0, num_clients = 0, num_peers = 0;
while (it != connections.end()) {
if (fc::time_point::now() >= max_time) {
connection_wptr wit = *it;
@@ -3029,20 +3041,26 @@ namespace eosio {
}
return;
}
(*it)->peer_address().empty() ? ++num_clients : ++num_peers;
if( !(*it)->socket_is_open() && !(*it)->connecting) {
if( (*it)->peer_address().length() > 0) {
if( !(*it)->peer_address().empty() ) {
if( !(*it)->resolve_and_connect() ) {
it = connections.erase(it);
--num_peers; ++num_rm;
continue;
}
} else {
--num_clients; ++num_rm;
it = connections.erase(it);
continue;
}
}
++it;
}
g.unlock();
if( num_clients > 0 || num_peers > 0 )
fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}",
("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) );
fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) );
if( reschedule ) {
start_conn_timer( connector_period, std::weak_ptr<connection>());
@@ -3268,9 +3286,13 @@ namespace eosio {

if( options.count( "p2p-listen-endpoint" ) && options.at("p2p-listen-endpoint").as<string>().length()) {
my->p2p_address = options.at( "p2p-listen-endpoint" ).as<string>();
EOS_ASSERT( my->p2p_address.length() <= max_p2p_address_length, chain::plugin_config_exception,
"p2p-listen-endpoint to long, must be less than ${m}", ("m", max_p2p_address_length) );
}
if( options.count( "p2p-server-address" ) ) {
my->p2p_server_address = options.at( "p2p-server-address" ).as<string>();
EOS_ASSERT( my->p2p_server_address.length() <= max_p2p_address_length, chain::plugin_config_exception,
"p2p_server_address to long, must be less than ${m}", ("m", max_p2p_address_length) );
}

my->thread_pool_size = options.at( "net-threads" ).as<uint16_t>();
@@ -3282,6 +3304,8 @@
}
if( options.count( "agent-name" )) {
my->user_agent_name = options.at( "agent-name" ).as<string>();
EOS_ASSERT( my->user_agent_name.length() <= max_handshake_str_length, chain::plugin_config_exception,
"agent-name to long, must be less than ${m}", ("m", max_handshake_str_length) );
}

if( options.count( "allowed-connection" )) {
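The net_plugin.cpp changes above cap every peer-supplied handshake string (p2p_address, os, agent) at max_handshake_str_length and log only a truncated preview when a field is oversized, so a malicious handshake can neither be stored unbounded nor flood the logs. A minimal standalone sketch of that check-and-truncate pattern; the function name and plain std::cerr logging are illustrative stand-ins for the plugin's fc_wlog macros:

#include <cstddef>
#include <iostream>
#include <string>

constexpr std::size_t max_handshake_str_length = 384;

// Returns false and logs a bounded preview if a handshake field is oversized.
bool handshake_field_ok( const std::string& field_name, const std::string& value ) {
   if( value.length() <= max_handshake_str_length )
      return true;
   // Never echo the full peer-controlled string; truncate before logging.
   std::cerr << "Handshake message validation: " << field_name << " too large: "
             << value.substr( 0, max_handshake_str_length ) << "...\n";
   return false;
}

int main() {
   bool valid = true;
   valid &= handshake_field_ok( "p2p_address", std::string( 500, 'a' ) );  // rejected
   valid &= handshake_field_ok( "os", "linux" );                           // accepted
   return valid ? 0 : 1;
}
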
7 changes: 7 additions & 0 deletions unittests/block_tests.cpp
@@ -30,6 +30,13 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test)
auto invalid_packed_tx = packed_transaction(signed_tx);
copy_b->transactions.back().trx = invalid_packed_tx;

// Re-calculate the transaction merkle
deque<digest_type> trx_digests;
const auto& trxs = copy_b->transactions;
for( const auto& a : trxs )
trx_digests.emplace_back( a.digest() );
copy_b->transaction_mroot = merkle( move(trx_digests) );

// Re-sign the block
auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) );
auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) );
2 changes: 1 addition & 1 deletion unittests/forked_tests.cpp
@@ -265,7 +265,7 @@ BOOST_AUTO_TEST_CASE( forking ) try {
wlog( "end push c2 blocks to c1" );
wlog( "now push dan's block to c1 but first corrupt it so it is a bad block" );
signed_block bad_block = std::move(*b);
bad_block.transaction_mroot = bad_block.previous;
bad_block.action_mroot = bad_block.previous;
auto bad_block_bs = c.control->create_block_state_future( std::make_shared<signed_block>(std::move(bad_block)) );
c.control->abort_block();
BOOST_REQUIRE_EXCEPTION(c.control->push_block( bad_block_bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception,