diff --git a/contracts/CMakeLists.txt b/contracts/CMakeLists.txt index 4b1e235715a..fa3685a199a 100644 --- a/contracts/CMakeLists.txt +++ b/contracts/CMakeLists.txt @@ -34,6 +34,7 @@ add_subdirectory(noop) add_subdirectory(dice) add_subdirectory(tic_tac_toe) add_subdirectory(payloadless) +add_subdirectory(integration_test) file(GLOB SKELETONS RELATIVE ${CMAKE_SOURCE_DIR}/contracts "skeleton/*") diff --git a/contracts/integration_test/CMakeLists.txt b/contracts/integration_test/CMakeLists.txt new file mode 100644 index 00000000000..6439a566f36 --- /dev/null +++ b/contracts/integration_test/CMakeLists.txt @@ -0,0 +1,8 @@ +file(GLOB ABI_FILES "*.abi") +configure_file("${ABI_FILES}" "${CMAKE_CURRENT_BINARY_DIR}" COPYONLY) + +add_wast_executable(TARGET integration_test + INCLUDE_FOLDERS "${STANDARD_INCLUDE_FOLDERS}" + LIBRARIES libc libc++ eosiolib + DESTINATION_FOLDER ${CMAKE_CURRENT_BINARY_DIR} +) diff --git a/contracts/integration_test/integration_test.abi b/contracts/integration_test/integration_test.abi new file mode 100644 index 00000000000..6f181b255ba --- /dev/null +++ b/contracts/integration_test/integration_test.abi @@ -0,0 +1,41 @@ +{ + "version": "eosio::abi/1.0", + "types": [{ + "new_type_name": "account_name", + "type": "name" + }], + "structs": [{ + "name": "store", + "base": "", + "fields": [ + {"name":"from", "type":"account_name"}, + {"name":"to", "type":"account_name"}, + {"name":"num", "type":"uint64"} + ] + },{ + "name": "payload", + "base": "", + "fields": [ + {"name":"key", "type":"uint64"}, + {"name":"data", "type":"uint64[]"} + ] + } + ], + "actions": [{ + "name": "store", + "type": "store", + "ricardian_contract": "" + } + + ], + "tables": [{ + "name": "payloads", + "type": "payload", + "index_type": "i64", + "key_names" : ["key"], + "key_types" : ["uint64"] + } + ], + "ricardian_clauses": [], + "abi_extensions": [] +} diff --git a/contracts/integration_test/integration_test.cpp b/contracts/integration_test/integration_test.cpp new file mode 
100644 index 00000000000..87a0edeecc1 --- /dev/null +++ b/contracts/integration_test/integration_test.cpp @@ -0,0 +1,36 @@ +#include <eosiolib/eosio.hpp> +using namespace eosio; + +struct integration_test : public eosio::contract { + using contract::contract; + + struct payload { + uint64_t key; + vector<uint64_t> data; + + uint64_t primary_key()const { return key; } + }; + typedef eosio::multi_index<N(payloads), payload> payloads; + + /// @abi action + void store( account_name from, + account_name to, + uint64_t num ) { + require_auth( from ); + eosio_assert( is_account( to ), "to account does not exist"); + payloads data ( _self, from ); + uint64_t key = 0; + const uint64_t num_keys = 5; + while (data.find( key ) != data.end()) { + key += num_keys; + } + for (uint64_t i = 0; i < num_keys; ++i) { + data.emplace(from, [&]( auto& g ) { + g.key = key + i; + g.data = vector<uint64_t>(num, 5); + }); + } + } +}; + +EOSIO_ABI( integration_test, (store) ) diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index 7de4d83ced8..9e1dcd0b073 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -20,7 +20,7 @@ const static auto default_reversible_guard_size = 2*1024*1024ll;/// 1MB * 340 bl const static auto default_state_dir_name = "state"; const static auto forkdb_filename = "forkdb.dat"; const static auto default_state_size = 1*1024*1024*1024ll; -const static auto default_state_guard_size = 128*1024*1024ll; +const static auto default_state_guard_size = 128*1024*1024ll; const static uint64_t system_account_name = N(eosio); diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 2033d84421d..bd94094a783 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -73,6 +73,14 @@ class mongo_db_plugin_impl { void process_irreversible_block(const chain::block_state_ptr&); void _process_irreversible_block(const 
chain::block_state_ptr&); + optional get_abi_serializer( account_name n ); + template fc::variant to_variant_with_abi( const T& obj ); + + void purge_abi_cache(); + + void add_data(bsoncxx::builder::basic::document& act_doc, const chain::action& act); + void update_account(const chain::action& act); + void init(); void wipe_database(); @@ -87,6 +95,7 @@ class mongo_db_plugin_impl { mongocxx::collection accounts; size_t queue_size = 0; + size_t abi_cache_size = 0; std::deque transaction_metadata_queue; std::deque transaction_metadata_process_queue; std::deque transaction_trace_queue; @@ -103,6 +112,24 @@ class mongo_db_plugin_impl { fc::optional chain_id; fc::microseconds abi_serializer_max_time; + struct by_account; + struct by_last_access; + + struct abi_cache { + account_name account; + fc::time_point last_accessed; + fc::optional serializer; + }; + + typedef boost::multi_index_container, member >, + ordered_non_unique< tag, member > + > + > abi_cache_index_t; + + abi_cache_index_t abi_cache_index; + static const account_name newaccount; static const account_name setabi; @@ -292,56 +319,23 @@ void mongo_db_plugin_impl::consume_blocks() { namespace { - auto find_account(mongocxx::collection& accounts, const account_name& name) { - using bsoncxx::builder::basic::make_document; - using bsoncxx::builder::basic::kvp; - return accounts.find_one( make_document( kvp( "name", name.to_string()))); - } - - auto find_transaction(mongocxx::collection& trans, const string& id) { - using bsoncxx::builder::basic::make_document; - using bsoncxx::builder::basic::kvp; - return trans.find_one( make_document( kvp( "trx_id", id ))); - } - - auto find_block(mongocxx::collection& blocks, const string& id) { - using bsoncxx::builder::basic::make_document; - using bsoncxx::builder::basic::kvp; - return blocks.find_one( make_document( kvp( "block_id", id ))); - } +auto find_account( mongocxx::collection& accounts, const account_name& name ) { + using bsoncxx::builder::basic::make_document; 
+ using bsoncxx::builder::basic::kvp; + return accounts.find_one( make_document( kvp( "name", name.to_string()))); +} - optional get_abi_serializer( account_name n, mongocxx::collection& accounts, const fc::microseconds& abi_serializer_max_time ) { - using bsoncxx::builder::basic::kvp; - using bsoncxx::builder::basic::make_document; - if( n.good()) { - try { - auto account = accounts.find_one( make_document( kvp("name", n.to_string())) ); - if(account) { - auto view = account->view(); - abi_def abi; - if( view.find( "abi" ) != view.end()) { - try { - abi = fc::json::from_string( bsoncxx::to_json( view["abi"].get_document())).as(); - } catch (...) { - ilog( "Unable to convert account abi to abi_def for ${n}", ( "n", n )); - return optional(); - } - return abi_serializer( abi, abi_serializer_max_time ); - } - } - } FC_CAPTURE_AND_LOG((n)) - } - return optional(); - } +auto find_transaction( mongocxx::collection& trans, const string& id ) { + using bsoncxx::builder::basic::make_document; + using bsoncxx::builder::basic::kvp; + return trans.find_one( make_document( kvp( "trx_id", id ))); +} - template - fc::variant to_variant_with_abi( const T& obj, mongocxx::collection& accounts, const fc::microseconds& abi_serializer_max_time ) { - fc::variant pretty_output; - abi_serializer::to_variant( obj, pretty_output, - [&]( account_name n ) { return get_abi_serializer( n, accounts, abi_serializer_max_time ); }, - abi_serializer_max_time ); - return pretty_output; - } +auto find_block( mongocxx::collection& blocks, const string& id ) { + using bsoncxx::builder::basic::make_document; + using bsoncxx::builder::basic::kvp; + return blocks.find_one( make_document( kvp( "block_id", id ))); +} void handle_mongo_exception( const std::string& desc, int line_num ) { bool shutdown = true; @@ -385,132 +379,68 @@ void handle_mongo_exception( const std::string& desc, int line_num ) { } } - void update_account(mongocxx::collection& accounts, const chain::action& act) { - using 
bsoncxx::builder::basic::kvp; - using bsoncxx::builder::basic::make_document; - using namespace bsoncxx::types; - - if (act.account != chain::config::system_account_name) - return; - - try { - if( act.name == mongo_db_plugin_impl::newaccount ) { - auto now = std::chrono::duration_cast( - std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); - auto newaccount = act.data_as(); - - // create new account - if( !accounts.insert_one( make_document( kvp( "name", newaccount.name.to_string()), - kvp( "createdAt", b_date{now} )))) { - elog( "Failed to insert account ${n}", ("n", newaccount.name)); - } +} // anonymous namespace - } else if( act.name == mongo_db_plugin_impl::setabi ) { - auto now = std::chrono::duration_cast( - std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); - auto setabi = act.data_as(); - auto from_account = find_account( accounts, setabi.account ); - if( !from_account ) { - if( !accounts.insert_one( make_document( kvp( "name", setabi.account.to_string()), - kvp( "createdAt", b_date{now} )))) { - elog( "Failed to insert account ${n}", ("n", setabi.account)); - } - from_account = find_account( accounts, setabi.account ); - } - if( from_account ) { - const abi_def& abi_def = fc::raw::unpack( setabi.abi ); - const string json_str = fc::json::to_string( abi_def ); +void mongo_db_plugin_impl::purge_abi_cache() { + if( abi_cache_index.size() < abi_cache_size ) return; - try{ - auto update_from = make_document( - kvp( "$set", make_document( kvp( "abi", bsoncxx::from_json( json_str )), - kvp( "updatedAt", b_date{now} )))); - - try { - if( !accounts.update_one( make_document( kvp( "_id", from_account->view()["_id"].get_oid())), - update_from.view())) { - EOS_ASSERT( false, chain::mongo_db_update_fail, "Failed to udpdate account ${n}", ("n", setabi.account)); - } - } catch( ... 
) { - handle_mongo_exception( "account update", __LINE__ ); - } - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert abi JSON to MongoDB JSON: ${e}", ("e", e.what())); - elog( " JSON: ${j}", ("j", json_str)); - } - } - } - } catch( fc::exception& e ) { - // if unable to unpack native type, skip account creation - } + // remove the oldest (smallest) last accessed + auto& idx = abi_cache_index.get(); + auto itr = idx.begin(); + if( itr != idx.end() ) { + idx.erase( itr ); } +} -void add_data( bsoncxx::builder::basic::document& act_doc, mongocxx::collection& accounts, const chain::action& act, const fc::microseconds& abi_serializer_max_time ) { +optional mongo_db_plugin_impl::get_abi_serializer( account_name n ) { using bsoncxx::builder::basic::kvp; using bsoncxx::builder::basic::make_document; - try { - if( act.account == chain::config::system_account_name ) { - if( act.name == mongo_db_plugin_impl::setabi ) { - auto setabi = act.data_as(); - try { - const abi_def& abi_def = fc::raw::unpack( setabi.abi ); - const string json_str = fc::json::to_string( abi_def ); + if( n.good()) { + try { - act_doc.append( - kvp( "data", make_document( kvp( "account", setabi.account.to_string()), - kvp( "abi_def", bsoncxx::from_json( json_str ))))); - return; - } catch( bsoncxx::exception& ) { - // better error handling below - } catch( fc::exception& e ) { - ilog( "Unable to convert action abi_def to json for ${n}", ("n", setabi.account.to_string())); - } - } - } - auto account = find_account( accounts, act.account ); - if( account ) { - auto from_account = *account; - abi_def abi; - if( from_account.view().find( "abi" ) != from_account.view().end()) { - try { - abi = fc::json::from_string( bsoncxx::to_json( from_account.view()["abi"].get_document())).as(); - } catch( ... 
) { - ilog( "Unable to convert account abi to abi_def for ${s}::${n}", ("s", act.account)( "n", act.name )); - } + auto itr = abi_cache_index.find( n ); + if( itr != abi_cache_index.end() ) { + abi_cache_index.modify( itr, []( auto& entry ) { + entry.last_accessed = fc::time_point::now(); + }); + + return itr->serializer; } - string json; - try { - abi_serializer abis; - abis.set_abi( abi, abi_serializer_max_time ); - auto v = abis.binary_to_variant( abis.get_action_type( act.name ), act.data, abi_serializer_max_time ); - json = fc::json::to_string( v ); - const auto& value = bsoncxx::from_json( json ); - act_doc.append( kvp( "data", value )); - return; - } catch( bsoncxx::exception& e ) { - ilog( "Unable to convert EOS JSON to MongoDB JSON: ${e}", ("e", e.what())); - ilog( " EOS JSON: ${j}", ("j", json)); - ilog( " Storing data has hex." ); + auto account = accounts.find_one( make_document( kvp("name", n.to_string())) ); + if(account) { + auto view = account->view(); + abi_def abi; + if( view.find( "abi" ) != view.end()) { + try { + abi = fc::json::from_string( bsoncxx::to_json( view["abi"].get_document())).as(); + } catch (...) { + ilog( "Unable to convert account abi to abi_def for ${n}", ( "n", n )); + return optional(); + } + + purge_abi_cache(); // make room if necessary + abi_cache entry; + entry.account = n; + entry.last_accessed = fc::time_point::now(); + entry.serializer.emplace( abi, abi_serializer_max_time ); + abi_cache_index.insert( entry ); + return entry.serializer; + } } - } - } catch( std::exception& e ) { - ilog( "Unable to convert action.data to ABI: ${s}::${n}, std what: ${e}", - ("s", act.account)( "n", act.name )( "e", e.what())); - } catch (fc::exception& e) { - if (act.name != "onblock") { // eosio::onblock not in original eosio.system abi - ilog( "Unable to convert action.data to ABI: ${s}::${n}, fc exception: ${e}", - ("s", act.account)( "n", act.name )( "e", e.to_detail_string())); - } - } catch( ... 
) { - ilog( "Unable to convert action.data to ABI: ${s}::${n}, unknown exception", - ("s", act.account)( "n", act.name )); + } FC_CAPTURE_AND_LOG((n)) } - // if anything went wrong just store raw hex_data - act_doc.append( kvp( "hex_data", fc::variant( act.data ).as_string())); + return optional(); } -} // anonymous namespace +template +fc::variant mongo_db_plugin_impl::to_variant_with_abi( const T& obj ) { + fc::variant pretty_output; + abi_serializer::to_variant( obj, pretty_output, + [&]( account_name n ) { return get_abi_serializer( n ); }, + abi_serializer_max_time ); + return pretty_output; +} void mongo_db_plugin_impl::process_accepted_transaction( const chain::transaction_metadata_ptr& t ) { try { @@ -616,12 +546,12 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti } )); } try { - update_account( accounts, act ); + update_account( act ); } catch (...) { ilog( "Unable to update account for ${s}::${n}", ("s", act.account)( "n", act.name )); } if( start_block_reached ) { - add_data( act_doc, accounts, act, abi_serializer_max_time ); + add_data( act_doc, act ); act_array.append( act_doc ); mongocxx::model::insert_one insert_op{act_doc.view()}; bulk_actions.append( insert_op ); @@ -776,7 +706,7 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio auto now = std::chrono::duration_cast( std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()}); - auto v = to_variant_with_abi( *t, accounts, abi_serializer_max_time ); + auto v = to_variant_with_abi( *t ); string json = fc::json::to_string( v ); try { const auto& value = bsoncxx::from_json( json ); @@ -861,7 +791,7 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr kvp( "block_id", block_id_str ), kvp( "irreversible", b_bool{false} )); - auto v = to_variant_with_abi( *bs->block, accounts, abi_serializer_max_time ); + auto v = to_variant_with_abi( *bs->block ); json = fc::json::to_string( v ); try { 
const auto& value = bsoncxx::from_json( json ); @@ -966,6 +896,125 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ } } +void mongo_db_plugin_impl::add_data( bsoncxx::builder::basic::document& act_doc, const chain::action& act ) +{ + using bsoncxx::builder::basic::kvp; + using bsoncxx::builder::basic::make_document; + try { + if( act.account == chain::config::system_account_name ) { + if( act.name == mongo_db_plugin_impl::setabi ) { + auto setabi = act.data_as(); + try { + const abi_def& abi_def = fc::raw::unpack( setabi.abi ); + const string json_str = fc::json::to_string( abi_def ); + + act_doc.append( + kvp( "data", make_document( kvp( "account", setabi.account.to_string()), + kvp( "abi_def", bsoncxx::from_json( json_str ))))); + return; + } catch( bsoncxx::exception& ) { + // better error handling below + } catch( fc::exception& e ) { + ilog( "Unable to convert action abi_def to json for ${n}", ("n", setabi.account.to_string())); + } + } + } + auto serializer = get_abi_serializer( act.account ); + if( serializer.valid() ) { + string json; + try { + auto v = serializer->binary_to_variant( serializer->get_action_type( act.name ), act.data, abi_serializer_max_time ); + json = fc::json::to_string( v ); + + const auto& value = bsoncxx::from_json( json ); + act_doc.append( kvp( "data", value )); + return; + } catch( bsoncxx::exception& e ) { + ilog( "Unable to convert EOS JSON to MongoDB JSON: ${e}", ("e", e.what())); + ilog( " EOS JSON: ${j}", ("j", json)); + ilog( " Storing data has hex." 
); + } + } + } catch( std::exception& e ) { + ilog( "Unable to convert action.data to ABI: ${s}::${n}, std what: ${e}", + ("s", act.account)( "n", act.name )( "e", e.what())); + } catch (fc::exception& e) { + if (act.name != "onblock") { // eosio::onblock not in original eosio.system abi + ilog( "Unable to convert action.data to ABI: ${s}::${n}, fc exception: ${e}", + ("s", act.account)( "n", act.name )( "e", e.to_detail_string())); + } + } catch( ... ) { + ilog( "Unable to convert action.data to ABI: ${s}::${n}, unknown exception", + ("s", act.account)( "n", act.name )); + } + // if anything went wrong just store raw hex_data + act_doc.append( kvp( "hex_data", fc::variant( act.data ).as_string())); +} + +void mongo_db_plugin_impl::update_account(const chain::action& act) +{ + using bsoncxx::builder::basic::kvp; + using bsoncxx::builder::basic::make_document; + using namespace bsoncxx::types; + + if (act.account != chain::config::system_account_name) + return; + + try { + if( act.name == mongo_db_plugin_impl::newaccount ) { + auto now = std::chrono::duration_cast( + std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); + auto newaccount = act.data_as(); + + // create new account + if( !accounts.insert_one( make_document( kvp( "name", newaccount.name.to_string()), + kvp( "createdAt", b_date{now} )))) { + elog( "Failed to insert account ${n}", ("n", newaccount.name)); + } + + } else if( act.name == mongo_db_plugin_impl::setabi ) { + auto now = std::chrono::duration_cast( + std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); + auto setabi = act.data_as(); + + abi_cache_index.erase( setabi.account ); + + auto from_account = find_account( accounts, setabi.account ); + if( !from_account ) { + if( !accounts.insert_one( make_document( kvp( "name", setabi.account.to_string()), + kvp( "createdAt", b_date{now} )))) { + elog( "Failed to insert account ${n}", ("n", setabi.account)); + } + from_account = find_account( accounts, 
setabi.account ); + } + if( from_account ) { + const abi_def& abi_def = fc::raw::unpack( setabi.abi ); + const string json_str = fc::json::to_string( abi_def ); + + try{ + auto update_from = make_document( + kvp( "$set", make_document( kvp( "abi", bsoncxx::from_json( json_str )), + kvp( "updatedAt", b_date{now} )))); + + try { + if( !accounts.update_one( make_document( kvp( "_id", from_account->view()["_id"].get_oid())), + update_from.view())) { + EOS_ASSERT( false, chain::mongo_db_update_fail, "Failed to udpdate account ${n}", ("n", setabi.account)); + } + } catch( ... ) { + handle_mongo_exception( "account update", __LINE__ ); + } + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert abi JSON to MongoDB JSON: ${e}", ("e", e.what())); + elog( " JSON: ${j}", ("j", json_str)); + } + } + } + } catch( fc::exception& e ) { + // if unable to unpack native type, skip account creation + } +} + mongo_db_plugin_impl::mongo_db_plugin_impl() : mongo_inst{} , mongo_conn{} @@ -1077,6 +1126,8 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc cfg.add_options() ("mongodb-queue-size,q", bpo::value()->default_value(256), "The target queue size between nodeos and MongoDB plugin thread.") + ("mongodb-abi-cache-size", bpo::value()->default_value(2048), + "The maximum size of the abi cache for serializing data.") ("mongodb-wipe", bpo::bool_switch()->default_value(false), "Required with --replay-blockchain, --hard-replay-blockchain, or --delete-all-blocks to wipe mongo db." 
"This option required to prevent accidental wipe of mongo db.") @@ -1114,6 +1165,10 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) if( options.count( "mongodb-queue-size" )) { my->queue_size = options.at( "mongodb-queue-size" ).as(); } + if( options.count( "mongodb-abi-cache-size" )) { + my->abi_cache_size = options.at( "mongodb-abi-cache-size" ).as(); + EOS_ASSERT( my->abi_cache_size > 0, chain::plugin_config_exception, "mongodb-abi-cache-size > 0 required" ); + } if( options.count( "mongodb-block-start" )) { my->start_block_num = options.at( "mongodb-block-start" ).as(); } diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 7ec0f108489..917fc09bb27 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -1676,23 +1676,31 @@ int main( int argc, char** argv ) { create->require_subcommand(); bool r1 = false; + string key_file; + bool print_console = false; // create key - auto create_key = create->add_subcommand("key", localized("Create a new keypair and print the public and private keys"))->set_callback( [&r1](){ - if( r1 ) { - auto pk = private_key_type::generate_r1(); - auto privs = string(pk); - auto pubs = string(pk.get_public_key()); + auto create_key = create->add_subcommand("key", localized("Create a new keypair and print the public and private keys"))->set_callback( [&r1, &key_file, &print_console](){ + if (key_file.empty() && !print_console) { + std::cerr << "ERROR: Either indicate a file using \"--file\" or pass \"--to-console\"" << std::endl; + return; + } + + auto pk = r1 ? 
private_key_type::generate_r1() : private_key_type::generate(); + auto privs = string(pk); + auto pubs = string(pk.get_public_key()); + if (print_console) { std::cout << localized("Private key: ${key}", ("key", privs) ) << std::endl; std::cout << localized("Public key: ${key}", ("key", pubs ) ) << std::endl; } else { - auto pk = private_key_type::generate(); - auto privs = string(pk); - auto pubs = string(pk.get_public_key()); - std::cout << localized("Private key: ${key}", ("key", privs) ) << std::endl; - std::cout << localized("Public key: ${key}", ("key", pubs ) ) << std::endl; + std::cerr << localized("saving keys to ${filename}", ("filename", key_file)) << std::endl; + std::ofstream out( key_file.c_str() ); + out << localized("Private key: ${key}", ("key", privs) ) << std::endl; + out << localized("Public key: ${key}", ("key", pubs ) ) << std::endl; } }); create_key->add_flag( "--r1", r1, "Generate a key using the R1 curve (iPhone), instead of the K1 curve (Bitcoin)" ); + create_key->add_option("-f,--file", key_file, localized("Name of file to write private/public key output to. (Must be set, unless \"--to-console\" is passed)")); + create_key->add_flag( "--to-console", print_console, localized("Print private/public keys to console.")); // create account auto createAccount = create_account_subcommand( create, true /*simple*/ ); @@ -2251,14 +2259,28 @@ int main( int argc, char** argv ) { wallet->require_subcommand(); // create wallet string wallet_name = "default"; + string password_file; auto createWallet = wallet->add_subcommand("create", localized("Create a new wallet locally"), false); createWallet->add_option("-n,--name", wallet_name, localized("The name of the new wallet"), true); - createWallet->set_callback([&wallet_name] { + createWallet->add_option("-f,--file", password_file, localized("Name of file to write wallet password output to. 
(Must be set, unless \"--to-console\" is passed)")); + createWallet->add_flag( "--to-console", print_console, localized("Print password to console.")); + createWallet->set_callback([&wallet_name, &password_file, &print_console] { + if (password_file.empty() && !print_console) { + std::cerr << "ERROR: Either indicate a file using \"--file\" or pass \"--to-console\"" << std::endl; + return; + } + const auto& v = call(wallet_url, wallet_create, wallet_name); std::cout << localized("Creating wallet: ${wallet_name}", ("wallet_name", wallet_name)) << std::endl; std::cout << localized("Save password to use in the future to unlock this wallet.") << std::endl; std::cout << localized("Without password imported keys will not be retrievable.") << std::endl; - std::cout << fc::json::to_pretty_string(v) << std::endl; + if (print_console) { + std::cout << fc::json::to_pretty_string(v) << std::endl; + } else { + std::cerr << localized("saving password to ${filename}", ("filename", password_file)) << std::endl; + std::ofstream out( password_file.c_str() ); + out << fc::json::to_pretty_string(v); + } }); // open wallet diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index b6664608a53..0fe9652d49b 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1652,8 +1652,8 @@ launcher_def::bounce (const string& node_numbers) { string node_num = node.name.substr( node.name.length() - 2 ); string cmd = "cd " + host.eosio_home + "; " + "export EOSIO_HOME=" + host.eosio_home + string("; ") - + "export EOSIO_TN_NODE=" + node_num + "; " - + "./scripts/eosio-tn_bounce.sh"; + + "export EOSIO_NODE=" + node_num + "; " + + "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; cout << "Bouncing " << node.name << endl; if (!do_ssh(cmd, host.host_name)) { cerr << "Unable to bounce " << node.name << endl; @@ -1671,7 +1671,7 @@ launcher_def::down (const string& node_numbers) { string node_num = node.name.substr( node.name.length() - 2 ); string 
cmd = "cd " + host.eosio_home + "; " + "export EOSIO_HOME=" + host.eosio_home + "; " - + "export EOSIO_TN_NODE=" + node_num + "; " + + "export EOSIO_NODE=" + node_num + "; " + "export EOSIO_TN_RESTART_CONFIG_DIR=" + node.config_dir_name + "; " + "./scripts/eosio-tn_down.sh"; cout << "Taking down " << node.name << endl; diff --git a/scripts/eosio-tn_bounce.sh b/scripts/eosio-tn_bounce.sh index 202ac7b66d1..7062836c92c 100755 --- a/scripts/eosio-tn_bounce.sh +++ b/scripts/eosio-tn_bounce.sh @@ -3,7 +3,7 @@ # eosio-tn_bounce is used to restart a node that is acting badly or is down. # usage: eosio-tn_bounce.sh [arglist] # arglist will be passed to the node's command line. First with no modifiers -# then with --replay and then a third time with --resync +# then with --hard-replay-blockchain and then a third time with --delete-all-blocks # # the data directory and log file are set by this script. Do not pass them on # the command line. diff --git a/scripts/eosio-tn_down.sh b/scripts/eosio-tn_down.sh index ad8ca2106be..e13d1357b0a 100755 --- a/scripts/eosio-tn_down.sh +++ b/scripts/eosio-tn_down.sh @@ -20,7 +20,7 @@ running=`ps -e | grep $runtest | grep -cv grep ` if [ $running -ne 0 ]; then echo killing $prog - pkill -15 $prog + kill -15 $runtest for (( a = 1;11-$a; a = $(($a + 1)) )); do echo waiting for safe termination, pass $a diff --git a/scripts/eosio-tn_roll.sh b/scripts/eosio-tn_roll.sh index fe46d002b57..7c8f665c880 100755 --- a/scripts/eosio-tn_roll.sh +++ b/scripts/eosio-tn_roll.sh @@ -5,7 +5,7 @@ # all instances are restarted. # usage: eosio-tn_roll.sh [arglist] # arglist will be passed to the node's command line. First with no modifiers -# then with --replay and then a third time with --resync +# then with --hard-replay-blockchain and then a third time with --delete-all-blocks # # The data directory and log file are set by this script. Do not pass them on # the command line. 
diff --git a/scripts/eosio-tn_up.sh b/scripts/eosio-tn_up.sh index 451e6ffaf7f..895322a5eee 100755 --- a/scripts/eosio-tn_up.sh +++ b/scripts/eosio-tn_up.sh @@ -66,7 +66,7 @@ fi if [ "$EOSIO_LEVEL" == replay ]; then echo starting with replay - relaunch $* --replay + relaunch $* --hard-replay-blockchain if [ "$connected" -eq 0 ]; then EOSIO_LEVEL=resync else @@ -74,6 +74,6 @@ if [ "$EOSIO_LEVEL" == replay ]; then fi fi if [ "$EOSIO_LEVEL" == resync ]; then - echo starting wih resync - relaunch $* --resync + echo starting with delete-all-blocks + relaunch $* --delete-all-blocks fi diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 762a55d2615..565a6679ee3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -36,6 +36,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRE configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_under_min_avail_ram.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) @@ -68,6 +69,9 @@ set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) + if(ENABLE_COVERAGE_TESTING) diff --git a/tests/Cluster.py b/tests/Cluster.py index b60ce6f4035..9711f5c585e 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -97,7 +97,7 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontKill=False - , dontBootstrap=False, totalProducers=None): + , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -129,11 +129,14 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time 5000 --abi-serializer-max-time-ms 5000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time 50000 --abi-serializer-max-time-ms 50000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: nodeosArgs += " --plugin eosio::mongo_db_plugin --mongodb-wipe --delete-all-blocks --mongodb-uri %s" % self.mongoUri + if extraNodeosArgs is not None: + assert(isinstance(extraNodeosArgs, str)) + nodeosArgs += extraNodeosArgs if Utils.Debug: nodeosArgs += " --contracts-console" @@ -351,7 +354,7 @@ def createAccountKeys(count): p = re.compile('Private key: (.+)\nPublic key: (.+)\n', re.MULTILINE) for _ in range(0, count): try: - cmd="%s create key" % (Utils.EosClientPath) + cmd="%s create key --to-console" 
% (Utils.EosClientPath) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) keyStr=Utils.checkOutput(cmd.split()) m=p.search(keyStr) @@ -362,7 +365,7 @@ def createAccountKeys(count): ownerPrivate=m.group(1) ownerPublic=m.group(2) - cmd="%s create key" % (Utils.EosClientPath) + cmd="%s create key --to-console" % (Utils.EosClientPath) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) keyStr=Utils.checkOutput(cmd.split()) m=p.match(keyStr) @@ -564,11 +567,11 @@ def validateAccounts(self, accounts, testSysAccounts=True): node.validateAccounts(myAccounts) - def createAccountAndVerify(self, account, creator, stakedDeposit=1000): + def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=100): """create account, verify account and return transaction id""" assert(len(self.nodes) > 0) node=self.nodes[0] - trans=node.createInitializeAccount(account, creator, stakedDeposit) + trans=node.createInitializeAccount(account, creator, stakedDeposit, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM) assert(trans) assert(node.verifyAccount(account)) return trans @@ -586,10 +589,10 @@ def createAccountAndVerify(self, account, creator, stakedDeposit=1000): # return transId # return None - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=100): assert(len(self.nodes) > 0) node=self.nodes[0] - trans=node.createInitializeAccount(account, creatorAccount, stakedDeposit, waitForTransBlock) + trans=node.createInitializeAccount(account, creatorAccount, stakedDeposit, waitForTransBlock, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM) return trans @staticmethod diff --git a/tests/Node.py b/tests/Node.py index 2338c1a6293..c923ceb947c 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1,3 +1,4 @@ +import copy import decimal import subprocess import time @@ 
-990,9 +991,26 @@ def myFunc(): self.killed=True return True + def verifyAlive(self): + if Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd)) + if self.killed or self.pid is None: + return False + + try: + os.kill(self.pid, 0) + except ProcessLookupError as ex: + # mark node as killed + self.pid=None + self.killed=True + return False + except PermissionError as ex: + return True + else: + return True + # TBD: make nodeId an internal property # pylint: disable=too-many-locals - def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout): + def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None): assert(self.pid is None) assert(self.killed) @@ -1001,8 +1019,10 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim cmdArr=[] myCmd=self.cmd + toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} if not newChain: skip=False + swapValue=None for i in self.cmd.split(): Utils.Print("\"%s\"" % (i)) if skip: @@ -1012,7 +1032,19 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim skip=True continue - cmdArr.append(i) + if swapValue is None: + cmdArr.append(i) + else: + cmdArr.append(swapValue) + swapValue=None + + if i in toAddOrSwap: + swapValue=toAddOrSwap[i] + del toAddOrSwap[i] + for k,v in toAddOrSwap.items(): + cmdArr.append(k) + cmdArr.append(v) + myCmd=" ".join(cmdArr) dataDir="var/lib/node_%02d" % (nodeId) @@ -1044,5 +1076,6 @@ def isNodeAlive(): self.pid=None return False + self.cmd=cmd self.killed=False return True diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 6a4201ad783..4b1c49bfcfd 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -53,7 +53,7 @@ def create(self, name, accounts=None): if Utils.Debug: Utils.Print("Wallet \"%s\" already exists. Returning same." 
% name) return wallet p = re.compile(r'\n\"(\w+)\"\n', re.MULTILINE) - cmd="%s %s wallet create --name %s" % (Utils.EosClientPath, self.endpointArgs, name) + cmd="%s %s wallet create --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=Utils.checkOutput(cmd.split()) #Utils.Print("create: %s" % (retStr)) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py new file mode 100755 index 00000000000..efe64d3bab6 --- /dev/null +++ b/tests/nodeos_under_min_avail_ram.py @@ -0,0 +1,348 @@ +#!/usr/bin/env python3 + +from core_symbol import CORE_SYMBOL +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from TestHelper import TestHelper +from testUtils import Utils +import testUtils +import time + +import decimal +import math +import re + +class NamedAccounts: + + def __init__(self, cluster, numAccounts): + Print("NamedAccounts %d" % (numAccounts)) + self.numAccounts=numAccounts + self.accounts=cluster.createAccountKeys(numAccounts) + if self.accounts is None: + errorExit("FAILURE - create keys") + accountNum = 0 + for account in self.accounts: + Print("NamedAccounts Name for %d" % (accountNum)) + account.name=self.setName(accountNum) + accountNum+=1 + + def setName(self, num): + retStr="test" + digits=[] + maxDigitVal=5 + maxDigits=8 + temp=num + while len(digits) < maxDigits: + digit=(num % maxDigitVal)+1 + num=int(num/maxDigitVal) + digits.append(digit) + + digits.reverse() + for digit in digits: + retStr=retStr+str(digit) + + Print("NamedAccounts Name for %d is %s" % (temp, retStr)) + return retStr + +############################################################### +# nodeos_voting_test +# --dump-error-details +# --keep-logs +############################################################### +Print=Utils.Print +errorExit=Utils.errorExit + +args = 
TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}) +Utils.Debug=args.v +totalNodes=4 +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +killAll=args.clean_run + +walletMgr=WalletMgr(True) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName="keosd" +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + minRAMFlag="--chain-state-db-guard-size-mb" + minRAMValue=1002 + maxRAMFlag="--chain-state-db-size-mb" + maxRAMValue=1010 + extraNodeosArgs=" %s %d %s %d " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue) + if cluster.launch(onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, extraNodeosArgs=extraNodeosArgs) is False: + Utils.cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + Print("creating accounts") + namedAccounts=NamedAccounts(cluster,10) + accounts=namedAccounts.accounts + + testWalletName="test" + + Print("Creating wallet \"%s\"." % (testWalletName)) + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + if walletMgr.launch() is False: + Utils.cmdError("%s" % (WalletdName)) + errorExit("Failed to stand up eos walletd.") + + testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount]) + if testWallet is None: + Utils.cmdError("eos wallet create") + errorExit("Failed to create wallet %s." % (testWalletName)) + + for _, account in cluster.defProducerAccounts.items(): + walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True) + + Print("Wallet \"%s\" password=%s." 
% (testWalletName, testWallet.password.encode("utf-8"))) + + nodes=[] + nodes.append(cluster.getNode(0)) + if nodes[0] is None: + errorExit("Cluster in bad state, received None node") + nodes.append(cluster.getNode(1)) + if nodes[1] is None: + errorExit("Cluster in bad state, received None node") + nodes.append(cluster.getNode(2)) + if nodes[2] is None: + errorExit("Cluster in bad state, received None node") + nodes.append(cluster.getNode(3)) + if nodes[3] is None: + errorExit("Cluster in bad state, received None node") + + + for account in accounts: + walletMgr.importKey(account, testWallet) + + # create accounts via eosio as otherwise a bid is needed + for account in accounts: + Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) + trans=nodes[0].createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=False, stakeNet=50000, stakeCPU=50000, buyRAM=50000) + if trans is None: + Utils.cmdError("%s create account" % (account.name)) + errorExit("Failed to create account %s" % (account.name)) + transferAmount="70000000.0000 {0}".format(CORE_SYMBOL) + Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) + if nodes[0].transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") is None: + errorExit("Failed to transfer funds %d from account %s to %s" % ( + transferAmount, cluster.eosioAccount.name, account.name)) + trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000) + if trans is None: + Utils.cmdError("delegate bandwidth for %s" % (account.name)) + errorExit("Failed to delegate bandwidth for %s" % (account.name)) + + contractAccount=cluster.createAccountKeys(1)[0] + contractAccount.name="contracttest" + walletMgr.importKey(contractAccount, testWallet) + Print("Create new account %s via %s" % (contractAccount.name, cluster.eosioAccount.name)) + trans=nodes[0].createInitializeAccount(contractAccount, cluster.eosioAccount, 
stakedDeposit=500000, waitForTransBlock=False, stakeNet=50000, stakeCPU=50000, buyRAM=50000) + if trans is None: + Utils.cmdError("%s create account" % (contractAccount.name)) + errorExit("Failed to create account %s" % (contractAccount.name)) + transferAmount="90000000.0000 {0}".format(CORE_SYMBOL) + Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, contractAccount.name)) + if nodes[0].transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer") is None: + errorExit("Failed to transfer funds %d from account %s to %s" % ( + transferAmount, cluster.eosioAccount.name, contractAccount.name)) + trans=nodes[0].delegatebw(contractAccount, 1000000.0000, 88000000.0000) + if trans is None: + Utils.cmdError("delegate bandwidth for %s" % (contractAccount.name)) + errorExit("Failed to delegate bandwidth for %s" % (contractAccount.name)) + + contractDir="contracts/integration_test" + wastFile="contracts/integration_test/integration_test.wast" + abiFile="contracts/integration_test/integration_test.abi" + Print("Publish contract") + trans=nodes[0].publishContract(contractAccount.name, contractDir, wastFile, abiFile, waitForTransBlock=True) + if trans is None: + cmdError("%s set contract %s" % (ClientName, contractAccount.name)) + errorExit("Failed to publish contract.") + + contract=contractAccount.name + Print("push create action to %s contract" % (contract)) + action="store" + numAmount=5000 + keepProcessing=True + count=0 + while keepProcessing: + numAmount+=1 + for fromIndex in range(namedAccounts.numAccounts): + count+=1 + toIndex=fromIndex+1 + if toIndex==namedAccounts.numAccounts: + toIndex=0 + fromAccount=accounts[fromIndex] + toAccount=accounts[toIndex] + data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount) + opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name) + try: + trans=nodes[0].pushMessage(contract, 
action, data, opts) + if trans is None or not trans[0]: + Print("Failed to push create action to eosio contract. sleep for 60 seconds") + time.sleep(60) + time.sleep(1) + except TypeError as ex: + keepProcessing=False + break + + #spread the actions to all accounts, to use each accounts tps bandwidth + fromIndexStart=fromIndex+1 if fromIndex+1 15: + strMsg="little" if count < 15 else "much" + Utils.cmdError("Was able to send %d store actions which was too %s" % (count, strMsg)) + errorExit("Incorrect number of store actions sent") + + # Make sure all the nodes are shutdown (may take a little while for this to happen, so making multiple passes) + allDone=False + count=0 + while not allDone: + allDone=True + for node in nodes: + if node.verifyAlive(): + allDone=False + if not allDone: + time.sleep(5) + if ++count>5: + Utils.cmdError("All Nodes should have died") + errorExit("Failure - All Nodes should have died") + + Print("relaunch nodes with new capacity") + addOrSwapFlags={} + numNodes=len(nodes) + maxRAMValue+=2 + currentMinimumMaxRAM=maxRAMValue + for i in range(numNodes): + addOrSwapFlags[maxRAMFlag]=str(maxRAMValue) + if i==numNodes-1: + addOrSwapFlags["--enable-stale-production"]="" # just enable stale production for the first node + nodeIndex=numNodes-i-1 + if not nodes[nodeIndex].relaunch(nodeIndex, "", newChain=False, addOrSwapFlags=addOrSwapFlags): + Utils.cmdError("Failed to restart node0 with new capacity %s" % (maxRAMValue)) + errorExit("Failure - Node should have restarted") + addOrSwapFlags={} + maxRAMValue=currentMinimumMaxRAM+30 + + time.sleep(10) + for i in range(numNodes): + if not nodes[i].verifyAlive(): + Utils.cmdError("Node %d should be alive" % (i)) + errorExit("Failure - All Nodes should be alive") + + Print("push more actions to %s contract" % (contract)) + action="store" + keepProcessing=True + count=0 + while keepProcessing and count < 40: + Print("Send %s" % (action)) + numAmount+=1 + for fromIndexOffset in 
range(namedAccounts.numAccounts): + count+=1 + fromIndex=fromIndexStart+fromIndexOffset + if fromIndex>=namedAccounts.numAccounts: + fromIndex-=namedAccounts.numAccounts + toIndex=fromIndex+1 + if toIndex==namedAccounts.numAccounts: + toIndex=0 + fromAccount=accounts[fromIndex] + toAccount=accounts[toIndex] + data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount) + opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name) + try: + trans=nodes[0].pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Print("Failed to push create action to eosio contract. sleep for 60 seconds") + time.sleep(60) + time.sleep(1) + except TypeError as ex: + Print("Failed to send %s" % (action)) + + if not nodes[len(nodes)-1].verifyAlive(): + keepProcessing=False + break + + if keepProcessing: + Utils.cmdError("node[%d] never shutdown" % (numNodes-1)) + errorExit("Failure - Node should be shutdown") + + for i in range(numNodes): + # only the last node should be dead + if not nodes[i].verifyAlive() and i=len(nodes): + break + fromIndex=fromIndexStart+fromIndexOffset + if fromIndex>=namedAccounts.numAccounts: + fromIndex-=namedAccounts.numAccounts + toIndex=fromIndex+1 + if toIndex==namedAccounts.numAccounts: + toIndex=0 + fromAccount=accounts[fromIndex] + toAccount=accounts[toIndex] + node=nodes[fromIndexOffset] + data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount) + opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name) + try: + trans=nodes[0].pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Print("Failed to push create action to eosio contract. 
sleep for 60 seconds") + time.sleep(60) + continue + time.sleep(1) + except TypeError as ex: + Utils.cmdError("Failed to send %s action to node %d" % (fromAccount, fromIndexOffset, action)) + errorExit("Failure - send %s action should have succeeded" % (action)) + + time.sleep(10) + Print("Check nodes are alive") + allDone=True + for node in nodes: + if not node.verifyAlive(): + Utils.cmdError("All Nodes should be alive") + errorExit("Failure - All Nodes should be alive") + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) diff --git a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py index f4a03603b11..11aee4e34c1 100755 --- a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py +++ b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py @@ -266,7 +266,7 @@ def msigReplaceSystem(): def produceNewAccounts(): with open('newusers', 'w') as f: for i in range(120_000, 200_000): - x = getOutput(args.cleos + 'create key') + x = getOutput(args.cleos + 'create key --to-console') r = re.match('Private key: *([^ \n]*)\nPublic key: *([^ \n]*)', x, re.DOTALL | re.MULTILINE) name = 'user' for j in range(7, -1, -1):