From a7fc838b09ee05240ba05a1001f914697b910ca1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Feb 2019 17:44:01 -0500 Subject: [PATCH 0001/2426] Consolidated Security Fixes for 1.6.1 - net_plugin security fixes - Additional checktime calls to limit cpu usage - Limit memory usage in producer_plugin Co-Authored-By: Kevin Heifner Co-Authored-By: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Co-authored-by: Kayan --- libraries/chain/controller.cpp | 41 +-- .../chain/include/eosio/chain/controller.hpp | 18 +- .../eosio/chain/wasm_eosio_injection.hpp | 11 +- libraries/chain/wasm_eosio_injection.cpp | 2 +- .../testing/include/eosio/testing/tester.hpp | 11 + libraries/testing/tester.cpp | 21 +- plugins/net_plugin/net_plugin.cpp | 316 ++++++++++++------ plugins/producer_plugin/producer_plugin.cpp | 275 ++++++++------- unittests/api_tests.cpp | 4 +- unittests/delay_tests.cpp | 2 +- 10 files changed, 419 insertions(+), 282 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 98b2065d1e1..2d2af25f0c1 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -143,7 +143,7 @@ struct controller_impl { * are removed from this list if they are re-applied in other blocks. Producers * can query this list when scheduling new transactions into blocks. */ - map unapplied_transactions; + unapplied_transactions_type unapplied_transactions; void pop_block() { auto prev = fork_db.get_block( head->header.previous ); @@ -2106,41 +2106,12 @@ const account_object& controller::get_account( account_name name )const return my->db.get(name); } FC_CAPTURE_AND_RETHROW( (name) ) } -vector controller::get_unapplied_transactions() const { - vector result; - if ( my->read_mode == db_read_mode::SPECULATIVE ) { - result.reserve(my->unapplied_transactions.size()); - for ( const auto& entry: my->unapplied_transactions ) { - result.emplace_back(entry.second); - } - } else { - EOS_ASSERT( my->unapplied_transactions.empty(), transaction_exception, "not empty unapplied_transactions in non-speculative mode" ); //should never happen - } - return result; -} - -void controller::drop_unapplied_transaction(const transaction_metadata_ptr& trx) { - my->unapplied_transactions.erase(trx->signed_id); -} - -void controller::drop_all_unapplied_transactions() { - my->unapplied_transactions.clear(); -} - -vector controller::get_scheduled_transactions() const { - const auto& idx = db().get_index(); - - vector result; - - static const size_t max_reserve = 64; - result.reserve(std::min(idx.size(), max_reserve)); - - auto itr = idx.begin(); - while( itr != idx.end() && itr->delay_until <= pending_block_time() ) { - result.emplace_back(itr->trx_id); - ++itr; +unapplied_transactions_type& controller::get_unapplied_transactions() { + if ( my->read_mode != db_read_mode::SPECULATIVE ) { + EOS_ASSERT( my->unapplied_transactions.empty(), transaction_exception, + "not empty unapplied_transactions in non-speculative mode" ); //should never happen } - return result; + return my->unapplied_transactions; } bool controller::sender_avoids_whitelist_blacklist_enforcement( account_name sender )const { diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f0d5a53f52d..9e6947fdff9 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -33,6 +33,7 @@ namespace eosio { namespace chain { class account_object; using 
resource_limits::resource_limits_manager; using apply_handler = std::function; + using unapplied_transactions_type = map; class fork_database; @@ -111,22 +112,9 @@ namespace eosio { namespace chain { * The caller is responsible for calling drop_unapplied_transaction on a failing transaction that * they never intend to retry * - * @return vector of transactions which have been unapplied + * @return map of transactions which have been unapplied */ - vector get_unapplied_transactions() const; - void drop_unapplied_transaction(const transaction_metadata_ptr& trx); - void drop_all_unapplied_transactions(); - - /** - * These transaction IDs represent transactions available in the head chain state as scheduled - * or otherwise generated transactions. - * - * calling push_scheduled_transaction with these IDs will remove the associated transaction from - * the chain state IFF it succeeds or objectively fails - * - * @return - */ - vector get_scheduled_transactions() const; + unapplied_transactions_type& get_unapplied_transactions(); /** * diff --git a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp index f5ebf01c1f7..a67ca9ef696 100644 --- a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp @@ -272,7 +272,7 @@ namespace eosio { namespace chain { namespace wasm_injections { }; - struct call_depth_check { + struct call_depth_check_and_insert_checktime { static constexpr bool kills = true; static constexpr bool post = false; static int32_t global_idx; @@ -290,6 +290,7 @@ namespace eosio { namespace chain { namespace wasm_injections { injector_utils::add_import(*(arg.module), "call_depth_assert", assert_idx); wasm_ops::op_types<>::call_t call_assert; + wasm_ops::op_types<>::call_t call_checktime; wasm_ops::op_types<>::get_global_t get_global_inst; wasm_ops::op_types<>::set_global_t set_global_inst; @@ -301,6 +302,7 @@ namespace eosio { namespace chain { namespace wasm_injections { wasm_ops::op_types<>::else__t else_inst; call_assert.field = assert_idx; + call_checktime.field = checktime_injection::chktm_idx; get_global_inst.field = global_idx; set_global_inst.field = global_idx; const_inst.field = -1; @@ -334,6 +336,7 @@ namespace eosio { namespace chain { namespace wasm_injections { INSERT_INJECTED(const_inst); INSERT_INJECTED(add_inst); INSERT_INJECTED(set_global_inst); + INSERT_INJECTED(call_checktime); #undef INSERT_INJECTED } @@ -679,8 +682,8 @@ namespace eosio { namespace chain { namespace wasm_injections { }; struct pre_op_injectors : wasm_ops::op_types { - using call_t = wasm_ops::call ; - using call_indirect_t = wasm_ops::call_indirect ; + using call_t = wasm_ops::call ; + using call_indirect_t = wasm_ops::call_indirect ; // float binops using f32_add_t = wasm_ops::f32_add >; @@ -785,7 +788,7 @@ namespace eosio { namespace chain { namespace wasm_injections { // initialize static fields of injectors injector_utils::init( mod ); checktime_injection::init(); - call_depth_check::init(); + call_depth_check_and_insert_checktime::init(); } void inject() { diff --git a/libraries/chain/wasm_eosio_injection.cpp b/libraries/chain/wasm_eosio_injection.cpp index a4afa44d46d..2c627e13ea7 100644 --- a/libraries/chain/wasm_eosio_injection.cpp +++ b/libraries/chain/wasm_eosio_injection.cpp @@ -35,7 +35,7 @@ void max_memory_injection_visitor::inject( Module& m ) { } void max_memory_injection_visitor::initializer() {} -int32_t call_depth_check::global_idx = -1; 
+int32_t call_depth_check_and_insert_checktime::global_idx = -1; uint32_t instruction_counter::icnt = 0; uint32_t instruction_counter::tcnt = 0; uint32_t instruction_counter::bcnt = 0; diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 10e7d4499e2..c9a4591d6c9 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -99,6 +99,17 @@ namespace eosio { namespace testing { void produce_min_num_of_blocks_to_spend_time_wo_inactive_prod(const fc::microseconds target_elapsed_time = fc::microseconds()); signed_block_ptr push_block(signed_block_ptr b); + /** + * These transaction IDs represent transactions available in the head chain state as scheduled + * or otherwise generated transactions. + * + * calling push_scheduled_transaction with these IDs will remove the associated transaction from + * the chain state IFF it succeeds or objectively fails + * + * @return + */ + vector get_scheduled_transactions() const; + transaction_trace_ptr push_transaction( packed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); transaction_trace_ptr push_transaction( signed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); action_result push_action(action&& cert_act, uint64_t authorizer); // TODO/QUESTION: Is this needed? diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index bcf811434d5..a632cb40643 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -163,16 +164,16 @@ namespace eosio { namespace testing { } if( !skip_pending_trxs ) { - auto unapplied_trxs = control->get_unapplied_transactions(); - for (const auto& trx : unapplied_trxs ) { - auto trace = control->push_transaction(trx, fc::time_point::maximum()); + unapplied_transactions_type unapplied_trxs = control->get_unapplied_transactions(); // make copy of map + for (const auto& entry : unapplied_trxs ) { + auto trace = control->push_transaction(entry.second, fc::time_point::maximum()); if(trace->except) { trace->except->dynamic_rethrow_exception(); } } vector scheduled_trxs; - while( (scheduled_trxs = control->get_scheduled_transactions() ).size() > 0 ) { + while( (scheduled_trxs = get_scheduled_transactions() ).size() > 0 ) { for (const auto& trx : scheduled_trxs ) { auto trace = control->push_scheduled_transaction(trx, fc::time_point::maximum()); if(trace->except) { @@ -237,6 +238,18 @@ namespace eosio { namespace testing { } } + vector base_tester::get_scheduled_transactions() const { + const auto& idx = control->db().get_index(); + + vector result; + + auto itr = idx.begin(); + while( itr != idx.end() && itr->delay_until <= control->pending_block_time() ) { + result.emplace_back(itr->trx_id); + ++itr; + } + return result; + } void base_tester::produce_blocks_until_end_of_round() { uint64_t blocks_per_round; diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0c00495c48d..1b398a8b53a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -283,6 +283,10 @@ namespace eosio { */ constexpr auto def_send_buffer_size_mb = 4; constexpr auto def_send_buffer_size = 1024*1024*def_send_buffer_size_mb; + constexpr auto def_max_write_queue_size = def_send_buffer_size*10; + constexpr 
boost::asio::chrono::milliseconds def_read_delay_for_full_write_queue{100}; + constexpr auto def_max_reads_in_flight = 1000; + constexpr auto def_max_trx_in_progress_size = 100*1024*1024; // 100 MB constexpr auto def_max_clients = 25; // 0 for unlimited clients constexpr auto def_max_nodes_per_host = 1; constexpr auto def_conn_retry_wait = 30; @@ -400,6 +404,86 @@ namespace eosio { static void populate(handshake_message &hello); }; + class queued_buffer : boost::noncopyable { + public: + void clear_write_queue() { + _write_queue.clear(); + _sync_write_queue.clear(); + _write_queue_size = 0; + } + + void clear_out_queue() { + while ( _out_queue.size() > 0 ) { + _out_queue.pop_front(); + } + } + + uint32_t write_queue_size() const { return _write_queue_size; } + + bool is_out_queue_empty() const { return _out_queue.empty(); } + + bool ready_to_send() const { + // if out_queue is not empty then async_write is in progress + return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); + } + + bool add_write_queue( const std::shared_ptr>& buff, + std::function callback, + bool to_sync_queue ) { + if( to_sync_queue ) { + _sync_write_queue.push_back( {buff, callback} ); + } else { + _write_queue.push_back( {buff, callback} ); + } + _write_queue_size += buff->size(); + if( _write_queue_size > 2 * def_max_write_queue_size ) { + return false; + } + return true; + } + + void fill_out_buffer( std::vector& bufs ) { + if( _sync_write_queue.size() > 0 ) { // always send msgs from sync_write_queue first + fill_out_buffer( bufs, _sync_write_queue ); + } else { // postpone real_time write_queue if sync queue is not empty + fill_out_buffer( bufs, _write_queue ); + EOS_ASSERT( _write_queue_size == 0, plugin_exception, "write queue size expected to be zero" ); + } + } + + void out_callback( boost::system::error_code ec, std::size_t w ) { + for( auto& m : _out_queue ) { + m.callback( ec, w ); + } + } + + private: + struct queued_write; + void fill_out_buffer( std::vector& bufs, + deque& w_queue ) { + while ( w_queue.size() > 0 ) { + auto& m = w_queue.front(); + bufs.push_back( boost::asio::buffer( *m.buff )); + _write_queue_size -= m.buff->size(); + _out_queue.emplace_back( m ); + w_queue.pop_front(); + } + } + + private: + struct queued_write { + std::shared_ptr> buff; + std::function callback; + }; + + uint32_t _write_queue_size = 0; + deque _write_queue; + deque _sync_write_queue; // sync_write_queue will be sent first + deque _out_queue; + + }; // queued_buffer + + class connection : public std::enable_shared_from_this { public: explicit connection( string endpoint ); @@ -416,12 +500,11 @@ namespace eosio { fc::message_buffer<1024*1024> pending_message_buffer; fc::optional outstanding_read_bytes; - struct queued_write { - std::shared_ptr> buff; - std::function callback; - }; - deque write_queue; - deque out_queue; + + queued_buffer buffer_queue; + + uint32_t reads_in_flight = 0; + uint32_t trx_in_progress_size = 0; fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -431,6 +514,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; + unique_ptr read_delay_timer; optional pending_fetch; go_away_reason no_retry = no_reason; block_id_type fork_head; @@ -497,12 +581,14 @@ namespace eosio { void txn_send(const vector& txn_lis); void blk_send_branch(); - void blk_send(const vector &txn_lis); + void blk_send(const block_id_type& blkid); void stop_send(); void enqueue( const net_message &msg, bool 
trigger_send = true ); - void enqueue_block( const signed_block_ptr& sb, bool trigger_send = true ); - void enqueue_buffer( const std::shared_ptr>& send_buffer, bool trigger_send, go_away_reason close_after_send ); + void enqueue_block( const signed_block_ptr& sb, bool trigger_send = true, bool to_sync_queue = false ); + void enqueue_buffer( const std::shared_ptr>& send_buffer, + bool trigger_send, go_away_reason close_after_send, + bool to_sync_queue = false); void cancel_sync(go_away_reason); void flush_queues(); bool enqueue_sync_block(); @@ -516,7 +602,8 @@ namespace eosio { void queue_write(const std::shared_ptr>& buff, bool trigger_send, - std::function callback); + std::function callback, + bool to_sync_queue = false); void do_queue_write(); /** \brief Process the next message from the pending message buffer @@ -530,7 +617,7 @@ namespace eosio { */ bool process_next_message(net_plugin_impl& impl, uint32_t message_length); - bool add_peer_block(const peer_block_state &pbs); + bool add_peer_block(const peer_block_state& pbs); fc::optional _logger_variant; const fc::variant_object& get_logger_variant() { @@ -659,6 +746,7 @@ namespace eosio { protocol_version(0), peer_addr(endpoint), response_expected(), + read_delay_timer(), pending_fetch(), no_retry(no_reason), fork_head(), @@ -683,6 +771,7 @@ namespace eosio { protocol_version(0), peer_addr(), response_expected(), + read_delay_timer(), pending_fetch(), no_retry(no_reason), fork_head(), @@ -699,6 +788,7 @@ namespace eosio { auto *rnd = node_id.data(); rnd[0] = 0; response_expected.reset(new boost::asio::steady_timer(app().get_io_service())); + read_delay_timer.reset(new boost::asio::steady_timer(app().get_io_service())); } bool connection::connected() { @@ -716,7 +806,7 @@ namespace eosio { } void connection::flush_queues() { - write_queue.clear(); + buffer_queue.clear_write_queue(); } void connection::close() { @@ -739,6 +829,7 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); + if( read_delay_timer ) read_delay_timer->cancel(); pending_message_buffer.reset(); } @@ -803,75 +894,43 @@ namespace eosio { catch (...) { } - vector bstack; - block_id_type null_id; - for (auto bid = head_id; bid != null_id && bid != lib_id; ) { - try { - - // if the last handshake received indicates that we are catching up on a fork - // that the peer is already partially aware of, no need to resend blocks - if (remote_head_id == bid) { - break; - } - - signed_block_ptr b = cc.fetch_block_by_id(bid); - if ( b ) { - bid = b->previous; - bstack.push_back(b); - } - else { - break; - } - } catch (...) 
{ - break; - } - } - size_t count = 0; - if (!bstack.empty()) { - if (bstack.back()->previous == lib_id || bstack.back()->previous == remote_head_id) { - count = bstack.size(); - while (bstack.size()) { - enqueue_block( bstack.back() ); - bstack.pop_back(); - } - } - fc_ilog(logger, "Sent ${n} blocks on my fork",("n",count)); + if( !peer_requested ) { + peer_requested = sync_state( block_header::num_from_id(lib_id)+1, + block_header::num_from_id(head_id), + block_header::num_from_id(lib_id) ); } else { - fc_ilog(logger, "Nothing to send on fork request"); + uint32_t start = std::min( peer_requested->last + 1, block_header::num_from_id(lib_id)+1 ); + uint32_t end = std::max( peer_requested->end_block, block_header::num_from_id(head_id) ); + peer_requested = sync_state( start, end, start - 1 ); } + enqueue_sync_block(); + // still want to send transactions along during blk branch sync syncing = false; } - void connection::blk_send(const vector &ids) { + void connection::blk_send(const block_id_type& blkid) { controller &cc = my_impl->chain_plug->chain(); - int count = 0; - for(auto &blkid : ids) { - ++count; - try { - signed_block_ptr b = cc.fetch_block_by_id(blkid); - if(b) { - fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); - enqueue_block( b ); - } - else { - ilog("fetch block by id returned null, id ${id} on block ${c} of ${s} for ${p}", - ("id",blkid)("c",count)("s",ids.size())("p",peer_name())); - break; - } - } - catch (const assert_exception &ex) { - elog( "caught assert on fetch_block_by_id, ${ex}, id ${id} on block ${c} of ${s} for ${p}", - ("ex",ex.to_string())("id",blkid)("c",count)("s",ids.size())("p",peer_name())); - break; - } - catch (...) { - elog( "caught othser exception fetching block id ${id} on block ${c} of ${s} for ${p}", - ("id",blkid)("c",count)("s",ids.size())("p",peer_name())); - break; + try { + signed_block_ptr b = cc.fetch_block_by_id(blkid); + if(b) { + fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); + peer_block_state pbstate = {blkid, block_header::num_from_id(blkid), true, true, time_point()}; + add_peer_block(pbstate); + enqueue_block( b ); + } else { + ilog("fetch block by id returned null, id ${id} for ${p}", + ("id",blkid)("p",peer_name())); } } - + catch (const assert_exception &ex) { + elog( "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", + ("ex",ex.to_string())("id",blkid)("p",peer_name())); + } + catch (...) 
{ + elog( "caught other exception fetching block id ${id} for ${p}", + ("id",blkid)("p",peer_name())); + } } void connection::stop_send() { @@ -905,14 +964,21 @@ namespace eosio { void connection::queue_write(const std::shared_ptr>& buff, bool trigger_send, - std::function callback) { - write_queue.push_back({buff, callback}); - if(out_queue.empty() && trigger_send) + std::function callback, + bool to_sync_queue) { + if( !buffer_queue.add_write_queue( buff, callback, to_sync_queue )) { + fc_wlog( logger, "write_queue full ${s} bytes, giving up on connection ${p}", + ("s", buffer_queue.write_queue_size())("p", peer_name()) ); + my_impl->close( shared_from_this() ); + return; + } + if( buffer_queue.is_out_queue_empty() && trigger_send) { do_queue_write(); + } } void connection::do_queue_write() { - if(write_queue.empty() || !out_queue.empty()) + if( !buffer_queue.ready_to_send() ) return; connection_wptr c(shared_from_this()); if(!socket->is_open()) { @@ -921,21 +987,14 @@ namespace eosio { return; } std::vector bufs; - while (write_queue.size() > 0) { - auto& m = write_queue.front(); - bufs.push_back(boost::asio::buffer(*m.buff)); - out_queue.push_back(m); - write_queue.pop_front(); - } + buffer_queue.fill_out_buffer( bufs ); boost::asio::async_write(*socket, bufs, [c](boost::system::error_code ec, std::size_t w) { try { auto conn = c.lock(); if(!conn) return; - for (auto& m: conn->out_queue) { - m.callback(ec, w); - } + conn->buffer_queue.out_callback( ec, w ); if(ec) { string pname = conn ? conn->peer_name() : "no connection name"; @@ -948,9 +1007,7 @@ namespace eosio { my_impl->close(conn); return; } - while (conn->out_queue.size() > 0) { - conn->out_queue.pop_front(); - } + conn->buffer_queue.clear_out_queue(); conn->enqueue_sync_block(); conn->do_queue_write(); } @@ -973,8 +1030,8 @@ namespace eosio { } void connection::cancel_sync(go_away_reason reason) { - fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} peer ${p}", - ("m",reason_str(reason)) ("o", write_queue.size())("p", peer_name())); + fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", + ("m",reason_str(reason)) ("o", buffer_queue.write_queue_size())("p", peer_name())); cancel_wait(); flush_queues(); switch (reason) { @@ -1002,7 +1059,7 @@ namespace eosio { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number(num); if(sb) { - enqueue_block( sb, trigger_send); + enqueue_block( sb, trigger_send, true); return true; } } catch ( ... 
) { @@ -1031,7 +1088,7 @@ namespace eosio { enqueue_buffer( send_buffer, trigger_send, close_after_send ); } - void connection::enqueue_block( const signed_block_ptr& sb, bool trigger_send ) { + void connection::enqueue_block( const signed_block_ptr& sb, bool trigger_send, bool to_sync_queue ) { // this implementation is to avoid copy of signed_block to net_message int which = 7; // matches which of net_message for signed_block @@ -1048,10 +1105,13 @@ namespace eosio { fc::raw::pack( ds, unsigned_int( which )); fc::raw::pack( ds, *sb ); - enqueue_buffer( send_buffer, trigger_send, no_reason ); + enqueue_buffer( send_buffer, trigger_send, no_reason, to_sync_queue ); } - void connection::enqueue_buffer( const std::shared_ptr>& send_buffer, bool trigger_send, go_away_reason close_after_send ) { + void connection::enqueue_buffer( const std::shared_ptr>& send_buffer, bool trigger_send, + go_away_reason close_after_send, + bool to_sync_queue ) + { connection_wptr weak_this = shared_from_this(); queue_write(send_buffer,trigger_send, [weak_this, close_after_send](boost::system::error_code ec, std::size_t ) { @@ -1065,7 +1125,8 @@ namespace eosio { } else { fc_wlog(logger, "connection expired before enqueued net_message called callback!"); } - }); + }, + to_sync_queue); } void connection::cancel_wait() { @@ -1165,7 +1226,7 @@ namespace eosio { return true; } - bool connection::add_peer_block(const peer_block_state &entry) { + bool connection::add_peer_block(const peer_block_state& entry) { auto bptr = blk_state.get().find(entry.id); bool added = (bptr == blk_state.end()); if (added){ @@ -1476,11 +1537,15 @@ namespace eosio { void sync_manager::recv_notice(const connection_ptr& c, const notice_message& msg) { fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); + if( msg.known_blocks.ids.size() > 1 ) { + elog( "Invalid notice_message, known_blocks.ids.size ${s}", ("s", msg.known_blocks.ids.size()) ); + my_impl->close(c); + return; + } if (msg.known_blocks.mode == catch_up) { if (msg.known_blocks.ids.size() == 0) { elog("got a catch up with ids size = 0"); - } - else { + } else { verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back()); } } @@ -1685,7 +1750,9 @@ namespace eosio { if (msg.known_blocks.mode == normal) { req.req_blocks.mode = normal; controller& cc = my_impl->chain_plug->chain(); - for( const auto& blkid : msg.known_blocks.ids) { + // known_blocks.ids is never > 1 + if( !msg.known_blocks.ids.empty() ) { + const block_id_type& blkid = msg.known_blocks.ids.back(); signed_block_ptr b; peer_block_state entry = {blkid,0,true,true,fc::time_point()}; try { @@ -1952,6 +2019,37 @@ namespace eosio { } }; + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || + conn->reads_in_flight > def_max_reads_in_flight || + conn->trx_in_progress_size > def_max_trx_in_progress_size ) + { + // too much queued up, reschedule + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { + peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); + } else if( conn->reads_in_flight > def_max_reads_in_flight ) { + peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight) ); + } else { + peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); + } + if( conn->buffer_queue.write_queue_size() > 2*def_max_write_queue_size || + conn->reads_in_flight > 2*def_max_reads_in_flight || + conn->trx_in_progress_size > 2*def_max_trx_in_progress_size ) + { + 
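            // the peer is more than twice over one of the three limits (write-queue
            // bytes, reads in flight, or unpacked-transaction bytes); delaying reads
            // is no longer enough, so drop the connection to bound per-peer memory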
fc_wlog( logger, "queues over full, giving up on connection ${p}", ("p", conn->peer_name()) ); + my_impl->close( conn ); + return; + } + if( !conn->read_delay_timer ) return; + conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); + conn->read_delay_timer->async_wait([this, weak_conn]( boost::system::error_code ) { + auto conn = weak_conn.lock(); + if( !conn ) return; + start_read_message( conn ); + } ); + return; + } + + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { @@ -1960,6 +2058,7 @@ namespace eosio { return; } + --conn->reads_in_flight; conn->outstanding_read_bytes.reset(); try { @@ -2308,6 +2407,12 @@ namespace eosio { } void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { + if( msg.req_blocks.ids.size() > 1 ) { + elog( "Invalid request_message, req_blocks.ids.size ${s}", ("s", msg.req_blocks.ids.size()) ); + close(c); + return; + } + switch (msg.req_blocks.mode) { case catch_up : peer_ilog(c, "received request_message:catch_up"); @@ -2315,7 +2420,9 @@ namespace eosio { break; case normal : peer_ilog(c, "received request_message:normal"); - c->blk_send(msg.req_blocks.ids); + if( !msg.req_blocks.ids.empty() ) { + c->blk_send(msg.req_blocks.ids.back()); + } break; default:; } @@ -2347,6 +2454,13 @@ namespace eosio { } } + size_t calc_trx_size( const packed_transaction_ptr& trx ) { + // transaction is stored packed and unpacked, double packed_size and size of signed as an approximation of use + return (trx->get_packed_transaction().size() * 2 + sizeof(trx->get_signed_transaction())) * 2 + + trx->get_packed_context_free_data().size() * 4 + + trx->get_signatures().size() * sizeof(signature_type); + } + void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); @@ -2369,7 +2483,9 @@ namespace eosio { return; } dispatcher->recv_transaction(c, tid); + c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { + c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); if (result.contains()) { peer_dlog(c, "bad packed_transaction : ${m}", ("m",result.get()->what())); } else { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 71583aee0c5..947ef48f46a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -1143,6 +1144,10 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { int orig_count = _persistent_transactions.size(); while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) { + if (preprocess_deadline <= fc::time_point::now()) { + exhausted = true; + break; + } auto const& txid = persisted_by_expiry.begin()->trx_id; if (_pending_block_mode == pending_block_mode::producing) { fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", @@ -1158,9 +1163,15 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { num_expired_persistent++; } - fc_dlog(_log, 
"Processed ${n} persisted transactions, Expired ${expired}", - ("n", orig_count) - ("expired", num_expired_persistent)); + if( exhausted ) { + fc_wlog( _log, "Unable to process all ${n} persisted transactions before deadline, Expired ${expired}", + ( "n", orig_count ) + ( "expired", num_expired_persistent ) ); + } else { + fc_dlog( _log, "Processed ${n} persisted transactions, Expired ${expired}", + ( "n", orig_count ) + ( "expired", num_expired_persistent ) ); + } } try { @@ -1171,13 +1182,15 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (_producers.empty() && persisted_by_id.empty()) { // if this node can never produce and has no persisted transactions, // there is no need for unapplied transactions they can be dropped - chain.drop_all_unapplied_transactions(); + chain.get_unapplied_transactions().clear(); } else { - std::vector apply_trxs; - { // derive appliable transactions from unapplied_transactions and drop droppable transactions - auto unapplied_trxs = chain.get_unapplied_transactions(); - apply_trxs.reserve(unapplied_trxs.size()); - + // derive appliable transactions from unapplied_transactions and drop droppable transactions + unapplied_transactions_type& unapplied_trxs = chain.get_unapplied_transactions(); + if( !unapplied_trxs.empty() ) { + auto unapplied_trxs_size = unapplied_trxs.size(); + int num_applied = 0; + int num_failed = 0; + int num_processed = 0; auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) { if (trx->packed_trx->expiration() < pbs->header.timestamp.to_time_point()) { return tx_category::EXPIRED; @@ -1188,64 +1201,65 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } }; - for (auto& trx: unapplied_trxs) { + auto itr = unapplied_trxs.begin(); + while( itr != unapplied_trxs.end() ) { + auto itr_next = itr; // save off next since itr may be invalidated by loop + ++itr_next; + + if( preprocess_deadline <= fc::time_point::now() ) exhausted = true; + if( exhausted ) break; + const auto& trx = itr->second; auto category = calculate_transaction_category(trx); - if (category == tx_category::EXPIRED || (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) { + if (category == tx_category::EXPIRED || + (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) + { if (!_producers.empty()) { fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}", ("txid", trx->id)); } - chain.drop_unapplied_transaction(trx); - } else if (category == tx_category::PERSISTED || (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) { - apply_trxs.emplace_back(std::move(trx)); - } - } - } - - if (!apply_trxs.empty()) { - int num_applied = 0; - int num_failed = 0; - int num_processed = 0; - - for (const auto& trx: apply_trxs) { - if (preprocess_deadline <= fc::time_point::now()) exhausted = true; - if (exhausted) { - break; - } - - num_processed++; - - try { - auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); - bool deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && preprocess_deadline < deadline)) { - deadline_is_subjective = true; - deadline = preprocess_deadline; - } + itr = unapplied_trxs.erase( itr ); // unapplied_trxs map has not been modified, so simply erase and continue + continue; + } else if (category == 
tx_category::PERSISTED || + (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) + { + ++num_processed; + + try { + auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); + bool deadline_is_subjective = false; + if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && preprocess_deadline < deadline)) { + deadline_is_subjective = true; + deadline = preprocess_deadline; + } - auto trace = chain.push_transaction(trx, deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; + auto trace = chain.push_transaction(trx, deadline); + if (trace->except) { + if (failure_is_subjective(*trace->except, deadline_is_subjective)) { + exhausted = true; + break; + } else { + // this failed our configured maximum transaction time, we don't want to replay it + // chain.plus_transactions can modify unapplied_trxs, so erase by id + unapplied_trxs.erase( trx->signed_id ); + ++num_failed; + } } else { - // this failed our configured maximum transaction time, we don't want to replay it - chain.drop_unapplied_transaction(trx); - num_failed++; + ++num_applied; } - } else { - num_applied++; - } - } catch ( const guard_exception& e ) { - chain_plug->handle_guard_exception(e); - return start_block_result::failed; - } FC_LOG_AND_DROP(); + } catch ( const guard_exception& e ) { + chain_plug->handle_guard_exception(e); + return start_block_result::failed; + } FC_LOG_AND_DROP(); + } + + itr = itr_next; } fc_dlog(_log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("m", num_processed) - ("n", apply_trxs.size()) - ("applied", num_applied) - ("failed", num_failed)); + ("m", num_processed) + ("n", unapplied_trxs_size) + ("applied", num_applied) + ("failed", num_failed)); } } @@ -1258,6 +1272,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { int orig_count = _blacklisted_transactions.size(); while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) { + if (preprocess_deadline <= fc::time_point::now()) break; blacklist_by_expiry.erase(blacklist_by_expiry.begin()); num_expired++; } @@ -1267,85 +1282,105 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { ("expired", num_expired)); } - auto scheduled_trxs = chain.get_scheduled_transactions(); - if (!scheduled_trxs.empty()) { - int num_applied = 0; - int num_failed = 0; - int num_processed = 0; + // scheduled transactions + int num_applied = 0; + int num_failed = 0; + int num_processed = 0; + + auto scheduled_trx_deadline = preprocess_deadline; + if (_max_scheduled_transaction_time_per_block_ms >= 0) { + scheduled_trx_deadline = std::min( + scheduled_trx_deadline, + fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms) + ); + } + time_point pending_block_time = chain.pending_block_time(); + const auto& sch_idx = chain.db().get_index(); + const auto scheduled_trxs_size = sch_idx.size(); + auto sch_itr = sch_idx.begin(); + while( sch_itr != sch_idx.end() ) { + if( sch_itr->delay_until > pending_block_time) break; // not scheduled yet + if( sch_itr->published >= pending_block_time ) { + ++sch_itr; + continue; // do not allow schedule and execute in same block + } + if( scheduled_trx_deadline <= fc::time_point::now() ) { + exhausted = true; + break; + } - auto scheduled_trx_deadline = preprocess_deadline; - if 
(_max_scheduled_transaction_time_per_block_ms >= 0) { - scheduled_trx_deadline = std::min( - scheduled_trx_deadline, - fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms) - ); + const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated + if (blacklist_by_id.find(trx_id) != blacklist_by_id.end()) { + ++sch_itr; + continue; } - for (const auto& trx : scheduled_trxs) { - if (scheduled_trx_deadline <= fc::time_point::now()) exhausted = true; - if (exhausted) { - break; - } + auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop + ++sch_itr_next; + const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until; + const auto next_id = sch_itr_next != sch_idx.end() ? sch_itr_next->id : sch_itr->id; - num_processed++; + num_processed++; - // configurable ratio of incoming txns vs deferred txns - while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) { - if (scheduled_trx_deadline <= fc::time_point::now()) break; + // configurable ratio of incoming txns vs deferred txns + while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) { + if (scheduled_trx_deadline <= fc::time_point::now()) break; - auto e = _pending_incoming_transactions.front(); - _pending_incoming_transactions.pop_front(); - --orig_pending_txn_size; - _incoming_trx_weight -= 1.0; - process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); - } + auto e = _pending_incoming_transactions.front(); + _pending_incoming_transactions.pop_front(); + --orig_pending_txn_size; + _incoming_trx_weight -= 1.0; + process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); + } - if (scheduled_trx_deadline <= fc::time_point::now()) { - exhausted = true; - break; - } + if (scheduled_trx_deadline <= fc::time_point::now()) { + exhausted = true; + break; + } - if (blacklist_by_id.find(trx) != blacklist_by_id.end()) { - continue; + try { + auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); + bool deadline_is_subjective = false; + if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && scheduled_trx_deadline < deadline)) { + deadline_is_subjective = true; + deadline = scheduled_trx_deadline; } - try { - auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); - bool deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && scheduled_trx_deadline < deadline)) { - deadline_is_subjective = true; - deadline = scheduled_trx_deadline; - } - - auto trace = chain.push_scheduled_transaction(trx, deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; - } else { - auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window); - // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist - _blacklisted_transactions.insert(transaction_id_with_expiry{trx, expiration}); - num_failed++; - } + auto trace = chain.push_scheduled_transaction(trx_id, deadline); + if (trace->except) { + if (failure_is_subjective(*trace->except, deadline_is_subjective)) { + exhausted = true; + break; } else { - num_applied++; + auto expiration = 
fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window); + // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist + _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, expiration}); + num_failed++; } - } catch ( const guard_exception& e ) { - chain_plug->handle_guard_exception(e); - return start_block_result::failed; - } FC_LOG_AND_DROP(); + } else { + num_applied++; + } + } catch ( const guard_exception& e ) { + chain_plug->handle_guard_exception(e); + return start_block_result::failed; + } FC_LOG_AND_DROP(); - _incoming_trx_weight += _incoming_defer_ratio; - if (!orig_pending_txn_size) _incoming_trx_weight = 0.0; - } + _incoming_trx_weight += _incoming_defer_ratio; + if (!orig_pending_txn_size) _incoming_trx_weight = 0.0; - fc_dlog(_log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("m", num_processed) - ("n", scheduled_trxs.size()) - ("applied", num_applied) - ("failed", num_failed)); + if( sch_itr_next == sch_idx.end() ) break; + sch_itr = sch_idx.lower_bound( boost::make_tuple( next_delay_until, next_id ) ); + } + if( scheduled_trxs_size > 0 ) { + fc_dlog( _log, + "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + ( "m", num_processed ) + ( "n", scheduled_trxs_size ) + ( "applied", num_applied ) + ( "failed", num_failed ) ); } + } if (exhausted || preprocess_deadline <= fc::time_point::now()) { @@ -1357,11 +1392,11 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (!_pending_incoming_transactions.empty()) { fc_dlog(_log, "Processing ${n} pending transactions"); while (orig_pending_txn_size && _pending_incoming_transactions.size()) { + if (preprocess_deadline <= fc::time_point::now()) return start_block_result::exhausted; auto e = _pending_incoming_transactions.front(); _pending_incoming_transactions.pop_front(); --orig_pending_txn_size; process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); - if (preprocess_deadline <= fc::time_point::now()) return start_block_result::exhausted; } } return start_block_result::succeeded; diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 89c94d6f393..955130130fa 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1089,7 +1089,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { produce_blocks( 3 ); //check that only one deferred transaction executed - auto dtrxs = control->get_scheduled_transactions(); + auto dtrxs = get_scheduled_transactions(); BOOST_CHECK_EQUAL(dtrxs.size(), 1); for (const auto& trx: dtrxs) { control->push_scheduled_transaction(trx, fc::time_point::maximum()); @@ -1114,7 +1114,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { produce_blocks( 3 ); //check that only one deferred transaction executed - auto dtrxs = control->get_scheduled_transactions(); + auto dtrxs = get_scheduled_transactions(); BOOST_CHECK_EQUAL(dtrxs.size(), 1); for (const auto& trx: dtrxs) { control->push_scheduled_transaction(trx, fc::time_point::maximum()); diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index 9f14de4107c..ae1a3d114b5 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -72,7 +72,7 @@ BOOST_FIXTURE_TEST_CASE( delay_error_create_account, validating_tester) { try { produce_blocks(6); - auto scheduled_trxs = control->get_scheduled_transactions(); + 
auto scheduled_trxs = get_scheduled_transactions(); BOOST_REQUIRE_EQUAL(scheduled_trxs.size(), 1); auto dtrace = control->push_scheduled_transaction(scheduled_trxs.front(), fc::time_point::maximum()); BOOST_REQUIRE_EQUAL(dtrace->except.valid(), true); From 3dd25b47ae8e7414d27b16e32105930ac29b7098 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 Feb 2019 18:06:39 -0500 Subject: [PATCH 0002/2426] Bump version to 1.6.1 --- CMakeLists.txt | 2 +- Docker/README.md | 44 +++----------------------------------------- README.md | 16 ++++++++-------- 3 files changed, 12 insertions(+), 50 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index fc57979b0e9..c2bd0d7e57f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,7 +35,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 6) -set(VERSION_PATCH 0) +set(VERSION_PATCH 1) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index 834f44ac39e..201bee96431 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the 1.6.0 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the 1.6.1 tag, you could do the following: ```bash -docker build -t eosio/eos:1.6.0 --build-arg branch=1.6.0 . +docker build -t eosio/eos:v1.6.1 --build-arg branch=1.6.1 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. @@ -133,45 +133,7 @@ docker volume rm keosd-data-volume ### Docker Hub -Docker Hub image available from [docker hub](https://hub.docker.com/r/eosio/eos/). -Create a new `docker-compose.yaml` file with the content below - -```bash -version: "3" - -services: - nodeosd: - image: eosio/eos:latest - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - - keosd: - image: eosio/eos:latest - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=localhost:8900 --http-alias=keosd:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - -volumes: - nodeos-data-volume: - keosd-data-volume: - -``` - -*NOTE:* the default version is the latest, you can change it to what you want - -run `docker pull eosio/eos:latest` - -run `docker-compose up` +Docker Hub images are now deprecated. New build images were discontinued on January 1st, 2019. The existing old images will be removed on June 1st, 2019. 
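For example (a sketch only — the build argument name `symbol` is assumed from the sentence above, and `EOS` is just an illustrative value):

```bash
docker build -t eosio/eos --build-arg symbol=EOS .
```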
### EOSIO Testnet

diff --git a/README.md b/README.md
index 17dfe24052b..8c7a7e1f2dc 100644
--- a/README.md
+++ b/README.md
@@ -39,13 +39,13 @@ $ brew remove eosio
```
#### Ubuntu 18.04 Debian Package Install
```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.6.0-1-ubuntu-18.04_amd64.deb
-$ sudo apt install ./eosio_1.6.0-1-ubuntu-18.04_amd64.deb
+$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio_1.6.1-1-ubuntu-18.04_amd64.deb
+$ sudo apt install ./eosio_1.6.1-1-ubuntu-18.04_amd64.deb
```
#### Ubuntu 16.04 Debian Package Install
```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.6.0-1-ubuntu-16.04_amd64.deb
-$ sudo apt install ./eosio_1.6.0-1-ubuntu-16.04_amd64.deb
+$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio_1.6.1-1-ubuntu-16.04_amd64.deb
+$ sudo apt install ./eosio_1.6.1-1-ubuntu-16.04_amd64.deb
```
#### Debian Package Uninstall
```sh
$ sudo apt remove eosio
```
#### Centos RPM Package Install
```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.6.0-1.el7.x86_64.rpm
-$ sudo yum install ./eosio-1.6.0-1.el7.x86_64.rpm
+$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio-1.6.1-1.el7.x86_64.rpm
+$ sudo yum install ./eosio-1.6.1-1.el7.x86_64.rpm
```
#### Centos RPM Package Uninstall
```sh
$ sudo yum remove eosio.cdt
```
#### Fedora RPM Package Install
```sh
-$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.6.0-1.fc27.x86_64.rpm
-$ sudo yum install ./eosio-1.6.0-1.fc27.x86_64.rpm
+$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio-1.6.1-1.fc27.x86_64.rpm
+$ sudo yum install ./eosio-1.6.1-1.fc27.x86_64.rpm
```
#### Fedora RPM Package Uninstall
```sh

From f7ecd76d9af94c8ef866e24cc02f057140e07a67 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Mon, 11 Feb 2019 10:14:56 -0500
Subject: [PATCH 0003/2426] Fix boost 1.67 brew install

The latest homebrew code balks at something in the old 1.67 package file. Fix
the package file and store it locally in our repo for now. We try to pin the
boost version because boost upgrades invalidate nodeos data files.
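The mechanics of the pin are simple: the build script now installs from the formula file checked into the repo instead of from homebrew-core, so the recipe cannot drift. A minimal sketch of the same idea (paths illustrative; `brew pin` is an optional extra guard and is not part of this patch):

```bash
# install boost 1.67 from the formula kept in the repo,
# not whatever homebrew-core currently ships
brew install ./scripts/boost.rb

# optionally stop 'brew upgrade' from replacing the keg later
brew pin boost
```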
--- scripts/boost.rb | 131 ++++++++++++++++++++++++++++++++++ scripts/eosio_build_darwin.sh | 2 +- 2 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 scripts/boost.rb diff --git a/scripts/boost.rb b/scripts/boost.rb new file mode 100644 index 00000000000..a322acbdd11 --- /dev/null +++ b/scripts/boost.rb @@ -0,0 +1,131 @@ +class Boost < Formula + desc "Collection of portable C++ source libraries" + homepage "https://www.boost.org/" + revision 1 + head "https://github.com/boostorg/boost.git" + + stable do + url "https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2" + sha256 "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba" + + # Remove for > 1.67.0 + # Fix "error: no member named 'next' in namespace 'boost'" + # Upstream commit from 1 Dec 2017 "Add #include ; no + # longer in utility.hpp" + patch :p2 do + url "https://github.com/boostorg/lockfree/commit/12726cd.patch?full_index=1" + sha256 "f165823d961a588b622b20520668b08819eb5fdc08be7894c06edce78026ce0a" + end + end + + bottle do + cellar :any + sha256 "265ab8beaa6fa26a7c305ef2e6aec8bd26ca1db105aca0aaca028f32c5245a90" => :high_sierra + sha256 "567f3e9a294413c1701b698d666a521cfdeec846e256c6e66576d5b70eb26f08" => :sierra + sha256 "3f3f687a620f656fe2ac54f01306e00e6bbc0e9797db284a8d272648d427e640" => :el_capitan + end + + option "with-icu4c", "Build regexp engine with icu support" + option "without-single", "Disable building single-threading variant" + option "without-static", "Disable building static library variant" + + deprecated_option "with-icu" => "with-icu4c" + + depends_on "icu4c" => :optional + + def install + # Force boost to compile with the desired compiler + open("user-config.jam", "a") do |file| + file.write "using darwin : : #{ENV.cxx} ;\n" + end + + # libdir should be set by --prefix but isn't + bootstrap_args = ["--prefix=#{prefix}", "--libdir=#{lib}"] + + if build.with? "icu4c" + icu4c_prefix = Formula["icu4c"].opt_prefix + bootstrap_args << "--with-icu=#{icu4c_prefix}" + else + bootstrap_args << "--without-icu" + end + + # Handle libraries that will not be built. + without_libraries = ["python", "mpi"] + + # Boost.Log cannot be built using Apple GCC at the moment. Disabled + # on such systems. + without_libraries << "log" if ENV.compiler == :gcc + + bootstrap_args << "--without-libraries=#{without_libraries.join(",")}" + + # layout should be synchronized with boost-python and boost-mpi + args = ["--prefix=#{prefix}", + "--libdir=#{lib}", + "-d2", + "-j#{ENV.make_jobs}", + "--layout=tagged", + "--user-config=user-config.jam", + "-sNO_LZMA=1", + "install"] + + if build.with? "single" + args << "threading=multi,single" + else + args << "threading=multi" + end + + if build.with? "static" + args << "link=shared,static" + else + args << "link=shared" + end + + # Trunk starts using "clang++ -x c" to select C compiler which breaks C++11 + # handling using ENV.cxx11. Using "cxxflags" and "linkflags" still works. + args << "cxxflags=-std=c++11" + if ENV.compiler == :clang + args << "cxxflags=-stdlib=libc++" << "linkflags=-stdlib=libc++" + end + + system "./bootstrap.sh", *bootstrap_args + system "./b2", "headers" + system "./b2", *args + end + + def caveats + s = "" + # ENV.compiler doesn't exist in caveats. Check library availability + # instead. + if Dir["#{lib}/libboost_log*"].empty? + s += <<~EOS + Building of Boost.Log is disabled because it requires newer GCC or Clang. 
+ EOS + end + + s + end + + test do + (testpath/"test.cpp").write <<~EOS + #include + #include + #include + #include + using namespace boost::algorithm; + using namespace std; + + int main() + { + string str("a,b"); + vector strVec; + split(strVec, str, is_any_of(",")); + assert(strVec.size()==2); + assert(strVec[0]=="a"); + assert(strVec[1]=="b"); + return 0; + } + EOS + system ENV.cxx, "test.cpp", "-std=c++1y", "-L#{lib}", "-lboost_system", "-o", "test" + system "./test" + end +end diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index a91e1611d44..af36abe38d0 100644 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -218,7 +218,7 @@ done fi printf "\\tInstalling boost libraries.\\n" - if ! "${BREW}" install https://raw.githubusercontent.com/Homebrew/homebrew-core/f946d12e295c8a27519b73cc810d06593270a07f/Formula/boost.rb + if ! "${BREW}" install "${SOURCE_DIR}/scripts/boost.rb" then printf "\\tUnable to install boost 1.67 libraries at this time. 0\\n" printf "\\tExiting now.\\n\\n" From 74a143cc1d0cd7a6cc795030ea3a51b979a6bd43 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 11 Feb 2019 15:24:34 -0500 Subject: [PATCH 0004/2426] Set proper directory for baked in macOS LLVM_DIR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some users (including myself) were seeing llvm@4 unpacked to 4.0.1_1 instead of 4.0.1. Stuff unpacked to the Cellar directory appears to be a kind of implementation detail — /usr/local/opt is the proper place to reference here. --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c2bd0d7e57f..59b3952809a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,7 +17,7 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") if (UNIX) if (APPLE) if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) - set(LLVM_DIR "/usr/local/Cellar/llvm@4/4.0.1/lib/cmake/llvm") + set(LLVM_DIR "/usr/local/opt/llvm@4/lib/cmake/llvm/") endif() endif() endif() From de26672781cb9c506c63376da5813f1f7d40d2d3 Mon Sep 17 00:00:00 2001 From: Emory Barlow Date: Tue, 12 Feb 2019 23:25:59 -0500 Subject: [PATCH 0005/2426] Add bk step to gather brew files for automatic update --- .buildkite/pipeline.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 3c7f63def07..893a19f9ff9 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -398,6 +398,7 @@ steps: - "os=high-sierra" artifact_paths: - "build/packages/*.tar.gz" + - "build/packages/*.rb" timeout: 60 - command: | @@ -412,6 +413,7 @@ steps: - "os=mojave" artifact_paths: - "build/packages/*.tar.gz" + - "build/packages/*.rb" timeout: 60 - command: | @@ -507,3 +509,18 @@ steps: OS: "el7" PKGTYPE: "rpm" timeout: 60 + + - wait + + - command: | + echo "--- :arrow_down: Downloading brew files" && \ + buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" && \ + mv build/packages/eosio.rb build/packages/eosio_highsierra.rb && \ + buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" + label: ":darwin: Brew Updater" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/eosio_highsierra.rb" + - "build/packages/eosio.rb" + timeout: 60 From f2d45399dbdbf713fc2c9e655a1b40c908cf8266 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 14 Feb 2019 13:29:37 -0600 Subject: [PATCH 0006/2426] Update version to 1.6.2 --- CMakeLists.txt | 2 +- Docker/README.md | 4 ++-- README.md | 16 ++++++++-------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 59b3952809a..e35a98973f4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,7 +35,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 6) -set(VERSION_PATCH 1) +set(VERSION_PATCH 2) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index 201bee96431..45459b11b4b 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the 1.6.1 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the 1.6.2 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.6.1 --build-arg branch=1.6.1 . +docker build -t eosio/eos:v1.6.2 --build-arg branch=1.6.2 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. 
diff --git a/README.md b/README.md index 8c7a7e1f2dc..3009bfb4c01 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio_1.6.1-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.6.1-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio_1.6.2-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.6.2-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio_1.6.1-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.6.1-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio_1.6.2-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.6.2-1-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio-1.6.1-1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.6.1-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio-1.6.2-1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.6.2-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.1/eosio-1.6.1-1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.6.1-1.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio-1.6.2-1.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.6.2-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From a8c01c810e8730fefadf65e9c4bd5d5d2499ad66 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sat, 16 Feb 2019 22:52:25 -0500 Subject: [PATCH 0007/2426] add noninteractive option for build scripts --- eosio_build.sh | 11 +++++++++-- scripts/eosio_build_amazon.sh | 1 + scripts/eosio_build_centos.sh | 4 ++++ scripts/eosio_build_darwin.sh | 3 +++ scripts/eosio_build_fedora.sh | 1 + scripts/eosio_build_ubuntu.sh | 1 + 6 files changed, 19 insertions(+), 2 deletions(-) diff --git a/eosio_build.sh b/eosio_build.sh index d1e77b6e85f..173005a15d4 100755 --- a/eosio_build.sh +++ b/eosio_build.sh @@ -34,10 +34,14 @@ function usage() { - printf "\\tUsage: %s \\n\\t[Build Option -o ] \\n\\t[CodeCoverage -c] \\n\\t[Doxygen -d] \\n\\t[CoreSymbolName -s <1-7 characters>] \\n\\t[Avoid Compiling -a]\\n\\n" "$0" 1>&2 + printf "\\tUsage: %s \\n\\t[Build Option -o ] \\n\\t[CodeCoverage -c] \\n\\t[Doxygen -d] \\n\\t[CoreSymbolName -s <1-7 characters>] \\n\\t[Avoid Compiling -a]\\n\\t[Noninteractive -y]\\n\\n" "$0" 1>&2 exit 1 } + is_noninteractive() { + [[ -n "${EOSIO_BUILD_NONINTERACTIVE+1}" ]] + } + ARCH=$( uname ) if [ "${SOURCE_DIR}" == "${PWD}" ]; then BUILD_DIR="${PWD}/build" @@ -66,7 +70,7 @@ txtrst=$(tput sgr0) if [ $# -ne 0 ]; then - while getopts ":cdo:s:ah" opt; do + while getopts ":cdo:s:ahy" opt; do case "${opt}" in o ) options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) @@ -100,6 +104,9 @@ usage exit 1 ;; + y) + EOSIO_BUILD_NONINTERACTIVE=1 + ;; \? 
) printf "\\n\\tInvalid Option: %s\\n" "-${OPTARG}" 1>&2 usage diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 1c96024b847..b4ee2e5321f 100644 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -87,6 +87,7 @@ printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" printf "\\n\\t${DISPLAY}\\n\\n" printf "\\tDo you wish to install these dependencies?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index fa5e3c61378..6c3f11ee1ca 100644 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -63,6 +63,7 @@ SCL=$( rpm -qa | grep -E 'centos-release-scl-[0-9].*' ) if [ -z "${SCL}" ]; then printf "\\t - Do you wish to install and enable this repository?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) @@ -87,6 +88,7 @@ DEVTOOLSET=$( rpm -qa | grep -E 'devtoolset-7-[0-9].*' ) if [ -z "${DEVTOOLSET}" ]; then printf "\\tDo you wish to install devtoolset-7?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) @@ -118,6 +120,7 @@ PYTHON33=$( rpm -qa | grep -E 'python33-[0-9].*' ) if [ -z "${PYTHON33}" ]; then printf "\\tDo you wish to install python33?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) @@ -170,6 +173,7 @@ printf "\\tThe following dependencies are required to install EOSIO.\\n" printf "\\t${DISPLAY}\\n\\n" printf "\\tDo you wish to install these dependencies?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index af36abe38d0..661238f18bc 100644 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -70,6 +70,7 @@ then printf "\\tHomebrew must be installed to compile EOS.IO\\n\\n" printf "\\tDo you wish to install Home Brew?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case "${yn}" in [Yy]* ) @@ -138,6 +139,7 @@ printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" printf "\\n\\t${DISPLAY}\\n\\n" echo "Do you wish to install these packages?" 
+ if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) @@ -184,6 +186,7 @@ printf "\\tFound Boost Version %s.\\n" "${BVERSION}" printf "\\tEOS.IO requires Boost version 1.67.\\n" printf "\\tWould you like to uninstall version %s and install Boost version 1.67.\\n" "${BVERSION}" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 661efea9fc0..4492e545c17 100644 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -86,6 +86,7 @@ printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" printf "\\n\\t${DISPLAY}\\n\\n" printf "\\tDo you wish to install these dependencies?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 4c9873a60a1..f87397e8625 100644 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -91,6 +91,7 @@ printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" printf "\\n\\t${DISPLAY}\\n\\n" printf "\\tDo you wish to install these packages?\\n" + if is_noninteractive; then exec <<< "1"; fi select yn in "Yes" "No"; do case $yn in [Yy]* ) From 2aa4bed95492d2333b95113943a3b5301d5a8298 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 18 Feb 2019 12:00:15 -0600 Subject: [PATCH 0008/2426] Limit assert message to 1024 chars --- libraries/chain/wasm_interface.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 28de213c1c2..c8f81cacca3 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -22,6 +22,7 @@ #include #include #include +#include namespace eosio { namespace chain { using namespace webassembly; @@ -901,6 +902,8 @@ class system_api : public context_aware_api { }; +constexpr size_t max_assert_message = 1024; + class context_free_system_api : public context_aware_api { public: explicit context_free_system_api( apply_context& ctx ) @@ -913,14 +916,16 @@ class context_free_system_api : public context_aware_api { // Kept as intrinsic rather than implementing on WASM side (using eosio_assert_message and strlen) because strlen is faster on native side. void eosio_assert( bool condition, null_terminated_ptr msg ) { if( BOOST_UNLIKELY( !condition ) ) { - std::string message( msg ); + const size_t sz = strnlen( msg, max_assert_message ); + std::string message( msg, sz ); EOS_THROW( eosio_assert_message_exception, "assertion failure with message: ${s}", ("s",message) ); } } void eosio_assert_message( bool condition, array_ptr msg, size_t msg_len ) { if( BOOST_UNLIKELY( !condition ) ) { - std::string message( msg, msg_len ); + const size_t sz = msg_len > max_assert_message ? max_assert_message : msg_len; + std::string message( msg, sz ); EOS_THROW( eosio_assert_message_exception, "assertion failure with message: ${s}", ("s",message) ); } } From 0cbccbb03a04b509790f6a0aec6f4eab52f39794 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 19 Feb 2019 09:52:08 -0500 Subject: [PATCH 0009/2426] Don't unlink what we install via brew Unlinking everything we install makes no sense -- it means things like cmake aren't in the path any longer like the script expects. So don't do that any more.
Unfortunately this old script requires that gettext be force linked. So implement that behavior explicitly for now --- scripts/eosio_build_darwin.sh | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 661238f18bc..adcae78a25c 100644 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -161,13 +161,12 @@ printf "\\tExiting now.\\n\\n" exit 1; fi - if [[ "$DEP" == "llvm@4" ]]; then - "${BREW}" unlink ${DEP} - elif ! "${BREW}" unlink ${DEP} && "${BREW}" link --force ${DEP} - then - printf "\\tHomebrew exited with the above errors.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; + if [ $PERMISSION_GETTEXT -eq 1 ]; then + if ! "${BREW}" link --force gettext; then + printf "\\tHomebrew exited with the above errors.\\n" + printf "\\tExiting now.\\n\\n" + exit 1; + fi fi break;; [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; From 56726b0a4b45515705e35025120bdf9afcbb493c Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 19 Feb 2019 15:45:03 -0500 Subject: [PATCH 0010/2426] Make sure python-devel is installed for amazon linux builds --- scripts/eosio_build_amazon.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index b4ee2e5321f..73c163330dd 100644 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -56,11 +56,11 @@ if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then DEP_ARRAY=( git gcc72.x86_64 gcc72-c++.x86_64 autoconf automake libtool make bzip2 \ bzip2-devel.x86_64 openssl-devel.x86_64 gmp-devel.x86_64 libstdc++72.x86_64 \ - python27.x86_64 python36-devel.x86_64 libedit-devel.x86_64 doxygen.x86_64 graphviz.x86_64) + python27.x86_64 python27-devel.x86_64 python36-devel.x86_64 libedit-devel.x86_64 doxygen.x86_64 graphviz.x86_64) else DEP_ARRAY=( git gcc gcc-c++ autoconf automake libtool make bzip2 \ bzip2-devel openssl-devel gmp-devel libstdc++ \ - python3 python3-devel libedit-devel doxygen graphviz) + python3 python3-devel python-devel libedit-devel doxygen graphviz) fi COUNT=1 DISPLAY="" From d995779208e8e3a6e308277c2d08bd4d24b90597 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 22 Feb 2019 14:27:03 -0500 Subject: [PATCH 0011/2426] appbase: Block (queue) exit signals during shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a shutdown signal is handled the sig_set is 1) canceled and 2) destroyed. This means that signals are no longer handled by boost::asio and revert back to default behavior. If someone accidentally hits ctrl-c a second time during a long shutdown they run the risk of leaving the database dirty, as that second ctrl-c will insta-kill the process. This patch changes the behavior and keeps the sig_set active forever.
This means that even after the first handled async_wait() on the sig_set (that starts an appbase quit), additional signals in the set are effectively “blocked” (not posix blocked, but blocked in the sense they are consumed and queued by the sig_set that is not being async_wait()ed on) --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 2208d40578f..b2f134a27f5 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 2208d40578fb206978418c1df2bb8408ecef3fe7 +Subproject commit b2f134a27f5022805985faf88400458ef6f0ae3c From 3b15c4f0d7bbd4c0138060a85dacce5711ba1b1c Mon Sep 17 00:00:00 2001 From: fsword Date: Sat, 23 Feb 2019 14:44:29 -0800 Subject: [PATCH 0012/2426] Wrong tag/branch name there is no tag or branch named "1.6.2", I think it should be "v1.6.2"? --- Docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/README.md b/Docker/README.md index 45459b11b4b..9e01fe71f65 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -23,7 +23,7 @@ docker build . -t eosio/eos The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the 1.6.2 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.6.2 --build-arg branch=1.6.2 . +docker build -t eosio/eos:v1.6.2 --build-arg branch=v1.6.2 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. From c69954416cd392bd58b1e8f3722d60b41a3bd3a9 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 1 Mar 2019 15:25:15 -0500 Subject: [PATCH 0013/2426] appbase: rework blocking (queuing) exit signals during shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous fix for blocking signals during shutdown seems to have problems on older platforms/compilers/stdlibs like gcc6. To be completely frank I couldn’t quite pinpoint the exact cause but I highly suspect it is something to do with the application instance being static and thus the ordering of static destructors being unfavorable. I’ve changed the implementation such that during startup a separate thread is run that catches the signals but after startup that thread is retired and signal handling is then handled on the main io_service. Signals end up being blocked (queued) until destruction of application’s io_service because the async_wait() will hold a shared_ptr to the signal_set. The implementation ends up being a bit long winded but means there are no shenanigans trying to clean up threads after main() has fully returned. 
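For illustration, the queuing behavior these two appbase patches rely on can be shown with a minimal boost::asio sketch (an illustration only, not the actual appbase implementation): while a `signal_set` remains registered, asio owns the process signal handlers for the signals in the set, so a raised signal is captured and queued rather than reverting to its default disposition, even when no `async_wait()` is outstanding.

```cpp
// Minimal sketch, assuming POSIX signals and Boost.Asio; not the appbase code.
// While `sigs` stays alive, SIGINT/SIGTERM are handled by asio, so a second
// ctrl-c during a long shutdown is queued instead of killing the process.
#include <boost/asio.hpp>
#include <csignal>
#include <iostream>

int main() {
   boost::asio::io_context ioc;
   boost::asio::signal_set sigs( ioc, SIGINT, SIGTERM );

   sigs.async_wait( []( const boost::system::error_code& ec, int signum ) {
      if( ec ) return;
      std::cout << "caught signal " << signum << ", starting shutdown" << std::endl;
      // Deliberately no re-armed async_wait(): while the signal_set remains
      // registered, further SIGINT/SIGTERM are consumed and queued by asio
      // rather than falling back to the default (terminate) disposition.
   } );

   ioc.run(); // long-running shutdown work would drain here before `sigs` dies
   return 0;
}
```

Pressing ctrl-c a second time while `run()` is still draining work then produces a queued notification rather than process termination, which is the property both fixes above preserve across the shutdown window.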
--- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index b2f134a27f5..45599938aca 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit b2f134a27f5022805985faf88400458ef6f0ae3c +Subproject commit 45599938aca06a54a437e55258d0c068fa095cc3 From f5e4529d96af708885335cda2545b54ced4e155b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 2 Mar 2019 09:01:56 -0600 Subject: [PATCH 0014/2426] Update to appbase with FIFO priority queue --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 45599938aca..85bd6a3f750 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 45599938aca06a54a437e55258d0c068fa095cc3 +Subproject commit 85bd6a3f750b741570d023b6c97e5dea6dd51501 From 7c1e14a81af8cf0f9da8e64774f411f4ccacc8f7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 09:00:21 -0500 Subject: [PATCH 0015/2426] Call recover keys before transaction execution so trx->sig_cpu_usage is set correctly --- libraries/chain/controller.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2d2af25f0c1..8be2fa4ad7e 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -999,6 +999,9 @@ struct controller_impl { transaction_trace_ptr trace; try { auto start = fc::time_point::now(); + const bool check_auth = !self.skip_auth_check() && !trx->implicit; + // call recover keys so that trx->sig_cpu_usage is set correctly + const flat_set& recovered_keys = check_auth ? trx->recover_keys( chain_id ) : flat_set(); if( !explicit_billed_cpu_time ) { fc::microseconds already_consumed_time( EOS_PERCENT(trx->sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); @@ -1031,10 +1034,10 @@ struct controller_impl { trx_context.delay = fc::seconds(trn.delay_sec); - if( !self.skip_auth_check() && !trx->implicit ) { + if( check_auth ) { authorization.check_authorization( trn.actions, - trx->recover_keys( chain_id ), + recovered_keys, {}, trx_context.delay, [](){} From a8c1cedb4df2be4a687fda67eb7498c74e651509 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 09:00:21 -0500 Subject: [PATCH 0016/2426] Call recover keys before transaction execution so trx->sig_cpu_usage is set correctly --- libraries/chain/controller.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 20f5478a079..0faac326537 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -997,6 +997,9 @@ struct controller_impl { transaction_trace_ptr trace; try { auto start = fc::time_point::now(); + const bool check_auth = !self.skip_auth_check() && !trx->implicit; + // call recover keys so that trx->sig_cpu_usage is set correctly + const flat_set& recovered_keys = check_auth ?
trx->recover_keys( chain_id ) : flat_set(); if( !explicit_billed_cpu_time ) { fc::microseconds already_consumed_time( EOS_PERCENT(trx->sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); @@ -1029,10 +1032,10 @@ struct controller_impl { trx_context.delay = fc::seconds(trn.delay_sec); - if( !self.skip_auth_check() && !trx->implicit ) { + if( check_auth ) { authorization.check_authorization( trn.actions, - trx->recover_keys( chain_id ), + recovered_keys, {}, trx_context.delay, [](){} From 00a29f370f1eec8f4c9621d2e3d010f569a34469 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 5 Mar 2019 15:55:40 -0500 Subject: [PATCH 0017/2426] appbase: ensure ctrl-c during startup handled correctly --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 85bd6a3f750..9da0818154c 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 85bd6a3f750b741570d023b6c97e5dea6dd51501 +Subproject commit 9da0818154c3ebade946f29f5f0328117578058c From 12d996150d233bff65d6bcc328e742a6d564b0cf Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 16:23:34 -0500 Subject: [PATCH 0018/2426] Consolidated Security Fixes for 1.6.3 - Fix small memory leak in net_plugin. - Add additional deadline checks to transaction authorization. --- libraries/chain/controller.cpp | 4 +--- plugins/net_plugin/net_plugin.cpp | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2d2af25f0c1..cf904d6fb5b 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1037,9 +1037,7 @@ struct controller_impl { trx->recover_keys( chain_id ), {}, trx_context.delay, - [](){} - /*std::bind(&transaction_context::add_cpu_usage_and_check_time, &trx_context, - std::placeholders::_1)*/, + [&trx_context](){ trx_context.checktime(); }, false ); } diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 1b398a8b53a..ab36d06cc6a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -724,6 +724,7 @@ namespace eosio { void rejected_block(const block_id_type& id); void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); + void expire_blocks( uint32_t bnum ); void recv_transaction(const connection_ptr& conn, const transaction_id_type& id); void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); @@ -1656,11 +1657,23 @@ namespace eosio { } void dispatch_manager::rejected_block(const block_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); + fc_dlog( logger, "rejected block ${id}", ("id", id) ); auto range = received_blocks.equal_range(id); received_blocks.erase(range.first, range.second); } + void dispatch_manager::expire_blocks( uint32_t lib_num ) { + for( auto i = received_blocks.begin(); i != received_blocks.end(); ) { + const block_id_type& blk_id = i->first; + uint32_t blk_num = block_header::num_from_id( blk_id ); + if( blk_num <= lib_num ) { + i = received_blocks.erase( i ); + } else { + ++i; + } + } + } + void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { std::set skips; const auto& id = ptrx->id; @@ -2565,6 +2578,7 @@ namespace eosio { } else { sync_master->rejected_block(c, blk_num); + dispatcher->rejected_block( blk_id ); } } @@ -2626,6 +2640,7 @@ namespace eosio { 
controller& cc = chain_plug->chain(); uint32_t lib = cc.last_irreversible_block_num(); + dispatcher->expire_blocks( lib ); for ( auto &c : connections ) { auto &stale_txn = c->trx_state.get(); stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); From 0776c115c45e9f1f72c8ed0c118699b4cc8425f2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 16:53:20 -0500 Subject: [PATCH 0019/2426] Consolidated Security Fixes for 1.7.0-rc2 - Fix small memory leak in net_plugin. - Add additional deadline checks to transaction authorization. --- libraries/chain/controller.cpp | 4 +--- plugins/net_plugin/net_plugin.cpp | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 20f5478a079..608d5dc43a0 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1035,9 +1035,7 @@ struct controller_impl { trx->recover_keys( chain_id ), {}, trx_context.delay, - [](){} - /*std::bind(&transaction_context::add_cpu_usage_and_check_time, &trx_context, - std::placeholders::_1)*/, + [&trx_context](){ trx_context.checktime(); }, false ); } diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7777949a7b9..91ff34f00d5 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -716,6 +716,7 @@ namespace eosio { void rejected_block(const block_id_type& id); void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); + void expire_blocks( uint32_t bnum ); void recv_transaction(const connection_ptr& conn, const transaction_id_type& id); void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); @@ -1646,11 +1647,23 @@ namespace eosio { } void dispatch_manager::rejected_block(const block_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); + fc_dlog( logger, "rejected block ${id}", ("id", id) ); auto range = received_blocks.equal_range(id); received_blocks.erase(range.first, range.second); } + void dispatch_manager::expire_blocks( uint32_t lib_num ) { + for( auto i = received_blocks.begin(); i != received_blocks.end(); ) { + const block_id_type& blk_id = i->first; + uint32_t blk_num = block_header::num_from_id( blk_id ); + if( blk_num <= lib_num ) { + i = received_blocks.erase( i ); + } else { + ++i; + } + } + } + void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { std::set skips; const auto& id = ptrx->id; @@ -2582,6 +2595,7 @@ namespace eosio { } else { sync_master->rejected_block(c, blk_num); + dispatcher->rejected_block( blk_id ); } } @@ -2649,6 +2663,7 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t lib = cc.last_irreversible_block_num(); + dispatcher->expire_blocks( lib ); for ( auto &c : connections ) { auto &stale_txn = c->trx_state.get(); stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); From bf85b4fcea6527882a0217fc0adab02cd3cd5bc2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 Mar 2019 17:14:48 -0500 Subject: [PATCH 0020/2426] Bump version to 1.6.3 --- CMakeLists.txt | 2 +- Docker/README.md | 4 ++-- README.md | 16 ++++++++-------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e35a98973f4..3a03c7b0ed2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,7 +35,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 6) -set(VERSION_PATCH 2) 
+set(VERSION_PATCH 3) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index 9e01fe71f65..f697f1862a0 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the 1.6.2 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the 1.6.3 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.6.2 --build-arg branch=v1.6.2 . +docker build -t eosio/eos:v1.6.3 --build-arg branch=v1.6.3 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. diff --git a/README.md b/README.md index 3009bfb4c01..9b39e234348 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio_1.6.2-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.6.2-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.3/eosio_1.6.3-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.6.3-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio_1.6.2-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.6.2-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.3/eosio_1.6.3-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.6.3-1-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio-1.6.2-1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.6.2-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.3/eosio-1.6.3-1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.6.3-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.2/eosio-1.6.2-1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.6.2-1.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.3/eosio-1.6.3-1.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.6.3-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From a827e9d87900433ae5fe4d4d9d1a1dbf277b85ab Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 6 Mar 2019 17:17:21 -0500 Subject: [PATCH 0021/2426] Bump version to 1.7.0-rc2 --- CMakeLists.txt | 2 +- Docker/README.md | 4 ++-- README.md | 16 ++++++++-------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ced9cf8fd92..6ec2bd9664b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,7 +38,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 7) set(VERSION_PATCH 0) -set(VERSION_SUFFIX rc1) +set(VERSION_SUFFIX rc2) if(VERSION_SUFFIX) set(VERSION_FULL 
"${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index 1aa0513cca9..27f50973e99 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0-rc1 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0-rc2 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.7.0-rc1 --build-arg branch=v1.7.0-rc1 . +docker build -t eosio/eos:v1.7.0-rc2 --build-arg branch=v1.7.0-rc2 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. diff --git a/README.md b/README.md index c36b2c6d1a5..2202dd7ecd0 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc2-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-rc2-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc2-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-rc2-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-rc1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc2.el7.x86_64.rpm +$ sudo yum install ./eosio-1.7.0-rc2.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-rc1.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc2.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.7.0-rc2.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From 90120e2d7d0ca77ade1a4a31663ee18558c7e6c8 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 8 Mar 2019 12:42:23 -0500 Subject: [PATCH 0022/2426] add back integration_test contract; needed by tests/nodeos_under_min_avail_ram.py long running test --- unittests/test-contracts/CMakeLists.txt | 1 + .../integration_test/CMakeLists.txt | 6 ++ .../integration_test/integration_test.abi | 57 ++++++++++++++++++ .../integration_test/integration_test.cpp | 29 +++++++++ .../integration_test/integration_test.hpp | 27 +++++++++ .../integration_test/integration_test.wasm | Bin 0 -> 5651 bytes 6 files changed, 120 insertions(+) create mode 100644 
unittests/test-contracts/integration_test/CMakeLists.txt create mode 100644 unittests/test-contracts/integration_test/integration_test.abi create mode 100644 unittests/test-contracts/integration_test/integration_test.cpp create mode 100644 unittests/test-contracts/integration_test/integration_test.hpp create mode 100755 unittests/test-contracts/integration_test/integration_test.wasm diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 4a458969514..59f4ec0c28d 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -9,6 +9,7 @@ endif() add_subdirectory( asserter ) add_subdirectory( deferred_test ) +add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) add_subdirectory( proxy ) diff --git a/unittests/test-contracts/integration_test/CMakeLists.txt b/unittests/test-contracts/integration_test/CMakeLists.txt new file mode 100644 index 00000000000..aaf8d2115ea --- /dev/null +++ b/unittests/test-contracts/integration_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( integration_test integration_test integration_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/integration_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/integration_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/integration_test.abi ${CMAKE_CURRENT_BINARY_DIR}/integration_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/integration_test/integration_test.abi b/unittests/test-contracts/integration_test/integration_test.abi new file mode 100644 index 00000000000..8cd5c3ee8fa --- /dev/null +++ b/unittests/test-contracts/integration_test/integration_test.abi @@ -0,0 +1,57 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "payload", + "base": "", + "fields": [ + { + "name": "key", + "type": "uint64" + }, + { + "name": "data", + "type": "uint64[]" + } + ] + }, + { + "name": "store", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "to", + "type": "name" + }, + { + "name": "num", + "type": "uint64" + } + ] + } + ], + "actions": [ + { + "name": "store", + "type": "store", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "payloads", + "type": "payload", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/integration_test/integration_test.cpp b/unittests/test-contracts/integration_test/integration_test.cpp new file mode 100644 index 00000000000..ec8543caafb --- /dev/null +++ b/unittests/test-contracts/integration_test/integration_test.cpp @@ -0,0 +1,29 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "integration_test.hpp" + +using namespace eosio; + +void integration_test::store( name from, name to, uint64_t num ) { + require_auth( from ); + + check( is_account( to ), "to account does not exist" ); + check( num < std::numeric_limits::max(), "num to large" ); + + payloads_table data( get_self(), from.value ); + uint64_t key = 0; + const uint64_t num_keys = 5; + + while( data.find( key ) != data.end() ) { + key += num_keys; + } + + for( uint64_t i = 0; i < num_keys; ++i ) { + data.emplace( from, [&]( auto& g ) { + g.key = key + i; + g.data = std::vector( static_cast(num), 5 ); + } ); + } +} diff --git a/unittests/test-contracts/integration_test/integration_test.hpp b/unittests/test-contracts/integration_test/integration_test.hpp new file mode 100644 index 00000000000..cbdc02295b2 --- /dev/null +++ b/unittests/test-contracts/integration_test/integration_test.hpp @@ -0,0 +1,27 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] integration_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void store( eosio::name from, eosio::name to, uint64_t num ); + + struct [[eosio::table("payloads")]] payload { + uint64_t key; + std::vector data; + + uint64_t primary_key()const { return key; } + + EOSLIB_SERIALIZE( payload, (key)(data) ) + }; + + using payloads_table = eosio::multi_index< "payloads"_n, payload >; + +}; diff --git a/unittests/test-contracts/integration_test/integration_test.wasm b/unittests/test-contracts/integration_test/integration_test.wasm new file mode 100755 index 0000000000000000000000000000000000000000..81e7b13d27478b078f916d8a02a6d95e0ae760dd GIT binary patch literal 5651 zcmcgwOKfCE6|K+vbyv6jJRiGl&m{F;oX`_~gC?>^Y;ai`Cm}>3QN#iv**$h=(ru61 zZnuMjhG_?}KnR5e8^kILBt%fygcU1~_=E)#kzgc{Lc}7AfQ0x739%rYQ`PNwvSEj3 zy5Dq4x{+x*Dkx*86}dUP9qco6b7K7J72dN>^V2goLr z@Q)fdSTP)3AS2clha`pNP^iDjx^C~hQ0uq%4m$nzTI*o&ypSBKcJ|j=8ymfYoqub+-b~e{Ki;swiBl*Tbzu(>&to7R)?amABzKH#BYh%#q z?W}FK2CcRI&X?LE@x%RgYZF>d=0wWjU454&(l=vJWsN8G8a)SccqlE zmO>`edMMq|u`om7-o0~jNh>mL?e1>BC?=|RADW9q{*ZZ5=VTz`sd>O_&@Qwj1` zEh~8F#G}xoifT>5)}m@OhW}sxY<1#3sp2FRS*2?7`o!eanX~84PtP=F=Pq2Fujwc< zHfbstZc|=hyNxbsyAt0j5khM-TUo7Nq~WzkjIFrmyQP3&X%GpkpRN~9V(N;qT$@=q zTh(R!?IoyM|{ zLkugd$n){)RZK;g`WOP~j+wXG9Hf|>h8K1XWC3?s7ynQ4lDQO%J_1KE^`_{C97hmd zR5kkREbg+9_ru%=p!szDewSg{hpFr3JJ97Sczl>v{Q*eHy14n*`hsc&dJ% 
zn}->fkP1Q^*|Ct!A>ucxa6iH5d+(sKsdfu$_S51TyfRqdM829TD=QqRVcR(~A_Z{DFiEe)8gPYgq+yquFAVkb?V?}xlHBfKyv-N2= ziOB~r*;LD=xmLg6atxFVD5|F<3QJ+lDki2dQ5bgy7fZuNpaa5uNMRwDpT+e32m~F; zIB*UFFt$QRG9L#xavp;hF^Jn~++F}0iP9N2gC}Bs{MlLdBrDD($gl|-gaMeOI!v&&PK?@sr!d}9= zPmU2btxI^0b&W1${8!Gq8mlP1R+k8J{-B}3jY5vLVO*~K@db^2hlo28=w zz&0=xjnIz0T`|LqKA!cU%#fheW~gIY7u)pY0`9STiw&mGfEvhPtR1Q|C%a`<7fcdX zq8mvjv=U%Nzr9NOU1nBuQ=QL3DDT2APj=Gd0o~P)VowSe8GDeor4nq>;VH4{R+%|{ z@LEv?!F!8f1C~`+(yx`(74ZzLLC{m15cQcm+_M_<9_#S+rRZ_S0NBl_6DS*FJX`Q? zfO$|cfq-zqVm6nIfrM=OX%Eml9-uMOL-?okngB)IJV3cYC7+&SVO-ML={8B;W-29d zTIK#Zc&gz8M(dt}bycp)KM&Mtgdu-$H1KR4AdC>YmD})E-on?HKR3^8#26&8**L$S zPWVUjGcDtx-TNLms772mosj^#W1f z1QET)HV$T01hk-#G|@x0sh ziuuefNImS2U`rwpdQo;kC(a_FwCGh@0Nhx_oj4!d5Zse&;Sm?XIGB{uB!3)}xF^Ul zdzuT>2^T1Cr%t#avMKE#em!51CCn=dX+$SD7N=SPW22VyIPeNO8()aL~)f`?qOV#(N)GMuq}ly^9d>OvWI&BO~&)EaBr{z{El644@rz{ z!Xi+JyI+vF{2p12bI@3b>_sN^WC&jOcF|+fk5IOBow#$lD^8X%lvBQlhBIZ z55MjxjUR)zqapK!e6Dy_q}}Y55v(C6Y6OfC0aic(aN+CYB1W{}$wviB0EMeq96m7` z>3dJ?p)b8B;rigUuYzaLqP82c#8nJ zAqCjt+2YtB`ci z$;W(FVH7d%c_KL>2xN?f?b(3{XL?!l3G4YA0j<8K$DN-1Ox!T8uK{Ep#{@5H&znK~ z0!=_DTw$w24L^fUm<|F+`=1n(P=l5Pe%x~6bf5CS5D`~|fpj2g@dyvdpf5r8a4h%O zC2I$CW@OvbMx-8bV~EXcjPrhzlBg7R$sF91LN{0`RU%1>8Z2v|_%L z`9dj3qR6Fih{7*j3ARM_ObOHmXAwsnYK2L#I-fBU3@!)p^q>i&vt|psf-*yRA)t8h zf>-=J0X1WNIxn%5XXueK?1V`w3ross^5SN6~`G6+`}@o&ywjz^`S6;Jt$851h>L6B8rB{(a0g%qTj;?8IYmXevZkp3MUkvQrMKC zHvDGsTZSLveoI;cEnbB=l2V`0l20>u8?boC(die;z|`-!F#48bP`V&!7+)e=Z#kxE zV8xRR&oUyhT30w!F**n$nSPObN!)N2WQ=k5on!y=1D4|0D9j@A6~F}DfB?e}48YFQ znmD?sehX7Rh3h-cn3q*W_*ZXN20c4^4QV%f?R~q`8`$)rOo0CISX>6dl}?S2by3fCan3A=Z8 z?9GGiL1ztbHrt0-i63odrrq!N`j#&&J3BY*co}BV3QdGJk+{Xn5_4svwL=>>APS3u y!hMTP2d(Sd7~C_j1kv literal 0 HcmV?d00001 From e245e07aba13a89fb74e2582539fd276ba3a3d84 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 13 Mar 2019 15:45:25 -0400 Subject: [PATCH 0023/2426] add libicu as dependency for .deb packages Somewhere along the line nodeos picked up a dependency on libicuuc --- scripts/generate_deb.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/generate_deb.sh b/scripts/generate_deb.sh index e8c22d154fb..9686c904036 100755 --- a/scripts/generate_deb.sh +++ b/scripts/generate_deb.sh @@ -23,9 +23,9 @@ else fi if [ ${DISTRIB_RELEASE} = "16.04" ]; then - LIBSSL="libssl1.0.0" + RELEASE_SPECIFIC_DEPS="libssl1.0.0, libicu55" elif [ ${DISTRIB_RELEASE} = "18.04" ]; then - LIBSSL="libssl1.1" + RELEASE_SPECIFIC_DEPS="libssl1.1, libicu60" else echo "Unrecognized Ubuntu version. Update generate_deb.sh. Not generating .deb file." 
exit 1 @@ -37,7 +37,7 @@ echo "Package: ${PROJECT} Version: ${VERSION_NO_SUFFIX}-${RELEASE} Section: devel Priority: optional -Depends: libc6, libgcc1, ${LIBSSL}, libstdc++6, libtinfo5, zlib1g, libusb-1.0-0, libcurl3-gnutls +Depends: libc6, libgcc1, ${RELEASE_SPECIFIC_DEPS}, libstdc++6, libtinfo5, zlib1g, libusb-1.0-0, libcurl3-gnutls Architecture: amd64 Homepage: ${URL} Maintainer: ${EMAIL} From c83c212e3085024b808276998fc1c415d2993d2c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 15:40:48 -0500 Subject: [PATCH 0024/2426] Bump to 1.7.0 --- CMakeLists.txt | 1 - Docker/README.md | 4 ++-- README.md | 16 ++++++++-------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6ec2bd9664b..59c72fb4d55 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,7 +38,6 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 7) set(VERSION_PATCH 0) -set(VERSION_SUFFIX rc2) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index 27f50973e99..8226fe66cd8 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0-rc2 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.7.0-rc2 --build-arg branch=v1.7.0-rc2 . +docker build -t eosio/eos:v1.7.0 --build-arg branch=v1.7.0 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. 
diff --git a/README.md b/README.md index 2202dd7ecd0..2b3a2583b1e 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc2-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-rc2-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc2-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-rc2-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc2.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-rc2.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0.el7.x86_64.rpm +$ sudo yum install ./eosio-1.7.0.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc2.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-rc2.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.7.0.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From 222ef4721aa3fe9f885e11f78b3b2131476162b6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 13 Mar 2019 16:56:11 -0500 Subject: [PATCH 0025/2426] Add missing -1 to package names --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 2b3a2583b1e..ca5f9fbfe62 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-1-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.0.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0-1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.7.0-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0.fc27.x86_64.rpm -$ sudo yum install 
./eosio-1.7.0.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0-1.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.7.0-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From 916c52280983a96a792984ccb487908e91309bef Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 18 Mar 2019 10:25:56 -0500 Subject: [PATCH 0026/2426] Add strand to protect internals of asio --- plugins/net_plugin/net_plugin.cpp | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 91ff34f00d5..736052dcca1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -497,7 +497,8 @@ namespace eosio { transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us std::shared_ptr server_ioc; // keep ioc alive - socket_ptr socket; + boost::asio::io_context::strand strand; + socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; fc::optional outstanding_read_bytes; @@ -730,6 +731,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), + strand( *my_impl->server_ioc ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), last_handshake_recv(), @@ -755,6 +757,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), + strand( *my_impl->server_ioc ), socket( s ), node_id(), last_handshake_recv(), @@ -976,7 +979,8 @@ namespace eosio { std::vector bufs; buffer_queue.fill_out_buffer( bufs ); - boost::asio::async_write(*socket, bufs, [c, priority]( boost::system::error_code ec, std::size_t w ) { + boost::asio::async_write(*socket, bufs, + boost::asio::bind_executor(strand, [c, priority]( boost::system::error_code ec, std::size_t w ) { app().post(priority, [c, priority, ec, w]() { try { auto conn = c.lock(); @@ -1016,7 +1020,7 @@ namespace eosio { fc_elog( logger,"Exception in do_queue_write to ${p}", ("p",pname) ); } }); - }); + })); } void connection::cancel_sync(go_away_reason reason) { @@ -1849,7 +1853,7 @@ namespace eosio { connection_wptr weak_conn = c; // Note: need to add support for IPv6 too - resolver->async_resolve( query, + resolver->async_resolve( query, boost::asio::bind_executor( c->strand, [weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { app().post( priority::low, [err, endpoint_itr, weak_conn, this]() { auto c = weak_conn.lock(); @@ -1861,7 +1865,7 @@ namespace eosio { ("peer_addr", c->peer_name())( "error", err.message()) ); } } ); - } ); + } ) ); } void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { @@ -1873,7 +1877,8 @@ namespace eosio { ++endpoint_itr; c->connecting = true; connection_wptr weak_conn = c; - c->socket->async_connect( current_endpoint, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { + c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, + [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { app().post( priority::low, [weak_conn, endpoint_itr, this, err]() { auto c = weak_conn.lock(); if( !c ) return; @@ -1892,7 +1897,7 @@ namespace eosio { } } } ); - } ); + } ) ); } bool net_plugin_impl::start_session(const connection_ptr& con) { @@ -2042,6 +2047,7 @@ namespace eosio { ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), 
completion_handler, + boost::asio::bind_executor( conn->strand, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); @@ -2124,7 +2130,7 @@ namespace eosio { close( conn ); } }); - }); + })); } catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); From 88e1ff3b4d17977c364c75d36b912583f466ffea Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 19 Mar 2019 11:56:50 -0400 Subject: [PATCH 0027/2426] Don't build WAVM tools any longer Some of these don't work as intended due to changes in WAVM to support EOSIO --- libraries/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index a40355971a9..39d0398305d 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -2,7 +2,7 @@ add_subdirectory( fc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) add_subdirectory( chainbase ) -add_subdirectory( wasm-jit ) +add_subdirectory( wasm-jit EXCLUDE_FROM_ALL ) add_subdirectory( appbase ) add_subdirectory( chain ) add_subdirectory( testing ) From c4a327f4ff4eb5ada2266fcf0b827d3c2f384d70 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 19 Mar 2019 11:24:29 -0400 Subject: [PATCH 0028/2426] Rename eosio-wat2wasm back to original name; don't install eosio-wat2wasm was really the Assemble command from WAVM and we used it for the old wasm build environment. It's no longer needed. Remove the rename and install changes, effectively reverting ae9388d and restoring this back to upstream --- libraries/wasm-jit/Source/Programs/Assemble.cpp | 2 +- libraries/wasm-jit/Source/Programs/CMakeLists.txt | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/libraries/wasm-jit/Source/Programs/Assemble.cpp b/libraries/wasm-jit/Source/Programs/Assemble.cpp index a3328794ddd..60ca42cf0f9 100644 --- a/libraries/wasm-jit/Source/Programs/Assemble.cpp +++ b/libraries/wasm-jit/Source/Programs/Assemble.cpp @@ -7,7 +7,7 @@ int commandMain(int argc,char** argv) { if(argc < 3) { - std::cerr << "Usage: eosio-wast2wasm in.wast out.wasm [switches]" << std::endl; + std::cerr << "Usage: Assemble in.wast out.wasm [switches]" << std::endl; std::cerr << " -n|--omit-names\t\tOmits WAST function and local names from the output" << std::endl; return EXIT_FAILURE; } diff --git a/libraries/wasm-jit/Source/Programs/CMakeLists.txt b/libraries/wasm-jit/Source/Programs/CMakeLists.txt index 27a3aa427b4..260f4c1092c 100644 --- a/libraries/wasm-jit/Source/Programs/CMakeLists.txt +++ b/libraries/wasm-jit/Source/Programs/CMakeLists.txt @@ -1,7 +1,6 @@ -add_executable(eosio-wast2wasm Assemble.cpp CLI.h) -target_link_libraries(eosio-wast2wasm Logging IR WAST WASM) -set_target_properties(eosio-wast2wasm PROPERTIES FOLDER Programs) -INSTALL(TARGETS eosio-wast2wasm DESTINATION ${CMAKE_INSTALL_BINDIR}) +add_executable(Assemble Assemble.cpp CLI.h) +target_link_libraries(Assemble Logging IR WAST WASM) +set_target_properties(Assemble PROPERTIES FOLDER Programs) add_executable(Disassemble Disassemble.cpp CLI.h) target_link_libraries(Disassemble Logging IR WAST WASM) From 9e849da81b42fb292f5b3d1ad2b576ab9fbd3788 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 19 Mar 2019 15:52:04 -0500 Subject: [PATCH
0029/2426] Ensure that intermediate asio operations are on the same thread --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 736052dcca1..521f36ca66c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -731,7 +731,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), - strand( *my_impl->server_ioc ), + strand( app().get_io_service() ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), last_handshake_recv(), @@ -757,7 +757,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), - strand( *my_impl->server_ioc ), + strand( app().get_io_service() ), socket( s ), node_id(), last_handshake_recv(), From 45bfc94bb026b05f35f165c723034bd96993dac3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 20 Mar 2019 10:41:43 -0500 Subject: [PATCH 0030/2426] Prevent txn_test_gen_plugin from calling back into http_plugin multiple times per request. --- plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index d4f197df468..f86253fbfac 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -69,7 +69,10 @@ using io_work_t = boost::asio::executor_work_guard>(0);\ + auto result_handler = [times_called{std::move(times_called)}, cb, body](const fc::exception_ptr& e) mutable {\ + if( ++(*times_called) > 1 ) return;\ if (e) {\ try {\ e->dynamic_rethrow_exception();\ From 868ea8c9c6d55892acfbe431723866c502a48b47 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 21 Mar 2019 13:23:07 -0400 Subject: [PATCH 0031/2426] #6980: memory leak when --trace-history not used --- plugins/state_history_plugin/state_history_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 49c47041e3d..7aead3d1052 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -345,7 +345,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisreceipt) { + if (p->receipt && trace_log) { if (is_onblock(p)) onblock_trace = p; else if (p->failed_dtrx_trace) From 5542e9d285a35bbac0a9ac695d6d46d35905644f Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 27 Mar 2019 13:20:53 -0400 Subject: [PATCH 0032/2426] Remove setting CMAKE_OSX_SYSROOT (#6986) Setting CMAKE_OSX_SYSROOT has shown to cause build failures on fresh macos 10.13 installs --- CMakeLists.txt | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 59c72fb4d55..584937c23dc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,14 +14,8 @@ endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") -if (UNIX) - if (APPLE) - execute_process(COMMAND xcrun --show-sdk-path - OUTPUT_VARIABLE CMAKE_OSX_SYSROOT - OUTPUT_STRIP_TRAILING_WHITESPACE) - list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4") - list(APPEND CMAKE_PREFIX_PATH 
"/usr/local/opt/gettext") - endif() +if (UNIX AND APPLE) + list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4" "/usr/local/opt/gettext") endif() include( GNUInstallDirs ) From 32a9a360b4eeb6bb7af06e5f045f26755c8ed4a5 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Thu, 28 Mar 2019 13:30:36 -0400 Subject: [PATCH 0033/2426] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 1516b96cbdf..22d36d65db1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 Respective Authors all rights reserved. +Copyright (c) 2017-2019 block.one and its contributors. All rights reserved. The MIT License From 033b3b3e60cb8444abef6c6a5edc6849d3b94b53 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Thu, 28 Mar 2019 13:33:07 -0400 Subject: [PATCH 0034/2426] Update README.md --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index ca5f9fbfe62..59c9c01d3fb 100644 --- a/README.md +++ b/README.md @@ -93,3 +93,17 @@ EOSIO currently supports the following operating systems: ## Getting Started Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-home/docs) on the [EOSIO Developer Portal](https://developers.eos.io). + +## Contributing + +[Contributing Guide](./CONTRIBUTING.md) + +[Code of Conduct](./CONTRIBUTING.md#conduct) + +## License + +[MIT](./LICENSE) + +## Important + +See LICENSE for copyright and license terms. Block.one makes its contribution on a voluntary basis as a member of the EOSIO community and is not responsible for ensuring the overall performance of the software or any related applications. We make no representation, warranty, guarantee or undertaking in respect of the software or any related documentation, whether expressed or implied, including but not limited to the warranties or merchantability, fitness for a particular purpose and noninfringement. In no event shall we be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or documentation or the use or other dealings in the software or documentation. Any test results or performance figures are indicative and will not reflect performance under all conditions. Any reference to any third party or third-party product, service or other resource is not an endorsement or recommendation by Block.one. We are not responsible, and disclaim any and all responsibility and liability, for your use of or reliance on any of these resources. Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate. 
From 9931784169306ed0e092d362fadfe21e855a157a Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Thu, 28 Mar 2019 13:34:33 -0400 Subject: [PATCH 0035/2426] Create CONTRIBUTING.md --- CONTRIBUTING.md | 148 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..40ecbf9cea8 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to eos + +Interested in contributing? That's awesome! Here are some guidelines to get started quickly and easily: + +- [Reporting An Issue](#reporting-an-issue) + - [Bug Reports](#bug-reports) + - [Feature Requests](#feature-requests) + - [Change Requests](#change-requests) +- [Working on eos](#working-on-eos) + - [Feature Branches](#feature-branches) + - [Submitting Pull Requests](#submitting-pull-requests) + - [Testing and Quality Assurance](#testing-and-quality-assurance) +- [Conduct](#conduct) +- [Contributor License & Acknowledgments](#contributor-license--acknowledgments) +- [References](#references) + +## Reporting An Issue + +If you're about to raise an issue because you think you've found a problem with eos, or you'd like to make a request for a new feature in the codebase, or any other reason… please read this first. + +The GitHub issue tracker is the preferred channel for [bug reports](#bug-reports), [feature requests](#feature-requests), and [submitting pull requests](#submitting-pull-requests), but please respect the following restrictions: + +* Please **search for existing issues**. Help us keep duplicate issues to a minimum by checking to see if someone has already reported your problem or requested your idea. + +* Please **be civil**. Keep the discussion on topic and respect the opinions of others. See also our [Contributor Code of Conduct](#conduct). + +### Bug Reports + +A bug is a _demonstrable problem_ that is caused by the code in the repository. Good bug reports are extremely helpful - thank you! + +Guidelines for bug reports: + +1. **Use the GitHub issue search** — check if the issue has already been + reported. + +1. **Check if the issue has been fixed** — look for [closed issues in the + current milestone](https://github.com/EOSIO/eos/issues?q=is%3Aissue+is%3Aclosed) or try to reproduce it + using the latest `develop` branch. + +A good bug report shouldn't leave others needing to chase you up for more information. Be sure to include the details of your environment and relevant tests that demonstrate the failure. + +[Report a bug](https://github.com/EOSIO/eos/issues/new?title=Bug%3A) + +### Feature Requests + +Feature requests are welcome. Before you submit one, be sure to: + +1. **Use the GitHub search** and check the feature hasn't already been requested. +1. Take a moment to think about whether your idea fits with the scope and aims of the project. +1. Remember, it's up to *you* to make a strong case to convince the project's leaders of the merits of this feature. Please provide as much detail and context as possible; this means explaining the use case and why it is likely to be common. + +### Change Requests + +Change requests cover both architectural and functional changes to how eos works. If you have an idea for a new or different dependency, a refactor, or an improvement to a feature, etc. - please be sure to: + +1. **Use the GitHub search** and check someone else didn't get there first +1. 
Take a moment to think about the best way to make a case for your change, and explain what you're thinking. Are you sure this shouldn't really be + a [bug report](#bug-reports) or a [feature request](#feature-requests)? Is it really one idea or is it many? What's the context? What problem are you solving? Why is what you are suggesting better than what's already there? + +## Working on eos + +Code contributions are welcome and encouraged! If you are looking for a good place to start, check out the [good first issue](https://github.com/EOSIO/eos/labels/good%20first%20issue) label in GitHub issues. + +Also, please follow these guidelines when submitting code: + +### Feature Branches + +To get it out of the way: + +- **[develop](https://github.com/EOSIO/eos/tree/develop)** is the development branch. All work on the next release happens here, so you should generally branch off `develop`. Do **NOT** use this branch for a production site. +- **[master](https://github.com/EOSIO/eos/tree/master)** contains the latest release of eos. This branch may be used in production. Do **NOT** use this branch to work on eos's source. + +### Submitting Pull Requests + +Pull requests are awesome. If you're looking to raise a PR for something which doesn't have an open issue, please think carefully about [raising an issue](#reporting-an-issue) which your PR can close, especially if you're fixing a bug. This makes it more likely that there will be enough information available for your PR to be properly tested and merged. + +### Testing and Quality Assurance + +Never underestimate just how useful quality assurance is. If you're looking to get involved with the code base and don't know where to start, checking out and testing a pull request is one of the most useful things you could do. + +Essentially, [check out the latest develop branch](#working-on-eos), take it for a spin, and if you find anything odd, please follow the [bug report guidelines](#bug-reports) and let us know! + +## Conduct + +While contributing, please be respectful and constructive, so that participation in our project is a positive experience for everyone. + +Examples of behavior that contributes to creating a positive environment include: +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior include: +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Contributor License & Acknowledgments + +Whenever you make a contribution to this project, you license your contribution under the same terms as set out in LICENSE, and you represent and warrant that you have the right to license your contribution under those terms. Whenever you make a contribution to this project, you also certify to the terms of the Developer’s Certificate of Origin set out below: + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
+1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +## References + +* Overall CONTRIB adapted from https://github.com/mathjax/MathJax/blob/master/CONTRIBUTING.md +* Conduct section adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html From 05a343600d6fe6cea88e3c38335fa8e3b8234fd0 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:20:44 -0400 Subject: [PATCH 0036/2426] Remove boost::thread usage from mongo plugin boost::thread is problematic on some new compiler + old boost combos --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index ee22c65c7cc..58082dd74a2 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -16,11 +16,10 @@ #include #include #include -#include -#include -#include #include +#include +#include #include #include @@ -162,9 +161,9 @@ class mongo_db_plugin_impl { std::deque block_state_process_queue; std::deque irreversible_block_state_queue; std::deque irreversible_block_state_process_queue; - boost::mutex mtx; - boost::condition_variable condition; - boost::thread consume_thread; + std::mutex mtx; + std::condition_variable condition; + std::thread consume_thread; std::atomic_bool done{false}; std::atomic_bool startup{true}; fc::optional chain_id; @@ -290,7 +289,7 @@ bool mongo_db_plugin_impl::filter_include( const transaction& trx ) const template void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { - boost::mutex::scoped_lock lock( mtx ); + std::unique_lock lock( mtx ); auto queue_size = queue.size(); if( queue_size > max_queue_size ) { lock.unlock(); @@ -298,7 +297,7 @@ void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { queue_sleep_time += 10; if( queue_sleep_time > 1000 ) wlog("queue size: ${q}", ("q", queue_size)); - boost::this_thread::sleep_for( boost::chrono::milliseconds( queue_sleep_time )); + std::this_thread::sleep_for( std::chrono::milliseconds( queue_sleep_time )); lock.lock(); } else { queue_sleep_time -= 10; @@ -406,7 
+405,7 @@ void mongo_db_plugin_impl::consume_blocks() { _account_controls = mongo_conn[db_name][account_controls_col]; while (true) { - boost::mutex::scoped_lock lock(mtx); + std::unique_lock lock(mtx); while ( transaction_metadata_queue.empty() && transaction_trace_queue.empty() && block_state_queue.empty() && @@ -1471,7 +1470,7 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = boost::thread([this] { consume_blocks(); }); + consume_thread = std::thread([this] { consume_blocks(); }); startup = false; } From 537a6588f701f7533410342f7c53b194a5ad7385 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:28:25 -0400 Subject: [PATCH 0037/2426] disable asio's experimental string_view usage on macos Newer stdlibc++s can #error in experimental string_view --- CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 584937c23dc..146fa454f75 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,6 +115,11 @@ FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS locale iostreams) +# Some new stdlibc++s will #error on ; a problem for boost pre-1.69 +if( APPLE AND UNIX ) + add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) +endif() + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") From 92f11463f0f0974dbb99dc82dbd0c3e6c3bad2f7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 15:59:30 -0500 Subject: [PATCH 0038/2426] Fix for close() called while async_read in-flight --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 521f36ca66c..92c0481cb5f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -826,7 +826,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1876,6 +1875,7 @@ namespace eosio { auto current_endpoint = *endpoint_itr; ++endpoint_itr; c->connecting = true; + c->pending_message_buffer.reset(); connection_wptr weak_conn = c; c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { @@ -2051,7 +2051,7 @@ namespace eosio { [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); - if (!conn) { + if (!conn || !conn->connected()) { return; } From c920387944956e9868e1c21f1247fe1f9fd302b5 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 17:54:00 -0400 Subject: [PATCH 0039/2426] fc sync - Remove fc::shared_ptr & refactor logging code to not use it --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 12956c33041..063353354d0 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 12956c330413e69bd998cd0657c8a82ef3e8a106 +Subproject commit 063353354d04b631541083ba65fbe2667ef4f097 From 2180c648e3f4075b48a89889be4e9a268bff4043 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 16:54:49 -0500 Subject: [PATCH 0040/2426] Use shared_future instead of future since 
accessed across threads --- .../include/eosio/chain/transaction_metadata.hpp | 11 +++++++---- libraries/chain/transaction_metadata.cpp | 9 ++++++--- plugins/producer_plugin/producer_plugin.cpp | 10 ++++++---- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 6136580fa44..923e5d42f14 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -15,6 +15,7 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; +using signing_keys_future_type = std::shared_future>>; /** * This data structure should store context-free cached data about a transaction such as * packed/unpacked/compressed and recovered keys @@ -26,8 +27,7 @@ class transaction_metadata { packed_transaction_ptr packed_trx; fc::microseconds sig_cpu_usage; optional>> signing_keys; - std::future>> - signing_keys_future; + signing_keys_future_type signing_keys_future; bool accepted = false; bool implicit = false; bool scheduled = false; @@ -52,8 +52,11 @@ class transaction_metadata { const flat_set& recover_keys( const chain_id_type& chain_id ); - static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + // must be called from main application thread + // signing_keys_future should only be accessed by main application thread + static signing_keys_future_type + create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, fc::microseconds time_limit ); }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 482b3c488f7..cbeda6cbec5 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -23,10 +23,11 @@ const flat_set& transaction_metadata::recover_keys( const chain return signing_keys->second; } -void transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { +signing_keys_future_type transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, + boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) +{ if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created - return; + return mtrx->signing_keys_future; std::weak_ptr mtrx_wp = mtrx; mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { @@ -41,6 +42,8 @@ void transaction_metadata::create_signing_keys_future( const transaction_metadat } return std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys )); } ); + + return mtrx->signing_keys_future; } diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index f3eb2164cea..4ae608bee5e 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,10 +351,12 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - 
transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { - if( trx->signing_keys_future.valid() ) - trx->signing_keys_future.wait(); + signing_keys_future_type future = + transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), + fc::microseconds( cfg.max_transaction_cpu_usage ) ); + boost::asio::post( *_thread_pool, [self = this, future, trx, persist_until_expired, next]() { + if( future.valid() ) + future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { self->process_incoming_transaction_async( trx, persist_until_expired, next ); }); From 5bc1ccee5aa37ea5347330470fdf8d2888d8830c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 17:36:31 -0500 Subject: [PATCH 0041/2426] Can't call connected(), it checks flag that is only set after first read --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 92c0481cb5f..af6ad9f73ff 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2051,7 +2051,7 @@ namespace eosio { [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); - if (!conn || !conn->connected()) { + if (!conn || !conn->socket || !conn->socket->is_open()) { return; } From cf1a2faf9b4942d6c715020120505c699da9dcab Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 30 Mar 2019 08:53:58 -0500 Subject: [PATCH 0042/2426] Simplify key recovery future logic --- libraries/chain/controller.cpp | 7 ++-- .../eosio/chain/transaction_metadata.hpp | 15 +++++---- libraries/chain/transaction_metadata.cpp | 33 +++++++++---------- libraries/testing/tester.cpp | 15 +++++++-- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 +-- plugins/producer_plugin/producer_plugin.cpp | 5 ++- unittests/misc_tests.cpp | 24 +++++++------- 7 files changed, 56 insertions(+), 47 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 09a3f7ecb32..75d27db31b4 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -999,9 +999,10 @@ struct controller_impl { auto start = fc::time_point::now(); const bool check_auth = !self.skip_auth_check() && !trx->implicit; // call recover keys so that trx->sig_cpu_usage is set correctly - const flat_set& recovered_keys = check_auth ? trx->recover_keys( chain_id ) : flat_set(); + const fc::microseconds sig_cpu_usage = check_auth ? std::get<0>( trx->recover_keys( chain_id ) ) : fc::microseconds(); + const flat_set& recovered_keys = check_auth ? 
std::get<1>( trx->recover_keys( chain_id ) ) : flat_set(); if( !explicit_billed_cpu_time ) { - fc::microseconds already_consumed_time( EOS_PERCENT(trx->sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); + fc::microseconds already_consumed_time( EOS_PERCENT(sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); if( start.time_since_epoch() < already_consumed_time ) { start = fc::time_point(); @@ -1199,7 +1200,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, chain_id, microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 923e5d42f14..9d0c01e0a8c 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -16,6 +16,8 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; using signing_keys_future_type = std::shared_future>>; +using recovery_keys_type = std::pair&>; + /** * This data structure should store context-free cached data about a transaction such as * packed/unpacked/compressed and recovered keys @@ -25,8 +27,6 @@ class transaction_metadata { transaction_id_type id; transaction_id_type signed_id; packed_transaction_ptr packed_trx; - fc::microseconds sig_cpu_usage; - optional>> signing_keys; signing_keys_future_type signing_keys_future; bool accepted = false; bool implicit = false; @@ -50,13 +50,14 @@ class transaction_metadata { signed_id = digest_type::hash(*packed_trx); } - const flat_set& recover_keys( const chain_id_type& chain_id ); - // must be called from main application thread - // signing_keys_future should only be accessed by main application thread static signing_keys_future_type - create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + start_recover_keys( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, fc::microseconds time_limit ); + + // start_recover_keys must be called first + recovery_keys_type recover_keys( const chain_id_type& chain_id ); + }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index cbeda6cbec5..ded655c8d79 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -4,35 +4,32 @@ namespace eosio { namespace chain { - -const flat_set& transaction_metadata::recover_keys( const chain_id_type& chain_id ) { +recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chain_id ) { // Unlikely for more than one chain_id to be used in one nodeos instance - if( !signing_keys || signing_keys->first != chain_id ) { - if( signing_keys_future.valid() ) { - std::tuple> sig_keys = signing_keys_future.get(); - if( std::get<0>( sig_keys ) == chain_id ) { - sig_cpu_usage = std::get<1>( sig_keys ); - signing_keys.emplace( std::get<0>( sig_keys ), std::move( std::get<2>( sig_keys ))); - return signing_keys->second; - } + if( signing_keys_future.valid() ) { + const std::tuple>& sig_keys = signing_keys_future.get(); + 
if( std::get<0>( sig_keys ) == chain_id ) { + return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); } - flat_set recovered_pub_keys; - sig_cpu_usage = packed_trx->get_signed_transaction().get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys ); - signing_keys.emplace( chain_id, std::move( recovered_pub_keys )); + EOS_ASSERT( false, chain_id_type_exception, "chain id ${cid} does not match start_recover_keys ${sid}", + ("cid", chain_id)( "sid", std::get<0>( sig_keys ) ) ); } - return signing_keys->second; + + EOS_ASSERT( false, chain_id_type_exception, "start_recover_keys for ${cid} is required", ("cid", chain_id) ); } -signing_keys_future_type transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) +signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx, + boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, + fc::microseconds time_limit ) { - if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created + if( mtrx->signing_keys_future.valid() && std::get<0>( mtrx->signing_keys_future.get() ) == chain_id ) // already created return mtrx->signing_keys_future; std::weak_ptr mtrx_wp = mtrx; mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { fc::time_point deadline = time_limit == fc::microseconds::maximum() ? - fc::time_point::maximum() : fc::time_point::now() + time_limit; + fc::time_point::maximum() : fc::time_point::now() + time_limit; auto mtrx = mtrx_wp.lock(); fc::microseconds cpu_usage; flat_set recovered_pub_keys; diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index a6a77ff2998..63a0788931f 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -346,7 +346,13 @@ namespace eosio { namespace testing { { try { if( !control->pending_block_state() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); - auto r = control->push_transaction( std::make_shared(std::make_shared(trx)), deadline, billed_cpu_time_us ); + + auto mtrx = std::make_shared( std::make_shared(trx) ); + auto time_limit = deadline == fc::time_point::maximum() ? + fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except ) throw *r->except; return r; @@ -365,7 +371,12 @@ namespace eosio { namespace testing { c = packed_transaction::zlib; } - auto r = control->push_transaction( std::make_shared(trx,c), deadline, billed_cpu_time_us ); + auto time_limit = deadline == fc::time_point::maximum() ? 
+ fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + auto mtrx = std::make_shared(trx, c); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; return r; diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index ee22c65c7cc..73d31fbba03 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -779,8 +779,8 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti } string signing_keys_json; - if( t->signing_keys.valid() ) { - signing_keys_json = fc::json::to_string( t->signing_keys->second ); + if( t->signing_keys_future.valid() ) { + signing_keys_json = fc::json::to_string( std::get<2>( t->signing_keys_future.get() ) ); } else { flat_set keys; trx.get_signature_keys( *chain_id, fc::time_point::maximum(), keys, false ); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 4ae608bee5e..8f188f7ae39 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,9 +351,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - signing_keys_future_type future = - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), - fc::microseconds( cfg.max_transaction_cpu_usage ) ); + signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, *_thread_pool, + chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); boost::asio::post( *_thread_pool, [self = this, future, trx, persist_until_expired, next]() { if( future.valid() ) future.wait(); diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 607c78859fd..ca145d769cc 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -833,28 +833,28 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); auto 
keys = mtrx->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys.size()); - BOOST_CHECK_EQUAL(public_key, *keys.begin()); + BOOST_CHECK_EQUAL(1u, keys.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys.second.begin()); // again - keys = mtrx->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys.size()); - BOOST_CHECK_EQUAL(public_key, *keys.begin()); + auto keys2 = mtrx->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys2.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys2.second.begin()); - auto keys2 = mtrx2->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys.size()); - BOOST_CHECK_EQUAL(public_key, *keys.begin()); + auto keys3 = mtrx2->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys3.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys3.second.begin()); } FC_LOG_AND_RETHROW() } From 1b593e6e6f344f28f0d5d29f236d0581eedc8cdb Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 17:58:49 -0400 Subject: [PATCH 0043/2426] chainbase sync - Remove boost thread & chain locking code --- libraries/chain/controller.cpp | 9 +-------- libraries/chainbase | 2 +- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 09a3f7ecb32..9804b0bcaec 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -421,14 +421,7 @@ struct controller_impl { void clear_all_undo() { // Rewind the database to the last irreversible block - db.with_write_lock([&] { - db.undo_all(); - /* - FC_ASSERT(db.revision() == self.head_block_num(), - "Chainbase revision does not match head block num", - ("rev", db.revision())("head_block", self.head_block_num())); - */ - }); + db.undo_all(); } void add_contract_tables_to_snapshot( const snapshot_writer_ptr& snapshot ) const { diff --git a/libraries/chainbase b/libraries/chainbase index 8ca96ad6b18..02c1ea29133 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 8ca96ad6b18709d65a7d1f67f8893978f25babcf +Subproject commit 02c1ea2913358959a26036779b512432f036946e From 2570a320ff3560372f6c4b9866dad97e9ee025b7 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 31 Mar 2019 15:48:57 -0400 Subject: [PATCH 0044/2426] Remove final remnants of boost thread usage from cmake Because this could be the last boost thread reference, we need to tell cmake to still pass thread compiler flags --- CMakeLists.txt | 8 ++++---- libraries/appbase | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 146fa454f75..224ff31df09 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -103,16 +103,12 @@ IF( WIN32 ) set(BOOST_ALL_DYN_LINK OFF) # force dynamic linking for all libraries ENDIF(WIN32) FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS - thread date_time filesystem system program_options - serialization chrono unit_test_framework - context - locale iostreams) # Some new stdlibc++s will #error on ; a problem for boost pre-1.69 @@ -120,6 +116,10 @@ if( APPLE AND UNIX ) add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) endif() +set(THREADS_PREFER_PTHREAD_FLAG 1) +find_package(Threads) +link_libraries(Threads::Threads) + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") diff --git a/libraries/appbase b/libraries/appbase index 9da0818154c..737df2c70b0 160000 
--- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 9da0818154c3ebade946f29f5f0328117578058c +Subproject commit 737df2c70b0b5467ce928d97457985c852f7850e From 70ca1920abdb2eedc86734599542cf0f691fa192 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 31 Mar 2019 15:50:20 -0400 Subject: [PATCH 0045/2426] When building boost on macos, only build the libraries needed by eosio --- scripts/eosio_build_darwin.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index a7ec32ff7de..11bbbc37d99 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -174,7 +174,8 @@ if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) install \ + && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) --with-iostreams --with-date_time --with-filesystem \ + --with-system --with-program_options --with-chrono --with-test install \ && cd .. \ && rm -f boost_$BOOST_VERSION.tar.bz2 \ && rm -rf $BOOST_LINK_LOCATION \ From b698d19b2c889ecc4545b4f717902533c74c006d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 09:25:02 -0400 Subject: [PATCH 0046/2426] Calculate recovery keys instead of asserting if start not called or different chain_id. Restores old behavior. --- .../include/eosio/chain/transaction_metadata.hpp | 3 ++- libraries/chain/transaction_metadata.cpp | 13 ++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 9d0c01e0a8c..0847159e6de 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -15,7 +15,8 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; -using signing_keys_future_type = std::shared_future>>; +using signing_keys_future_value_type = std::tuple>; +using signing_keys_future_type = std::shared_future; using recovery_keys_type = std::pair&>; /** diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index ded655c8d79..9c33121a5a6 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -11,11 +11,18 @@ recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chai if( std::get<0>( sig_keys ) == chain_id ) { return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); } - EOS_ASSERT( false, chain_id_type_exception, "chain id ${cid} does not match start_recover_keys ${sid}", - ("cid", chain_id)( "sid", std::get<0>( sig_keys ) ) ); } - EOS_ASSERT( false, chain_id_type_exception, "start_recover_keys for ${cid} is required", ("cid", chain_id) ); + // shared_keys_future not created or different chain_id + std::promise p; + flat_set recovered_pub_keys; + const signed_transaction& trn = packed_trx->get_signed_transaction(); + fc::microseconds cpu_usage = trn.get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys ); + p.set_value( std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys ) ) ); + signing_keys_future = p.get_future().share(); + + const std::tuple>& sig_keys = 
signing_keys_future.get(); + return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); } signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx, From dd0b171ce8611957f13056d0662037f3e1404644 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 14:57:54 -0400 Subject: [PATCH 0047/2426] Add test for recover_keys without start_recover_keys --- unittests/misc_tests.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index ca145d769cc..5ed82c742c0 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -856,6 +856,17 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(1u, keys3.second.size()); BOOST_CHECK_EQUAL(public_key, *keys3.second.begin()); + // recover keys without first calling start_recover_keys + transaction_metadata_ptr mtrx4 = std::make_shared( std::make_shared( trx, packed_transaction::none) ); + transaction_metadata_ptr mtrx5 = std::make_shared( std::make_shared( trx, packed_transaction::zlib) ); + + auto keys4 = mtrx4->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys4.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys4.second.begin()); + + auto keys5 = mtrx5->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys5.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys5.second.begin()); } FC_LOG_AND_RETHROW() } From 516485f3e1d96b652523620424d3d7daeeeb97fa Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 2 Apr 2019 14:25:48 -0400 Subject: [PATCH 0048/2426] print action traces in cleos even if nonmandatory fields are missing --- programs/cleos/main.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index feef29cbfd9..b2cd9aa6988 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -443,9 +443,11 @@ bytes json_or_file_to_bin( const account_name& account, const action_name& actio void print_action_tree( const fc::variant& action ) { print_action( action ); - const auto& inline_traces = action["inline_traces"].get_array(); - for( const auto& t : inline_traces ) { - print_action_tree( t ); + if( action.get_object().contains( "inline_traces" ) ) { + const auto& inline_traces = action["inline_traces"].get_array(); + for( const auto& t : inline_traces ) { + print_action_tree( t ); + } } } @@ -453,12 +455,13 @@ void print_result( const fc::variant& result ) { try { if (result.is_object() && result.get_object().contains("processed")) { const auto& processed = result["processed"]; const auto& transaction_id = processed["id"].as_string(); - string status = processed["receipt"].is_object() ? 
processed["receipt"]["status"].as_string() : "failed"; + string status = "failed"; int64_t net = -1; int64_t cpu = -1; if( processed.get_object().contains( "receipt" )) { const auto& receipt = processed["receipt"]; if( receipt.is_object()) { + status = receipt["status"].as_string(); net = receipt["net_usage_words"].as_int64() * 8; cpu = receipt["cpu_usage_us"].as_int64(); } From 77a172022832178fdee288b88f56ca765632e8b2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 19:29:29 -0400 Subject: [PATCH 0049/2426] Consolidated Security Fixes for 1.7.1 - net_plugin security fixes --- plugins/net_plugin/net_plugin.cpp | 45 +++++-------------------------- 1 file changed, 7 insertions(+), 38 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index af6ad9f73ff..d2f9fcabfb6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -579,9 +579,6 @@ namespace eosio { const string peer_name(); - void txn_send_pending(const vector& ids); - void txn_send(const vector& txn_lis); - void blk_send_branch(); void blk_send(const block_id_type& blkid); void stop_send(); @@ -828,26 +825,6 @@ namespace eosio { if( read_delay_timer ) read_delay_timer->cancel(); } - void connection::txn_send_pending(const vector& ids) { - const std::set known_ids(ids.cbegin(), ids.cend()); - my_impl->expire_local_txns(); - for(auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ){ - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } - } - } - - void connection::txn_send(const vector& ids) { - for(const auto& t : ids) { - auto tx = my_impl->local_txns.get().find(t); - if( tx != my_impl->local_txns.end() ) { - queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } - } - } - void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); uint32_t head_num = cc.fork_db_head_block_num(); @@ -2399,17 +2376,6 @@ namespace eosio { break; } case catch_up : { - if( msg.known_trx.pending > 0) { - // plan to get all except what we already know about. 
- req.req_trx.mode = catch_up; - send_req = true; - size_t known_sum = local_txns.size(); - if( known_sum ) { - for( const auto& t : local_txns.get() ) { - req.req_trx.ids.push_back( t.id ); - } - } - } break; } case normal: { @@ -2467,14 +2433,17 @@ namespace eosio { switch (msg.req_trx.mode) { case catch_up : - c->txn_send_pending(msg.req_trx.ids); - break; - case normal : - c->txn_send(msg.req_trx.ids); break; case none : if(msg.req_blocks.mode == none) c->stop_send(); + // no break + case normal : + if( !msg.req_trx.ids.empty() ) { + elog( "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); + close(c); + return; + } break; default:; } From 90338cfe98c4a1d49264cd35d6a3a3d8228b200f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 19:39:58 -0400 Subject: [PATCH 0050/2426] Bump to 1.7.1 --- CMakeLists.txt | 2 +- Docker/README.md | 4 ++-- README.md | 16 ++++++++-------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 224ff31df09..45275a7de8d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 7) -set(VERSION_PATCH 0) +set(VERSION_PATCH 1) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index 8226fe66cd8..a15e72c91e3 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.1 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.7.0 --build-arg branch=v1.7.0 . +docker build -t eosio/eos:v1.7.1 --build-arg branch=v1.7.1 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. 
diff --git a/README.md b/README.md index 59c9c01d3fb..b3ea305737c 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio_1.7.1-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.7.1-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio_1.7.0-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio_1.7.1-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.7.1-1-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0-1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.1-1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.7.1-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.0-1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-1.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.1-1.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.7.1-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From ab0ff58d6cccfe208cfa6007f10501104112d568 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 19:47:03 -0400 Subject: [PATCH 0051/2426] Update recommended --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b3ea305737c..e55c45a7026 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.1-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio-1.7.1-1.el7.x86_64.rpm $ sudo yum install ./eosio-1.7.1-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall @@ -62,7 +62,7 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.0/eosio-1.7.1-1.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio-1.7.1-1.fc27.x86_64.rpm $ sudo yum install ./eosio-1.7.1-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall @@ -76,9 +76,9 @@ EOSIO currently supports the following operating systems: 2. Centos 7 3. Fedora 25 and higher (Fedora 27 recommended) 4. Mint 18 -5. Ubuntu 16.04 (Ubuntu 16.10 recommended) +5. Ubuntu 16.04 6. Ubuntu 18.04 -7. MacOS Darwin 10.12 and higher (MacOS 10.13.x recommended) +7. MacOS Darwin 10.12 and higher (MacOS 10.14.x recommended) ## Resources 1. 
[Website](https://eos.io) From 254cde20265da710f172a0178e73e1baacb611f2 Mon Sep 17 00:00:00 2001 From: Matias Romeo Date: Wed, 3 Apr 2019 12:39:12 -0300 Subject: [PATCH 0052/2426] cleos: handle no Content-length header in http response --- programs/cleos/httpc.cpp | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 5503c8fe8ec..9a9cdb8f866 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -89,17 +89,22 @@ namespace eosio { namespace client { namespace http { if(std::regex_search(header, match, clregex)) response_content_length = std::stoi(match[1]); } - EOS_ASSERT(response_content_length >= 0, invalid_http_response, "Invalid content-length response"); std::stringstream re; - // Write whatever content we already have to output. - response_content_length -= response.size(); - if (response.size() > 0) - re << &response; + if( response_content_length >= 0 ) { + // Write whatever content we already have to output. + response_content_length -= response.size(); + if (response.size() > 0) + re << &response; - boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); - re << &response; + boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); + } else { + boost::system::error_code ec; + boost::asio::read(socket, response, boost::asio::transfer_all(), ec); + EOS_ASSERT(!ec || ec == boost::asio::ssl::error::stream_truncated, http_exception, "Unable to read http response: ${err}", ("err",ec.message())); + } + re << &response; return re.str(); } From dd59cf5820599711dd84db93416c9ff4a66e1e2d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 18:09:48 -0400 Subject: [PATCH 0053/2426] Add back in Docker Hub deprecation that was accidentally removed --- Docker/README.md | 40 +--------------------------------------- 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/Docker/README.md b/Docker/README.md index a15e72c91e3..0fdb4c48626 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -133,45 +133,7 @@ docker volume rm keosd-data-volume ### Docker Hub -Docker Hub image available from [docker hub](https://hub.docker.com/r/eosio/eos/). -Create a new `docker-compose.yaml` file with the content below - -```bash -version: "3" - -services: - nodeosd: - image: eosio/eos:latest - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - - keosd: - image: eosio/eos:latest - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=localhost:8900 --http-alias=keosd:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - -volumes: - nodeos-data-volume: - keosd-data-volume: - -``` - -*NOTE:* the default version is the latest, you can change it to what you want - -run `docker pull eosio/eos:latest` - -run `docker-compose up` +Docker Hub images are now deprecated. New build images were discontinued on January 1st, 2019. The existing old images will be removed on June 1st, 2019. 
### EOSIO Testnet From 3554f46703e729f6e8ae99fa6f6e980543c80fca Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 4 Apr 2019 15:01:05 -0400 Subject: [PATCH 0054/2426] fix rpm command for uninstalling eosio --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e55c45a7026..57d409f5e23 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ $ sudo yum install ./eosio-1.7.1-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh -$ sudo yum remove eosio.cdt +$ sudo yum remove eosio ``` #### Fedora RPM Package Install ```sh From c87c8f998598edd726ed5df9c8ff14fca2b3a1bf Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 8 Apr 2019 11:20:12 -0500 Subject: [PATCH 0055/2426] read_delay_timer runs on net_plugin thread_pool so app().post for execution of start_read_message --- plugins/net_plugin/net_plugin.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index d2f9fcabfb6..fb86a8bcd01 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2012,12 +2012,13 @@ namespace eosio { } if( !conn->read_delay_timer ) return; conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); - conn->read_delay_timer->async_wait( - app().get_priority_queue().wrap( priority::low, [this, weak_conn]( boost::system::error_code ) { - auto conn = weak_conn.lock(); - if( !conn ) return; - start_read_message( conn ); - } ) ); + conn->read_delay_timer->async_wait( [this, weak_conn]( boost::system::error_code ec ) { + app().post( priority::low, [this, weak_conn]() { + auto conn = weak_conn.lock(); + if( !conn ) return; + start_read_message( conn ); + } ); + } ); return; } From 65cb7bdffe7bc378c2fde0853f4b97698a1bfcd0 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 8 Apr 2019 14:59:28 -0400 Subject: [PATCH 0056/2426] Also fix fedora RPM uninstall --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 57d409f5e23..ac5e2c069dd 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ $ sudo yum install ./eosio-1.7.1-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh -$ sudo yum remove eosio.cdt +$ sudo yum remove eosio ``` ## Supported Operating Systems From 5e8c459607f88cd51d226faf85fc12b1e04d7062 Mon Sep 17 00:00:00 2001 From: Matias Romeo Date: Tue, 9 Apr 2019 21:57:25 -0300 Subject: [PATCH 0057/2426] cleos: simplify http response body reading --- programs/cleos/httpc.cpp | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 9a9cdb8f866..7d9326b9ed7 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -90,20 +90,19 @@ namespace eosio { namespace client { namespace http { response_content_length = std::stoi(match[1]); } - std::stringstream re; - if( response_content_length >= 0 ) { - // Write whatever content we already have to output. + // Attempt to read the response body using the length indicated by the + // Content-length header. If the header was not present just read all available bytes. 
+ if( response_content_length != -1 ) { response_content_length -= response.size(); - if (response.size() > 0) - re << &response; - - boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); + if( response_content_length > 0 ) + boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); } else { boost::system::error_code ec; boost::asio::read(socket, response, boost::asio::transfer_all(), ec); EOS_ASSERT(!ec || ec == boost::asio::ssl::error::stream_truncated, http_exception, "Unable to read http response: ${err}", ("err",ec.message())); } + std::stringstream re; re << &response; return re.str(); } From 698251caf943b56ddd8eac7c899ea30a641db2dc Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 10 Apr 2019 12:41:57 -0400 Subject: [PATCH 0058/2426] Revert "Allow cleos to query an API node behind Cloudflare" --- programs/cleos/httpc.cpp | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 7d9326b9ed7..5503c8fe8ec 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -89,21 +89,17 @@ namespace eosio { namespace client { namespace http { if(std::regex_search(header, match, clregex)) response_content_length = std::stoi(match[1]); } - - // Attempt to read the response body using the length indicated by the - // Content-length header. If the header was not present just read all available bytes. - if( response_content_length != -1 ) { - response_content_length -= response.size(); - if( response_content_length > 0 ) - boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); - } else { - boost::system::error_code ec; - boost::asio::read(socket, response, boost::asio::transfer_all(), ec); - EOS_ASSERT(!ec || ec == boost::asio::ssl::error::stream_truncated, http_exception, "Unable to read http response: ${err}", ("err",ec.message())); - } + EOS_ASSERT(response_content_length >= 0, invalid_http_response, "Invalid content-length response"); std::stringstream re; + // Write whatever content we already have to output. 
From 698251caf943b56ddd8eac7c899ea30a641db2dc Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Wed, 10 Apr 2019 12:41:57 -0400
Subject: [PATCH 0058/2426] Revert "Allow cleos to query an API node behind
 Cloudflare"

---
 programs/cleos/httpc.cpp | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp
index 7d9326b9ed7..5503c8fe8ec 100644
--- a/programs/cleos/httpc.cpp
+++ b/programs/cleos/httpc.cpp
@@ -89,21 +89,17 @@ namespace eosio { namespace client { namespace http {
          if(std::regex_search(header, match, clregex))
            response_content_length = std::stoi(match[1]);
       }
-
-      // Attempt to read the response body using the length indicated by the
-      // Content-length header. If the header was not present just read all available bytes.
-      if( response_content_length != -1 ) {
-         response_content_length -= response.size();
-         if( response_content_length > 0 )
-            boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length));
-      } else {
-         boost::system::error_code ec;
-         boost::asio::read(socket, response, boost::asio::transfer_all(), ec);
-         EOS_ASSERT(!ec || ec == boost::asio::ssl::error::stream_truncated, http_exception, "Unable to read http response: ${err}", ("err",ec.message()));
-      }
+      EOS_ASSERT(response_content_length >= 0, invalid_http_response, "Invalid content-length response");
 
       std::stringstream re;
+      // Write whatever content we already have to output.
+      response_content_length -= response.size();
+      if (response.size() > 0)
+         re << &response;
+
+      boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length));
       re << &response;
+
       return re.str();
    }

From c332e6b720753406471f585189368a0be811778c Mon Sep 17 00:00:00 2001
From: Bucky Kittinger
Date: Tue, 16 Apr 2019 11:30:59 -0400
Subject: [PATCH 0059/2426] Revert EXCLUDE_FROM_ALL (release/1.7.x) (#7130)

* Revert EXCLUDE_FROM_ALL
* Disable building Programs and Emscripten stuff

---
 libraries/CMakeLists.txt          | 4 ++--
 libraries/wasm-jit/CMakeLists.txt | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt
index 39d0398305d..54bb2f80e09 100644
--- a/libraries/CMakeLists.txt
+++ b/libraries/CMakeLists.txt
@@ -2,7 +2,7 @@ add_subdirectory( fc )
 add_subdirectory( builtins )
 add_subdirectory( softfloat )
 add_subdirectory( chainbase )
-add_subdirectory( wasm-jit EXCLUDE_FROM_ALL )
+add_subdirectory( wasm-jit )
 add_subdirectory( appbase )
 add_subdirectory( chain )
 add_subdirectory( testing )
@@ -26,4 +26,4 @@ get_property(_CTEST_CUSTOM_TESTS_IGNORE GLOBAL PROPERTY CTEST_CUSTOM_TESTS_IGNOR
 set_property(GLOBAL PROPERTY CTEST_CUSTOM_TESTS_IGNORE
   "change_authkey import_ed decrypt_ec decrypt_rsa ssh logs generate_rsa import_ec echo\
    yubico_otp wrap_data wrap info import_rsa import_authkey generate_hmac generate_ec\
-   attest pbkdf2 parsing ${_CTEST_CUSTOM_TESTS_IGNORE}")
\ No newline at end of file
+   attest pbkdf2 parsing ${_CTEST_CUSTOM_TESTS_IGNORE}")
diff --git a/libraries/wasm-jit/CMakeLists.txt b/libraries/wasm-jit/CMakeLists.txt
index c06e45b5252..fc691f83a95 100644
--- a/libraries/wasm-jit/CMakeLists.txt
+++ b/libraries/wasm-jit/CMakeLists.txt
@@ -66,11 +66,11 @@ endif()
 
 add_subdirectory(Include/Inline)
 
-add_subdirectory(Source/Emscripten)
+#add_subdirectory(Source/Emscripten)
 add_subdirectory(Source/IR)
 add_subdirectory(Source/Logging)
 add_subdirectory(Source/Platform)
-add_subdirectory(Source/Programs)
+#add_subdirectory(Source/Programs)
 add_subdirectory(Source/Runtime)
 add_subdirectory(Source/WASM)
 add_subdirectory(Source/WAST)

From 8f9bfc9c5072076473a8295d820b268794a0b170 Mon Sep 17 00:00:00 2001
From: Zach Butler
Date: Tue, 16 Apr 2019 19:21:02 -0400
Subject: [PATCH 0060/2426] Added lib64 to the CMake search space because
 CentOS installs libraries under lib64

---
 CMakeModules/eosio-config.cmake.in | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CMakeModules/eosio-config.cmake.in b/CMakeModules/eosio-config.cmake.in
index 97de49c4568..e30a3fa4ab2 100644
--- a/CMakeModules/eosio-config.cmake.in
+++ b/CMakeModules/eosio-config.cmake.in
@@ -2,6 +2,7 @@ if(EOSIO_ROOT STREQUAL "" OR NOT EOSIO_ROOT)
    set(EOSIO_ROOT "@EOS_ROOT_DIR@")
 endif()
 list(APPEND CMAKE_MODULE_PATH ${EOSIO_ROOT}/lib/cmake/eosio)
+list(APPEND CMAKE_MODULE_PATH ${EOSIO_ROOT}/lib64/cmake/eosio)
 include(EosioTester)
 
 function(EXTRACT_MAJOR_MINOR_FROM_VERSION version success major minor)

From 7ecd886c420e55d83abedf021741894fb2c6af36 Mon Sep 17 00:00:00 2001
From: Zach <34947245+kj4ezj@users.noreply.github.com>
Date: Wed, 17 Apr 2019 19:51:41 -0400
Subject: [PATCH 0061/2426] .buildkite Folder Updates Merged in from develop
 (#7148)

* Removed Buildkite pipelines which have been centralized
* Buildkite: Merged in changes from eosio pipeline on develop
* Case matters
* BASH doesn't like exclamation points in quotes
* Forgot these steps run in shell, not BASH
* Why use quotes at all without variables when apostrophes exist
* Migrated long-running tests from develop
* Added
.pipelinebranch to point to legacy-os for centralized pipelines * Disabled AWS-2 NP and LR tests * Git Submodule Regression Check requires a script 0_o --- .buildkite/coverage.yml | 29 -- .buildkite/debug.yml | 230 ---------- .buildkite/docker.yml | 101 ---- .buildkite/long_running_tests.yml | 411 +++++++++++------ .buildkite/pipeline.yml | 740 ++++++++++++++---------------- .buildkite/sanitizers.yml | 131 ------ .pipelinebranch | 1 + scripts/long-running-test.sh | 36 ++ scripts/parallel-test.sh | 38 ++ scripts/serial-test.sh | 36 ++ scripts/submodule_check.sh | 41 ++ 11 files changed, 757 insertions(+), 1037 deletions(-) delete mode 100644 .buildkite/coverage.yml delete mode 100644 .buildkite/debug.yml delete mode 100644 .buildkite/docker.yml delete mode 100644 .buildkite/sanitizers.yml create mode 100644 .pipelinebranch create mode 100755 scripts/long-running-test.sh create mode 100755 scripts/parallel-test.sh create mode 100755 scripts/serial-test.sh create mode 100755 scripts/submodule_check.sh diff --git a/.buildkite/coverage.yml b/.buildkite/coverage.yml deleted file mode 100644 index c5a50bc64f4..00000000000 --- a/.buildkite/coverage.yml +++ /dev/null @@ -1,29 +0,0 @@ -steps: - - command: | - echo "--- :hammer: Building" && \ - /usr/bin/cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DWASM_ROOT="${WASM_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ - /usr/bin/ninja - echo "--- :spiral_note_pad: Generating Code Coverage Report" && \ - /usr/bin/ninja EOSIO_ut_coverage && \ - echo "--- :arrow_up: Publishing Code Coverage Report" && \ - buildkite-agent artifact upload "EOSIO_ut_coverage/**/*" s3://eos-coverage/$BUILDKITE_JOB_ID && \ - cp /config/.coveralls.yml . 
&& \ - /usr/local/bin/coveralls-lcov EOSIO_ut_coverage_filtered.info && \ - echo "+++ View Report" && \ - printf "\033]1339;url=https://eos-coverage.s3-us-west-2.amazonaws.com/$BUILDKITE_JOB_ID/EOSIO_ut_coverage/index.html;content=View Full Coverage Report\a\n" - label: ":spiral_note_pad: Generate Report" - agents: - queue: "automation-large-builder-fleet" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - timeout: 60 diff --git a/.buildkite/debug.yml b/.buildkite/debug.yml deleted file mode 100644 index 28576d56195..00000000000 --- a/.buildkite/debug.yml +++ /dev/null @@ -1,230 +0,0 @@ -steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Build" - agents: - - "role=macos-builder" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - docker#v2.0.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":darwin: Tests" - agents: - - "role=macos-tester" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":fedora: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:fedora" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":centos: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:centos" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":aws: Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - docker#v2.0.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job - timeout: 60 diff --git a/.buildkite/docker.yml b/.buildkite/docker.yml deleted file mode 100644 index 9be30a77cef..00000000000 --- a/.buildkite/docker.yml +++ /dev/null @@ -1,101 +0,0 @@ -steps: - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING BUILD IMAGE" && \ - cd Docker/builder && \ - docker build -t eosio/builder:latest -t eosio/builder:$BUILDKITE_COMMIT -t eosio/builder:$BUILDKITE_BRANCH . --build-arg branch=$BUILDKITE_COMMIT && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:latest eosio/builder:$BUILDKITE_TAG || : && \ - docker tag eosio/builder:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker tag eosio/builder:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker tag eosio/builder:latest gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/builder:$BUILDKITE_TAG || : && \ - docker rmi eosio/builder:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:latest - label: "Docker build builder" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker && \ - docker build -t eosio/eos:latest -t eosio/eos:$BUILDKITE_COMMIT -t eosio/eos:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:latest eosio/eos:$BUILDKITE_TAG || : && \ - docker tag eosio/eos:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker tag eosio/eos:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker tag eosio/eos:latest gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos:$BUILDKITE_TAG || : && \ - docker rmi eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS DEV IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker/dev && \ - docker build -t eosio/eos-dev:latest -t eosio/eos-dev:$BUILDKITE_COMMIT -t eosio/eos-dev:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:latest eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker tag eosio/eos-dev:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker tag eosio/eos-dev:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker tag eosio/eos-dev:latest gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker rmi eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos-dev" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index e22016c4de4..c90ab8a12e9 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,212 +1,323 @@ steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Build" - agents: - - "role=macos-builder" + - command: | # Amazon Linux 1 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":aws: Amazon Linux 1 Build" + agents: + queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" + propagate-environment: true + workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" + - command: | # Amazon Linux 2 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi + label: ":aws: Amazon Linux 2 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" + + - command: | # CentOS 7 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":centos: CentOS 7 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: Build" + - command: | # Fedora 27 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":centos: Fedora 27 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: Build" + + - command: | # Ubuntu 16.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi + label: ":ubuntu: Ubuntu 16.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: Build" + - command: | # Ubuntu 18.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 18.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 60 + - command: | # macOS High Sierra Build + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":darwin: High Sierra Build" + agents: + - "role=builder-v2-1" + - "os=high-sierra" + artifact_paths: "build.tar.gz" + timeout: 60 + + - command: | # macOS Mojave Build + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":darwin: Mojave Build" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: "build.tar.gz" + timeout: 60 + - wait - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":darwin: Tests" - agents: - - "role=macos-tester" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 100 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":ubuntu: Tests" + - command: | # Amazon Linux 1 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 1 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh + label: ":aws: Amazon Linux 1 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" + propagate-environment: true workdir: /data/job - timeout: 100 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":ubuntu: 18.04 Tests" + timeout: 90 + + - command: | # Amazon Linux 2 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh + label: ":aws: Amazon Linux 2 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job - timeout: 100 + timeout: 90 + skip: true # fundamental test framework issue here, see https://buildkite.com/EOSIO/eosio/builds/10690 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":fedora: Tests" + - command: | # centOS 7 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh + label: ":centos: CentOS 7 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job - timeout: 100 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":centos: Tests" + - command: | # Fedora 27 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: Fedora 27 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh + label: ":fedora: Fedora 27 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" + propagate-environment: true workdir: /data/job - timeout: 100 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":aws: Tests" + - command: | # Ubuntu 16.04 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh + label: ":ubuntu: Ubuntu 16.04 LR Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true + workdir: /data/job + timeout: 90 + + - command: | # Ubuntu 18.04 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 18.04 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh + label: ":ubuntu: Ubuntu 18.04 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job - timeout: 100 + timeout: 90 + + - command: | # High Sierra Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + echo "+++ :microscope: Running LR Tests" + ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh + label: ":darwin: High Sierra LR Tests" + agents: + - "role=tester-v2-1" + - "os=high-sierra" + timeout: 90 + + - command: | # Mojave Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running LR Tests" + ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh + label: ":darwin: Mojave LR Tests" + agents: + - "role=tester-v2-1" + - "os=mojave" + timeout: 90 \ No newline at end of file diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 6bfebd3e0c6..fff4c036c35 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,11 +1,11 @@ steps: - - - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 16.04 Build" + - command: | # Amazon Linux 1 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":aws: Amazon Linux 1 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -17,16 +17,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" + - command: | # Amazon Linux 2 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi + label: ":aws: Amazon Linux 2 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -38,16 +40,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: 7 Build" + - command: | # CentOS 7 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":centos: CentOS 7 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -59,16 +63,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" + - command: | # Fedora 27 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":fedora: Fedora 27 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -80,37 +86,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - # - command: | - # echo "+++ :hammer: Building" && \ - # ./scripts/eosio_build.sh -y && \ - # echo "--- :compression: Compressing build directory" && \ - # tar -pczf build.tar.gz build/ - # label: ":aws: 2 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" + - command: | # Ubuntu 16.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi + label: ":ubuntu: Ubuntu 16.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -122,31 +109,44 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Mojave Build" + - command: | # Ubuntu 18.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 18.04 Build" agents: - - "role=builder-v2-1" - - "os=mojave" + queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true + workdir: /data/job timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ + - command: | # macOS High Sierra Build + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi label: ":darwin: High Sierra Build" agents: - "role=builder-v2-1" @@ -154,23 +154,34 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 + - command: | # macOS Mojave Build + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":darwin: Mojave Build" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: "build.tar.gz" + timeout: 60 + - wait + # Amazon Linux 1 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure - label: ":ubuntu: 16.04 Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Amazon Linux 1 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":aws: Amazon Linux 1 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -179,25 +190,19 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure - label: ":ubuntu: 16.04 NP Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 1 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":aws: Amazon Linux 1 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -206,25 +211,20 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - + + # Amazon Linux 2 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure - label: ":ubuntu: 18.04 Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":aws: Amazon Linux 2 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -233,25 +233,19 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure - label: ":ubuntu: 18.04 NP Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":aws: Amazon Linux 2 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -260,26 +254,42 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 + skip: true # fundamental test framework issue here, see https://buildkite.com/EOSIO/eosio/builds/10690 + # centOS 7 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":centos: CentOS 7 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true + workdir: /data/job + timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure - label: ":centos: 7 Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":centos: CentOS 7 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -288,25 +298,20 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 60 + # Fedora 27 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure - label: ":centos: 7 NP Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Fedora 27 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":fedora: Fedora 27 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -315,25 +320,19 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure - label: ":aws: 1 Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Fedora 27 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":fedora: Fedora 27 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -342,25 +341,20 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" + propagate-environment: true workdir: /data/job timeout: 60 + # Ubuntu 16.04 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure - label: ":aws: 1 NP Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":ubuntu: Ubuntu 16.04 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -369,79 +363,41 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading build directory" && \ - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - # tar -zxf build.tar.gz && \ - # echo "--- :m: Starting MongoDB" && \ - # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure - # label: ":aws: 2 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 - - # - command: | - # echo "--- :arrow_down: Downloading build directory" && \ - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - # tar -zxf build.tar.gz && \ - # echo "--- :m: Starting MongoDB" && \ - # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure - # label: ":aws: 2 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":ubuntu: Ubuntu 16.04 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true + workdir: /data/job + timeout: 60 + # Ubuntu 18.04 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure - label: ":fedora: 27 Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":ubuntu: Ubuntu 18.04 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -450,25 +406,19 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure - label: ":fedora: 27 NP Tests" + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":ubuntu: Ubuntu 18.04 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -477,125 +427,82 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 60 + # High Sierra Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/parallel-test.sh label: ":darwin: High Sierra Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/serial-test.sh label: ":darwin: High Sierra NP Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 + # Mojave Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/parallel-test.sh label: ":darwin: Mojave Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/serial-test.sh label: ":darwin: Mojave NP Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: High Sierra Package Builder" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" timeout: 60 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 + - wait - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - cd /data/job/build/packages && bash generate_package.sh deb - label: ":ubuntu: 16.04 Package builder" + - command: | # CentOS 7 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS + cd /data/job/build/packages + bash generate_package.sh rpm + label: ":centos: CentOS 7 Package Builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/*.deb" + - "build/packages/*.rpm" plugins: ecr#v1.1.4: login: true @@ -604,24 +511,33 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job env: - OS: "ubuntu-16.04" - PKGTYPE: "deb" + OS: "el7" + PKGTYPE: "rpm" timeout: 60 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - cd /data/job/build/packages && bash generate_package.sh deb - label: ":ubuntu: 18.04 Package builder" + - command: | # Fedora 27 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Fedora 27 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS + cd /data/job/build/packages + bash generate_package.sh rpm + label: ":fedora: Fedora 27 Package Builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/*.deb" + - "build/packages/*.rpm" plugins: ecr#v1.1.4: login: true @@ -630,31 +546,25 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job env: - OS: "ubuntu-18.04" - PKGTYPE: "deb" + OS: "fc27" + PKGTYPE: "rpm" timeout: 60 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":fedora: 27 Package builder" + - command: | # Ubuntu 16.04 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 16.04 Package Builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/*.rpm" + - "build/packages/*.deb" plugins: ecr#v1.1.4: login: true @@ -663,31 +573,26 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job env: - OS: "fc27" - PKGTYPE: "rpm" + OS: "ubuntu-16.04" + PKGTYPE: "deb" timeout: 60 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":centos: 7 Package builder" + - command: | # Ubuntu 18.04 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 18.04 Package Builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/*.rpm" + - "build/packages/*.deb" plugins: ecr#v1.1.4: login: true @@ -696,19 +601,54 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job env: - OS: "el7" - PKGTYPE: "rpm" + OS: "ubuntu-18.04" + PKGTYPE: "deb" + timeout: 60 + + - command: | # macOS High Sierra Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job + cd /data/job/build/packages + bash generate_package.sh brew + label: ":darwin: High Sierra Package Builder" + agents: + - "role=builder-v2-1" + - "os=high-sierra" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" + timeout: 60 + + - command: | # macOS Mojave Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job + cd /data/job/build/packages + bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" timeout: 60 - wait - - command: | - echo "--- :arrow_down: Downloading brew files" && \ - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" && \ - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb && \ + - command: | # Brew Updater + echo "--- :arrow_down: Downloading brew files" + buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" + mv build/packages/eosio.rb build/packages/eosio_highsierra.rb buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" agents: @@ -716,4 +656,12 @@ steps: artifact_paths: - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" - timeout: 60 + timeout: 5 + + - command: | # Git Submodule Regression Check + echo "+++ :microscope: Running git submodule regression check" && \ + ./scripts/submodule_check.sh + label: "Git Submodule Regression Check" + agents: + queue: "automation-large-builder-fleet" + timeout: 5 \ No newline at end of file diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml deleted file mode 100644 index b8588135610..00000000000 --- a/.buildkite/sanitizers.yml +++ /dev/null @@ -1,131 +0,0 @@ -steps: - - command: | - echo "--- :hammer: Building with Undefined Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING=true\ - -DBUILD_DOXYGEN=false -DCMAKE_CXX_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" && \ - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Undefined Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - command: ["--privileged"] - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - UBSAN_OPTIONS=print_stacktrace=1 - timeout: 60 - - - command: | - echo "--- :hammer: Building with Address Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - 
-DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING=true \ - -DBUILD_DOXYGEN=false \ - -DCMAKE_CXX_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Address Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - command: ["--privileged"] - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - ASAN_OPTIONS=fast_unwind_on_malloc=0:halt_on_error=0:detect_odr_violation=0:detect_leaks=0:symbolize=1:verbosity=1 - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":_: Undefined Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Undefined Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":_: Address Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Address Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 \ No newline at end of file diff --git a/.pipelinebranch b/.pipelinebranch new file mode 100644 index 00000000000..c1688f268bc --- /dev/null +++ b/.pipelinebranch @@ -0,0 +1 @@ +legacy-os diff --git a/scripts/long-running-test.sh b/scripts/long-running-test.sh new file mode 100755 index 00000000000..30ec5faaa12 --- /dev/null +++ b/scripts/long-running-test.sh @@ -0,0 +1,36 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." 
+TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +set +e # defer ctest error handling to end +echo "$ ctest -L long_running_tests --output-on-failure -T Test" +ctest -L long_running_tests --output-on-failure -T Test +EXIT_STATUS=$? +[[ "$EXIT_STATUS" == 0 ]] && set -e +echo "Done running long-running tests." +# upload artifacts +echo "Uploading artifacts..." +XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." +# ctest error handling +if [[ "$EXIT_STATUS" != 0 ]]; then + echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" + echo ' ^^^ scroll up for more information ^^^' + exit $EXIT_STATUS +fi \ No newline at end of file diff --git a/scripts/parallel-test.sh b/scripts/parallel-test.sh new file mode 100755 index 00000000000..fd53ca55198 --- /dev/null +++ b/scripts/parallel-test.sh @@ -0,0 +1,38 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." +CPU_CORES=$(getconf _NPROCESSORS_ONLN) +echo "$CPU_CORES cpu cores detected." +TEST_COUNT=$(ctest -N -LE _tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +set +e # defer ctest error handling to end +echo "$ ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test" +ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test +EXIT_STATUS=$? +[[ "$EXIT_STATUS" == 0 ]] && set -e +echo "Done running parallelizable tests." +# upload artifacts +echo "Uploading artifacts..." +XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." +# ctest error handling +if [[ "$EXIT_STATUS" != 0 ]]; then + echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" + echo ' ^^^ scroll up for more information ^^^' + exit $EXIT_STATUS +fi \ No newline at end of file diff --git a/scripts/serial-test.sh b/scripts/serial-test.sh new file mode 100755 index 00000000000..1d36e081712 --- /dev/null +++ b/scripts/serial-test.sh @@ -0,0 +1,36 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." 
+TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +set +e # defer ctest error handling to end +echo "$ ctest -L nonparallelizable_tests --output-on-failure -T Test" +ctest -L nonparallelizable_tests --output-on-failure -T Test +EXIT_STATUS=$? +[[ "$EXIT_STATUS" == 0 ]] && set -e +echo "Done running non-parallelizable tests." +# upload artifacts +echo "Uploading artifacts..." +XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." +# ctest error handling +if [[ "$EXIT_STATUS" != 0 ]]; then + echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" + echo ' ^^^ scroll up for more information ^^^' + exit $EXIT_STATUS +fi \ No newline at end of file diff --git a/scripts/submodule_check.sh b/scripts/submodule_check.sh new file mode 100755 index 00000000000..b9ec13204fa --- /dev/null +++ b/scripts/submodule_check.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +REPO_DIR=`mktemp -d` +git clone "$BUILDKITE_REPO" "$REPO_DIR" +git submodule update --init --recursive +cd "$REPO_DIR" + +declare -A PR_MAP +declare -A BASE_MAP + +echo "getting submodule info for $BUILDKITE_BRANCH" +git checkout "$BUILDKITE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + PR_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +echo "getting submodule info for $BUILDKITE_PULL_REQUEST_BASE_BRANCH" +git checkout "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + BASE_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +for k in "${!BASE_MAP[@]}"; do + base_ts=${BASE_MAP[$k]} + pr_ts=${PR_MAP[$k]} + echo "submodule $k" + echo " timestamp on $BUILDKITE_BRANCH: $pr_ts" + echo " timestamp on $BUILDKITE_PULL_REQUEST_BASE_BRANCH: $base_ts" + if (( $pr_ts < $base_ts)); then + echo "$k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH; investigating..." 
+ + if for c in `git log $BUILDKITE_BRANCH ^$BUILDKITE_PULL_REQUEST_BASE_BRANCH --pretty=format:"%H"`; do git show --pretty="" --name-only $c; done | grep -q "^$k$"; then + echo "ERROR: $k has regressed" + exit 1 + else + echo "$k was not in the diff; no regression detected" + fi + fi +done From 25fe2282c8f040e05e151d17e1c0033197c4358a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 17 Apr 2019 19:54:25 -0500 Subject: [PATCH 0062/2426] Bump to 1.7.2 --- CMakeLists.txt | 2 +- Docker/README.md | 4 ++-- README.md | 16 ++++++++-------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 45275a7de8d..820e3b7796a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 7) -set(VERSION_PATCH 1) +set(VERSION_PATCH 2) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index 0fdb4c48626..c70264c02a7 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.1 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.2 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.7.1 --build-arg branch=v1.7.1 . +docker build -t eosio/eos:v1.7.2 --build-arg branch=v1.7.2 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. 
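For illustration, a hedged sketch combining the two build arguments described above. The `symbol` argument name matches the `ARG symbol=SYS` declared in Docker/Dockerfile (visible in patch 0068 later in this series); the EOS value is an assumption for the example, not taken from the README:

```bash
# Sketch: build an image from the v1.7.2 tag while overriding the
# eosio.system symbol from its default of SYS (uses the Dockerfile's
# `branch` and `symbol` ARGs shown elsewhere in this patch series).
docker build -t eosio/eos:v1.7.2 --build-arg branch=v1.7.2 --build-arg symbol=EOS .
```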
diff --git a/README.md b/README.md index ac5e2c069dd..ea19b14d9d9 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio_1.7.1-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.1-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio_1.7.2-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.7.2-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio_1.7.1-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.1-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio_1.7.2-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.7.2-1-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio-1.7.1-1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.1-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio-1.7.2-1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.7.2-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.1/eosio-1.7.1-1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.1-1.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio-1.7.2-1.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.7.2-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From 6868ad071c0c8dad9ce5cf785437e9baf0df08a4 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 18 Apr 2019 17:24:09 -0400 Subject: [PATCH 0063/2426] Remove long-running tests --- .buildkite/long_running_tests.yml | 323 ------------------------------ scripts/long-running-test.sh | 36 ---- 2 files changed, 359 deletions(-) delete mode 100644 .buildkite/long_running_tests.yml delete mode 100755 scripts/long-running-test.sh diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml deleted file mode 100644 index c90ab8a12e9..00000000000 --- a/.buildkite/long_running_tests.yml +++ /dev/null @@ -1,323 +0,0 @@ -steps: - - command: | # Amazon Linux 1 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":aws: Amazon Linux 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # Amazon Linux 2 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - label: ":aws: Amazon Linux 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # CentOS 7 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":centos: CentOS 7 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # Fedora 27 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":centos: Fedora 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # Ubuntu 16.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":ubuntu: Ubuntu 16.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # Ubuntu 18.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":ubuntu: Ubuntu 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # macOS High Sierra Build - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 - ln -s "$(pwd)" /data/job - cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | # macOS Mojave Build - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 - ln -s "$(pwd)" /data/job - cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":darwin: Mojave Build" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 60 - - - wait - - - command: | # Amazon Linux 1 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 1 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: Amazon Linux 1 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # Amazon Linux 2 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: Amazon Linux 2 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - skip: true # fundamental test framework issue here, see https://buildkite.com/EOSIO/eosio/builds/10690 - - - command: | # centOS 7 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":centos: CentOS 7 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # Fedora 27 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Fedora 27 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":fedora: Fedora 27 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # Ubuntu 16.04 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":ubuntu: Ubuntu 16.04 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # Ubuntu 18.04 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":ubuntu: Ubuntu 18.04 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # High Sierra Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running LR Tests" - ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh - label: ":darwin: High Sierra LR Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 90 - - - command: | # Mojave Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - echo "+++ :microscope: Running LR Tests" - ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh - label: ":darwin: Mojave LR Tests" - agents: - - "role=tester-v2-1" - - "os=mojave" - timeout: 90 \ No newline at end of file diff --git a/scripts/long-running-test.sh b/scripts/long-running-test.sh deleted file mode 100755 index 30ec5faaa12..00000000000 --- a/scripts/long-running-test.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) -# prepare environment -PATH=$PATH:~/opt/mongodb/bin -echo "Extracting build directory..." -tar -zxf build.tar.gz -echo "Starting MongoDB..." -~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log -cd /data/job/build -# run tests -echo "Running tests..." -TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') -[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) -set +e # defer ctest error handling to end -echo "$ ctest -L long_running_tests --output-on-failure -T Test" -ctest -L long_running_tests --output-on-failure -T Test -EXIT_STATUS=$? -[[ "$EXIT_STATUS" == 0 ]] && set -e -echo "Done running long-running tests." -# upload artifacts -echo "Uploading artifacts..." -XML_FILENAME="test-results.xml" -mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME -buildkite-agent artifact upload config.ini -buildkite-agent artifact upload genesis.json -cd .. -buildkite-agent artifact upload mongod.log -cd build -buildkite-agent artifact upload $XML_FILENAME -echo "Done uploading artifacts." 
-# ctest error handling -if [[ "$EXIT_STATUS" != 0 ]]; then - echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" - echo ' ^^^ scroll up for more information ^^^' - exit $EXIT_STATUS -fi \ No newline at end of file From 63ef287b7f3e2bc4f4139789ca9b8c880ea626cd Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 19 Apr 2019 12:20:30 -0500 Subject: [PATCH 0064/2426] Reduce logging of complete object when unable to serialize --- libraries/chain/include/eosio/chain/abi_serializer.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 398f219ced8..cf997e9cd35 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -688,7 +688,7 @@ void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, con impl::abi_traverse_context ctx(max_serialization_time); impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); -} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("object",o)) +} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("type", typeid(o).name() )) template void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try { From be8a219b58820c441716d55d5e5226b6354e1c5c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 19 Apr 2019 13:15:32 -0500 Subject: [PATCH 0065/2426] Add demangle of type --- libraries/chain/include/eosio/chain/abi_serializer.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index cf997e9cd35..3fd6aef137d 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -688,7 +688,7 @@ void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, con impl::abi_traverse_context ctx(max_serialization_time); impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); -} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("type", typeid(o).name() )) +} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) )) template void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try { From 2e6599d539a57203afb3ed0a0678d733c0c9bcb6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 23 Apr 2019 12:51:38 -0500 Subject: [PATCH 0066/2426] Keep block log open to minimize open/close of file --- libraries/chain/block_log.cpp | 115 ++++++++++++++-------------------- 1 file changed, 46 insertions(+), 69 deletions(-) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index aa1f65cc1cd..e90019f0975 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -9,6 +9,7 @@ #define LOG_READ (std::ios::in | std::ios::binary) #define LOG_WRITE (std::ios::out | std::ios::binary | std::ios::app) +#define LOG_RW ( std::ios::in | std::ios::out | std::ios::binary ) namespace eosio { namespace chain { @@ -31,47 +32,42 @@ namespace eosio { namespace chain { std::fstream index_stream; fc::path block_file; fc::path index_file; - bool block_write; - bool index_write; + bool open_files = false; bool genesis_written_to_block_log = false; uint32_t 
version = 0; uint32_t first_block_num = 0; - inline void check_block_read() { - if (block_write) { - block_stream.close(); - block_stream.open(block_file.generic_string().c_str(), LOG_READ); - block_write = false; + inline void check_open_files() { + if( !open_files ) { + reopen(); } } + void reopen(); - inline void check_block_write() { - if (!block_write) { + void close() { + if( block_stream.is_open() ) block_stream.close(); - block_stream.open(block_file.generic_string().c_str(), LOG_WRITE); - block_write = true; - } - } - - inline void check_index_read() { - try { - if (index_write) { - index_stream.close(); - index_stream.open(index_file.generic_string().c_str(), LOG_READ); - index_write = false; - } - } - FC_LOG_AND_RETHROW() - } - - inline void check_index_write() { - if (!index_write) { + if( index_stream.is_open() ) index_stream.close(); - index_stream.open(index_file.generic_string().c_str(), LOG_WRITE); - index_write = true; - } + open_files = false; } }; + + void block_log_impl::reopen() { + close(); + + // open to create files if they don't exist + //ilog("Opening block log at ${path}", ("path", my->block_file.generic_string())); + block_stream.open(block_file.generic_string().c_str(), LOG_WRITE); + index_stream.open(index_file.generic_string().c_str(), LOG_WRITE); + + close(); + + block_stream.open(block_file.generic_string().c_str(), LOG_RW); + index_stream.open(index_file.generic_string().c_str(), LOG_RW); + + open_files = true; + } } block_log::block_log(const fc::path& data_dir) @@ -88,26 +84,21 @@ namespace eosio { namespace chain { block_log::~block_log() { if (my) { flush(); + my->close(); my.reset(); } } void block_log::open(const fc::path& data_dir) { - if (my->block_stream.is_open()) - my->block_stream.close(); - if (my->index_stream.is_open()) - my->index_stream.close(); + my->close(); if (!fc::is_directory(data_dir)) fc::create_directories(data_dir); + my->block_file = data_dir / "blocks.log"; my->index_file = data_dir / "blocks.index"; - //ilog("Opening block log at ${path}", ("path", my->block_file.generic_string())); - my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->block_write = true; - my->index_write = true; + my->reopen(); /* On startup of the block log, there are several states the log file and the index file can be * in relation to each other. 
@@ -132,7 +123,6 @@ namespace eosio { namespace chain { if (log_size) { ilog("Log is nonempty"); - my->check_block_read(); my->block_stream.seekg( 0 ); my->version = 0; my->block_stream.read( (char*)&my->version, sizeof(my->version) ); @@ -155,9 +145,6 @@ namespace eosio { namespace chain { my->head_id = my->head->id(); if (index_size) { - my->check_block_read(); - my->check_index_read(); - ilog("Index is nonempty"); uint64_t block_pos; my->block_stream.seekg(-sizeof(uint64_t), std::ios::end); @@ -180,10 +167,9 @@ namespace eosio { namespace chain { } } else if (index_size) { ilog("Index is nonempty, remove and recreate it"); - my->index_stream.close(); + my->close(); fc::remove_all(my->index_file); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->index_write = true; + my->reopen(); } } @@ -191,9 +177,10 @@ namespace eosio { namespace chain { try { EOS_ASSERT( my->genesis_written_to_block_log, block_log_append_fail, "Cannot append to block log until the genesis is first written" ); - my->check_block_write(); - my->check_index_write(); + my->check_open_files(); + my->block_stream.seekp(0, std::ios::end); + my->index_stream.seekp(0, std::ios::end); uint64_t pos = my->block_stream.tellp(); EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - my->first_block_num), block_log_append_fail, @@ -220,22 +207,17 @@ namespace eosio { namespace chain { } void block_log::reset( const genesis_state& gs, const signed_block_ptr& first_block, uint32_t first_block_num ) { - if (my->block_stream.is_open()) - my->block_stream.close(); - if (my->index_stream.is_open()) - my->index_stream.close(); + my->close(); fc::remove_all(my->block_file); fc::remove_all(my->index_file); - my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->block_write = true; - my->index_write = true; + my->reopen(); auto data = fc::raw::pack(gs); my->version = 0; // version of 0 is invalid; it indicates that the genesis was not properly written to the block log my->first_block_num = first_block_num; + my->block_stream.seekp(0, std::ios::end); my->block_stream.write((char*)&my->version, sizeof(my->version)); my->block_stream.write((char*)&my->first_block_num, sizeof(my->first_block_num)); my->block_stream.write(data.data(), data.size()); @@ -251,22 +233,16 @@ namespace eosio { namespace chain { auto pos = my->block_stream.tellp(); - my->block_stream.close(); - my->block_stream.open(my->block_file.generic_string().c_str(), std::ios::in | std::ios::out | std::ios::binary ); // Bypass append-only writing just once - static_assert( block_log::max_supported_version > 0, "a version number of zero is not supported" ); my->version = block_log::max_supported_version; my->block_stream.seekp( 0 ); my->block_stream.write( (char*)&my->version, sizeof(my->version) ); my->block_stream.seekp( pos ); flush(); - - my->block_write = false; - my->check_block_write(); // Reset to append-only writing. 
} std::pair block_log::read_block(uint64_t pos)const { - my->check_block_read(); + my->check_open_files(); my->block_stream.seekg(pos); std::pair result; @@ -290,7 +266,7 @@ namespace eosio { namespace chain { } uint64_t block_log::get_block_pos(uint32_t block_num) const { - my->check_index_read(); + my->check_open_files(); if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num >= my->first_block_num)) return npos; my->index_stream.seekg(sizeof(uint64_t) * (block_num - my->first_block_num)); @@ -300,7 +276,7 @@ namespace eosio { namespace chain { } signed_block_ptr block_log::read_head()const { - my->check_block_read(); + my->check_open_files(); uint64_t pos; @@ -328,13 +304,13 @@ namespace eosio { namespace chain { void block_log::construct_index() { ilog("Reconstructing Block Log Index..."); - my->index_stream.close(); + my->close(); + fc::remove_all(my->index_file); - my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); - my->index_write = true; + + my->reopen(); uint64_t end_pos; - my->check_block_read(); my->block_stream.seekg(-sizeof( uint64_t), std::ios::end); my->block_stream.read((char*)&end_pos, sizeof(end_pos)); @@ -357,6 +333,7 @@ namespace eosio { namespace chain { my->block_stream.read((char*) &totem, sizeof(totem)); } + my->index_stream.seekp(0, std::ios::end); while( pos < end_pos ) { fc::raw::unpack(my->block_stream, tmp); my->block_stream.read((char*)&pos, sizeof(pos)); From ec33e3f2fe905c878d91b3b2740be668812c604c Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 23 Apr 2019 16:50:42 -0400 Subject: [PATCH 0067/2426] allow opening block log with no blocks (fixes undefined behavior bug); contruct_index should leave index file empty if block log contains no blocks --- libraries/chain/block_log.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index e90019f0975..0f7169b32b8 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -142,7 +142,11 @@ namespace eosio { namespace chain { } my->head = read_head(); - my->head_id = my->head->id(); + if( my->head ) { + my->head_id = my->head->id(); + } else { + my->head_id = {}; + } if (index_size) { ilog("Index is nonempty"); @@ -314,6 +318,12 @@ namespace eosio { namespace chain { my->block_stream.seekg(-sizeof( uint64_t), std::ios::end); my->block_stream.read((char*)&end_pos, sizeof(end_pos)); + + if( end_pos == npos ) { + ilog( "Block log contains no blocks. No need to construct index." ); + return; + } + signed_block tmp; uint64_t pos = 0; From f5a8abbd2f6d260a39513da9013e2ad31cd08ee0 Mon Sep 17 00:00:00 2001 From: lcgogo Date: Wed, 17 Apr 2019 16:05:44 +0800 Subject: [PATCH 0068/2426] Add some missing libs for build RUN apt-get update -y && apt-get install -y libcurl4-openssl-dev libusb-1.0-0-dev --- Docker/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Docker/Dockerfile b/Docker/Dockerfile index 6cce1a12bf4..a763f59c398 100644 --- a/Docker/Dockerfile +++ b/Docker/Dockerfile @@ -2,6 +2,7 @@ FROM eosio/builder as builder ARG branch=master ARG symbol=SYS +RUN apt-get update -y && apt-get install -y libcurl4-openssl-dev libusb-1.0-0-dev RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ && cmake -H. 
-B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ From a5c025867e65265fd1a42d5849c4d2f9847d5b18 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 25 Apr 2019 08:04:36 -0500 Subject: [PATCH 0069/2426] Bump to 1.7.3 --- CMakeLists.txt | 2 +- Docker/README.md | 4 ++-- README.md | 16 ++++++++-------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 820e3b7796a..89b3d750699 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 7) -set(VERSION_PATCH 2) +set(VERSION_PATCH 3) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/Docker/README.md b/Docker/README.md index c70264c02a7..d887ef42b8a 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.2 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.3 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.7.2 --build-arg branch=v1.7.2 . +docker build -t eosio/eos:v1.7.3 --build-arg branch=v1.7.3 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. 
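As background for the abi_serializer changes in patches 0064 and 0065 above (logging a demangled type name instead of serializing the entire object), here is a small standalone sketch of what `boost::core::demangle` does; the `std::vector<int>` example is illustrative only:

```cpp
#include <boost/core/demangle.hpp>
#include <iostream>
#include <typeinfo>
#include <vector>

int main() {
   std::vector<int> v;
   // On GCC/Clang, typeid(...).name() returns a mangled name such as "St6vectorIiSaIiEE";
   // boost::core::demangle converts it back into a human-readable type name.
   std::cout << typeid(v).name() << '\n';
   std::cout << boost::core::demangle( typeid(v).name() ) << '\n';
}
```

This keeps the exception message informative (the failing type is still named) without dumping a potentially large object into the log, which is the stated intent of patch 0064.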
diff --git a/README.md b/README.md index ea19b14d9d9..fecf8e0da96 100644 --- a/README.md +++ b/README.md @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio_1.7.2-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.2-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.3/eosio_1.7.3-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.7.3-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio_1.7.2-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.2-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.7.3/eosio_1.7.3-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.7.3-1-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio-1.7.2-1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.2-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.3/eosio-1.7.3-1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.7.3-1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.7.2/eosio-1.7.2-1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.2-1.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.7.3/eosio-1.7.3-1.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.7.3-1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh From 98326a909c69948e17776442046fc6990eb3f77b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 26 Apr 2019 15:33:40 -0500 Subject: [PATCH 0070/2426] Update to fc with gcc7 fix --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 063353354d0..f49422f4bbf 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 063353354d04b631541083ba65fbe2667ef4f097 +Subproject commit f49422f4bbfafbe4ee7a0a661ed6537b995167e5 From 3c7076ee0fb13569be861b7a2f4f66e1a1f287da Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 29 Apr 2019 10:28:35 -0500 Subject: [PATCH 0071/2426] Add additional platforms for pgrep so unpinned build passes tests --- tests/testUtils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testUtils.py b/tests/testUtils.py index dc09eb34ae8..a7c3730251d 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -221,7 +221,7 @@ def arePortsAvailable(ports): def pgrepCmd(serverName): pgrepOpts="-fl" # pylint: disable=deprecated-method - if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: + if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora", "CentOS Linux", "arch", "Amazon Linux AMI", "Amazon Linux"]: pgrepOpts="-a" return "pgrep %s %s" % (pgrepOpts, serverName) From 740b94f17b974d5c92d8e9f685113c66c222f14c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 29 Apr 2019 11:06:51 -0500 Subject: [PATCH 0072/2426] Determine flags for pgrep dynamicly instead of by list of hosts --- tests/testUtils.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/testUtils.py b/tests/testUtils.py index a7c3730251d..facba9100c4 100755 --- a/tests/testUtils.py +++ 
b/tests/testUtils.py @@ -219,12 +219,17 @@ def arePortsAvailable(ports): @staticmethod def pgrepCmd(serverName): - pgrepOpts="-fl" # pylint: disable=deprecated-method - if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora", "CentOS Linux", "arch", "Amazon Linux AMI", "Amazon Linux"]: + # pgrep differs on different platform (amazonlinux1 and 2 for example). We need to check if pgrep -h has -a available and add that if so: + try: + pgrepHelp = re.search('-a', subprocess.Popen("pgrep --help 2>/dev/null", shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')) + pgrepHelp.group(0) # group() errors if -a is not found, so we don't need to do anything else special here. + pgrepOpts="-a" + except AttributeError as error: + # If no -a, AttributeError: 'NoneType' object has no attribute 'group' + pgrepOpts="-fl" - return "pgrep %s %s" % (pgrepOpts, serverName) + return "pgrep %s %s" % (pgrepOpts, serverName)\ @staticmethod def getBlockLog(blockLogLocation, silentErrors=False, exitOnError=False): From fc817a197d5f321ea57b8aa4bd48973246a2bc9d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 29 Apr 2019 13:28:05 -0500 Subject: [PATCH 0073/2426] Add missing import or re --- tests/testUtils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/testUtils.py b/tests/testUtils.py index facba9100c4..5964fac068c 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,3 +1,4 @@ +import re import errno import subprocess import time From d2db42e31d365f0f39da4d23d5fc3156ad89b0e8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 1 May 2019 11:59:48 -0500 Subject: [PATCH 0074/2426] Change default log level from debug to info. --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index f49422f4bbf..f0ca2761421 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit f49422f4bbfafbe4ee7a0a661ed6537b995167e5 +Subproject commit f0ca276142159206e0dcdff096bd0a1548114a7a From c4c431ab252a05f97c8eb124faf32ecbf13012e6 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 2 May 2019 21:41:03 -0400 Subject: [PATCH 0075/2426] Created test metrics Buildkite job --- .buildkite/pipeline.yml | 11 + .../node_modules/node-fetch/CHANGELOG.md | 260 +++ .../node_modules/node-fetch/LICENSE.md | 22 + .../metrics/node_modules/node-fetch/README.md | 538 ++++++ .../node_modules/node-fetch/browser.js | 23 + .../node_modules/node-fetch/lib/index.es.js | 1631 ++++++++++++++++ .../node_modules/node-fetch/lib/index.js | 1640 +++++++++++++++++ .../node_modules/node-fetch/lib/index.mjs | 1629 ++++++++++++++++ .../node_modules/node-fetch/package.json | 94 + scripts/metrics/node_modules/sax/LICENSE | 41 + scripts/metrics/node_modules/sax/README.md | 225 +++ scripts/metrics/node_modules/sax/lib/sax.js | 1565 ++++++++++++++++ scripts/metrics/node_modules/sax/package.json | 61 + scripts/metrics/node_modules/xml2js/LICENSE | 19 + scripts/metrics/node_modules/xml2js/README.md | 406 ++++ .../metrics/node_modules/xml2js/lib/bom.js | 12 + .../node_modules/xml2js/lib/builder.js | 127 ++ .../node_modules/xml2js/lib/defaults.js | 72 + .../metrics/node_modules/xml2js/lib/parser.js | 357 ++++ .../node_modules/xml2js/lib/processors.js | 34 + .../metrics/node_modules/xml2js/lib/xml2js.js | 37 + .../metrics/node_modules/xml2js/package.json | 280 +++ .../node_modules/xmlbuilder/.npmignore | 5 + .../node_modules/xmlbuilder/CHANGELOG.md | 423 +++++ .../metrics/node_modules/xmlbuilder/LICENSE | 21 + 
.../metrics/node_modules/xmlbuilder/README.md | 85 + .../node_modules/xmlbuilder/lib/Utility.js | 73 + .../xmlbuilder/lib/XMLAttribute.js | 31 + .../node_modules/xmlbuilder/lib/XMLCData.js | 32 + .../node_modules/xmlbuilder/lib/XMLComment.js | 32 + .../xmlbuilder/lib/XMLDTDAttList.js | 50 + .../xmlbuilder/lib/XMLDTDElement.js | 35 + .../xmlbuilder/lib/XMLDTDEntity.js | 56 + .../xmlbuilder/lib/XMLDTDNotation.js | 37 + .../xmlbuilder/lib/XMLDeclaration.js | 40 + .../node_modules/xmlbuilder/lib/XMLDocType.js | 107 ++ .../xmlbuilder/lib/XMLDocument.js | 48 + .../xmlbuilder/lib/XMLDocumentCB.js | 402 ++++ .../node_modules/xmlbuilder/lib/XMLElement.js | 111 ++ .../node_modules/xmlbuilder/lib/XMLNode.js | 432 +++++ .../lib/XMLProcessingInstruction.js | 35 + .../node_modules/xmlbuilder/lib/XMLRaw.js | 32 + .../xmlbuilder/lib/XMLStreamWriter.js | 279 +++ .../xmlbuilder/lib/XMLStringWriter.js | 334 ++++ .../xmlbuilder/lib/XMLStringifier.js | 163 ++ .../node_modules/xmlbuilder/lib/XMLText.js | 32 + .../xmlbuilder/lib/XMLWriterBase.js | 90 + .../node_modules/xmlbuilder/lib/index.js | 53 + .../node_modules/xmlbuilder/package.json | 65 + scripts/metrics/package-lock.json | 30 + scripts/metrics/test-metrics.js | 415 +++++ scripts/metrics/test-metrics.json | 1 + 52 files changed, 12633 insertions(+) create mode 100644 scripts/metrics/node_modules/node-fetch/CHANGELOG.md create mode 100644 scripts/metrics/node_modules/node-fetch/LICENSE.md create mode 100644 scripts/metrics/node_modules/node-fetch/README.md create mode 100644 scripts/metrics/node_modules/node-fetch/browser.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.es.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.mjs create mode 100644 scripts/metrics/node_modules/node-fetch/package.json create mode 100644 scripts/metrics/node_modules/sax/LICENSE create mode 100644 scripts/metrics/node_modules/sax/README.md create mode 100644 scripts/metrics/node_modules/sax/lib/sax.js create mode 100644 scripts/metrics/node_modules/sax/package.json create mode 100644 scripts/metrics/node_modules/xml2js/LICENSE create mode 100644 scripts/metrics/node_modules/xml2js/README.md create mode 100644 scripts/metrics/node_modules/xml2js/lib/bom.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/builder.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/defaults.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/parser.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/processors.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/xml2js.js create mode 100644 scripts/metrics/node_modules/xml2js/package.json create mode 100644 scripts/metrics/node_modules/xmlbuilder/.npmignore create mode 100644 scripts/metrics/node_modules/xmlbuilder/CHANGELOG.md create mode 100644 scripts/metrics/node_modules/xmlbuilder/LICENSE create mode 100644 scripts/metrics/node_modules/xmlbuilder/README.md create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/Utility.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLAttribute.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLCData.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLComment.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDAttList.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDElement.js create mode 100644 
scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDEntity.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDNotation.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDeclaration.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocType.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocument.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocumentCB.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLElement.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLNode.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLProcessingInstruction.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLRaw.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStreamWriter.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStringWriter.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStringifier.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLText.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLWriterBase.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/index.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/package.json create mode 100644 scripts/metrics/package-lock.json create mode 100755 scripts/metrics/test-metrics.js create mode 100644 scripts/metrics/test-metrics.json diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index fff4c036c35..8aac7cfdb51 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -482,6 +482,17 @@ steps: - "os=mojave" timeout: 60 + - wait: + continue_on_failure: true + + - command: | + cd scripts/metrics + node --max-old-space-size=4096 test-metrics.js + label: ":bar_chart: Test Metrics" + agents: + queue: "automation-apps-builder-fleet" + timeout: 10 + - wait - command: | # CentOS 7 Package Builder diff --git a/scripts/metrics/node_modules/node-fetch/CHANGELOG.md b/scripts/metrics/node_modules/node-fetch/CHANGELOG.md new file mode 100644 index 00000000000..941b6a8d8b7 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/CHANGELOG.md @@ -0,0 +1,260 @@ + +Changelog +========= + + +# 2.x release + +## v2.5.0 + +- Enhance: `Response` object now includes `redirected` property. +- Enhance: `fetch()` now accepts third-party `Blob` implementation as body. +- Other: disable `package-lock.json` generation as we never commit them. +- Other: dev dependency update. +- Other: readme update. + +## v2.4.1 + +- Fix: `Blob` import rule for node < 10, as `Readable` isn't a named export. + +## v2.4.0 + +- Enhance: added `Brotli` compression support (using node's zlib). +- Enhance: updated `Blob` implementation per spec. +- Fix: set content type automatically for `URLSearchParams`. +- Fix: `Headers` now reject empty header names. +- Fix: test cases, as node 12+ no longer accepts invalid header response. + +## v2.3.0 + +- Enhance: added `AbortSignal` support, with README example. +- Enhance: handle invalid `Location` header during redirect by rejecting them explicitly with `FetchError`. +- Fix: update `browser.js` to support react-native environment, where `self` isn't available globally. + +## v2.2.1 + +- Fix: `compress` flag shouldn't overwrite existing `Accept-Encoding` header. +- Fix: multiple `import` rules, where `PassThrough` etc. doesn't have a named export when using node <10 and `--exerimental-modules` flag. +- Other: Better README. 
+ +## v2.2.0 + +- Enhance: Support all `ArrayBuffer` view types +- Enhance: Support Web Workers +- Enhance: Support Node.js' `--experimental-modules` mode; deprecate `.es.js` file +- Fix: Add `__esModule` property to the exports object +- Other: Better example in README for writing response to a file +- Other: More tests for Agent + +## v2.1.2 + +- Fix: allow `Body` methods to work on `ArrayBuffer`-backed `Body` objects +- Fix: reject promise returned by `Body` methods when the accumulated `Buffer` exceeds the maximum size +- Fix: support custom `Host` headers with any casing +- Fix: support importing `fetch()` from TypeScript in `browser.js` +- Fix: handle the redirect response body properly + +## v2.1.1 + +Fix packaging errors in v2.1.0. + +## v2.1.0 + +- Enhance: allow using ArrayBuffer as the `body` of a `fetch()` or `Request` +- Fix: store HTTP headers of a `Headers` object internally with the given case, for compatibility with older servers that incorrectly treated header names in a case-sensitive manner +- Fix: silently ignore invalid HTTP headers +- Fix: handle HTTP redirect responses without a `Location` header just like non-redirect responses +- Fix: include bodies when following a redirection when appropriate + +## v2.0.0 + +This is a major release. Check [our upgrade guide](https://github.com/bitinn/node-fetch/blob/master/UPGRADE-GUIDE.md) for an overview on some key differences between v1 and v2. + +### General changes + +- Major: Node.js 0.10.x and 0.12.x support is dropped +- Major: `require('node-fetch/lib/response')` etc. is now unsupported; use `require('node-fetch').Response` or ES6 module imports +- Enhance: start testing on Node.js v4.x, v6.x, v8.x LTS, as well as v9.x stable +- Enhance: use Rollup to produce a distributed bundle (less memory overhead and faster startup) +- Enhance: make `Object.prototype.toString()` on Headers, Requests, and Responses return correct class strings +- Other: rewrite in ES2015 using Babel +- Other: use Codecov for code coverage tracking +- Other: update package.json script for npm 5 +- Other: `encoding` module is now optional (alpha.7) +- Other: expose browser.js through package.json, avoid bundling mishaps (alpha.9) +- Other: allow TypeScript to `import` node-fetch by exposing default (alpha.9) + +### HTTP requests + +- Major: overwrite user's `Content-Length` if we can be sure our information is correct (per spec) +- Fix: errors in a response are caught before the body is accessed +- Fix: support WHATWG URL objects, created by `whatwg-url` package or `require('url').URL` in Node.js 7+ + +### Response and Request classes + +- Major: `response.text()` no longer attempts to detect encoding, instead always opting for UTF-8 (per spec); use `response.textConverted()` for the v1 behavior +- Major: make `response.json()` throw error instead of returning an empty object on 204 no-content respose (per spec; reverts behavior changed in v1.6.2) +- Major: internal methods are no longer exposed +- Major: throw error when a `GET` or `HEAD` Request is constructed with a non-null body (per spec) +- Enhance: add `response.arrayBuffer()` (also applies to Requests) +- Enhance: add experimental `response.blob()` (also applies to Requests) +- Enhance: `URLSearchParams` is now accepted as a body +- Enhance: wrap `response.json()` json parsing error as `FetchError` +- Fix: fix Request and Response with `null` body + +### Headers class + +- Major: remove `headers.getAll()`; make `get()` return all headers delimited by commas (per spec) +- Enhance: make Headers 
iterable +- Enhance: make Headers constructor accept an array of tuples +- Enhance: make sure header names and values are valid in HTTP +- Fix: coerce Headers prototype function parameters to strings, where applicable + +### Documentation + +- Enhance: more comprehensive API docs +- Enhance: add a list of default headers in README + + +# 1.x release + +## backport releases (v1.7.0 and beyond) + +See [changelog on 1.x branch](https://github.com/bitinn/node-fetch/blob/1.x/CHANGELOG.md) for details. + +## v1.6.3 + +- Enhance: error handling document to explain `FetchError` design +- Fix: support `form-data` 2.x releases (requires `form-data` >= 2.1.0) + +## v1.6.2 + +- Enhance: minor document update +- Fix: response.json() returns empty object on 204 no-content response instead of throwing a syntax error + +## v1.6.1 + +- Fix: if `res.body` is a non-stream non-formdata object, we will call `body.toString` and send it as a string +- Fix: `counter` value is incorrectly set to `follow` value when wrapping Request instance +- Fix: documentation update + +## v1.6.0 + +- Enhance: added `res.buffer()` api for convenience, it returns body as a Node.js buffer +- Enhance: better old server support by handling raw deflate response +- Enhance: skip encoding detection for non-HTML/XML response +- Enhance: minor document update +- Fix: HEAD request doesn't need decompression, as body is empty +- Fix: `req.body` now accepts a Node.js buffer + +## v1.5.3 + +- Fix: handle 204 and 304 responses when body is empty but content-encoding is gzip/deflate +- Fix: allow resolving response and cloned response in any order +- Fix: avoid setting `content-length` when `form-data` body use streams +- Fix: send DELETE request with content-length when body is present +- Fix: allow any url when calling new Request, but still reject non-http(s) url in fetch + +## v1.5.2 + +- Fix: allow node.js core to handle keep-alive connection pool when passing a custom agent + +## v1.5.1 + +- Fix: redirect mode `manual` should work even when there is no redirection or broken redirection + +## v1.5.0 + +- Enhance: rejected promise now use custom `Error` (thx to @pekeler) +- Enhance: `FetchError` contains `err.type` and `err.code`, allows for better error handling (thx to @pekeler) +- Enhance: basic support for redirect mode `manual` and `error`, allows for location header extraction (thx to @jimmywarting for the initial PR) + +## v1.4.1 + +- Fix: wrapping Request instance with FormData body again should preserve the body as-is + +## v1.4.0 + +- Enhance: Request and Response now have `clone` method (thx to @kirill-konshin for the initial PR) +- Enhance: Request and Response now have proper string and buffer body support (thx to @kirill-konshin) +- Enhance: Body constructor has been refactored out (thx to @kirill-konshin) +- Enhance: Headers now has `forEach` method (thx to @tricoder42) +- Enhance: back to 100% code coverage +- Fix: better form-data support (thx to @item4) +- Fix: better character encoding detection under chunked encoding (thx to @dsuket for the initial PR) + +## v1.3.3 + +- Fix: make sure `Content-Length` header is set when body is string for POST/PUT/PATCH requests +- Fix: handle body stream error, for cases such as incorrect `Content-Encoding` header +- Fix: when following certain redirects, use `GET` on subsequent request per Fetch Spec +- Fix: `Request` and `Response` constructors now parse headers input using `Headers` + +## v1.3.2 + +- Enhance: allow auto detect of form-data input (no `FormData` spec on node.js, this 
is form-data specific feature) + +## v1.3.1 + +- Enhance: allow custom host header to be set (server-side only feature, as it's a forbidden header on client-side) + +## v1.3.0 + +- Enhance: now `fetch.Request` is exposed as well + +## v1.2.1 + +- Enhance: `Headers` now normalized `Number` value to `String`, prevent common mistakes + +## v1.2.0 + +- Enhance: now fetch.Headers and fetch.Response are exposed, making testing easier + +## v1.1.2 + +- Fix: `Headers` should only support `String` and `Array` properties, and ignore others + +## v1.1.1 + +- Enhance: now req.headers accept both plain object and `Headers` instance + +## v1.1.0 + +- Enhance: timeout now also applies to response body (in case of slow response) +- Fix: timeout is now cleared properly when fetch is done/has failed + +## v1.0.6 + +- Fix: less greedy content-type charset matching + +## v1.0.5 + +- Fix: when `follow = 0`, fetch should not follow redirect +- Enhance: update tests for better coverage +- Enhance: code formatting +- Enhance: clean up doc + +## v1.0.4 + +- Enhance: test iojs support +- Enhance: timeout attached to socket event only fire once per redirect + +## v1.0.3 + +- Fix: response size limit should reject large chunk +- Enhance: added character encoding detection for xml, such as rss/atom feed (encoding in DTD) + +## v1.0.2 + +- Fix: added res.ok per spec change + +## v1.0.0 + +- Enhance: better test coverage and doc + + +# 0.x release + +## v0.1 + +- Major: initial public release diff --git a/scripts/metrics/node_modules/node-fetch/LICENSE.md b/scripts/metrics/node_modules/node-fetch/LICENSE.md new file mode 100644 index 00000000000..660ffecb58b --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/LICENSE.md @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 David Frank + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/scripts/metrics/node_modules/node-fetch/README.md b/scripts/metrics/node_modules/node-fetch/README.md new file mode 100644 index 00000000000..48f4215e4e7 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/README.md @@ -0,0 +1,538 @@ +node-fetch +========== + +[![npm version][npm-image]][npm-url] +[![build status][travis-image]][travis-url] +[![coverage status][codecov-image]][codecov-url] +[![install size][install-size-image]][install-size-url] + +A light-weight module that brings `window.fetch` to Node.js + +(We are looking for [v2 maintainers and collaborators](https://github.com/bitinn/node-fetch/issues/567)) + + + +- [Motivation](#motivation) +- [Features](#features) +- [Difference from client-side fetch](#difference-from-client-side-fetch) +- [Installation](#installation) +- [Loading and configuring the module](#loading-and-configuring-the-module) +- [Common Usage](#common-usage) + - [Plain text or HTML](#plain-text-or-html) + - [JSON](#json) + - [Simple Post](#simple-post) + - [Post with JSON](#post-with-json) + - [Post with form parameters](#post-with-form-parameters) + - [Handling exceptions](#handling-exceptions) + - [Handling client and server errors](#handling-client-and-server-errors) +- [Advanced Usage](#advanced-usage) + - [Streams](#streams) + - [Buffer](#buffer) + - [Accessing Headers and other Meta data](#accessing-headers-and-other-meta-data) + - [Post data using a file stream](#post-data-using-a-file-stream) + - [Post with form-data (detect multipart)](#post-with-form-data-detect-multipart) + - [Request cancellation with AbortSignal](#request-cancellation-with-abortsignal) +- [API](#api) + - [fetch(url[, options])](#fetchurl-options) + - [Options](#options) + - [Class: Request](#class-request) + - [Class: Response](#class-response) + - [Class: Headers](#class-headers) + - [Interface: Body](#interface-body) + - [Class: FetchError](#class-fetcherror) +- [License](#license) +- [Acknowledgement](#acknowledgement) + + + +## Motivation + +Instead of implementing `XMLHttpRequest` in Node.js to run browser-specific [Fetch polyfill](https://github.com/github/fetch), why not go from native `http` to `fetch` API directly? Hence `node-fetch`, minimal code for a `window.fetch` compatible API on Node.js runtime. + +See Matt Andrews' [isomorphic-fetch](https://github.com/matthew-andrews/isomorphic-fetch) or Leonardo Quixada's [cross-fetch](https://github.com/lquixada/cross-fetch) for isomorphic usage (exports `node-fetch` for server-side, `whatwg-fetch` for client-side). + +## Features + +- Stay consistent with `window.fetch` API. +- Make conscious trade-off when following [WHATWG fetch spec][whatwg-fetch] and [stream spec](https://streams.spec.whatwg.org/) implementation details, document known differences. +- Use native promise, but allow substituting it with [insert your favorite promise library]. +- Use native Node streams for body, on both request and response. +- Decode content encoding (gzip/deflate) properly, and convert string output (such as `res.text()` and `res.json()`) to UTF-8 automatically. +- Useful extensions such as timeout, redirect limit, response size limit, [explicit errors](ERROR-HANDLING.md) for troubleshooting. + +## Difference from client-side fetch + +- See [Known Differences](LIMITS.md) for details. +- If you happen to use a missing feature that `window.fetch` offers, feel free to open an issue. +- Pull requests are welcomed too! 
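+Before moving on to installation, one of those differences is worth a concrete look. A minimal sketch (not from the upstream docs; the URL is a placeholder): the response body is a Node.js `Readable` stream rather than a WHATWG `ReadableStream`, so it composes with ordinary stream tooling:
+
+```js
+const fetch = require('node-fetch');
+
+fetch('https://example.com/')
+  .then(res => {
+    // res.body is a Node.js Readable stream, so it pipes straight
+    // into any writable stream
+    res.body.pipe(process.stdout);
+  });
+```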
+ +## Installation + +Current stable release (`2.x`) + +```sh +$ npm install node-fetch --save +``` + +## Loading and configuring the module +We suggest you load the module via `require`, pending the stabalizing of es modules in node: +```js +const fetch = require('node-fetch'); +``` + +If you are using a Promise library other than native, set it through fetch.Promise: +```js +const Bluebird = require('bluebird'); + +fetch.Promise = Bluebird; +``` + +## Common Usage + +NOTE: The documentation below is up-to-date with `2.x` releases, [see `1.x` readme](https://github.com/bitinn/node-fetch/blob/1.x/README.md), [changelog](https://github.com/bitinn/node-fetch/blob/1.x/CHANGELOG.md) and [2.x upgrade guide](UPGRADE-GUIDE.md) for the differences. + +#### Plain text or HTML +```js +fetch('https://github.com/') + .then(res => res.text()) + .then(body => console.log(body)); +``` + +#### JSON + +```js + +fetch('https://api.github.com/users/github') + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Simple Post +```js +fetch('https://httpbin.org/post', { method: 'POST', body: 'a=1' }) + .then(res => res.json()) // expecting a json response + .then(json => console.log(json)); +``` + +#### Post with JSON + +```js +const body = { a: 1 }; + +fetch('https://httpbin.org/post', { + method: 'post', + body: JSON.stringify(body), + headers: { 'Content-Type': 'application/json' }, + }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Post with form parameters +`URLSearchParams` is available in Node.js as of v7.5.0. See [official documentation](https://nodejs.org/api/url.html#url_class_urlsearchparams) for more usage methods. + +NOTE: The `Content-Type` header is only set automatically to `x-www-form-urlencoded` when an instance of `URLSearchParams` is given as such: + +```js +const { URLSearchParams } = require('url'); + +const params = new URLSearchParams(); +params.append('a', 1); + +fetch('https://httpbin.org/post', { method: 'POST', body: params }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Handling exceptions +NOTE: 3xx-5xx responses are *NOT* exceptions, and should be handled in `then()`, see the next section. + +Adding a catch to the fetch promise chain will catch *all* exceptions, such as errors originating from node core libraries, like network errors, and operational errors which are instances of FetchError. See the [error handling document](ERROR-HANDLING.md) for more details. + +```js +fetch('https://domain.invalid/') + .catch(err => console.error(err)); +``` + +#### Handling client and server errors +It is common to create a helper function to check that the response contains no client (4xx) or server (5xx) error responses: + +```js +function checkStatus(res) { + if (res.ok) { // res.status >= 200 && res.status < 300 + return res; + } else { + throw MyCustomError(res.statusText); + } +} + +fetch('https://httpbin.org/status/400') + .then(checkStatus) + .then(res => console.log('will not get here...')) +``` + +## Advanced Usage + +#### Streams +The "Node.js way" is to use streams when possible: + +```js +fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') + .then(res => { + const dest = fs.createWriteStream('./octocat.png'); + res.body.pipe(dest); + }); +``` + +#### Buffer +If you prefer to cache binary data in full, use buffer(). 
(NOTE: buffer() is a `node-fetch` only API) + +```js +const fileType = require('file-type'); + +fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') + .then(res => res.buffer()) + .then(buffer => fileType(buffer)) + .then(type => { /* ... */ }); +``` + +#### Accessing Headers and other Meta data +```js +fetch('https://github.com/') + .then(res => { + console.log(res.ok); + console.log(res.status); + console.log(res.statusText); + console.log(res.headers.raw()); + console.log(res.headers.get('content-type')); + }); +``` + +#### Post data using a file stream + +```js +const { createReadStream } = require('fs'); + +const stream = createReadStream('input.txt'); + +fetch('https://httpbin.org/post', { method: 'POST', body: stream }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Post with form-data (detect multipart) + +```js +const FormData = require('form-data'); + +const form = new FormData(); +form.append('a', 1); + +fetch('https://httpbin.org/post', { method: 'POST', body: form }) + .then(res => res.json()) + .then(json => console.log(json)); + +// OR, using custom headers +// NOTE: getHeaders() is non-standard API + +const form = new FormData(); +form.append('a', 1); + +const options = { + method: 'POST', + body: form, + headers: form.getHeaders() +} + +fetch('https://httpbin.org/post', options) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Request cancellation with AbortSignal + +> NOTE: You may only cancel streamed requests on Node >= v8.0.0 + +You may cancel requests with `AbortController`. A suggested implementation is [`abort-controller`](https://www.npmjs.com/package/abort-controller). + +An example of timing out a request after 150ms could be achieved as follows: + +```js +import AbortController from 'abort-controller'; + +const controller = new AbortController(); +const timeout = setTimeout( + () => { controller.abort(); }, + 150, +); + +fetch(url, { signal: controller.signal }) + .then(res => res.json()) + .then( + data => { + useData(data) + }, + err => { + if (err.name === 'AbortError') { + // request was aborted + } + }, + ) + .finally(() => { + clearTimeout(timeout); + }); +``` + +See [test cases](https://github.com/bitinn/node-fetch/blob/master/test/test.js) for more examples. + + +## API + +### fetch(url[, options]) + +- `url` A string representing the URL for fetching +- `options` [Options](#fetch-options) for the HTTP(S) request +- Returns: Promise<[Response](#class-response)> + +Perform an HTTP(S) fetch. + +`url` should be an absolute url, such as `https://example.com/`. A path-relative URL (`/file/under/root`) or protocol-relative URL (`//can-be-http-or-https.com/`) will result in a rejected promise. + + +### Options + +The default values are shown after each option key. + +```js +{ + // These properties are part of the Fetch Standard + method: 'GET', + headers: {}, // request headers. format is the identical to that accepted by the Headers constructor (see below) + body: null, // request body. can be null, a string, a Buffer, a Blob, or a Node.js Readable stream + redirect: 'follow', // set to `manual` to extract redirect headers, `error` to reject redirect + signal: null, // pass an instance of AbortSignal to optionally abort requests + + // The following properties are node-fetch extensions + follow: 20, // maximum redirect count. 0 to not follow redirect + timeout: 0, // req/res timeout in ms, it resets on redirect. 0 to disable (OS limit applies). Signal is recommended instead. 
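+                        // (when the timeout elapses, the promise rejects with a FetchError; see Class: FetchError below)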
+ compress: true, // support gzip/deflate content encoding. false to disable + size: 0, // maximum response body size in bytes. 0 to disable + agent: null // http(s).Agent instance, allows custom proxy, certificate, dns lookup etc. +} +``` + +##### Default Headers + +If no values are set, the following request headers will be sent automatically: + +Header | Value +------------------- | -------------------------------------------------------- +`Accept-Encoding` | `gzip,deflate` _(when `options.compress === true`)_ +`Accept` | `*/*` +`Connection` | `close` _(when no `options.agent` is present)_ +`Content-Length` | _(automatically calculated, if possible)_ +`Transfer-Encoding` | `chunked` _(when `req.body` is a stream)_ +`User-Agent` | `node-fetch/1.0 (+https://github.com/bitinn/node-fetch)` + + +### Class: Request + +An HTTP(S) request containing information about URL, method, headers, and the body. This class implements the [Body](#iface-body) interface. + +Due to the nature of Node.js, the following properties are not implemented at this moment: + +- `type` +- `destination` +- `referrer` +- `referrerPolicy` +- `mode` +- `credentials` +- `cache` +- `integrity` +- `keepalive` + +The following node-fetch extension properties are provided: + +- `follow` +- `compress` +- `counter` +- `agent` + +See [options](#fetch-options) for exact meaning of these extensions. + +#### new Request(input[, options]) + +*(spec-compliant)* + +- `input` A string representing a URL, or another `Request` (which will be cloned) +- `options` [Options][#fetch-options] for the HTTP(S) request + +Constructs a new `Request` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Request/Request). + +In most cases, directly `fetch(url, options)` is simpler than creating a `Request` object. + + +### Class: Response + +An HTTP(S) response. This class implements the [Body](#iface-body) interface. + +The following properties are not implemented in node-fetch at this moment: + +- `Response.error()` +- `Response.redirect()` +- `type` +- `trailer` + +#### new Response([body[, options]]) + +*(spec-compliant)* + +- `body` A string or [Readable stream][node-readable] +- `options` A [`ResponseInit`][response-init] options dictionary + +Constructs a new `Response` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Response/Response). + +Because Node.js does not implement service workers (for which this class was designed), one rarely has to construct a `Response` directly. + +#### response.ok + +*(spec-compliant)* + +Convenience property representing if the request ended normally. Will evaluate to true if the response status was greater than or equal to 200 but smaller than 300. + +#### response.redirected + +*(spec-compliant)* + +Convenience property representing if the request has been redirected at least once. Will evaluate to true if the internal redirect counter is greater than 0. + + +### Class: Headers + +This class allows manipulating and iterating over a set of HTTP headers. All methods specified in the [Fetch Standard][whatwg-fetch] are implemented. + +#### new Headers([init]) + +*(spec-compliant)* + +- `init` Optional argument to pre-fill the `Headers` object + +Construct a new `Headers` object. `init` can be either `null`, a `Headers` object, an key-value map object, or any iterable object. 
+ +```js +// Example adapted from https://fetch.spec.whatwg.org/#example-headers-class + +const meta = { + 'Content-Type': 'text/xml', + 'Breaking-Bad': '<3' +}; +const headers = new Headers(meta); + +// The above is equivalent to +const meta = [ + [ 'Content-Type', 'text/xml' ], + [ 'Breaking-Bad', '<3' ] +]; +const headers = new Headers(meta); + +// You can in fact use any iterable objects, like a Map or even another Headers +const meta = new Map(); +meta.set('Content-Type', 'text/xml'); +meta.set('Breaking-Bad', '<3'); +const headers = new Headers(meta); +const copyOfHeaders = new Headers(headers); +``` + + +### Interface: Body + +`Body` is an abstract interface with methods that are applicable to both `Request` and `Response` classes. + +The following methods are not yet implemented in node-fetch at this moment: + +- `formData()` + +#### body.body + +*(deviation from spec)* + +* Node.js [`Readable` stream][node-readable] + +The data encapsulated in the `Body` object. Note that while the [Fetch Standard][whatwg-fetch] requires the property to always be a WHATWG `ReadableStream`, in node-fetch it is a Node.js [`Readable` stream][node-readable]. + +#### body.bodyUsed + +*(spec-compliant)* + +* `Boolean` + +A boolean property for if this body has been consumed. Per spec, a consumed body cannot be used again. + +#### body.arrayBuffer() +#### body.blob() +#### body.json() +#### body.text() + +*(spec-compliant)* + +* Returns: Promise + +Consume the body and return a promise that will resolve to one of these formats. + +#### body.buffer() + +*(node-fetch extension)* + +* Returns: Promise<Buffer> + +Consume the body and return a promise that will resolve to a Buffer. + +#### body.textConverted() + +*(node-fetch extension)* + +* Returns: Promise<String> + +Identical to `body.text()`, except instead of always converting to UTF-8, encoding sniffing will be performed and text converted to UTF-8, if possible. + +(This API requires an optional dependency on npm package [encoding](https://www.npmjs.com/package/encoding), which you need to install manually. `webpack` users may see [a warning message](https://github.com/bitinn/node-fetch/issues/412#issuecomment-379007792) due to this optional dependency.) + + +### Class: FetchError + +*(node-fetch extension)* + +An operational error in the fetching process. See [ERROR-HANDLING.md][] for more info. + + +### Class: AbortError + +*(node-fetch extension)* + +An Error thrown when the request is aborted in response to an `AbortSignal`'s `abort` event. It has a `name` property of `AbortError`. See [ERROR-HANDLING.MD][] for more info. + +## Acknowledgement + +Thanks to [github/fetch](https://github.com/github/fetch) for providing a solid implementation reference. + +`node-fetch` v1 was maintained by [@bitinn](https://github.com/bitinn); v2 was maintained by [@TimothyGu](https://github.com/timothygu), [@bitinn](https://github.com/bitinn) and [@jimmywarting](https://github.com/jimmywarting); v2 readme is written by [@jkantr](https://github.com/jkantr). 
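+Returning to `FetchError` and `AbortError`: the two classes can be told apart at runtime by their `name` property. A minimal sketch (the URL and `timeout` value are placeholders):
+
+```js
+const fetch = require('node-fetch');
+
+fetch('https://example.com/slow', { timeout: 50 })
+  .then(res => res.json())
+  .catch(err => {
+    if (err.name === 'AbortError') {
+      // the request was aborted through an AbortSignal
+    } else if (err.name === 'FetchError') {
+      console.log(err.type); // e.g. 'body-timeout', 'max-size', 'invalid-json', 'system'
+      console.log(err.code); // set for 'system' errors, e.g. 'ECONNREFUSED'
+    }
+  });
+```
+
+Branching on `err.type` rather than parsing the error message keeps the handling stable across releases, which is what the `type`/`code` fields were added for.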
+ +## License + +MIT + +[npm-image]: https://flat.badgen.net/npm/v/node-fetch +[npm-url]: https://www.npmjs.com/package/node-fetch +[travis-image]: https://flat.badgen.net/travis/bitinn/node-fetch +[travis-url]: https://travis-ci.org/bitinn/node-fetch +[codecov-image]: https://flat.badgen.net/codecov/c/github/bitinn/node-fetch/master +[codecov-url]: https://codecov.io/gh/bitinn/node-fetch +[install-size-image]: https://flat.badgen.net/packagephobia/install/node-fetch +[install-size-url]: https://packagephobia.now.sh/result?p=node-fetch +[whatwg-fetch]: https://fetch.spec.whatwg.org/ +[response-init]: https://fetch.spec.whatwg.org/#responseinit +[node-readable]: https://nodejs.org/api/stream.html#stream_readable_streams +[mdn-headers]: https://developer.mozilla.org/en-US/docs/Web/API/Headers +[LIMITS.md]: https://github.com/bitinn/node-fetch/blob/master/LIMITS.md +[ERROR-HANDLING.md]: https://github.com/bitinn/node-fetch/blob/master/ERROR-HANDLING.md +[UPGRADE-GUIDE.md]: https://github.com/bitinn/node-fetch/blob/master/UPGRADE-GUIDE.md diff --git a/scripts/metrics/node_modules/node-fetch/browser.js b/scripts/metrics/node_modules/node-fetch/browser.js new file mode 100644 index 00000000000..0ad5de004c4 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/browser.js @@ -0,0 +1,23 @@ +"use strict"; + +// ref: https://github.com/tc39/proposal-global +var getGlobal = function () { + // the only reliable means to get the global object is + // `Function('return this')()` + // However, this causes CSP violations in Chrome apps. + if (typeof self !== 'undefined') { return self; } + if (typeof window !== 'undefined') { return window; } + if (typeof global !== 'undefined') { return global; } + throw new Error('unable to locate global object'); +} + +var global = getGlobal(); + +module.exports = exports = global.fetch; + +// Needed for TypeScript and Webpack. +exports.default = global.fetch.bind(global); + +exports.Headers = global.Headers; +exports.Request = global.Request; +exports.Response = global.Response; \ No newline at end of file diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.es.js b/scripts/metrics/node_modules/node-fetch/lib/index.es.js new file mode 100644 index 00000000000..20ab807872f --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.es.js @@ -0,0 +1,1631 @@ +process.emitWarning("The .es.js file is deprecated. Use .mjs instead."); + +import Stream from 'stream'; +import http from 'http'; +import Url from 'url'; +import https from 'https'; +import zlib from 'zlib'; + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
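+ * Accumulates the stream in memory as it reads, enforcing the optional
+ * `size` limit and `timeout` configured on the Body.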
+ * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +function consumeBody() { + var _this4 = this; + + if (this[INTERNALS].disturbed) { + return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); + } + + this[INTERNALS].disturbed = true; + + if (this[INTERNALS].error) { + return Body.Promise.reject(this[INTERNALS].error); + } + + let body = this.body; + + // body is null + if (body === null) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is blob + if (isBlob(body)) { + body = body.stream(); + } + + // body is buffer + if (Buffer.isBuffer(body)) { + return Body.Promise.resolve(body); + } + + // istanbul ignore if: should never happen + if (!(body instanceof Stream)) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is stream + // get ready to actually consume the body + let accum = []; + let accumBytes = 0; + let abort = false; + + return new Body.Promise(function (resolve, reject) { + let resTimeout; + + // allow timeout on slow response body + if (_this4.timeout) { + resTimeout = setTimeout(function () { + abort = true; + reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); + }, _this4.timeout); + } + + // handle stream errors + body.on('error', function (err) { + if (err.name === 'AbortError') { + // if the request was aborted, reject with this Error + abort = true; + reject(err); + } else { + // other errors, such as incorrect content-encoding + reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + + body.on('data', function (chunk) { + if (abort || chunk === null) { + return; + } + + if (_this4.size && accumBytes + chunk.length > _this4.size) { + abort = true; + reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); + return; + } + + accumBytes += chunk.length; + accum.push(chunk); + }); + + body.on('end', function () { + if (abort) { + return; + } + + clearTimeout(resTimeout); + + try { + resolve(Buffer.concat(accum, accumBytes)); + } catch (err) { + // handle streams that have accumulated too much data (issue #414) + reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + }); +} + +/** + * Detect buffer encoding and convert to target encoding + * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding + * + * @param Buffer buffer Incoming buffer + * @param String encoding Target encoding + * @return String + */ +function convertBody(buffer, headers) { + if (typeof convert !== 'function') { + throw new Error('The package `encoding` must be installed to use the textConverted() function'); + } + + const ct = headers.get('content-type'); + let charset = 'utf-8'; + let res, str; + + // header + if (ct) { + res = /charset=([^;]*)/i.exec(ct); + } + + // no charset in content type, peek at response body for at most 1024 bytes + str = buffer.slice(0, 1024).toString(); + + // html5 + if (!res && str) { + res = / 0 && arguments[0] !== undefined ? 
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
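+			// Z_SYNC_FLUSH flushes decoded output at each chunk boundary, and using
+			// it as finishFlush keeps inflate from erroring when a stream ends
+			// without the final deflate block, so truncated bodies still decode.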
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +export default fetch; +export { Headers, Request, Response, FetchError }; diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.js b/scripts/metrics/node_modules/node-fetch/lib/index.js new file mode 100644 index 00000000000..86c7c031229 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.js @@ -0,0 +1,1640 @@ +'use strict'; + +Object.defineProperty(exports, '__esModule', { value: true }); + +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } + +var Stream = _interopDefault(require('stream')); +var http = _interopDefault(require('http')); +var Url = _interopDefault(require('url')); +var https = _interopDefault(require('https')); +var zlib = _interopDefault(require('zlib')); + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
+ *
+ * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body
+ *
+ * @return  Promise
+ */
+function consumeBody() {
+	var _this4 = this;
+
+	if (this[INTERNALS].disturbed) {
+		return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`));
+	}
+
+	this[INTERNALS].disturbed = true;
+
+	if (this[INTERNALS].error) {
+		return Body.Promise.reject(this[INTERNALS].error);
+	}
+
+	let body = this.body;
+
+	// body is null
+	if (body === null) {
+		return Body.Promise.resolve(Buffer.alloc(0));
+	}
+
+	// body is blob
+	if (isBlob(body)) {
+		body = body.stream();
+	}
+
+	// body is buffer
+	if (Buffer.isBuffer(body)) {
+		return Body.Promise.resolve(body);
+	}
+
+	// istanbul ignore if: should never happen
+	if (!(body instanceof Stream)) {
+		return Body.Promise.resolve(Buffer.alloc(0));
+	}
+
+	// body is stream
+	// get ready to actually consume the body
+	let accum = [];
+	let accumBytes = 0;
+	let abort = false;
+
+	return new Body.Promise(function (resolve, reject) {
+		let resTimeout;
+
+		// allow timeout on slow response body
+		if (_this4.timeout) {
+			resTimeout = setTimeout(function () {
+				abort = true;
+				reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout'));
+			}, _this4.timeout);
+		}
+
+		// handle stream errors
+		body.on('error', function (err) {
+			if (err.name === 'AbortError') {
+				// if the request was aborted, reject with this Error
+				abort = true;
+				reject(err);
+			} else {
+				// other errors, such as incorrect content-encoding
+				reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err));
+			}
+		});
+
+		body.on('data', function (chunk) {
+			if (abort || chunk === null) {
+				return;
+			}
+
+			if (_this4.size && accumBytes + chunk.length > _this4.size) {
+				abort = true;
+				reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size'));
+				return;
+			}
+
+			accumBytes += chunk.length;
+			accum.push(chunk);
+		});
+
+		body.on('end', function () {
+			if (abort) {
+				return;
+			}
+
+			clearTimeout(resTimeout);
+
+			try {
+				resolve(Buffer.concat(accum, accumBytes));
+			} catch (err) {
+				// handle streams that have accumulated too much data (issue #414)
+				reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err));
+			}
+		});
+	});
+}
+
+/**
+ * Detect buffer encoding and convert to target encoding
+ * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding
+ *
+ * @param   Buffer  buffer    Incoming buffer
+ * @param   String  encoding  Target encoding
+ * @return  String
+ */
+function convertBody(buffer, headers) {
+	if (typeof convert !== 'function') {
+		throw new Error('The package `encoding` must be installed to use the textConverted() function');
+	}
+
+	const ct = headers.get('content-type');
+	let charset = 'utf-8';
+	let res, str;
+
+	// header
+	if (ct) {
+		res = /charset=([^;]*)/i.exec(ct);
+	}
+
+	// no charset in content type, peek at response body for at most 1024 bytes
+	str = buffer.slice(0, 1024).toString();
+
+	// html5
+	if (!res && str) {
+		res = /<meta.+?charset=(['"])(.+?)\1/i.exec(str);
+	}
+
+	// html4
+	if (!res && str) {
+		res = /<meta[\s]+?http-equiv=(['"])content-type\1[\s]+?content=(['"])(.+?)\2/i.exec(str);
+
+		if (res) {
+			res = /charset=(.*)/i.exec(res.pop());
+		}
+	}
+
+	// xml
+	if (!res && str) {
+		res = /<\?xml.+?encoding=(['"])(.+?)\1/.exec(str);
+	}
+
+	// found charset
+	if (res) {
+		charset = res.pop();
+
+		// prevent decode issues when sites use incorrect encoding
+		// ref: https://hsivonen.fi/encoding-menu/
+		if (charset === 'gb2312' || charset === 'gbk') {
+			charset = 'gb18030';
+		}
+	}
+
+	// turn raw buffers into a single utf-8 buffer
+	return convert(buffer, 'UTF-8', charset).toString();
+}
+
+/**
+ * Detect a URLSearchParams object
+ * ref: https://github.com/bitinn/node-fetch/issues/296#issuecomment-307598143
+ *
+ * @param   Object  obj     Object to detect by type or brand
+ * @return  String
+ */
+function isURLSearchParams(obj) {
+	// Duck-typing as a necessary condition.
+	if (typeof obj !== 'object' || typeof obj.append !== 'function' || typeof obj.delete !== 'function' || typeof obj.get !== 'function' || typeof obj.getAll !== 'function' || typeof obj.has !== 'function' || typeof obj.set !== 'function') {
+		return false;
+	}
+
+	// Brand-checking and more duck-typing as optional condition.
+	return obj.constructor.name === 'URLSearchParams' || Object.prototype.toString.call(obj) === '[object URLSearchParams]' || typeof obj.sort === 'function';
+}
+
+/**
+ * Check if `obj` is a W3C `Blob` object (which `File` inherits from)
+ * @param  {*} obj
+ * @return {boolean}
+ */
+function isBlob(obj) {
+	return typeof obj === 'object' && typeof obj.arrayBuffer === 'function' && typeof obj.type === 'string' && typeof obj.stream === 'function' && typeof obj.constructor === 'function' && typeof obj.constructor.name === 'string' && /^(Blob|File)$/.test(obj[Symbol.toStringTag]);
+}
+
+/**
+ * Clone body given Res/Req instance
+ *
+ * @param   Mixed  instance  Response or Request instance
+ * @return  Mixed
+ */
+function clone(instance) {
+	let p1, p2;
+	let body = instance.body;
+
+	// don't allow cloning a used body
+	if (instance.bodyUsed) {
+		throw new Error('cannot clone body after it is used');
+	}
+
+	// check that body is a stream and not form-data object
+	// note: we can't clone the form-data object without having it as a dependency
+	if (body instanceof Stream && typeof body.getBoundary !== 'function') {
+		// tee instance body
+		p1 = new PassThrough();
+		p2 = new PassThrough();
+		body.pipe(p1);
+		body.pipe(p2);
+		// set instance body to teed body and return the other teed body
+		instance[INTERNALS].body = p1;
+		body = p2;
+	}
+
+	return body;
+}
+
+/**
+ * Performs the operation "extract a `Content-Type` value from |object|" as
+ * specified in the specification:
+ * https://fetch.spec.whatwg.org/#concept-bodyinit-extract
+ *
+ * This function assumes that instance.body is present.
+ *
+ * @param   Mixed  instance  Any options.body input
+ */
+function extractContentType(body) {
+	if (body === null) {
+		// body is null
+		return null;
+	} else if (typeof body === 'string') {
+		// body is string
+		return 'text/plain;charset=UTF-8';
+	} else if (isURLSearchParams(body)) {
+		// body is a URLSearchParams
+		return 'application/x-www-form-urlencoded;charset=UTF-8';
+	} else if (isBlob(body)) {
+		// body is blob
+		return body.type || null;
+	} else if (Buffer.isBuffer(body)) {
+		// body is buffer
+		return null;
+	} else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') {
+		// body is ArrayBuffer
+		return null;
+	} else if (ArrayBuffer.isView(body)) {
+		// body is ArrayBufferView
+		return null;
+	} else if (typeof body.getBoundary === 'function') {
+		// detect form data input from form-data module
+		return `multipart/form-data;boundary=${body.getBoundary()}`;
+	} else if (body instanceof Stream) {
+		// body is stream
+		// can't really do much about this
+		return null;
+	} else {
+		// Body constructor defaults other things to string
+		return 'text/plain;charset=UTF-8';
+	}
+}
+
+/**
+ * The Fetch Standard treats this as if "total bytes" is a property on the body.
+ * For us, we have to explicitly get it with a function.
+ *
+ * ref: https://fetch.spec.whatwg.org/#concept-body-total-bytes
+ *
+ * @param   Body    instance   Instance of Body
+ * @return  Number?            Number of bytes, or null if not possible
+ */
+function getTotalBytes(instance) {
+	const body = instance.body;
+
+	if (body === null) {
+		// body is null
+		return 0;
+	} else if (isBlob(body)) {
+		return body.size;
+	} else if (Buffer.isBuffer(body)) {
+		// body is buffer
+		return body.length;
+	} else if (body && typeof body.getLengthSync === 'function') {
+		// detect form data input from form-data module
+		if (body._lengthRetrievers && body._lengthRetrievers.length == 0 || // 1.x
+		body.hasKnownLength && body.hasKnownLength()) {
+			// 2.x
+			return body.getLengthSync();
+		}
+		return null;
+	} else {
+		// body is stream
+		return null;
+	}
+}
+
+/**
+ * Write a Body to a Node.js WritableStream (e.g. http.request) object.
+ *
+ * @param   Body    instance   Instance of Body
+ * @return  Void
+ */
+function writeToStream(dest, instance) {
+	const body = instance.body;
+
+	if (body === null) {
+		// body is null
+		dest.end();
+	} else if (isBlob(body)) {
+		body.stream().pipe(dest);
+	} else if (Buffer.isBuffer(body)) {
+		// body is buffer
+		dest.write(body);
+		dest.end();
+	} else {
+		// body is stream
+		body.pipe(dest);
+	}
+}
+
+Body.Promise = global.Promise;
+
+/**
+ * headers.js
+ *
+ * Headers class offers convenient helpers
+ */
+
+const invalidTokenRegex = /[^\^_`a-zA-Z\-0-9!#$%&'*+.|~]/;
+const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/;
+
+function validateName(name) {
+	name = `${name}`;
+	if (invalidTokenRegex.test(name) || name === '') {
+		throw new TypeError(`${name} is not a legal HTTP header name`);
+	}
+}
+
+function validateValue(value) {
+	value = `${value}`;
+	if (invalidHeaderCharRegex.test(value)) {
+		throw new TypeError(`${value} is not a legal HTTP header value`);
+	}
+}
+
+/**
+ * Find the key in the map object given a header name.
+ *
+ * Returns undefined if not found.
+ *
+ * @param   String  name  Header name
+ * @return  String|Undefined
+ */
+function find(map, name) {
+	name = name.toLowerCase();
+	for (const key in map) {
+		if (key.toLowerCase() === name) {
+			return key;
+		}
+	}
+	return undefined;
+}
+
+const MAP = Symbol('map');
+
+class Headers {
+	/**
+	 * Headers class
+	 *
+	 * @param   Object  headers  Response headers
+	 * @return  Void
+	 */
+	constructor() {
+		let init = arguments.length > 0 && arguments[0] !== undefined ?
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
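+			// Accepting a sync flush in place of a proper stream end trades strict
+			// validation for tolerance: inflate returns whatever it decoded even if
+			// the response was cut off before the final deflate block.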
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +module.exports = exports = fetch; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.default = exports; +exports.Headers = Headers; +exports.Request = Request; +exports.Response = Response; +exports.FetchError = FetchError; diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.mjs b/scripts/metrics/node_modules/node-fetch/lib/index.mjs new file mode 100644 index 00000000000..dca525658b4 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.mjs @@ -0,0 +1,1629 @@ +import Stream from 'stream'; +import http from 'http'; +import Url from 'url'; +import https from 'https'; +import zlib from 'zlib'; + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
+ *
+ * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body
+ *
+ * @return  Promise
+ */
+function consumeBody() {
+	var _this4 = this;
+
+	if (this[INTERNALS].disturbed) {
+		return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`));
+	}
+
+	this[INTERNALS].disturbed = true;
+
+	if (this[INTERNALS].error) {
+		return Body.Promise.reject(this[INTERNALS].error);
+	}
+
+	let body = this.body;
+
+	// body is null
+	if (body === null) {
+		return Body.Promise.resolve(Buffer.alloc(0));
+	}
+
+	// body is blob
+	if (isBlob(body)) {
+		body = body.stream();
+	}
+
+	// body is buffer
+	if (Buffer.isBuffer(body)) {
+		return Body.Promise.resolve(body);
+	}
+
+	// istanbul ignore if: should never happen
+	if (!(body instanceof Stream)) {
+		return Body.Promise.resolve(Buffer.alloc(0));
+	}
+
+	// body is stream
+	// get ready to actually consume the body
+	let accum = [];
+	let accumBytes = 0;
+	let abort = false;
+
+	return new Body.Promise(function (resolve, reject) {
+		let resTimeout;
+
+		// allow timeout on slow response body
+		if (_this4.timeout) {
+			resTimeout = setTimeout(function () {
+				abort = true;
+				reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout'));
+			}, _this4.timeout);
+		}
+
+		// handle stream errors
+		body.on('error', function (err) {
+			if (err.name === 'AbortError') {
+				// if the request was aborted, reject with this Error
+				abort = true;
+				reject(err);
+			} else {
+				// other errors, such as incorrect content-encoding
+				reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err));
+			}
+		});
+
+		body.on('data', function (chunk) {
+			if (abort || chunk === null) {
+				return;
+			}
+
+			if (_this4.size && accumBytes + chunk.length > _this4.size) {
+				abort = true;
+				reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size'));
+				return;
+			}
+
+			accumBytes += chunk.length;
+			accum.push(chunk);
+		});
+
+		body.on('end', function () {
+			if (abort) {
+				return;
+			}
+
+			clearTimeout(resTimeout);
+
+			try {
+				resolve(Buffer.concat(accum, accumBytes));
+			} catch (err) {
+				// handle streams that have accumulated too much data (issue #414)
+				reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err));
+			}
+		});
+	});
+}
+
+/**
+ * Detect buffer encoding and convert to target encoding
+ * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding
+ *
+ * @param   Buffer  buffer    Incoming buffer
+ * @param   String  encoding  Target encoding
+ * @return  String
+ */
+function convertBody(buffer, headers) {
+	if (typeof convert !== 'function') {
+		throw new Error('The package `encoding` must be installed to use the textConverted() function');
+	}
+
+	const ct = headers.get('content-type');
+	let charset = 'utf-8';
+	let res, str;
+
+	// header
+	if (ct) {
+		res = /charset=([^;]*)/i.exec(ct);
+	}
+
+	// no charset in content type, peek at response body for at most 1024 bytes
+	str = buffer.slice(0, 1024).toString();
+
+	// html5
+	if (!res && str) {
+		res = /<meta.+?charset=(['"])(.+?)\1/i.exec(str);
+	}
+
+	// html4
+	if (!res && str) {
+		res = /<meta[\s]+?http-equiv=(['"])content-type\1[\s]+?content=(['"])(.+?)\2/i.exec(str);
+
+		if (res) {
+			res = /charset=(.*)/i.exec(res.pop());
+		}
+	}
+
+	// xml
+	if (!res && str) {
+		res = /<\?xml.+?encoding=(['"])(.+?)\1/.exec(str);
+	}
+
+	// found charset
+	if (res) {
+		charset = res.pop();
+
+		// prevent decode issues when sites use incorrect encoding
+		// ref: https://hsivonen.fi/encoding-menu/
+		if (charset === 'gb2312' || charset === 'gbk') {
+			charset = 'gb18030';
+		}
+	}
+
+	// turn raw buffers into a single utf-8 buffer
+	return convert(buffer, 'UTF-8', charset).toString();
+}
+
+/**
+ * Detect a URLSearchParams object
+ * ref: https://github.com/bitinn/node-fetch/issues/296#issuecomment-307598143
+ *
+ * @param   Object  obj     Object to detect by type or brand
+ * @return  String
+ */
+function isURLSearchParams(obj) {
+	// Duck-typing as a necessary condition.
+	if (typeof obj !== 'object' || typeof obj.append !== 'function' || typeof obj.delete !== 'function' || typeof obj.get !== 'function' || typeof obj.getAll !== 'function' || typeof obj.has !== 'function' || typeof obj.set !== 'function') {
+		return false;
+	}
+
+	// Brand-checking and more duck-typing as optional condition.
+	return obj.constructor.name === 'URLSearchParams' || Object.prototype.toString.call(obj) === '[object URLSearchParams]' || typeof obj.sort === 'function';
+}
+
+/**
+ * Check if `obj` is a W3C `Blob` object (which `File` inherits from)
+ * @param  {*} obj
+ * @return {boolean}
+ */
+function isBlob(obj) {
+	return typeof obj === 'object' && typeof obj.arrayBuffer === 'function' && typeof obj.type === 'string' && typeof obj.stream === 'function' && typeof obj.constructor === 'function' && typeof obj.constructor.name === 'string' && /^(Blob|File)$/.test(obj[Symbol.toStringTag]);
+}
+
+/**
+ * Clone body given Res/Req instance
+ *
+ * @param   Mixed  instance  Response or Request instance
+ * @return  Mixed
+ */
+function clone(instance) {
+	let p1, p2;
+	let body = instance.body;
+
+	// don't allow cloning a used body
+	if (instance.bodyUsed) {
+		throw new Error('cannot clone body after it is used');
+	}
+
+	// check that body is a stream and not form-data object
+	// note: we can't clone the form-data object without having it as a dependency
+	if (body instanceof Stream && typeof body.getBoundary !== 'function') {
+		// tee instance body
+		p1 = new PassThrough();
+		p2 = new PassThrough();
+		body.pipe(p1);
+		body.pipe(p2);
+		// set instance body to teed body and return the other teed body
+		instance[INTERNALS].body = p1;
+		body = p2;
+	}
+
+	return body;
+}
+
+/**
+ * Performs the operation "extract a `Content-Type` value from |object|" as
+ * specified in the specification:
+ * https://fetch.spec.whatwg.org/#concept-bodyinit-extract
+ *
+ * This function assumes that instance.body is present.
+ *
+ * @param   Mixed  instance  Any options.body input
+ */
+function extractContentType(body) {
+	if (body === null) {
+		// body is null
+		return null;
+	} else if (typeof body === 'string') {
+		// body is string
+		return 'text/plain;charset=UTF-8';
+	} else if (isURLSearchParams(body)) {
+		// body is a URLSearchParams
+		return 'application/x-www-form-urlencoded;charset=UTF-8';
+	} else if (isBlob(body)) {
+		// body is blob
+		return body.type || null;
+	} else if (Buffer.isBuffer(body)) {
+		// body is buffer
+		return null;
+	} else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') {
+		// body is ArrayBuffer
+		return null;
+	} else if (ArrayBuffer.isView(body)) {
+		// body is ArrayBufferView
+		return null;
+	} else if (typeof body.getBoundary === 'function') {
+		// detect form data input from form-data module
+		return `multipart/form-data;boundary=${body.getBoundary()}`;
+	} else if (body instanceof Stream) {
+		// body is stream
+		// can't really do much about this
+		return null;
+	} else {
+		// Body constructor defaults other things to string
+		return 'text/plain;charset=UTF-8';
+	}
+}
+
+/**
+ * The Fetch Standard treats this as if "total bytes" is a property on the body.
+ * For us, we have to explicitly get it with a function.
+ *
+ * ref: https://fetch.spec.whatwg.org/#concept-body-total-bytes
+ *
+ * @param   Body    instance   Instance of Body
+ * @return  Number?            Number of bytes, or null if not possible
+ */
+function getTotalBytes(instance) {
+	const body = instance.body;
+
+	if (body === null) {
+		// body is null
+		return 0;
+	} else if (isBlob(body)) {
+		return body.size;
+	} else if (Buffer.isBuffer(body)) {
+		// body is buffer
+		return body.length;
+	} else if (body && typeof body.getLengthSync === 'function') {
+		// detect form data input from form-data module
+		if (body._lengthRetrievers && body._lengthRetrievers.length == 0 || // 1.x
+		body.hasKnownLength && body.hasKnownLength()) {
+			// 2.x
+			return body.getLengthSync();
+		}
+		return null;
+	} else {
+		// body is stream
+		return null;
+	}
+}
+
+/**
+ * Write a Body to a Node.js WritableStream (e.g. http.request) object.
+ *
+ * @param   Body    instance   Instance of Body
+ * @return  Void
+ */
+function writeToStream(dest, instance) {
+	const body = instance.body;
+
+	if (body === null) {
+		// body is null
+		dest.end();
+	} else if (isBlob(body)) {
+		body.stream().pipe(dest);
+	} else if (Buffer.isBuffer(body)) {
+		// body is buffer
+		dest.write(body);
+		dest.end();
+	} else {
+		// body is stream
+		body.pipe(dest);
+	}
+}
+
+Body.Promise = global.Promise;
+
+/**
+ * headers.js
+ *
+ * Headers class offers convenient helpers
+ */
+
+const invalidTokenRegex = /[^\^_`a-zA-Z\-0-9!#$%&'*+.|~]/;
+const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/;
+
+function validateName(name) {
+	name = `${name}`;
+	if (invalidTokenRegex.test(name) || name === '') {
+		throw new TypeError(`${name} is not a legal HTTP header name`);
+	}
+}
+
+function validateValue(value) {
+	value = `${value}`;
+	if (invalidHeaderCharRegex.test(value)) {
+		throw new TypeError(`${value} is not a legal HTTP header value`);
+	}
+}
+
+/**
+ * Find the key in the map object given a header name.
+ *
+ * Returns undefined if not found.
+ *
+ * @param   String  name  Header name
+ * @return  String|Undefined
+ */
+function find(map, name) {
+	name = name.toLowerCase();
+	for (const key in map) {
+		if (key.toLowerCase() === name) {
+			return key;
+		}
+	}
+	return undefined;
+}
+
+const MAP = Symbol('map');
+
+class Headers {
+	/**
+	 * Headers class
+	 *
+	 * @param   Object  headers  Response headers
+	 * @return  Void
+	 */
+	constructor() {
+		let init = arguments.length > 0 && arguments[0] !== undefined ?
+/**
+ * headers.js
+ *
+ * Headers class offers convenient helpers
+ */
+
+const invalidTokenRegex = /[^\^_`a-zA-Z\-0-9!#$%&'*+.|~]/;
+const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/;
+
+function validateName(name) {
+  name = `${name}`;
+  if (invalidTokenRegex.test(name) || name === '') {
+    throw new TypeError(`${name} is not a legal HTTP header name`);
+  }
+}
+
+function validateValue(value) {
+  value = `${value}`;
+  if (invalidHeaderCharRegex.test(value)) {
+    throw new TypeError(`${value} is not a legal HTTP header value`);
+  }
+}
+
+/**
+ * Find the key in the map object given a header name.
+ *
+ * Returns undefined if not found.
+ *
+ * @param String name Header name
+ * @return String|Undefined
+ */
+function find(map, name) {
+  name = name.toLowerCase();
+  for (const key in map) {
+    if (key.toLowerCase() === name) {
+      return key;
+    }
+  }
+  return undefined;
+}
+
+const MAP = Symbol('map');
+class Headers {
+  /**
+   * Headers class
+   *
+   * @param Object headers Response headers
+   * @return Void
+   */
+  constructor() {
+    let init = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : undefined;
+
+    this[MAP] = Object.create(null);
+
+    if (init instanceof Headers) {
+      const rawHeaders = init.raw();
+      const headerNames = Object.keys(rawHeaders);
+
+      for (const headerName of headerNames) {
+        for (const value of rawHeaders[headerName]) {
+          this.append(headerName, value);
+        }
+      }
+
+      return;
+    }
+
+    // We don't worry about converting prop to ByteString here as append()
+    // will handle it.
+    if (init == null) ; else if (typeof init === 'object') {
+      const method = init[Symbol.iterator];
+      if (method != null) {
+        if (typeof method !== 'function') {
+          throw new TypeError('Header pairs must be iterable');
+        }
+
+        // sequence<sequence<ByteString>>
+        // Note: per spec we have to first exhaust the lists then process them
+        const pairs = [];
+        for (const pair of init) {
+          if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') {
+            throw new TypeError('Each header pair must be iterable');
+          }
+          pairs.push(Array.from(pair));
+        }
+
+        for (const pair of pairs) {
+          if (pair.length !== 2) {
+            throw new TypeError('Each header pair must be a name/value tuple');
+          }
+          this.append(pair[0], pair[1]);
+        }
+      } else {
+        // record<ByteString, ByteString>
+        for (const key of Object.keys(init)) {
+          const value = init[key];
+          this.append(key, value);
+        }
+      }
+    } else {
+      throw new TypeError('Provided initializer must be an object');
+    }
+  }
+
+  /**
+   * Return combined header value given name
+   *
+   * @param String name Header name
+   * @return Mixed
+   */
+  get(name) {
+    name = `${name}`;
+    validateName(name);
+    const key = find(this[MAP], name);
+    if (key === undefined) {
+      return null;
+    }
+
+    return this[MAP][key].join(', ');
+  }
+
+  /**
+   * Iterate over all headers
+   *
+   * @param Function callback Executed for each item with parameters (value, name, thisArg)
+   * @param Boolean thisArg `this` context for callback function
+   * @return Void
+   */
+  forEach(callback) {
+    let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined;
+
+    let pairs = getHeaders(this);
+    let i = 0;
+    while (i < pairs.length) {
+      var _pairs$i = pairs[i];
+      const name = _pairs$i[0],
+            value = _pairs$i[1];
+
+      callback.call(thisArg, value, name, this);
+      pairs = getHeaders(this);
+      i++;
+    }
+  }
+
+  /**
+   * Overwrite header values given name
+   *
+   * @param String name Header name
+   * @param String value Header value
+   * @return Void
+   */
+  set(name, value) {
+    name = `${name}`;
+    value = `${value}`;
+    validateName(name);
+    validateValue(value);
+    const key = find(this[MAP], name);
+    this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +export default fetch; +export { Headers, Request, Response, FetchError }; diff --git a/scripts/metrics/node_modules/node-fetch/package.json b/scripts/metrics/node_modules/node-fetch/package.json new file mode 100644 index 00000000000..e93129c801f --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/package.json @@ -0,0 +1,94 @@ +{ + "_from": "node-fetch", + "_id": "node-fetch@2.5.0", + "_inBundle": false, + "_integrity": "sha512-YuZKluhWGJwCcUu4RlZstdAxr8bFfOVHakc1mplwHkk8J+tqM1Y5yraYvIUpeX8aY7+crCwiELJq7Vl0o0LWXw==", + "_location": "/node-fetch", + "_phantomChildren": {}, + "_requested": { + "type": "tag", + "registry": true, + "raw": "node-fetch", + "name": "node-fetch", + "escapedName": "node-fetch", + "rawSpec": "", + "saveSpec": null, + "fetchSpec": "latest" + }, + "_requiredBy": [ + "#USER", + "/" + ], + "_resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.5.0.tgz", + "_shasum": "8028c49fc1191bba56a07adc6e2a954644a48501", + "_spec": "node-fetch", + "_where": "/Users/zachary.butler/Work/auto-buildkite-pipelines/scripts/metrics", + "author": { + "name": "David Frank" + }, + "browser": "./browser.js", + "bugs": { + "url": "https://github.com/bitinn/node-fetch/issues" + }, + "bundleDependencies": false, + "dependencies": {}, + "deprecated": false, + "description": "A light-weight module that brings window.fetch to node.js", + "devDependencies": { + "@ungap/url-search-params": "^0.1.2", + "abort-controller": "^1.1.0", + "abortcontroller-polyfill": "^1.3.0", + "babel-core": "^6.26.3", + "babel-plugin-istanbul": "^4.1.6", + "babel-preset-env": "^1.6.1", + "babel-register": "^6.16.3", + "chai": "^3.5.0", + "chai-as-promised": "^7.1.1", + "chai-iterator": "^1.1.1", + "chai-string": "~1.3.0", + "codecov": "^3.3.0", + "cross-env": "^5.2.0", + "form-data": "^2.3.3", + "is-builtin-module": "^1.0.0", + "mocha": "^5.0.0", + "nyc": "11.9.0", + "parted": "^0.1.1", + "promise": "^8.0.3", + "resumer": "0.0.0", + "rollup": "^0.63.4", + "rollup-plugin-babel": "^3.0.7", + "string-to-arraybuffer": 
"^1.0.2", + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "files": [ + "lib/index.js", + "lib/index.mjs", + "lib/index.es.js", + "browser.js" + ], + "homepage": "https://github.com/bitinn/node-fetch", + "keywords": [ + "fetch", + "http", + "promise" + ], + "license": "MIT", + "main": "lib/index", + "module": "lib/index.mjs", + "name": "node-fetch", + "repository": { + "type": "git", + "url": "git+https://github.com/bitinn/node-fetch.git" + }, + "scripts": { + "build": "cross-env BABEL_ENV=rollup rollup -c", + "coverage": "cross-env BABEL_ENV=coverage nyc --reporter json --reporter text mocha -R spec test/test.js && codecov -f coverage/coverage-final.json", + "prepare": "npm run build", + "report": "cross-env BABEL_ENV=coverage nyc --reporter lcov --reporter text mocha -R spec test/test.js", + "test": "cross-env BABEL_ENV=test mocha --require babel-register --throw-deprecation test/test.js" + }, + "version": "2.5.0" +} diff --git a/scripts/metrics/node_modules/sax/LICENSE b/scripts/metrics/node_modules/sax/LICENSE new file mode 100644 index 00000000000..ccffa082c99 --- /dev/null +++ b/scripts/metrics/node_modules/sax/LICENSE @@ -0,0 +1,41 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +==== + +`String.fromCodePoint` by Mathias Bynens used according to terms of MIT +License, as follows: + + Copyright Mathias Bynens + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/scripts/metrics/node_modules/sax/README.md b/scripts/metrics/node_modules/sax/README.md new file mode 100644 index 00000000000..afcd3f3dd65 --- /dev/null +++ b/scripts/metrics/node_modules/sax/README.md @@ -0,0 +1,225 @@ +# sax js + +A sax-style parser for XML and HTML. 
+
+Designed with [node](http://nodejs.org/) in mind, but should work fine in
+the browser or other CommonJS implementations.
+
+## What This Is
+
+* A very simple tool to parse through an XML string.
+* A stepping stone to a streaming HTML parser.
+* A handy way to deal with RSS and other mostly-ok-but-kinda-broken XML
+  docs.
+
+## What This Is (probably) Not
+
+* An HTML Parser - That's a fine goal, but this isn't it. It's just
+  XML.
+* A DOM Builder - You can use it to build an object model out of XML,
+  but it doesn't do that out of the box.
+* XSLT - No DOM = no querying.
+* 100% Compliant with (some other SAX implementation) - Most SAX
+  implementations are in Java and do a lot more than this does.
+* An XML Validator - It does a little validation when in strict mode, but
+  not much.
+* A Schema-Aware XSD Thing - Schemas are an exercise in fetishistic
+  masochism.
+* A DTD-aware Thing - Fetching DTDs is a much bigger job.
+
+## Regarding `<!DOCTYPE`s and `<!ENTITY`s
+
+The parser will handle the basic XML entities in text nodes and attribute
+values: `&amp; &lt; &gt; &apos; &quot;`. It's possible to define additional
+entities in XML by putting them in the DTD. This parser doesn't do anything
+with that. If you want to listen to the `ondoctype` event, and then fetch
+the doctypes, and read the entities and add them to `parser.ENTITIES`, then
+be my guest.
+
+Unknown entities will fail in strict mode, and in loose mode, will pass
+through unmolested.
+
+## Usage
+
+```javascript
+var sax = require("./lib/sax"),
+  strict = true, // set to false for html-mode
+  parser = sax.parser(strict);
+
+parser.onerror = function (e) {
+  // an error happened.
+};
+parser.ontext = function (t) {
+  // got some text.  t is the string of text.
+};
+parser.onopentag = function (node) {
+  // opened a tag.  node has "name" and "attributes"
+};
+parser.onattribute = function (attr) {
+  // an attribute.  attr has "name" and "value"
+};
+parser.onend = function () {
+  // parser stream is done, and ready to have more stuff written to it.
+};
+
+parser.write('<xml>Hello, <who name="world">world</who>!</xml>').close();
+
+// stream usage
+// takes the same options as the parser
+var saxStream = require("sax").createStream(strict, options)
+saxStream.on("error", function (e) {
+  // unhandled errors will throw, since this is a proper node
+  // event emitter.
+  console.error("error!", e)
+  // clear the error
+  this._parser.error = null
+  this._parser.resume()
+})
+saxStream.on("opentag", function (node) {
+  // same object as above
+})
+// pipe is supported, and it's readable/writable
+// same chunks coming in also go out.
+fs.createReadStream("file.xml")
+  .pipe(saxStream)
+  .pipe(fs.createWriteStream("file-copy.xml"))
+```
+
+
+## Arguments
+
+Pass the following arguments to the parser function. All are optional.
+
+`strict` - Boolean. Whether or not to be a jerk. Default: `false`.
+
+`opt` - Object bag of settings regarding string formatting. All default to `false`.
+
+Settings supported:
+
+* `trim` - Boolean. Whether or not to trim text and comment nodes.
+* `normalize` - Boolean. If true, then turn any whitespace into a single
+  space.
+* `lowercase` - Boolean. If true, then lowercase tag names and attribute names
+  in loose mode, rather than uppercasing them.
+* `xmlns` - Boolean. If true, then namespaces are supported.
+* `position` - Boolean. If false, then don't track line/col/position.
+* `strictEntities` - Boolean. If true, only parse [predefined XML
+  entities](http://www.w3.org/TR/REC-xml/#sec-predefined-ent)
+  (`&amp;`, `&apos;`, `&gt;`, `&lt;`, and `&quot;`)
+
+## Methods
+
+`write` - Write bytes onto the stream. You don't have to do this all at
+once. You can keep writing as much as you want.
+
+`close` - Close the stream. Once closed, no more data may be written until
+it is done processing the buffer, which is signaled by the `end` event.
+
+`resume` - To gracefully handle errors, assign a listener to the `error`
+event. Then, when the error is taken care of, you can call `resume` to
+continue parsing. Otherwise, the parser will not continue while in an error
+state.
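A short, hedged sketch of the `write`/`close`/`resume` cycle described above, feeding the parser in chunks (the XML snippet and chunking are hypothetical; the calls are the ones this README documents):

```javascript
var sax = require("sax")
var parser = sax.parser(true) // strict mode

parser.onerror = function (e) {
  // parsing halts here until the error is cleared
  console.error("parse error:", e.message)
  parser.resume()
}
parser.onclosetag = function (name) {
  console.log("closed:", name)
}

// write() may be called repeatedly with partial data;
// the parser buffers across chunk boundaries.
parser.write("<root><item>hel")
parser.write("lo</item></root>")
parser.close() // flushes remaining buffers and emits "end"
```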
+
+## Members
+
+At all times, the parser object will have the following members:
+
+`line`, `column`, `position` - Indications of the position in the XML
+document where the parser currently is looking.
+
+`startTagPosition` - Indicates the position where the current tag starts.
+
+`closed` - Boolean indicating whether or not the parser can be written to.
+If it's `true`, then wait for the `ready` event to write again.
+
+`strict` - Boolean indicating whether or not the parser is a jerk.
+
+`opt` - Any options passed into the constructor.
+
+`tag` - The current tag being dealt with.
+
+And a bunch of other stuff that you probably shouldn't touch.
+
+## Events
+
+All events emit with a single argument. To listen to an event, assign a
+function to `on<eventname>`. Functions get executed in the this-context of
+the parser object. The list of supported events are also in the exported
+`EVENTS` array.
+
+When using the stream interface, assign handlers using the EventEmitter
+`on` function in the normal fashion.
+
+`error` - Indication that something bad happened. The error will be hanging
+out on `parser.error`, and must be deleted before parsing can continue. By
+listening to this event, you can keep an eye on that kind of stuff. Note:
+this happens *much* more in strict mode. Argument: instance of `Error`.
+
+`text` - Text node. Argument: string of text.
+
+`doctype` - The `<!DOCTYPE` declaration. Argument: doctype string.
+
+`processinginstruction` - Stuff like `<?xml foo="blerg" ?>`. Argument:
+object with `name` and `body` members. Attributes are not parsed, as
+processing instructions have implementation dependent semantics.
+
+`sgmldeclaration` - Random SGML declarations. Stuff like `<!ENTITY p>`
+would trigger this kind of event. This is a weird thing to support, so it
+might go away at some point. SAX isn't intended to be used to parse SGML,
+after all.
+
+`opentagstart` - Emitted immediately when the tag name is available,
+but before any attributes are encountered. Argument: object with a
+`name` field and an empty `attributes` set. Note that this is the
+same object that will later be emitted in the `opentag` event.
+
+`opentag` - An opening tag. Argument: object with `name` and `attributes`.
+In non-strict mode, tag names are uppercased, unless the `lowercase`
+option is set. If the `xmlns` option is set, then it will contain
+namespace binding information on the `ns` member, and will have a
+`local`, `prefix`, and `uri` member.
+
+`closetag` - A closing tag. In loose mode, tags are auto-closed if their
+parent closes. In strict mode, well-formedness is enforced. Note that
+self-closing tags will have `closeTag` emitted immediately after `openTag`.
+Argument: tag name.
+
+`attribute` - An attribute node. Argument: object with `name` and `value`.
+In non-strict mode, attribute names are uppercased, unless the `lowercase`
+option is set. If the `xmlns` option is set, it will also contains namespace
+information.
+
+`comment` - A comment node. Argument: the string of the comment.
+
+`opencdata` - The opening tag of a `<![CDATA[` block.
+
+`cdata` - The text of a `<![CDATA[` block. Since `<![CDATA[` blocks can get
+quite large, this event may fire multiple times for a single block, if it
+is broken up into multiple `write()`s. Argument: the string of random
+character data.
+
+`closecdata` - The closing tag (`]]>`) of a `<![CDATA[` block.
+
+`opennamespace` - If the `xmlns` option is set, then this event will
+signal the start of a new namespace binding.
+
+`closenamespace` - If the `xmlns` option is set, then this event will
+signal the end of a namespace binding.
+
+`end` - Indication that the closed stream has ended.
+
+`ready` - Indication that the stream has reset, and is ready to be written
+to.
+
+`noscript` - In non-strict mode, `<script>` tags trigger a `"script"`
+event, and their contents are not checked for special xml characters.
+If you pass `noscript: true`, then this behavior is suppressed.
+
+## Reporting Problems
+
+It's best to write a failing test if you find an issue. I will always
+accept pull requests with failing tests if they demonstrate intended
+behavior, but it is very hard to figure out what issue you're describing
+without a test. Writing a test is also the best way for you yourself
+to figure out if you really understand the issue you think you have with
+sax-js.
diff --git a/scripts/metrics/node_modules/sax/lib/sax.js b/scripts/metrics/node_modules/sax/lib/sax.js
new file mode 100644
index 00000000000..795d607ef63
--- /dev/null
+++ b/scripts/metrics/node_modules/sax/lib/sax.js
@@ -0,0 +1,1565 @@
+;(function (sax) { // wrapper for non-node envs
+  sax.parser = function (strict, opt) { return new SAXParser(strict, opt) }
+  sax.SAXParser = SAXParser
+  sax.SAXStream = SAXStream
+  sax.createStream = createStream
+
+  // When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns.
+ // When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)), + // since that's the earliest that a buffer overrun could occur. This way, checks are + // as rare as required, but as often as necessary to ensure never crossing this bound. + // Furthermore, buffers are only tested at most once per write(), so passing a very + // large string into write() might have undesirable effects, but this is manageable by + // the caller, so it is assumed to be safe. Thus, a call to write() may, in the extreme + // edge case, result in creating at most one complete copy of the string passed in. + // Set to Infinity to have unlimited buffers. + sax.MAX_BUFFER_LENGTH = 64 * 1024 + + var buffers = [ + 'comment', 'sgmlDecl', 'textNode', 'tagName', 'doctype', + 'procInstName', 'procInstBody', 'entity', 'attribName', + 'attribValue', 'cdata', 'script' + ] + + sax.EVENTS = [ + 'text', + 'processinginstruction', + 'sgmldeclaration', + 'doctype', + 'comment', + 'opentagstart', + 'attribute', + 'opentag', + 'closetag', + 'opencdata', + 'cdata', + 'closecdata', + 'error', + 'end', + 'ready', + 'script', + 'opennamespace', + 'closenamespace' + ] + + function SAXParser (strict, opt) { + if (!(this instanceof SAXParser)) { + return new SAXParser(strict, opt) + } + + var parser = this + clearBuffers(parser) + parser.q = parser.c = '' + parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH + parser.opt = opt || {} + parser.opt.lowercase = parser.opt.lowercase || parser.opt.lowercasetags + parser.looseCase = parser.opt.lowercase ? 'toLowerCase' : 'toUpperCase' + parser.tags = [] + parser.closed = parser.closedRoot = parser.sawRoot = false + parser.tag = parser.error = null + parser.strict = !!strict + parser.noscript = !!(strict || parser.opt.noscript) + parser.state = S.BEGIN + parser.strictEntities = parser.opt.strictEntities + parser.ENTITIES = parser.strictEntities ? Object.create(sax.XML_ENTITIES) : Object.create(sax.ENTITIES) + parser.attribList = [] + + // namespaces form a prototype chain. + // it always points at the current tag, + // which protos to its parent tag. + if (parser.opt.xmlns) { + parser.ns = Object.create(rootNS) + } + + // mostly just for error reporting + parser.trackPosition = parser.opt.position !== false + if (parser.trackPosition) { + parser.position = parser.line = parser.column = 0 + } + emit(parser, 'onready') + } + + if (!Object.create) { + Object.create = function (o) { + function F () {} + F.prototype = o + var newf = new F() + return newf + } + } + + if (!Object.keys) { + Object.keys = function (o) { + var a = [] + for (var i in o) if (o.hasOwnProperty(i)) a.push(i) + return a + } + } + + function checkBufferLength (parser) { + var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10) + var maxActual = 0 + for (var i = 0, l = buffers.length; i < l; i++) { + var len = parser[buffers[i]].length + if (len > maxAllowed) { + // Text/cdata nodes can get big, and since they're buffered, + // we can get here under normal conditions. + // Avoid issues by emitting the text node now, + // so at least it won't get any bigger. + switch (buffers[i]) { + case 'textNode': + closeText(parser) + break + + case 'cdata': + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + break + + case 'script': + emitNode(parser, 'onscript', parser.script) + parser.script = '' + break + + default: + error(parser, 'Max buffer length exceeded: ' + buffers[i]) + } + } + maxActual = Math.max(maxActual, len) + } + // schedule the next check for the earliest possible buffer overrun. 
+ var m = sax.MAX_BUFFER_LENGTH - maxActual + parser.bufferCheckPosition = m + parser.position + } + + function clearBuffers (parser) { + for (var i = 0, l = buffers.length; i < l; i++) { + parser[buffers[i]] = '' + } + } + + function flushBuffers (parser) { + closeText(parser) + if (parser.cdata !== '') { + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + } + if (parser.script !== '') { + emitNode(parser, 'onscript', parser.script) + parser.script = '' + } + } + + SAXParser.prototype = { + end: function () { end(this) }, + write: write, + resume: function () { this.error = null; return this }, + close: function () { return this.write(null) }, + flush: function () { flushBuffers(this) } + } + + var Stream + try { + Stream = require('stream').Stream + } catch (ex) { + Stream = function () {} + } + + var streamWraps = sax.EVENTS.filter(function (ev) { + return ev !== 'error' && ev !== 'end' + }) + + function createStream (strict, opt) { + return new SAXStream(strict, opt) + } + + function SAXStream (strict, opt) { + if (!(this instanceof SAXStream)) { + return new SAXStream(strict, opt) + } + + Stream.apply(this) + + this._parser = new SAXParser(strict, opt) + this.writable = true + this.readable = true + + var me = this + + this._parser.onend = function () { + me.emit('end') + } + + this._parser.onerror = function (er) { + me.emit('error', er) + + // if didn't throw, then means error was handled. + // go ahead and clear error, so we can write again. + me._parser.error = null + } + + this._decoder = null + + streamWraps.forEach(function (ev) { + Object.defineProperty(me, 'on' + ev, { + get: function () { + return me._parser['on' + ev] + }, + set: function (h) { + if (!h) { + me.removeAllListeners(ev) + me._parser['on' + ev] = h + return h + } + me.on(ev, h) + }, + enumerable: true, + configurable: false + }) + }) + } + + SAXStream.prototype = Object.create(Stream.prototype, { + constructor: { + value: SAXStream + } + }) + + SAXStream.prototype.write = function (data) { + if (typeof Buffer === 'function' && + typeof Buffer.isBuffer === 'function' && + Buffer.isBuffer(data)) { + if (!this._decoder) { + var SD = require('string_decoder').StringDecoder + this._decoder = new SD('utf8') + } + data = this._decoder.write(data) + } + + this._parser.write(data.toString()) + this.emit('data', data) + return true + } + + SAXStream.prototype.end = function (chunk) { + if (chunk && chunk.length) { + this.write(chunk) + } + this._parser.end() + return true + } + + SAXStream.prototype.on = function (ev, handler) { + var me = this + if (!me._parser['on' + ev] && streamWraps.indexOf(ev) !== -1) { + me._parser['on' + ev] = function () { + var args = arguments.length === 1 ? [arguments[0]] : Array.apply(null, arguments) + args.splice(0, 0, ev) + me.emit.apply(me, args) + } + } + + return Stream.prototype.on.call(me, ev, handler) + } + + // this really needs to be replaced with character classes. + // XML allows all manner of ridiculous numbers and digits. + var CDATA = '[CDATA[' + var DOCTYPE = 'DOCTYPE' + var XML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace' + var XMLNS_NAMESPACE = 'http://www.w3.org/2000/xmlns/' + var rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE } + + // http://www.w3.org/TR/REC-xml/#NT-NameStartChar + // This implementation works on strings, a single character at a time + // as such, it cannot ever support astral-plane characters (10000-EFFFF) + // without a significant breaking change to either this parser, or the + // JavaScript language. 
Implementation of an emoji-capable xml parser
+  // is left as an exercise for the reader.
+  var nameStart = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/
+
+  var nameBody = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/
+
+  var entityStart = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/
+  var entityBody = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/
+
+  function isWhitespace (c) {
+    return c === ' ' || c === '\n' || c === '\r' || c === '\t'
+  }
+
+  function isQuote (c) {
+    return c === '"' || c === '\''
+  }
+
+  function isAttribEnd (c) {
+    return c === '>' || isWhitespace(c)
+  }
+
+  function isMatch (regex, c) {
+    return regex.test(c)
+  }
+
+  function notMatch (regex, c) {
+    return !isMatch(regex, c)
+  }
+
+  var S = 0
+  sax.STATE = {
+    BEGIN: S++, // leading byte order mark or whitespace
+    BEGIN_WHITESPACE: S++, // leading whitespace
+    TEXT: S++, // general stuff
+    TEXT_ENTITY: S++, // &amp; and such.
+    OPEN_WAKA: S++, // <
+    SGML_DECL: S++, // <!BLARG
+    SGML_DECL_QUOTED: S++, // <!BLARG foo "bar
+    DOCTYPE: S++, // <!DOCTYPE
+    DOCTYPE_QUOTED: S++, // <!DOCTYPE "//blah
+    DOCTYPE_DTD: S++, // <!DOCTYPE "//blah" [ ...
+    DOCTYPE_DTD_QUOTED: S++, // <!DOCTYPE "//blah" [ "foo
+    COMMENT_STARTING: S++, // <!-
+    COMMENT: S++, // <!--
+    COMMENT_ENDING: S++, // <!-- blah -
+    COMMENT_ENDED: S++, // <!-- blah --
+    CDATA: S++, // <![CDATA[ something
+    CDATA_ENDING: S++, // ]
+    CDATA_ENDING_2: S++, // ]]
+    PROC_INST: S++, // <?hi
+    PROC_INST_BODY: S++, // <?hi there
+    PROC_INST_ENDING: S++, // <?hi "there" ?
+    OPEN_TAG: S++, // <strong
+    OPEN_TAG_SLASH: S++, // <strong /
+    ATTRIB: S++, // <a
+    ATTRIB_NAME: S++, // <a foo
+    ATTRIB_NAME_SAW_WHITE: S++, // <a foo _
+    ATTRIB_VALUE: S++, // <a foo=
+    ATTRIB_VALUE_QUOTED: S++, // <a foo="bar
+    ATTRIB_VALUE_CLOSED: S++, // <a foo="bar"
+    ATTRIB_VALUE_UNQUOTED: S++, // <a foo=bar
+    ATTRIB_VALUE_ENTITY_Q: S++, // <foo bar="&quot;"
+    ATTRIB_VALUE_ENTITY_U: S++, // <foo bar=&quot
+    CLOSE_TAG: S++, // </a
+    CLOSE_TAG_SAW_WHITE: S++, // </a   >
+    SCRIPT: S++, //