Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Commit

Permalink
Merge remote-tracking branch 'origin/master' into Add-Event-to-Pallet…
Browse files Browse the repository at this point in the history
…-Asset-Tx-Payment
  • Loading branch information
parity-processbot committed Jul 29, 2022
2 parents fb7efed + 47c19a5 commit e438503
Show file tree
Hide file tree
Showing 22 changed files with 365 additions and 144 deletions.
4 changes: 4 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 3 additions & 2 deletions bin/node-template/runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -272,10 +272,11 @@ impl pallet_template::Config for Runtime {

// Create the runtime by composing the FRAME pallets that were previously configured.
construct_runtime!(
pub enum Runtime where
pub struct Runtime
where
Block = Block,
NodeBlock = opaque::Block,
UncheckedExtrinsic = UncheckedExtrinsic
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system,
RandomnessCollectiveFlip: pallet_randomness_collective_flip,
Expand Down
97 changes: 56 additions & 41 deletions client/allocator/src/freeing_bump.rs
Original file line number Diff line number Diff line change
Expand Up @@ -314,27 +314,50 @@ impl IndexMut<Order> for FreeLists {
}
}

/// Memory allocation stats gathered during the lifetime of the allocator.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct AllocationStats {
	/// The current number of bytes allocated.
	///
	/// This represents how many bytes are allocated *right now*.
	pub bytes_allocated: u32,

	/// The peak number of bytes ever allocated.
	///
	/// This is the maximum that `bytes_allocated` has ever reached.
	pub bytes_allocated_peak: u32,

	/// The sum of the sizes of every allocation ever made.
	///
	/// This increases every time a new allocation is made and is never
	/// decreased by deallocations.
	pub bytes_allocated_sum: u32,

	/// The amount of address space (in bytes) used by the allocator.
	///
	/// This is calculated as the difference between the allocator's bumper
	/// and the heap base.
	///
	/// Currently the bumper is only ever incremented, so this is
	/// simultaneously the current value as well as the peak value.
	pub address_space_used: u32,
}

/// An implementation of freeing bump allocator.
///
/// Refer to the module-level documentation for further details.
pub struct FreeingBumpHeapAllocator {
original_heap_base: u32,
bumper: u32,
free_lists: FreeLists,
total_size: u32,
poisoned: bool,
max_total_size: u32,
max_bumper: u32,
last_observed_memory_size: u32,
stats: AllocationStats,
}

impl Drop for FreeingBumpHeapAllocator {
fn drop(&mut self) {
log::debug!(
target: LOG_TARGET,
"allocator being destroyed, max_total_size {}, max_bumper {}",
self.max_total_size,
self.max_bumper,
)
log::debug!(target: LOG_TARGET, "allocator dropped: {:?}", self.stats)
}
}

Expand All @@ -348,13 +371,12 @@ impl FreeingBumpHeapAllocator {
let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;

FreeingBumpHeapAllocator {
original_heap_base: aligned_heap_base,
bumper: aligned_heap_base,
free_lists: FreeLists::new(),
total_size: 0,
poisoned: false,
max_total_size: 0,
max_bumper: aligned_heap_base,
last_observed_memory_size: 0,
stats: AllocationStats::default(),
}
}

Expand Down Expand Up @@ -412,22 +434,13 @@ impl FreeingBumpHeapAllocator {
// Write the order in the occupied header.
Header::Occupied(order).write_into(mem, header_ptr)?;

self.total_size += order.size() + HEADER_SIZE;

log::trace!(
target: LOG_TARGET,
"after allocation, total_size = {}, bumper = {}.",
self.total_size,
self.bumper,
);
self.stats.bytes_allocated += order.size() + HEADER_SIZE;
self.stats.bytes_allocated_sum += order.size() + HEADER_SIZE;
self.stats.bytes_allocated_peak =
std::cmp::max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated);
self.stats.address_space_used = self.bumper - self.original_heap_base;

// update trackers if needed.
if self.total_size > self.max_total_size {
self.max_total_size = self.total_size;
}
if self.bumper > self.max_bumper {
self.max_bumper = self.bumper;
}
log::trace!(target: LOG_TARGET, "after allocation: {:?}", self.stats);

bomb.disarm();
Ok(Pointer::new(header_ptr + HEADER_SIZE))
Expand Down Expand Up @@ -469,21 +482,23 @@ impl FreeingBumpHeapAllocator {
let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr));
Header::Free(prev_head).write_into(mem, header_ptr)?;

// Do the total_size book keeping.
self.total_size = self
.total_size
self.stats.bytes_allocated = self
.stats
.bytes_allocated
.checked_sub(order.size() + HEADER_SIZE)
.ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?;
log::trace!(
"after deallocation, total_size = {}, bumper = {}.",
self.total_size,
self.bumper,
);
.ok_or_else(|| error("underflow of the currently allocated bytes count"))?;

log::trace!("after deallocation: {:?}", self.stats);

bomb.disarm();
Ok(())
}

	/// Returns the allocation stats for this allocator.
	///
	/// The stats are returned by value (a clone), so the returned snapshot
	/// is not affected by subsequent allocations or deallocations.
	pub fn stats(&self) -> AllocationStats {
		self.stats.clone()
	}

/// Increases the `bumper` by `size`.
///
/// Returns the `bumper` from before the increase. Returns an `Error::AllocatorOutOfSpace` if
Expand Down Expand Up @@ -791,13 +806,13 @@ mod tests {
let ptr1 = heap.allocate(&mut mem[..], 32).unwrap();
assert_eq!(ptr1, to_pointer(HEADER_SIZE));
heap.deallocate(&mut mem[..], ptr1).expect("failed freeing ptr1");
assert_eq!(heap.total_size, 0);
assert_eq!(heap.stats.bytes_allocated, 0);
assert_eq!(heap.bumper, 40);

let ptr2 = heap.allocate(&mut mem[..], 16).unwrap();
assert_eq!(ptr2, to_pointer(48));
heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2");
assert_eq!(heap.total_size, 0);
assert_eq!(heap.stats.bytes_allocated, 0);
assert_eq!(heap.bumper, 64);

// when
Expand Down Expand Up @@ -825,7 +840,7 @@ mod tests {
heap.allocate(&mut mem[..], 9).unwrap();

// then
assert_eq!(heap.total_size, HEADER_SIZE + 16);
assert_eq!(heap.stats.bytes_allocated, HEADER_SIZE + 16);
}

#[test]
Expand All @@ -840,7 +855,7 @@ mod tests {
heap.deallocate(&mut mem[..], ptr).unwrap();

// then
assert_eq!(heap.total_size, 0);
assert_eq!(heap.stats.bytes_allocated, 0);
}

#[test]
Expand All @@ -856,7 +871,7 @@ mod tests {
}

// then
assert_eq!(heap.total_size, 0);
assert_eq!(heap.stats.bytes_allocated, 0);
}

#[test]
Expand Down
2 changes: 1 addition & 1 deletion client/allocator/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,4 @@ mod error;
mod freeing_bump;

pub use error::Error;
pub use freeing_bump::FreeingBumpHeapAllocator;
pub use freeing_bump::{AllocationStats, FreeingBumpHeapAllocator};
117 changes: 69 additions & 48 deletions client/consensus/slots/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ use log::{debug, info, warn};
use sc_consensus::{BlockImport, JustificationSyncLink};
use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN};
use sp_arithmetic::traits::BaseArithmetic;
use sp_consensus::{CanAuthorWith, Proposer, SelectChain, SyncOracle};
use sp_consensus::{CanAuthorWith, Proposal, Proposer, SelectChain, SyncOracle};
use sp_consensus_slots::{Slot, SlotDuration};
use sp_inherents::CreateInherentDataProviders;
use sp_runtime::{
Expand Down Expand Up @@ -103,7 +103,7 @@ pub trait SimpleSlotWorker<B: BlockT> {
type Proposer: Proposer<B> + Send;

/// Data associated with a slot claim.
type Claim: Send + 'static;
type Claim: Send + Sync + 'static;

/// Epoch data necessary for authoring.
type EpochData: Send + Sync + 'static;
Expand Down Expand Up @@ -183,6 +183,70 @@ pub trait SimpleSlotWorker<B: BlockT> {
/// Remaining duration for proposing.
fn proposing_remaining_duration(&self, slot_info: &SlotInfo<B>) -> Duration;

	/// Propose a block by `Proposer`.
	///
	/// Builds the pre-digest logs from `claim`, then drives `proposer` until it
	/// either produces a proposal or the `proposing_remaining` deadline fires,
	/// whichever happens first.
	///
	/// Returns `None` when proposing fails or when block production takes
	/// longer than the remaining slot time (both cases are logged, and the
	/// timeout case is additionally reported to telemetry); otherwise returns
	/// the finished `Proposal`.
	async fn propose(
		&mut self,
		proposer: Self::Proposer,
		claim: &Self::Claim,
		slot_info: SlotInfo<B>,
		proposing_remaining: Delay,
	) -> Option<
		Proposal<
			B,
			<Self::Proposer as Proposer<B>>::Transaction,
			<Self::Proposer as Proposer<B>>::Proof,
		>,
	> {
		let slot = slot_info.slot;
		let telemetry = self.telemetry();
		let logging_target = self.logging_target();
		let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info);
		let logs = self.pre_digest_data(slot, claim);

		// deadline our production to 98% of the total time left for proposing. As we deadline
		// the proposing below to the same total time left, the 2% margin should be enough for
		// the result to be returned.
		let proposing = proposer
			.propose(
				slot_info.inherent_data,
				sp_runtime::generic::Digest { logs },
				proposing_remaining_duration.mul_f32(0.98),
				None,
			)
			.map_err(|e| sp_consensus::Error::ClientImport(e.to_string()));

		// Race the proposing future against the slot deadline; whichever
		// completes first decides the outcome.
		let proposal = match futures::future::select(proposing, proposing_remaining).await {
			Either::Left((Ok(p), _)) => p,
			Either::Left((Err(err), _)) => {
				warn!(target: logging_target, "Proposing failed: {}", err);

				return None
			},
			Either::Right(_) => {
				// The deadline elapsed before the proposer finished.
				info!(
					target: logging_target,
					"⌛️ Discarding proposal for slot {}; block production took too long", slot,
				);
				// If the node was compiled with debug, tell the user to use release optimizations.
				#[cfg(build_type = "debug")]
				info!(
					target: logging_target,
					"👉 Recompile your node in `--release` mode to mitigate this problem.",
				);
				telemetry!(
					telemetry;
					CONSENSUS_INFO;
					"slots.discarding_proposal_took_too_long";
					"slot" => *slot,
				);

				return None
			},
		};

		Some(proposal)
	}

/// Implements [`SlotWorker::on_slot`].
async fn on_slot(
&mut self,
Expand Down Expand Up @@ -256,10 +320,8 @@ pub trait SimpleSlotWorker<B: BlockT> {
}

debug!(
target: self.logging_target(),
"Starting authorship at slot {}; timestamp = {}",
slot,
*timestamp,
target: logging_target,
"Starting authorship at slot {}; timestamp = {}", slot, *timestamp,
);

telemetry!(
Expand Down Expand Up @@ -287,48 +349,7 @@ pub trait SimpleSlotWorker<B: BlockT> {
},
};

let logs = self.pre_digest_data(slot, &claim);

// deadline our production to 98% of the total time left for proposing. As we deadline
// the proposing below to the same total time left, the 2% margin should be enough for
// the result to be returned.
let proposing = proposer
.propose(
slot_info.inherent_data,
sp_runtime::generic::Digest { logs },
proposing_remaining_duration.mul_f32(0.98),
None,
)
.map_err(|e| sp_consensus::Error::ClientImport(e.to_string()));

let proposal = match futures::future::select(proposing, proposing_remaining).await {
Either::Left((Ok(p), _)) => p,
Either::Left((Err(err), _)) => {
warn!(target: logging_target, "Proposing failed: {}", err);

return None
},
Either::Right(_) => {
info!(
target: logging_target,
"⌛️ Discarding proposal for slot {}; block production took too long", slot,
);
// If the node was compiled with debug, tell the user to use release optimizations.
#[cfg(build_type = "debug")]
info!(
target: logging_target,
"👉 Recompile your node in `--release` mode to mitigate this problem.",
);
telemetry!(
telemetry;
CONSENSUS_INFO;
"slots.discarding_proposal_took_too_long";
"slot" => *slot,
);

return None
},
};
let proposal = self.propose(proposer, &claim, slot_info, proposing_remaining).await?;

let (block, storage_proof) = (proposal.block, proposal.proof);
let (header, body) = block.deconstruct();
Expand Down
Loading

0 comments on commit e438503

Please sign in to comment.