Fix typos
GoodDaisy committed Dec 14, 2023
1 parent 8f848b7 commit d65756f
Showing 58 changed files with 108 additions and 108 deletions.
2 changes: 1 addition & 1 deletion accounts-db/src/account_info.rs
@@ -76,7 +76,7 @@ const CACHED_OFFSET: OffsetReduced = (1 << (OffsetReduced::BITS - 1)) - 1;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub struct PackedOffsetAndFlags {
/// this provides 2^31 bits, which when multipled by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
/// this provides 2^31 bits, which when multiplied by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
offset_reduced: B31,
/// use 1 bit to specify that the entry is zero lamport
is_zero_lamport: bool,
2 changes: 1 addition & 1 deletion accounts-db/src/accounts.rs
@@ -2243,7 +2243,7 @@ mod tests {
);

/* This test assumes pubkey0 < pubkey1 < pubkey2.
* But the keys created with new_unique() does not gurantee this
* But the keys created with new_unique() does not guarantee this
* order because of the endianness. new_unique() calls add 1 at each
* key generaration as the little endian integer. A pubkey stores its
* value in a 32-byte array bytes, and its eq-partial trait considers
4 changes: 2 additions & 2 deletions accounts-db/src/accounts_db.rs
@@ -6005,7 +6005,7 @@ impl AccountsDb {
// with this function call. This means, if we get into this case, we can be
// confident that the entire state for this slot has been flushed to the storage
// already.
let mut scan_storages_elasped = Measure::start("scan_storages_elasped");
let mut scan_storages_elapsed = Measure::start("scan_storages_elapsed");
type ScanResult = ScanStorageResult<Pubkey, Arc<Mutex<HashSet<(Pubkey, Slot)>>>>;
let scan_result: ScanResult = self.scan_account_storage(
remove_slot,
@@ -15132,7 +15132,7 @@ pub mod tests {
return;
}
// Meddle load_limit to cover all branches of implementation.
// There should absolutely no behaviorial difference; the load_limit triggered
// There should absolutely no behavioral difference; the load_limit triggered
// slow branch should only affect the performance.
// Ordering::Relaxed is ok because of no data dependencies; the modified field is
// completely free-standing cfg(test) control-flow knob.
2 changes: 1 addition & 1 deletion accounts-db/src/accounts_hash.rs
@@ -1230,7 +1230,7 @@ pub enum ZeroLamportAccounts {
pub struct AccountHash(pub Hash);

// Ensure the newtype wrapper never changes size from the underlying Hash
// This also ensures there are no padding bytes, which is requried to safely implement Pod
// This also ensures there are no padding bytes, which is required to safely implement Pod
const _: () = assert!(std::mem::size_of::<AccountHash>() == std::mem::size_of::<Hash>());

/// Hash of accounts
2 changes: 1 addition & 1 deletion accounts-db/src/tiered_storage/byte_block.rs
@@ -65,7 +65,7 @@ impl ByteBlockWriter {

/// Write all the Some fields of the specified AccountMetaOptionalFields.
///
/// Note that the existance of each optional field is stored separately in
/// Note that the existence of each optional field is stored separately in
/// AccountMetaFlags.
pub fn write_optional_fields(
&mut self,
2 changes: 1 addition & 1 deletion bench-tps/src/bench.rs
@@ -238,7 +238,7 @@ where
// Move on to next chunk
self.chunk_index = (self.chunk_index + 1) % self.account_chunks.source.len();

// Switch directions after transfering for each "chunk"
// Switch directions after transferring for each "chunk"
if self.chunk_index == 0 {
self.reclaim_lamports_back_to_source_account =
!self.reclaim_lamports_back_to_source_account;
2 changes: 1 addition & 1 deletion bucket_map/src/restart.rs
@@ -79,7 +79,7 @@ impl RestartableBucket {
bucket.random = random;
}
}
/// retreive the file_name and random that were used prior to the current restart.
/// retrieve the file_name and random that were used prior to the current restart.
/// This was written into the restart file on the prior run by `set_file`.
pub(crate) fn get(&self) -> Option<(u128, u64)> {
self.restart.as_ref().map(|restart| {
4 changes: 2 additions & 2 deletions cli/src/cluster_query.rs
@@ -755,7 +755,7 @@ pub fn process_catchup(
if node_json_rpc_url.is_some() && node_json_rpc_url != gussed_default {
// go to new line to leave this message on console
println!(
"Prefering explicitly given rpc ({}) as us, although --our-localhost is given\n",
"Preferring explicitly given rpc ({}) as us, although --our-localhost is given\n",
node_json_rpc_url.as_ref().unwrap()
);
} else {
@@ -771,7 +771,7 @@
(if node_pubkey.is_some() && node_pubkey != guessed_default {
// go to new line to leave this message on console
println!(
"Prefering explicitly given node pubkey ({}) as us, although --our-localhost \
"Preferring explicitly given node pubkey ({}) as us, although --our-localhost \
is given\n",
node_pubkey.unwrap()
);
2 changes: 1 addition & 1 deletion cli/src/spend_utils.rs
@@ -161,7 +161,7 @@ where
dummy_message.recent_blockhash = *blockhash;
get_fee_for_messages(rpc_client, &[&dummy_message])?
}
None => 0, // Offline, cannot calulate fee
None => 0, // Offline, cannot calculate fee
};

match amount {
2 changes: 1 addition & 1 deletion client/src/connection_cache.rs
@@ -76,7 +76,7 @@ impl ConnectionCache {
Self::new_with_client_options(name, connection_pool_size, None, None, None)
}

/// Create a quic conneciton_cache with more client options
/// Create a quic connection_cache with more client options
pub fn new_with_client_options(
name: &'static str,
connection_pool_size: usize,
8 changes: 4 additions & 4 deletions core/src/banking_stage/forwarder.rs
@@ -100,7 +100,7 @@ impl Forwarder {
slot_metrics_tracker.increment_forwardable_batches_count(1);

let batched_forwardable_packets_count = forward_batch.len();
let (_forward_result, sucessful_forwarded_packets_count, leader_pubkey) = self
let (_forward_result, successful_forwarded_packets_count, leader_pubkey) = self
.forward_buffered_packets(
&forward_option,
forward_batch.get_forwardable_packets(),
@@ -114,7 +114,7 @@
);
}
let failed_forwarded_packets_count = batched_forwardable_packets_count
.saturating_sub(sucessful_forwarded_packets_count);
.saturating_sub(successful_forwarded_packets_count);

if failed_forwarded_packets_count > 0 {
slot_metrics_tracker.increment_failed_forwarded_packets_count(
@@ -123,9 +123,9 @@
slot_metrics_tracker.increment_packet_batch_forward_failure_count(1);
}

if sucessful_forwarded_packets_count > 0 {
if successful_forwarded_packets_count > 0 {
slot_metrics_tracker.increment_successful_forwarded_packets_count(
sucessful_forwarded_packets_count as u64,
successful_forwarded_packets_count as u64,
);
}
});
2 changes: 1 addition & 1 deletion core/src/banking_stage/latest_unprocessed_votes.rs
@@ -26,7 +26,7 @@ pub enum VoteSource {
Tpu,
}

/// Holds deserialized vote messages as well as their source, foward status and slot
/// Holds deserialized vote messages as well as their source, forward status and slot
#[derive(Debug, Clone)]
pub struct LatestValidatorVotePacket {
vote_source: VoteSource,
4 changes: 2 additions & 2 deletions core/src/banking_stage/leader_slot_metrics.rs
@@ -47,7 +47,7 @@ pub(crate) struct ProcessTransactionsSummary {
// Total amount of time spent running the cost model
pub cost_model_us: u64,

// Breakdown of time spent executing and comitting transactions
// Breakdown of time spent executing and committing transactions
pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,

// Breakdown of all the transaction errors from transactions passed for execution
@@ -104,7 +104,7 @@ struct LeaderSlotPacketCountMetrics {

// total number of transactions that were executed, but failed to be committed into the Poh stream because
// the block ended. Some of these may be already counted in `nonretryable_errored_transactions_count` if they
// then hit the age limit after failing to be comitted.
// then hit the age limit after failing to be committed.
executed_transactions_failed_commit_count: u64,

// total number of transactions that were excluded from the block because there were concurrent write locks active.
14 changes: 7 additions & 7 deletions core/src/banking_stage/qos_service.rs
@@ -732,7 +732,7 @@ mod tests {
bank.read_cost_tracker().unwrap().block_cost()
);
// all transactions are committed with actual units more than estimated
let commited_status: Vec<CommitTransactionDetails> = qos_cost_results
let committed_status: Vec<CommitTransactionDetails> = qos_cost_results
.iter()
.map(|tx_cost| CommitTransactionDetails::Committed {
compute_units: tx_cost.as_ref().unwrap().bpf_execution_cost()
@@ -742,7 +742,7 @@
let final_txs_cost = total_txs_cost + execute_units_adjustment * transaction_count;

// All transactions are committed, no costs should be removed
QosService::remove_costs(qos_cost_results.iter(), Some(&commited_status), &bank);
QosService::remove_costs(qos_cost_results.iter(), Some(&committed_status), &bank);
assert_eq!(
total_txs_cost,
bank.read_cost_tracker().unwrap().block_cost()
@@ -752,7 +752,7 @@
bank.read_cost_tracker().unwrap().transaction_count()
);

QosService::update_costs(qos_cost_results.iter(), Some(&commited_status), &bank);
QosService::update_costs(qos_cost_results.iter(), Some(&committed_status), &bank);
assert_eq!(
final_txs_cost,
bank.read_cost_tracker().unwrap().block_cost()
@@ -835,7 +835,7 @@ mod tests {
.collect();
let execute_units_adjustment = 10u64;

// assert only commited tx_costs are applied cost_tracker
// assert only committed tx_costs are applied cost_tracker
{
let qos_service = QosService::new(1);
let txs_costs = qos_service.compute_transaction_costs(
@@ -854,7 +854,7 @@
bank.read_cost_tracker().unwrap().block_cost()
);
// Half of transactions are not committed, the rest with cost adjustment
let commited_status: Vec<CommitTransactionDetails> = qos_cost_results
let committed_status: Vec<CommitTransactionDetails> = qos_cost_results
.iter()
.enumerate()
.map(|(n, tx_cost)| {
@@ -869,8 +869,8 @@
})
.collect();

QosService::remove_costs(qos_cost_results.iter(), Some(&commited_status), &bank);
QosService::update_costs(qos_cost_results.iter(), Some(&commited_status), &bank);
QosService::remove_costs(qos_cost_results.iter(), Some(&committed_status), &bank);
QosService::update_costs(qos_cost_results.iter(), Some(&committed_status), &bank);

// assert the final block cost
let mut expected_final_txs_count = 0u64;
4 changes: 2 additions & 2 deletions core/src/consensus/heaviest_subtree_fork_choice.rs
@@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice {
let mut update_operations: UpdateOperations = BTreeMap::new();
// Insert aggregate operations up to the root
self.insert_aggregate_operations(&mut update_operations, *slot_hash_key);
// Remove child link so that this slot cannot be choosen as best or deepest
// Remove child link so that this slot cannot be chosen as best or deepest
assert!(self
.fork_infos
.get_mut(&parent)
@@ -1308,7 +1308,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice {
// be for a slot that we currently do not have in our bank forks, so we
// return None.
//
// We are guarenteed that we will eventually repair a duplicate confirmed version
// We are guaranteed that we will eventually repair a duplicate confirmed version
// of this slot because the state machine will never dump a slot unless it has
// observed a duplicate confirmed version of the slot.
//
2 changes: 1 addition & 1 deletion core/src/repair/duplicate_repair_status.rs
@@ -1123,7 +1123,7 @@ pub mod tests {
let request_slot = 100;
let mut test_setup = setup_add_response_test_pruned(request_slot, 10);

// Insert all the correct ancestory
// Insert all the correct ancestry
let tree = test_setup
.correct_ancestors_response
.iter()
10 changes: 5 additions & 5 deletions core/src/repair/repair_weight.rs
@@ -338,7 +338,7 @@ impl RepairWeight {
}
Some(TreeRoot::PrunedRoot(subtree_root)) => {
// Even if these orphaned slots were previously pruned, they should be added back to
// `self.trees` as we are no longer sure of their ancestory.
// `self.trees` as we are no longer sure of their ancestry.
// After they are repaired there is a chance that they are now part of the rooted path.
// This is possible for a duplicate slot with multiple ancestors, if the
// version we had pruned before had the wrong ancestor, and the correct version is
@@ -892,7 +892,7 @@ impl RepairWeight {
);
}

/// Finds any ancestors avaiable from `blockstore` for `slot`.
/// Finds any ancestors available from `blockstore` for `slot`.
/// Ancestor search is stopped when finding one that chains to any
/// tree in `self.trees` or `self.pruned_trees` or if the ancestor is < self.root.
///
@@ -2201,21 +2201,21 @@ mod test {
let (blockstore, _, mut repair_weight) = setup_orphan_repair_weight();

// Ancestor of slot 4 is slot 2, with an existing subtree rooted at 0
// because there wass a vote for a descendant
// because there was a vote for a descendant
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 4),
(VecDeque::from([2]), Some(TreeRoot::Root(0)))
);

// Ancestors of 5 are [1, 3], with an existing subtree rooted at 0
// because there wass a vote for a descendant
// because there was a vote for a descendant
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 5),
(VecDeque::from([1, 3]), Some(TreeRoot::Root(0)))
);

// Ancestors of slot 23 are [20, 22], with an existing subtree of 20
// because there wass a vote for 20
// because there was a vote for 20
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 23),
(VecDeque::from([20, 22]), Some(TreeRoot::Root(20)))
2 changes: 1 addition & 1 deletion core/src/repair/serve_repair.rs
@@ -965,7 +965,7 @@ impl ServeRepair {
stats.dropped_requests_outbound_bandwidth += 1;
continue;
}
// Bypass ping/pong check for requests comming from QUIC endpoint.
// Bypass ping/pong check for requests coming from QUIC endpoint.
if !matches!(&request, RepairProtocol::Pong(_)) && response_sender.is_none() {
let (check, ping_pkt) =
Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair);
2 changes: 1 addition & 1 deletion core/tests/snapshots.rs
@@ -73,7 +73,7 @@ struct SnapshotTestConfig {
full_snapshot_archives_dir: TempDir,
bank_snapshots_dir: TempDir,
accounts_dir: PathBuf,
// as the underscore prefix indicates, this isn't explictly used; but it's needed to keep
// as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep
// TempDir::drop from running to retain that dir for the duration of test
_accounts_tmp_dir: TempDir,
}
4 changes: 2 additions & 2 deletions gossip/src/cluster_info.rs
@@ -1218,7 +1218,7 @@ impl ClusterInfo {
}

/// Returns epoch-slots inserted since the given cursor.
/// Excludes entries from nodes with unkown or different shred version.
/// Excludes entries from nodes with unknown or different shred version.
pub fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec<EpochSlots> {
let self_shred_version = Some(self.my_shred_version());
let gossip_crds = self.gossip.crds.read().unwrap();
@@ -1752,7 +1752,7 @@ impl ClusterInfo {
match gossip_crds.trim(cap, &keep, stakes, timestamp()) {
Err(err) => {
self.stats.trim_crds_table_failed.add_relaxed(1);
// TODO: Stakes are comming from the root-bank. Debug why/when
// TODO: Stakes are coming from the root-bank. Debug why/when
// they are empty/zero.
debug!("crds table trim failed: {:?}", err);
}
2 changes: 1 addition & 1 deletion gossip/src/contact_info.rs
@@ -350,7 +350,7 @@ impl ContactInfo {
}

// Removes the IP address at the given index if
// no socket entry refrences that index.
// no socket entry references that index.
fn maybe_remove_addr(&mut self, index: u8) {
if !self.sockets.iter().any(|entry| entry.index == index) {
self.addrs.remove(usize::from(index));
2 changes: 1 addition & 1 deletion gossip/src/crds_value.rs
@@ -1066,7 +1066,7 @@ mod test {
assert!(!other.check_duplicate(&node_crds));
assert_eq!(node.overrides(&other_crds), None);
assert_eq!(other.overrides(&node_crds), None);
// Differnt crds value is not a duplicate.
// Different crds value is not a duplicate.
let other = LegacyContactInfo::new_rand(&mut rng, Some(pubkey));
let other = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(other));
assert!(!node.check_duplicate(&other));
8 changes: 4 additions & 4 deletions gossip/tests/crds_gossip.rs
@@ -355,7 +355,7 @@ fn network_run_push(
)
})
.collect();
let transfered: Vec<_> = requests
let transferred: Vec<_> = requests
.into_par_iter()
.map(|(from, push_messages)| {
let mut bytes: usize = 0;
@@ -415,7 +415,7 @@ fn network_run_push(
})
.collect();

for (b, d, m, p) in transfered {
for (b, d, m, p) in transferred {
bytes += b;
delivered += d;
num_msgs += m;
@@ -520,7 +520,7 @@ fn network_run_pull(
})
.collect()
};
let transfered: Vec<_> = requests
let transferred: Vec<_> = requests
.into_iter()
.map(|(to, filters, caller_info)| {
let mut bytes: usize = 0;
@@ -585,7 +585,7 @@ fn network_run_pull(
(bytes, msgs, overhead)
})
.collect();
for (b, m, o) in transfered {
for (b, m, o) in transferred {
bytes += b;
msgs += m;
overhead += o;