
block_header can fail so return Result (#8581)
* block_header can fail so return Result

* Restore previous return type based on feedback

* Fix failing doc tests running on non-code
dvdplm authored and ascjones committed May 14, 2018
1 parent 9623840 commit 78edfe5
Showing 3 changed files with 8 additions and 9 deletions.
9 changes: 4 additions & 5 deletions rpc/src/v1/impls/light/parity.rs
@@ -395,9 +395,9 @@ impl Parity for ParityClient {
 
 		let engine = self.light_dispatch.client.engine().clone();
 		let from_encoded = move |encoded: encoded::Header| {
-			let header = encoded.decode().expect("decoding error"); // REVIEW: not sure what to do here; what is a decent return value for the error case here?
+			let header = encoded.decode().map_err(errors::decode)?;
 			let extra_info = engine.extra_info(&header);
-			RichHeader {
+			Ok(RichHeader {
 				inner: Header {
 					hash: Some(header.hash().into()),
 					size: Some(encoded.rlp().as_raw().len().into()),
@@ -418,9 +418,8 @@ impl Parity for ParityClient {
 					extra_data: Bytes::new(header.extra_data().clone()),
 				},
 				extra_info: extra_info,
-			}
+			})
 		};
-
 		// Note: Here we treat `Pending` as `Latest`.
 		// Since light clients don't produce pending blocks
 		// (they don't have state) we can safely fallback to `Latest`.
@@ -430,7 +429,7 @@ impl Parity for ParityClient {
 			BlockNumber::Latest | BlockNumber::Pending => BlockId::Latest,
 		};
 
-		Box::new(self.fetcher().header(id).map(from_encoded))
+		Box::new(self.fetcher().header(id).and_then(from_encoded))
 	}
 
 	fn ipfs_cid(&self, content: Bytes) -> Result<String> {
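
The core of the change: `from_encoded` used to panic via `.expect("decoding error")` and hand a plain `RichHeader` to the future's `map`; it now propagates the decode failure as an `Err` (mapped through `errors::decode`), so the caller must chain it with `and_then` instead of `map` to avoid nesting a `Result` inside the future's item. Below is a minimal self-contained sketch of that pattern, using plain `Result` combinators instead of the future types used in this file; `DecodeError`, `Header`, `RichHeader`, `decode` and `from_encoded` are simplified stand-ins, not the real Parity types.

// Sketch of the map -> and_then pattern from the diff above; all names
// here are illustrative stand-ins, not the actual Parity RPC types.

#[derive(Debug)]
struct DecodeError;

#[derive(Debug)]
struct Header { number: u64 }

#[derive(Debug)]
struct RichHeader { inner: Header, extra: String }

// Stand-in for `encoded.decode()`: decoding can legitimately fail.
fn decode(raw: &[u8]) -> Result<Header, DecodeError> {
    if raw.len() >= 8 {
        let mut buf = [0u8; 8];
        buf.copy_from_slice(&raw[..8]);
        Ok(Header { number: u64::from_le_bytes(buf) })
    } else {
        Err(DecodeError)
    }
}

// Before the commit this closure panicked on a bad header and returned the
// value directly; afterwards it returns `Result`, with `?` replacing
// `.expect("decoding error")`.
fn from_encoded(raw: &[u8]) -> Result<RichHeader, DecodeError> {
    let header = decode(raw)?;
    Ok(RichHeader {
        extra: format!("block #{}", header.number),
        inner: header,
    })
}

fn main() {
    // Stand-in for the value produced by `self.fetcher().header(id)`.
    let fetched: Result<Vec<u8>, DecodeError> = Ok(7u64.to_le_bytes().to_vec());

    // `map` would yield a nested Result<Result<RichHeader, _>, _>;
    // `and_then` flattens it, mirroring the change to the boxed future.
    let rich: Result<RichHeader, DecodeError> = fetched.and_then(|raw| from_encoded(&raw));
    println!("{:?}", rich);
}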
6 changes: 3 additions & 3 deletions util/journaldb/src/earlymergedb.rs
@@ -57,7 +57,7 @@ enum RemoveFrom {
 /// the removals actually take effect.
 ///
 /// journal format:
-/// ```
+/// ```text
 /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, n] => [ ... ]
@@ -76,7 +76,7 @@ enum RemoveFrom {
 /// which includes an original key, if any.
 ///
 /// The semantics of the `counter` are:
-/// ```
+/// ```text
 /// insert key k:
 ///   counter already contains k: count += 1
 ///   counter doesn't contain k:
@@ -92,7 +92,7 @@ enum RemoveFrom {
 ///
 /// Practically, this means that for each commit block turning from recent to ancient we do the
 /// following:
-/// ```
+/// ```text
 /// is_canonical:
 ///   inserts: Ignored (left alone in the backing database).
 ///   deletes: Enacted; however, recent history queue is checked for ongoing references. This is
2 changes: 1 addition & 1 deletion util/journaldb/src/refcounteddb.rs
@@ -40,7 +40,7 @@ use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef};
 /// the removals actually take effect.
 ///
 /// journal format:
-/// ```
+/// ```text
 /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
 /// [era, n] => [ ... ]
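
Both journaldb files make the same documentation fix: the journal-format diagrams inside their `///` comments sat in untagged code fences, which rustdoc compiles as Rust doctests, and that is what "Fix failing doc tests running on non-code" refers to. A small stand-alone sketch of the rule follows; the `JournalOverlay` name is a placeholder for this example, not the real journaldb type.

// Minimal illustration (not the journaldb code) of why the fence tag
// matters: rustdoc treats untagged fenced blocks in doc comments as
// doctests, so non-Rust diagrams need an explicit `text` tag.

/// Journal format:
/// ```text
/// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
/// [era, n] => [ ... ]
/// ```
///
/// Without the `text` tag, `cargo test` would try to compile the block
/// above as Rust and report a failing doctest, which is what this commit
/// fixes in earlymergedb.rs and refcounteddb.rs.
pub struct JournalOverlay;

fn main() {
    let _ = JournalOverlay;
}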
