Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Update to latest substrate-master and polkadot v0.3 #195

Merged
merged 53 commits
Mar 28, 2019
Merged
Show file tree
Hide file tree
Changes from 50 commits
Commits
Show all changes
53 commits
Select commit Hold shift + click to select a range
265add0
Rebuild runtime
gavofyork Dec 22, 2018
e589358
Remove invalid value from chainspec (#68)
gavofyork Jan 9, 2019
2024df8
service: use grandpa block import for locally sealed aura blocks (#85)
andresilva Jan 16, 2019
6112021
bump version to v0.3.1
andresilva Jan 17, 2019
29020a4
Merge branch 'v0.3' of github.com:paritytech/polkadot into v0.3
gavofyork Jan 18, 2019
fada3aa
Merge branch 'master' into v0.3
gavofyork Jan 18, 2019
dde0b5e
Update lock file.
gavofyork Jan 18, 2019
a35186d
limit number of transactions when building blocks (#91)
rphmeier Jan 18, 2019
4c7fe32
Update to latest Substrate
gavofyork Jan 18, 2019
8e0b074
Bump to 0.3.2
gavofyork Jan 18, 2019
b328153
Actually bump.
gavofyork Jan 18, 2019
b28ff9e
v0.3.2 (#98)
rphmeier Jan 21, 2019
a6f647a
point to alexander-backports of substrate
rphmeier Jan 23, 2019
3a88e24
bump version
rphmeier Jan 23, 2019
e837443
cli: fix node shutdown (#100)
andresilva Jan 22, 2019
b80733a
update to latest substrate, change to v0.3.4
rphmeier Jan 23, 2019
afbfee4
update to latest substrate, bump version to 0.3.5
andresilva Jan 25, 2019
d77698b
v0.3.6
rphmeier Jan 25, 2019
628ad2f
try to build on every v0.3 commit and update alexander-backports
rphmeier Jan 27, 2019
ab5d7ba
bump to v0.3.7
rphmeier Jan 31, 2019
c082b6b
bump to 0.3.8
rphmeier Jan 31, 2019
a9d2048
Bump to 0.3.9: network and pruning improvements
rphmeier Feb 1, 2019
cfa0d43
Bump to 0.3.10: reduce network bandwidth usage
andresilva Feb 6, 2019
0f12e0c
Use libp2p-kad 0.3.2 (#122)
tomaka Feb 6, 2019
ef8343e
Bump libp2p-identify to 0.3.1 (#123)
tomaka Feb 7, 2019
fe96cda
Bump to 0.3.12 (#127)
tomaka Feb 7, 2019
dae7c67
Update Substrate again (#128)
tomaka Feb 7, 2019
f94368b
update substrate and bump version to v0.3.13
andresilva Feb 11, 2019
2a5e8fc
bump version to v0.3.14: fix --reserved-nodes
andresilva Feb 11, 2019
05a7b52
add a manually curated grandpa module (#136)
rphmeier Feb 13, 2019
461ea31
updating v0.3 to use substrate v0.10 (#146)
gterzian Feb 25, 2019
c03d1cd
config: fix wrong ip for alexander bootnode (#161)
andresilva Feb 26, 2019
2dd861f
fix curated-grandpa and rebuild wasm (#162)
rphmeier Feb 26, 2019
8b07458
[v0.3] Integrates new gossip system into Polkadot (#166)
rphmeier Mar 4, 2019
3f23eb8
network: guard validation network future under exit signal (#168)
andresilva Mar 4, 2019
6d48897
bump version to v0.3.15: substrate v0.10
andresilva Mar 4, 2019
849b04b
[v0.3] update to substrate master (#175)
rphmeier Mar 13, 2019
fcf98ae
service: fix telemetry endpoints on alexander chainspec (#169) (#178)
andresilva Mar 19, 2019
b64e3cc
Update v0.3 to latest Substrate master (#177)
rphmeier Mar 20, 2019
a01bb1a
replace sr25519 accountid with anysigner
rphmeier Mar 20, 2019
e1b97b2
bump version to v0.3.17
andresilva Mar 20, 2019
4501a48
Some PoC-3 GRANDPA tweaks (#181)
rphmeier Mar 22, 2019
001f9aa
use authorities when calculating duty roster (#185)
rphmeier Mar 22, 2019
7296ba6
[v0.3] Update to substrate master (#183)
andresilva Mar 22, 2019
37bf8b4
update to substrate master: bump version to v0.3.19 (#188)
andresilva Mar 25, 2019
06fe5cc
polkadot v0.3.20 (#190)
andresilva Mar 26, 2019
d854250
bump spec version (#191)
andresilva Mar 26, 2019
71d9c0c
Merge remote-tracking branch 'origin/v0.3' into update-substrate
bkchr Mar 27, 2019
82e156a
Fix compilation
bkchr Mar 27, 2019
e3e599a
Update version to 0.4.0
bkchr Mar 28, 2019
7ed879e
Switch to use `polkadot-master` branch from substrate
bkchr Mar 28, 2019
2e6ea76
Remove unused struct
bkchr Mar 28, 2019
14751a7
Remove `grandpa::SyncedAuthorities` from `OnSessionChange`
bkchr Mar 28, 2019
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ publish-s3-release:
- kubectl get nodes -l node=polkadot
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range @.status.addresses[?(@.type=="ExternalIP")]}{.address}{"\n"}{end}'
- echo "# polkadots' nodes"
- kubectl -n polkadot get pods
- kubectl -n polkadot get pods
-o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}'


Expand Down
1,702 changes: 873 additions & 829 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ path = "src/main.rs"

[package]
name = "polkadot"
version = "0.3.0"
version = "0.4.0"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"

Expand Down
2 changes: 1 addition & 1 deletion cli/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "polkadot-cli"
version = "0.3.0"
version = "0.4.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "Polkadot node implementation in Rust."

Expand Down
3 changes: 1 addition & 2 deletions cli/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,14 +36,13 @@ use chain_spec::ChainSpec;
use futures::Future;
use tokio::runtime::Runtime;
use service::Service as BareService;
use cli::NoCustom;

pub use service::{
Components as ServiceComponents, PolkadotService, CustomConfiguration, ServiceFactory, Factory,
ProvideRuntimeApi, CoreApi, ParachainHost,
};

pub use cli::{VersionInfo, IntoExit};
pub use cli::{VersionInfo, IntoExit, NoCustom};
pub use cli::error;
pub use tokio::runtime::TaskExecutor;

Expand Down
18 changes: 10 additions & 8 deletions collator/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -259,14 +259,15 @@ impl<P, E> Worker for CollationNode<P, E> where
match known_oracle.block_status(&BlockId::hash(*block_hash)) {
Err(_) | Ok(BlockStatus::Unknown) | Ok(BlockStatus::Queued) => None,
Ok(BlockStatus::KnownBad) => Some(Known::Bad),
Ok(BlockStatus::InChain) => match known_oracle.leaves() {
Err(_) => None,
Ok(leaves) => if leaves.contains(block_hash) {
Some(Known::Leaf)
} else {
Some(Known::Old)
},
}
Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) =>
match known_oracle.leaves() {
Err(_) => None,
Ok(leaves) => if leaves.contains(block_hash) {
Some(Known::Leaf)
} else {
Some(Known::Old)
},
}
}
},
);
Expand Down Expand Up @@ -481,3 +482,4 @@ mod tests {
assert_eq!(collation.receipt.egress_queue_roots, vec![(a, root_a), (b, root_b)]);
}
}

4 changes: 2 additions & 2 deletions network/src/collator_pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@

//! Bridge between the network and consensus service for getting collations to it.

use polkadot_primitives::{parachain::CollatorId, Hash};
use polkadot_primitives::parachain::{Id as ParaId, Collation};
use polkadot_primitives::Hash;
use polkadot_primitives::parachain::{CollatorId, Id as ParaId, Collation};
use futures::sync::oneshot;

use std::collections::hash_map::{HashMap, Entry};
Expand Down
56 changes: 28 additions & 28 deletions network/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,9 +55,9 @@ pub mod gossip;

use codec::{Decode, Encode};
use futures::sync::oneshot;
use polkadot_primitives::{Block, SessionKey, Hash, Header, parachain::CollatorId};
use polkadot_primitives::parachain::{Id as ParaId, BlockData, CandidateReceipt, Collation};
use substrate_network::{NodeIndex, RequestId, Context, Severity};
use polkadot_primitives::{Block, SessionKey, Hash, Header};
use polkadot_primitives::parachain::{Id as ParaId, CollatorId, BlockData, CandidateReceipt, Collation};
use substrate_network::{PeerId, RequestId, Context, Severity};
use substrate_network::{message, generic_message};
use substrate_network::specialization::NetworkSpecialization as Specialization;
use substrate_network::StatusMessage as GenericFullStatus;
Expand Down Expand Up @@ -156,21 +156,21 @@ pub enum Message {
Collation(Hash, Collation),
}

fn send_polkadot_message(ctx: &mut Context<Block>, to: NodeIndex, message: Message) {
fn send_polkadot_message(ctx: &mut Context<Block>, to: PeerId, message: Message) {
trace!(target: "p_net", "Sending polkadot message to {}: {:?}", to, message);
let encoded = message.encode();
ctx.send_message(to, generic_message::Message::ChainSpecific(encoded))
}

/// Polkadot protocol attachment for substrate.
pub struct PolkadotProtocol {
peers: HashMap<NodeIndex, PeerInfo>,
peers: HashMap<PeerId, PeerInfo>,
collating_for: Option<(CollatorId, ParaId)>,
collators: CollatorPool,
validators: HashMap<SessionKey, NodeIndex>,
validators: HashMap<SessionKey, PeerId>,
local_collations: LocalCollations<Collation>,
live_validation_sessions: LiveValidationSessions,
in_flight: HashMap<(RequestId, NodeIndex), BlockDataRequest>,
in_flight: HashMap<(RequestId, PeerId), BlockDataRequest>,
pending: Vec<BlockDataRequest>,
extrinsic_store: Option<::av_store::Store>,
next_req_id: u64,
Expand Down Expand Up @@ -225,7 +225,7 @@ impl PolkadotProtocol {
{
peer_data.collator_state.send_key(new_local.clone(), |msg| send_polkadot_message(
ctx,
*id,
id.clone(),
msg
));
}
Expand Down Expand Up @@ -257,7 +257,7 @@ impl PolkadotProtocol {
}
Err(Some(known_keys)) => {
let next_peer = known_keys.iter()
.filter_map(|x| validator_keys.get(x).map(|id| (x.clone(), *id)))
.filter_map(|x| validator_keys.get(x).map(|id| (x.clone(), id.clone())))
.find(|&(ref key, _)| pending.attempted_peers.insert(key.clone()))
.map(|(_, id)| id);

Expand All @@ -268,7 +268,7 @@ impl PolkadotProtocol {

send_polkadot_message(
ctx,
who,
who.clone(),
Message::RequestBlockData(req_id, parent, c_hash),
);

Expand All @@ -290,7 +290,7 @@ impl PolkadotProtocol {
self.pending = new_pending;
}

fn on_polkadot_message(&mut self, ctx: &mut Context<Block>, who: NodeIndex, msg: Message) {
fn on_polkadot_message(&mut self, ctx: &mut Context<Block>, who: PeerId, msg: Message) {
trace!(target: "p_net", "Polkadot message from {}: {:?}", who, msg);
match msg {
Message::SessionKey(key) => self.on_session_key(ctx, who, key),
Expand All @@ -313,7 +313,7 @@ impl PolkadotProtocol {
}
}

fn on_session_key(&mut self, ctx: &mut Context<Block>, who: NodeIndex, key: SessionKey) {
fn on_session_key(&mut self, ctx: &mut Context<Block>, who: PeerId, key: SessionKey) {
{
let info = match self.peers.get_mut(&who) {
Some(peer) => peer,
Expand Down Expand Up @@ -343,7 +343,7 @@ impl PolkadotProtocol {
for (relay_parent, collation) in new_collations {
send_polkadot_message(
ctx,
who,
who.clone(),
Message::Collation(relay_parent, collation),
)
}
Expand All @@ -354,8 +354,8 @@ impl PolkadotProtocol {
self.dispatch_pending_requests(ctx);
}

fn on_block_data(&mut self, ctx: &mut Context<Block>, who: NodeIndex, req_id: RequestId, data: Option<BlockData>) {
match self.in_flight.remove(&(req_id, who)) {
fn on_block_data(&mut self, ctx: &mut Context<Block>, who: PeerId, req_id: RequestId, data: Option<BlockData>) {
match self.in_flight.remove(&(req_id, who.clone())) {
Some(req) => {
if let Some(data) = data {
if data.hash() == req.block_data_hash {
Expand All @@ -372,7 +372,7 @@ impl PolkadotProtocol {
}

// when a validator sends us (a collator) a new role.
fn on_new_role(&mut self, ctx: &mut Context<Block>, who: NodeIndex, role: Role) {
fn on_new_role(&mut self, ctx: &mut Context<Block>, who: PeerId, role: Role) {
let info = match self.peers.get_mut(&who) {
Some(peer) => peer,
None => {
Expand Down Expand Up @@ -400,7 +400,7 @@ impl PolkadotProtocol {
debug!(target: "p_net", "Broadcasting collation on relay parent {:?}", relay_parent);
send_polkadot_message(
ctx,
who,
who.clone(),
Message::Collation(relay_parent, collation),
)
}
Expand All @@ -413,7 +413,7 @@ impl Specialization<Block> for PolkadotProtocol {
Status { collating_for: self.collating_for.clone() }.encode()
}

fn on_connect(&mut self, ctx: &mut Context<Block>, who: NodeIndex, status: FullStatus) {
fn on_connect(&mut self, ctx: &mut Context<Block>, who: PeerId, status: FullStatus) {
let local_status = match Status::decode(&mut &status.chain_status[..]) {
Some(status) => status,
None => {
Expand All @@ -440,7 +440,7 @@ impl Specialization<Block> for PolkadotProtocol {

peer_info.collator_state.set_role(collator_role, |msg| send_polkadot_message(
ctx,
who,
who.clone(),
msg,
));
}
Expand All @@ -450,7 +450,7 @@ impl Specialization<Block> for PolkadotProtocol {
for local_session_key in self.live_validation_sessions.recent_keys() {
peer_info.collator_state.send_key(local_session_key.clone(), |msg| send_polkadot_message(
ctx,
who,
who.clone(),
msg,
));
}
Expand All @@ -460,7 +460,7 @@ impl Specialization<Block> for PolkadotProtocol {
self.dispatch_pending_requests(ctx);
}

fn on_disconnect(&mut self, ctx: &mut Context<Block>, who: NodeIndex) {
fn on_disconnect(&mut self, ctx: &mut Context<Block>, who: PeerId) {
if let Some(info) = self.peers.remove(&who) {
if let Some((acc_id, _)) = info.collating_for {
let new_primary = self.collators.on_disconnect(acc_id)
Expand All @@ -469,7 +469,7 @@ impl Specialization<Block> for PolkadotProtocol {
if let Some((new_primary, primary_info)) = new_primary {
primary_info.collator_state.set_role(Role::Primary, |msg| send_polkadot_message(
ctx,
new_primary,
new_primary.clone(),
msg,
));
}
Expand Down Expand Up @@ -502,7 +502,7 @@ impl Specialization<Block> for PolkadotProtocol {
}
}

fn on_message(&mut self, ctx: &mut Context<Block>, who: NodeIndex, message: &mut Option<message::Message<Block>>) {
fn on_message(&mut self, ctx: &mut Context<Block>, who: PeerId, message: &mut Option<message::Message<Block>>) {
match message.take() {
Some(generic_message::Message::ChainSpecific(raw)) => {
match Message::decode(&mut raw.as_slice()) {
Expand Down Expand Up @@ -532,7 +532,7 @@ impl Specialization<Block> for PolkadotProtocol {
Action::NewRole(account_id, role) => if let Some((collator, info)) = self.collator_peer(account_id) {
info.collator_state.set_role(role, |msg| send_polkadot_message(
ctx,
collator,
collator.clone(),
msg,
))
},
Expand All @@ -548,7 +548,7 @@ impl Specialization<Block> for PolkadotProtocol {

impl PolkadotProtocol {
// we received a collation from a peer
fn on_collation(&mut self, ctx: &mut Context<Block>, from: NodeIndex, relay_parent: Hash, collation: Collation) {
fn on_collation(&mut self, ctx: &mut Context<Block>, from: PeerId, relay_parent: Hash, collation: Collation) {
let collation_para = collation.receipt.parachain_index;
let collated_acc = collation.receipt.collator.clone();

Expand Down Expand Up @@ -577,7 +577,7 @@ impl PolkadotProtocol {
}

// get connected peer with given account ID for collation.
fn collator_peer(&mut self, collator_id: CollatorId) -> Option<(NodeIndex, &mut PeerInfo)> {
fn collator_peer(&mut self, collator_id: CollatorId) -> Option<(PeerId, &mut PeerInfo)> {
let check_info = |info: &PeerInfo| info
.collating_for
.as_ref()
Expand All @@ -586,7 +586,7 @@ impl PolkadotProtocol {
self.peers
.iter_mut()
.filter(|&(_, ref info)| check_info(&**info))
.map(|(who, info)| (*who, info))
.map(|(who, info)| (who.clone(), info))
.next()
}

Expand Down Expand Up @@ -616,7 +616,7 @@ impl PolkadotProtocol {
debug!(target: "p_net", "Sending local collation to {:?}", primary);
send_polkadot_message(
ctx,
*who,
who.clone(),
Message::Collation(relay_parent, cloned_collation),
)
},
Expand Down
Loading