From c89c92e18ef1f6b72e6f5f4cc72769e4f4f889d8 Mon Sep 17 00:00:00 2001
From: debris
Date: Thu, 22 Feb 2018 11:05:37 +0100
Subject: [PATCH 1/6] removed old migrations

---
 Cargo.lock | 12 --
 ethcore/migrations/Cargo.toml | 12 --
 ethcore/migrations/src/blocks/mod.rs | 21 ---
 ethcore/migrations/src/blocks/v8.rs | 37 ----
 ethcore/migrations/src/extras/mod.rs | 21 ---
 ethcore/migrations/src/extras/v6.rs | 102 -----------
 ethcore/migrations/src/lib.rs | 25 ---
 ethcore/migrations/src/state/mod.rs | 21 ---
 ethcore/migrations/src/state/v7.rs | 263 ---------------------------
 ethcore/migrations/src/v10.rs | 119 ------------
 ethcore/migrations/src/v9.rs | 82 ---------
 parity/helpers.rs | 2 +-
 parity/migration.rs | 151 +--------------
 util/migration/src/lib.rs | 4 +-
 14 files changed, 9 insertions(+), 863 deletions(-)
 delete mode 100644 ethcore/migrations/src/blocks/mod.rs
 delete mode 100644 ethcore/migrations/src/blocks/v8.rs
 delete mode 100644 ethcore/migrations/src/extras/mod.rs
 delete mode 100644 ethcore/migrations/src/extras/v6.rs
 delete mode 100644 ethcore/migrations/src/state/mod.rs
 delete mode 100644 ethcore/migrations/src/state/v7.rs
 delete mode 100644 ethcore/migrations/src/v10.rs
 delete mode 100644 ethcore/migrations/src/v9.rs

diff --git a/Cargo.lock b/Cargo.lock
index f1219a73d18..25e776ea289 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -600,19 +600,7 @@ dependencies = [
 name = "ethcore-migrations"
 version = "0.1.0"
 dependencies = [
- "ethcore 1.9.0",
- "ethcore-bloom-journal 0.1.0",
- "ethcore-bytes 0.1.0",
- "ethereum-types 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "journaldb 0.1.0",
- "keccak-hash 0.1.0",
- "kvdb 0.1.0",
- "kvdb-rocksdb 0.1.0",
- "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "macros 0.1.0",
  "migration 0.1.0",
- "patricia-trie 0.1.0",
- "rlp 0.2.1",
 ]

 [[package]]
diff --git a/ethcore/migrations/Cargo.toml b/ethcore/migrations/Cargo.toml
index db9c31bc348..561925be4c5 100644
--- a/ethcore/migrations/Cargo.toml
+++ b/ethcore/migrations/Cargo.toml
@@ -4,16 +4,4 @@ version = "0.1.0"
 authors = ["Parity Technologies "]

 [dependencies]
-ethcore-bytes = { path = "../../util/bytes" }
-ethereum-types = "0.2"
-keccak-hash = { path = "../../util/hash" }
-kvdb = { path = "../../util/kvdb" }
-kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
-log = "0.3"
-macros = { path = "../../util/macros" }
 migration = { path = "../../util/migration" }
-rlp = { path = "../../util/rlp" }
-patricia-trie = { path = "../../util/patricia_trie" }
-journaldb = { path = "../../util/journaldb" }
-ethcore-bloom-journal = { path = "../../util/bloom" }
-ethcore = { path = ".." }
diff --git a/ethcore/migrations/src/blocks/mod.rs b/ethcore/migrations/src/blocks/mod.rs
deleted file mode 100644
index 1dba8c876da..00000000000
--- a/ethcore/migrations/src/blocks/mod.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015-2017 Parity Technologies (UK) Ltd.
-// This file is part of Parity.
-
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity.
If not, see . - -//! Blocks database migrations. - -mod v8; - -pub use self::v8::V8; diff --git a/ethcore/migrations/src/blocks/v8.rs b/ethcore/migrations/src/blocks/v8.rs deleted file mode 100644 index 6c5441c9729..00000000000 --- a/ethcore/migrations/src/blocks/v8.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! This migration compresses the state db. - -use migration::{SimpleMigration, Progress}; -use rlp::{Compressible, UntrustedRlp, RlpType}; - -/// Compressing migration. -#[derive(Default)] -pub struct V8(Progress); - -impl SimpleMigration for V8 { - fn version(&self) -> u32 { - 8 - } - - fn columns(&self) -> Option { None } - - fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> { - self.0.tick(); - Some((key,UntrustedRlp::new(&value).compress(RlpType::Blocks).into_vec())) - } -} diff --git a/ethcore/migrations/src/extras/mod.rs b/ethcore/migrations/src/extras/mod.rs deleted file mode 100644 index 01fddc66af1..00000000000 --- a/ethcore/migrations/src/extras/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Extras database migrations. - -mod v6; - -pub use self::v6::ToV6; \ No newline at end of file diff --git a/ethcore/migrations/src/extras/v6.rs b/ethcore/migrations/src/extras/v6.rs deleted file mode 100644 index 7dfc5427ea5..00000000000 --- a/ethcore/migrations/src/extras/v6.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use migration::SimpleMigration; - -/// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front. -pub struct ToV6; - -impl ToV6 { - fn migrate_old_key(&self, old_key: Vec, index: u8, len: usize) -> Vec { - let mut result = vec![]; - result.reserve(len); - unsafe { - result.set_len(len); - } - result[0] = index; - let old_key_start = 33 - len; - result[1..].clone_from_slice(&old_key[old_key_start..32]); - result - } -} - -impl SimpleMigration for ToV6 { - - fn columns(&self) -> Option { None } - - fn version(&self) -> u32 { 6 } - - fn simple_migrate(&mut self, mut key: Vec, value: Vec) -> Option<(Vec, Vec)> { - //// at this version all extras keys are 33 bytes long. - if key.len() == 33 { - // block details key changes: - // - index is moved to the front - if key[32] == 0 { - return Some((self.migrate_old_key(key, 0, 33), value)); - } - - // block hash key changes: - // - key is shorter 33 -> 5 bytes - // - index is moved to the front - if key[32] == 1 { - return Some((self.migrate_old_key(key, 1, 5), value)); - } - - // transaction addresses changes: - // - index is moved to the front - if key[32] == 2 { - return Some((self.migrate_old_key(key, 2, 33), value)); - } - - // block log blooms are removed - if key[32] == 3 { - return None; - } - - // blocks blooms key changes: - // - key is shorter 33 -> 6 bytes - // - index is moved to the front - // - index is changed 4 -> 3 - if key[32] == 4 { - key.reverse(); - // i have no idea why it was reversed - let reverse = key; - let result = vec![ - // new extras index is 3 - 3, - // 9th (+ prefix) byte was the level. Now it's second. - reverse[9], - reverse[4], - reverse[3], - reverse[2], - reverse[1], - ]; - - return Some((result, value)); - } - - // blocks receipts key changes: - // - index is moved to the front - // - index is changed 5 -> 4 - if key[32] == 5 { - return Some((self.migrate_old_key(key, 4, 33), value)); - } - } - - Some((key, value)) - } -} - diff --git a/ethcore/migrations/src/lib.rs b/ethcore/migrations/src/lib.rs index 44cc3045fd6..429c39102cb 100644 --- a/ethcore/migrations/src/lib.rs +++ b/ethcore/migrations/src/lib.rs @@ -16,35 +16,10 @@ //! Database migrations. -#[macro_use] -extern crate log; -#[macro_use] -extern crate macros; extern crate migration; -extern crate rlp; -extern crate ethereum_types; -extern crate ethcore_bytes as bytes; -extern crate kvdb; -extern crate kvdb_rocksdb; -extern crate keccak_hash as hash; -extern crate journaldb; -extern crate ethcore_bloom_journal as bloom_journal; -extern crate ethcore; -extern crate patricia_trie as trie; use migration::ChangeColumns; -pub mod state; -pub mod blocks; -pub mod extras; - -mod v9; -pub use self::v9::ToV9; -pub use self::v9::Extract; - -mod v10; -pub use self::v10::ToV10; - /// The migration from v10 to v11. /// Adds a column for node info. pub const TO_V11: ChangeColumns = ChangeColumns { diff --git a/ethcore/migrations/src/state/mod.rs b/ethcore/migrations/src/state/mod.rs deleted file mode 100644 index da4ef24846e..00000000000 --- a/ethcore/migrations/src/state/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! State database migrations. - -mod v7; - -pub use self::v7::{ArchiveV7, OverlayRecentV7}; \ No newline at end of file diff --git a/ethcore/migrations/src/state/v7.rs b/ethcore/migrations/src/state/v7.rs deleted file mode 100644 index f43ccd0af46..00000000000 --- a/ethcore/migrations/src/state/v7.rs +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! This migration migrates the state db to use an accountdb which ensures uniqueness -//! using an address' hash as opposed to the address itself. - -use std::collections::HashMap; - -use ethereum_types::{H256, Address}; -use bytes::Bytes; -use kvdb_rocksdb::Database; -use migration::{Batch, Config, Error, ErrorKind, Migration, SimpleMigration, Progress}; -use hash::keccak; -use std::sync::Arc; - -use rlp::{decode, Rlp, RlpStream}; - -// attempt to migrate a key, value pair. None if migration not possible. -fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option { - let val_hash = keccak(val); - - if key_h != val_hash { - // this is a key which has been xor'd with an address. - // recover the address. - let address = key_h ^ val_hash; - - // check that the address is actually a 20-byte value. - // the leftmost 12 bytes should be zero. - if &address[0..12] != &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] { - return None; - } - - let address_hash = keccak(Address::from(address)); - - // create the xor'd key in place. - key_h.copy_from_slice(&*val_hash); - assert_eq!(key_h, val_hash); - - { - let last_src: &[u8] = &*address_hash; - let last_dst: &mut [u8] = &mut *key_h; - for (k, a) in last_dst[12..].iter_mut().zip(&last_src[12..]) { - *k ^= *a; - } - } - - Some(key_h) - } else { - None - } -} - -/// Version for `ArchiveDB`. -#[derive(Default)] -pub struct ArchiveV7(Progress); - -impl SimpleMigration for ArchiveV7 { - - fn columns(&self) -> Option { None } - - fn version(&self) -> u32 { 7 } - - fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> { - self.0.tick(); - - if key.len() != 32 { - // metadata key, ignore. - return Some((key, value)); - } - - let key_h = H256::from_slice(&key[..]); - if let Some(new_key) = attempt_migrate(key_h, &value[..]) { - Some((new_key[..].to_owned(), value)) - } else { - Some((key, value)) - } - } -} - -// magic numbers and constants for overlay-recent at v6. -// re-written here because it may change in the journaldb module. 
-const V7_LATEST_ERA_KEY: &'static [u8] = &[ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const V7_VERSION_KEY: &'static [u8] = &[ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const DB_VERSION: u32 = 0x203; -const PADDING : [u8; 10] = [0u8; 10]; - -/// Version for `OverlayRecent` database. -/// more involved than the archive version because of journaling. -#[derive(Default)] -pub struct OverlayRecentV7 { - migrated_keys: HashMap, -} - -impl OverlayRecentV7 { - // walk all journal entries in the database backwards. - // find migrations for any possible inserted keys. - fn walk_journal(&mut self, source: Arc) -> Result<(), Error> { - if let Some(val) = source.get(None, V7_LATEST_ERA_KEY)? { - let mut era = decode::(&val); - loop { - let mut index: usize = 0; - loop { - let entry_key = { - let mut r = RlpStream::new_list(3); - r.append(&era).append(&index).append(&&PADDING[..]); - r.out() - }; - - if let Some(journal_raw) = source.get(None, &entry_key)? { - let rlp = Rlp::new(&journal_raw); - - // migrate all inserted keys. - for r in rlp.at(1).iter() { - let key: H256 = r.val_at(0); - let v: Bytes = r.val_at(1); - - if self.migrated_keys.get(&key).is_none() { - if let Some(new_key) = attempt_migrate(key, &v) { - self.migrated_keys.insert(key, new_key); - } - } - } - index += 1; - } else { - break; - } - } - - if index == 0 || era == 0 { - break; - } - era -= 1; - } - } - Ok(()) - } - - // walk all journal entries in the database backwards. - // replace all possible inserted/deleted keys with their migrated counterparts - // and commit the altered entries. - fn migrate_journal(&self, source: Arc, mut batch: Batch, dest: &mut Database) -> Result<(), Error> { - if let Some(val) = source.get(None, V7_LATEST_ERA_KEY)? { - batch.insert(V7_LATEST_ERA_KEY.into(), val.clone().into_vec(), dest)?; - - let mut era = decode::(&val); - loop { - let mut index: usize = 0; - loop { - let entry_key = { - let mut r = RlpStream::new_list(3); - r.append(&era).append(&index).append(&&PADDING[..]); - r.out() - }; - - if let Some(journal_raw) = source.get(None, &entry_key)? { - let rlp = Rlp::new(&journal_raw); - let id: H256 = rlp.val_at(0); - let mut inserted_keys: Vec<(H256, Bytes)> = Vec::new(); - - // migrate all inserted keys. - for r in rlp.at(1).iter() { - let mut key: H256 = r.val_at(0); - let v: Bytes = r.val_at(1); - - if let Some(new_key) = self.migrated_keys.get(&key) { - key = *new_key; - } - - inserted_keys.push((key, v)); - } - - // migrate all deleted keys. - let mut deleted_keys: Vec = rlp.list_at(2); - for old_key in &mut deleted_keys { - if let Some(new) = self.migrated_keys.get(&*old_key) { - *old_key = new.clone(); - } - } - - // rebuild the journal entry rlp. - let mut stream = RlpStream::new_list(3); - stream.append(&id); - stream.begin_list(inserted_keys.len()); - for (k, v) in inserted_keys { - stream.begin_list(2).append(&k).append(&v); - } - - stream.append_list(&deleted_keys); - - // and insert it into the new database. - batch.insert(entry_key, stream.out(), dest)?; - - index += 1; - } else { - break; - } - } - - if index == 0 || era == 0 { - break; - } - era -= 1; - } - } - batch.commit(dest) - } -} - -impl Migration for OverlayRecentV7 { - - fn columns(&self) -> Option { None } - - fn version(&self) -> u32 { 7 } - - // walk all records in the database, attempting to migrate any possible and - // keeping records of those that we do. then migrate the journal using - // this information. 
- fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { - let mut batch = Batch::new(config, col); - - // check version metadata. - match source.get(None, V7_VERSION_KEY)? { - Some(ref version) if decode::(&*version) == DB_VERSION => {} - _ => return Err(ErrorKind::MigrationImpossible.into()), // missing or wrong version - } - - let mut count = 0; - for (key, value) in source.iter(None).into_iter().flat_map(|inner| inner) { - count += 1; - if count == 100_000 { - count = 0; - flush!("."); - } - - let mut key = key.into_vec(); - if key.len() == 32 { - let key_h = H256::from_slice(&key[..]); - if let Some(new_key) = attempt_migrate(key_h.clone(), &value) { - self.migrated_keys.insert(key_h, new_key); - key.copy_from_slice(&new_key[..]); - } - } - - batch.insert(key, value.into_vec(), dest)?; - } - - self.walk_journal(source.clone())?; - self.migrate_journal(source, batch, dest) - } -} diff --git a/ethcore/migrations/src/v10.rs b/ethcore/migrations/src/v10.rs deleted file mode 100644 index 8caf97192a5..00000000000 --- a/ethcore/migrations/src/v10.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Bloom upgrade - -use std::sync::Arc; -use ethcore::db::{COL_EXTRA, COL_HEADERS, COL_STATE}; -use ethcore::state_db::{ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET, StateDB}; -use trie::TrieDB; -use ethcore::views::HeaderView; -use bloom_journal::Bloom; -use migration::{Error, Migration, Progress, Batch, Config, ErrorKind}; -use journaldb; -use ethereum_types::H256; -use trie::Trie; -use kvdb::{DBTransaction, ResultExt}; -use kvdb_rocksdb::Database; - -/// Account bloom upgrade routine. If bloom already present, does nothing. -/// If database empty (no best block), does nothing. -/// Can be called on upgraded database with no issues (will do nothing). -pub fn generate_bloom(source: Arc, dest: &mut Database) -> Result<(), Error> { - trace!(target: "migration", "Account bloom upgrade started"); - let best_block_hash = match source.get(COL_EXTRA, b"best")? { - // no migration needed - None => { - trace!(target: "migration", "No best block hash, skipping"); - return Ok(()); - }, - Some(hash) => hash, - }; - let best_block_header = match source.get(COL_HEADERS, &best_block_hash)? 
{ - // no best block, nothing to do - None => { - trace!(target: "migration", "No best block header, skipping"); - return Ok(()) - }, - Some(x) => x, - }; - let state_root = HeaderView::new(&best_block_header).state_root(); - - trace!("Adding accounts bloom (one-time upgrade)"); - let bloom_journal = { - let mut bloom = Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET); - // no difference what algorithm is passed, since there will be no writes - let state_db = journaldb::new( - source.clone(), - journaldb::Algorithm::OverlayRecent, - COL_STATE); - let account_trie = TrieDB::new(state_db.as_hashdb(), &state_root).chain_err(|| "Cannot open trie")?; - for item in account_trie.iter().map_err(|_| ErrorKind::MigrationImpossible)? { - let (ref account_key, _) = item.map_err(|_| ErrorKind::MigrationImpossible)?; - let account_key_hash = H256::from_slice(account_key); - bloom.set(&*account_key_hash); - } - - bloom.drain_journal() - }; - - trace!(target: "migration", "Generated {} bloom updates", bloom_journal.entries.len()); - - let mut batch = DBTransaction::new(); - StateDB::commit_bloom(&mut batch, bloom_journal).chain_err(|| "Failed to commit bloom")?; - dest.write(batch)?; - - trace!(target: "migration", "Finished bloom update"); - - - Ok(()) -} - -/// Account bloom migration. -#[derive(Default)] -pub struct ToV10 { - progress: Progress, -} - -impl ToV10 { - /// New v10 migration - pub fn new() -> ToV10 { ToV10 { progress: Progress::default() } } -} - -impl Migration for ToV10 { - fn version(&self) -> u32 { - 10 - } - - fn pre_columns(&self) -> Option { Some(5) } - - fn columns(&self) -> Option { Some(6) } - - fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { - let mut batch = Batch::new(config, col); - for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) { - self.progress.tick(); - batch.insert(key.into_vec(), value.into_vec(), dest)?; - } - batch.commit(dest)?; - - if col == COL_STATE { - generate_bloom(source, dest)?; - } - - Ok(()) - } -} diff --git a/ethcore/migrations/src/v9.rs b/ethcore/migrations/src/v9.rs deleted file mode 100644 index 39637dc4eea..00000000000 --- a/ethcore/migrations/src/v9.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - - -//! This migration consolidates all databases into single one using Column Families. - -use rlp::{Rlp, RlpStream}; -use kvdb_rocksdb::Database; -use migration::{Batch, Config, Error, Migration, Progress}; -use std::sync::Arc; - -/// Which part of block to preserve -pub enum Extract { - /// Extract block header RLP. - Header, - /// Extract block body RLP. - Body, - /// Don't change the value. - All, -} - -/// Consolidation of extras/block/state databases into single one. 
-pub struct ToV9 { - progress: Progress, - column: Option, - extract: Extract, -} - -impl ToV9 { - /// Creates new V9 migration and assigns all `(key,value)` pairs from `source` DB to given Column Family - pub fn new(column: Option, extract: Extract) -> Self { - ToV9 { - progress: Progress::default(), - column: column, - extract: extract, - } - } -} - -impl Migration for ToV9 { - fn columns(&self) -> Option { Some(5) } - - fn version(&self) -> u32 { 9 } - - fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { - let mut batch = Batch::new(config, self.column); - - for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) { - self.progress.tick(); - match self.extract { - Extract::Header => { - batch.insert(key.into_vec(), Rlp::new(&value).at(0).as_raw().to_vec(), dest)? - }, - Extract::Body => { - let mut body = RlpStream::new_list(2); - let block_rlp = Rlp::new(&value); - body.append_raw(block_rlp.at(1).as_raw(), 1); - body.append_raw(block_rlp.at(2).as_raw(), 1); - batch.insert(key.into_vec(), body.out(), dest)? - }, - Extract::All => { - batch.insert(key.into_vec(), value.into_vec(), dest)? - } - } - } - - batch.commit(dest) - } -} diff --git a/parity/helpers.rs b/parity/helpers.rs index 38dca8b9c2b..959dddba92d 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -275,7 +275,7 @@ pub fn execute_upgrades( } let client_path = dirs.db_path(pruning); - migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e)) + migrate(&client_path, compaction_profile).map_err(|e| format!("{}", e)) } /// Prompts user asking for password. diff --git a/parity/migration.rs b/parity/migration.rs index 60a2afb4c78..bd659cba015 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -15,17 +15,12 @@ // along with Parity. If not, see . use std::fs; -use std::fs::File; use std::io::{Read, Write, Error as IoError, ErrorKind}; use std::path::{Path, PathBuf}; use std::fmt::{Display, Formatter, Error as FmtError}; -use std::sync::Arc; -use journaldb::Algorithm; -use migr::{self, Manager as MigrationManager, Config as MigrationConfig, Migration}; -use kvdb; -use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig}; -use migrations::{self, Extract}; -use ethcore::db; +use migr::{self, Manager as MigrationManager, Config as MigrationConfig}; +use kvdb_rocksdb::CompactionProfile; +use migrations; /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; @@ -43,14 +38,10 @@ const VERSION_FILE_NAME: &'static str = "db_version"; pub enum Error { /// Returned when current version cannot be read or guessed. UnknownDatabaseVersion, - /// Migration does not support existing pruning algorithm. - UnsupportedPruningMethod, /// Existing DB is newer than the known one. FutureDBVersion, /// Migration is not possible. MigrationImpossible, - /// Migration unexpectadly failed. - MigrationFailed, /// Internal migration error. Internal(migr::Error), /// Migration was completed succesfully, @@ -62,10 +53,8 @@ impl Display for Error { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { let out = match *self { Error::UnknownDatabaseVersion => "Current database version cannot be read".into(), - Error::UnsupportedPruningMethod => "Unsupported pruning method for database migration. Delete DB and resync.".into(), Error::FutureDBVersion => "Database was created with newer client version. 
Upgrade your client or delete DB and resync.".into(), Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION), - Error::MigrationFailed => "Database migration unexpectedly failed".into(), Error::Internal(ref err) => format!("{}", err), Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err), }; @@ -99,7 +88,7 @@ fn version_file_path(path: &Path) -> PathBuf { /// Reads current database version from the file at given path. /// If the file does not exist returns `DEFAULT_VERSION`. fn current_version(path: &Path) -> Result { - match File::open(version_file_path(path)) { + match fs::File::open(version_file_path(path)) { Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(DEFAULT_VERSION), Err(_) => Err(Error::UnknownDatabaseVersion), Ok(mut file) => { @@ -114,7 +103,7 @@ fn current_version(path: &Path) -> Result { /// Creates a new file if the version file does not exist yet. fn update_version(path: &Path) -> Result<(), Error> { fs::create_dir_all(path)?; - let mut file = File::create(version_file_path(path))?; + let mut file = fs::File::create(version_file_path(path))?; file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?; Ok(()) } @@ -145,49 +134,11 @@ pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> Mig /// Migrations on the consolidated database. fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result { let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); - manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)?; manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?; manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?; Ok(manager) } -/// Consolidates legacy databases into single one. -fn consolidate_database( - old_db_path: PathBuf, - new_db_path: PathBuf, - column: Option, - extract: Extract, - compaction_profile: &CompactionProfile) -> Result<(), Error> { - fn db_error(e: kvdb::Error) -> Error { - warn!("Cannot open Database for consolidation: {:?}", e); - Error::MigrationFailed - } - - let mut migration = migrations::ToV9::new(column, extract); - let config = default_migration_settings(compaction_profile); - let mut db_config = DatabaseConfig { - max_open_files: 64, - memory_budget: None, - compaction: config.compaction_profile, - columns: None, - wal: true, - }; - - let old_path_str = old_db_path.to_str().ok_or(Error::MigrationImpossible)?; - let new_path_str = new_db_path.to_str().ok_or(Error::MigrationImpossible)?; - - let cur_db = Arc::new(Database::open(&db_config, old_path_str).map_err(db_error)?); - // open new DB with proper number of columns - db_config.columns = migration.columns(); - let mut new_db = Database::open(&db_config, new_path_str).map_err(db_error)?; - - // Migrate to new database (default column only) - migration.migrate(cur_db, &config, &mut new_db, None)?; - - Ok(()) -} - - /// Migrates database at given position with given migration rules. fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> { // check if migration is needed @@ -225,7 +176,7 @@ fn exists(path: &Path) -> bool { } /// Migrates the database. -pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> { +pub fn migrate(path: &Path, compaction_profile: CompactionProfile) -> Result<(), Error> { // read version file. 
let version = current_version(path)?; @@ -240,32 +191,6 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr return Ok(()) } - // Perform pre-consolidation migrations - if version < CONSOLIDATION_VERSION && exists(&legacy::blocks_database_path(path)) { - println!("Migrating database from version {} to {}", version, CONSOLIDATION_VERSION); - - migrate_database(version, legacy::extras_database_path(path), legacy::extras_database_migrations(&compaction_profile)?)?; - migrate_database(version, legacy::state_database_path(path), legacy::state_database_migrations(pruning, &compaction_profile)?)?; - migrate_database(version, legacy::blocks_database_path(path), legacy::blocks_database_migrations(&compaction_profile)?)?; - - let db_path = consolidated_database_path(path); - // Remove the database dir (it shouldn't exist anyway, but it might when migration was interrupted) - let _ = fs::remove_dir_all(db_path.clone()); - consolidate_database(legacy::blocks_database_path(path), db_path.clone(), db::COL_HEADERS, Extract::Header, &compaction_profile)?; - consolidate_database(legacy::blocks_database_path(path), db_path.clone(), db::COL_BODIES, Extract::Body, &compaction_profile)?; - consolidate_database(legacy::extras_database_path(path), db_path.clone(), db::COL_EXTRA, Extract::All, &compaction_profile)?; - consolidate_database(legacy::state_database_path(path), db_path.clone(), db::COL_STATE, Extract::All, &compaction_profile)?; - consolidate_database(legacy::trace_database_path(path), db_path.clone(), db::COL_TRACE, Extract::All, &compaction_profile)?; - let _ = fs::remove_dir_all(legacy::blocks_database_path(path)); - let _ = fs::remove_dir_all(legacy::extras_database_path(path)); - let _ = fs::remove_dir_all(legacy::state_database_path(path)); - let _ = fs::remove_dir_all(legacy::trace_database_path(path)); - println!("Migration finished"); - } - - // update version so we can apply post-consolidation migrations. - let version = ::std::cmp::max(CONSOLIDATION_VERSION, version); - // Further migrations if version >= CONSOLIDATION_VERSION && version < CURRENT_VERSION && exists(&consolidated_database_path(path)) { println!("Migrating database from version {} to {}", ::std::cmp::max(CONSOLIDATION_VERSION, version), CURRENT_VERSION); @@ -276,67 +201,3 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr // update version file. update_version(path) } - -/// Old migrations utilities -mod legacy { - use super::*; - use std::path::{Path, PathBuf}; - use migr::{Manager as MigrationManager}; - use kvdb_rocksdb::CompactionProfile; - use migrations; - - /// Blocks database path. - pub fn blocks_database_path(path: &Path) -> PathBuf { - let mut blocks_path = path.to_owned(); - blocks_path.push("blocks"); - blocks_path - } - - /// Extras database path. - pub fn extras_database_path(path: &Path) -> PathBuf { - let mut extras_path = path.to_owned(); - extras_path.push("extras"); - extras_path - } - - /// State database path. - pub fn state_database_path(path: &Path) -> PathBuf { - let mut state_path = path.to_owned(); - state_path.push("state"); - state_path - } - - /// Trace database path. - pub fn trace_database_path(path: &Path) -> PathBuf { - let mut blocks_path = path.to_owned(); - blocks_path.push("tracedb"); - blocks_path - } - - /// Migrations on the blocks database. 
-	pub fn blocks_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
-		let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
-		manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible)?;
-		Ok(manager)
-	}
-
-	/// Migrations on the extras database.
-	pub fn extras_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
-		let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
-		manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible)?;
-		Ok(manager)
-	}
-
-	/// Migrations on the state database.
-	pub fn state_database_migrations(pruning: Algorithm, compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
-		let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
-		let res = match pruning {
-			Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
-			Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
-			_ => return Err(Error::UnsupportedPruningMethod),
-		};
-
-		res.map_err(|_| Error::MigrationImpossible)?;
-		Ok(manager)
-	}
-}
diff --git a/util/migration/src/lib.rs b/util/migration/src/lib.rs
index 7b678b3a0d7..d93242b1982 100644
--- a/util/migration/src/lib.rs
+++ b/util/migration/src/lib.rs
@@ -138,7 +138,7 @@ pub trait SimpleMigration: 'static {
 	fn version(&self) -> u32;
 	/// Should migrate existing object to new database.
 	/// Returns `None` if the object does not exist in new version of database.
-	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
+	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>, col: Option<u32>) -> Option<(Vec<u8>, Vec<u8>)>;
 }
 
 impl<T: SimpleMigration> Migration for T {
@@ -157,7 +157,7 @@ impl<T: SimpleMigration> Migration for T {
 		};
 
 		for (key, value) in iter {
-			if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec()) {
+			if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec(), col) {
 				batch.insert(key, value, dest)?;
 			}
 		}

From 2b56cbd4f10ace60e886516b8c414663d015fab3 Mon Sep 17 00:00:00 2001
From: debris
Date: Thu, 22 Feb 2018 11:33:08 +0100
Subject: [PATCH 2/6] improve SimpleMigration

---
 util/migration/src/lib.rs | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/util/migration/src/lib.rs b/util/migration/src/lib.rs
index d93242b1982..8f44aff90b4 100644
--- a/util/migration/src/lib.rs
+++ b/util/migration/src/lib.rs
@@ -130,15 +130,17 @@ pub trait Migration: 'static {
 	fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: Option<u32>) -> Result<()>;
 }
 
-/// A simple migration over key-value pairs.
+/// A simple migration over key-value pairs of a single column.
 pub trait SimpleMigration: 'static {
 	/// Number of columns in database after the migration.
 	fn columns(&self) -> Option<u32>;
 	/// Version of database after the migration.
 	fn version(&self) -> u32;
+	/// Index of column which should be migrated.
+	fn migrated_column_index(&self) -> Option<u32>;
 	/// Should migrate existing object to new database.
 	/// Returns `None` if the object does not exist in new version of database.
-	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>, col: Option<u32>) -> Option<(Vec<u8>, Vec<u8>)>;
+	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)>;
 }
 
 impl<T: SimpleMigration> Migration for T {
@@ -149,6 +151,7 @@ impl<T: SimpleMigration> Migration for T {
 	fn alters_existing(&self) -> bool { true }
 
 	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<()> {
+		let migration_needed = col == SimpleMigration::migrated_column_index(self);
 		let mut batch = Batch::new(config, col);
 
 		let iter = match source.iter(col) {
@@ -156,9 +159,13 @@ impl<T: SimpleMigration> Migration for T {
 			None => return Ok(()),
 		};
 
-		for (key, value) in iter {
-			if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec(), col) {
-				batch.insert(key, value, dest)?;
+		for (key, value) in iter { 
+			if migration_needed {
+				if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec()) {
+					batch.insert(key, value, dest)?;
+				}
+			} else {
+				batch.insert(key.into_vec(), value.into_vec(), dest)?;
 			}
 		}

From 90f47b6eb589d60b1919060dd645ed2d3f5cc5a5 Mon Sep 17 00:00:00 2001
From: debris
Date: Thu, 22 Feb 2018 11:35:54 +0100
Subject: [PATCH 3/6] fixed migration tests

---
 util/migration/tests/tests.rs | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/util/migration/tests/tests.rs b/util/migration/tests/tests.rs
index 6e136e814b3..c1ff8228f87 100644
--- a/util/migration/tests/tests.rs
+++ b/util/migration/tests/tests.rs
@@ -63,9 +63,17 @@ fn verify_migration(path: &Path, pairs: BTreeMap<Vec<u8>, Vec<u8>>) {
 struct Migration0;
 
 impl SimpleMigration for Migration0 {
-	fn columns(&self) -> Option<u32> { None }
+	fn columns(&self) -> Option<u32> {
+		None
+	}
 
-	fn version(&self) -> u32 { 1 }
+	fn version(&self) -> u32 {
+		1
+	}
+
+	fn migrated_column_index(&self) -> Option<u32> {
+		None
+	}
 
 	fn simple_migrate(&mut self, mut key: Vec<u8>, mut value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
 		key.push(0x11);
@@ -78,9 +86,17 @@ impl SimpleMigration for Migration0 {
 struct Migration1;
 
 impl SimpleMigration for Migration1 {
-	fn columns(&self) -> Option<u32> { None }
+	fn columns(&self) -> Option<u32> {
+		None
+	}
+
+	fn version(&self) -> u32 {
+		2
+	}
 
-	fn version(&self) -> u32 { 2 }
+	fn migrated_column_index(&self) -> Option<u32> {
+		None
+	}
 
 	fn simple_migrate(&mut self, key: Vec<u8>, _value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
 		Some((key, vec![]))

From aae65c8a25521a205214e1e4a65cef3622dba15c Mon Sep 17 00:00:00 2001
From: debris
Date: Thu, 22 Feb 2018 11:37:39 +0100
Subject: [PATCH 4/6] fixed redundant whitespace

---
 util/migration/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/migration/src/lib.rs b/util/migration/src/lib.rs
index 8f44aff90b4..fbc9681b40e 100644
--- a/util/migration/src/lib.rs
+++ b/util/migration/src/lib.rs
@@ -159,7 +159,7 @@ impl<T: SimpleMigration> Migration for T {
 			None => return Ok(()),
 		};
 
-		for (key, value) in iter { 
+		for (key, value) in iter {
 			if migration_needed {
 				if let Some((key, value)) = self.simple_migrate(key.into_vec(), value.into_vec()) {
 					batch.insert(key, value, dest)?;

From f92c1e1a0a255096754c8a28dc26bd94bd8c245a Mon Sep 17 00:00:00 2001
From: debris
Date: Thu, 22 Feb 2018 11:44:04 +0100
Subject: [PATCH 5/6] add ToV13 migration which removes bloom groups

---
 ethcore/migrations/src/lib.rs | 29 ++++++++++++++++++++++++++++-
 parity/migration.rs | 1 +
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/ethcore/migrations/src/lib.rs b/ethcore/migrations/src/lib.rs
index 429c39102cb..9e19832b5b7 100644
--- a/ethcore/migrations/src/lib.rs
+++ b/ethcore/migrations/src/lib.rs
@@ -18,7 +18,7 @@
 extern crate migration;
 
-use migration::ChangeColumns;
+use migration::{ChangeColumns, SimpleMigration};
 
 /// The migration from v10 to v11.
 /// Adds a column for node info.
@@ -35,3 +35,30 @@ pub const TO_V12: ChangeColumns = ChangeColumns {
 	post_columns: Some(8),
 	version: 12,
 };
+
+#[derive(Default)]
+pub struct ToV13;
+
+impl SimpleMigration for ToV13 {
+	fn columns(&self) -> Option<u32> {
+		Some(8)
+	}
+
+	fn version(&self) -> u32 {
+		13
+	}
+
+	fn migrated_column_index(&self) -> Option<u32> {
+		// extras!
+		Some(3)
+	}
+
+	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
+		// remove all bloom groups
+		if key[0] == 3 {
+			None
+		} else {
+			Some((key, value))
+		}
+	}
+}
diff --git a/parity/migration.rs b/parity/migration.rs
index bd659cba015..75cece5daab 100644
--- a/parity/migration.rs
+++ b/parity/migration.rs
@@ -136,6 +136,7 @@ fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> R
 	let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
 	manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?;
 	manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?;
+	manager.add_migration(migrations::ToV13::default()).map_err(|_| Error::MigrationImpossible)?;
 	Ok(manager)
 }

From c28d95c7faac66e5cbf0885f14c4a4bc6629ddb2 Mon Sep 17 00:00:00 2001
From: debris
Date: Thu, 22 Feb 2018 12:00:31 +0100
Subject: [PATCH 6/6] bump CURRENT_VERSION of db

---
 parity/migration.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/parity/migration.rs b/parity/migration.rs
index 75cece5daab..a3babedd12b 100644
--- a/parity/migration.rs
+++ b/parity/migration.rs
@@ -25,7 +25,7 @@ use migrations;
 /// Database is assumed to be at default version, when no version file is found.
 const DEFAULT_VERSION: u32 = 5;
 /// Current version of database models.
-const CURRENT_VERSION: u32 = 12;
+const CURRENT_VERSION: u32 = 13;
 /// First version of the consolidated database.
 const CONSOLIDATION_VERSION: u32 = 9;
 /// Defines how many items are migrated to the new version of database at once.
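
For readers following the trait change in PATCH 2/6, the sketch below shows what a downstream, column-scoped migration would look like against the reworked SimpleMigration trait. It is not part of the series: the name ToV14, the column index 3, and the 0xff key prefix are invented for illustration; only the trait methods, the in-tree `migration` crate, and the ToV13 style they mirror come from the patches above. Such a migration would be wired up the same way as ToV13, via `manager.add_migration(ToV14::default())` in parity/migration.rs.

    // Illustrative sketch only, assuming the util/migration crate as modified by PATCH 2/6.
    extern crate migration;

    use migration::SimpleMigration;

    /// Hypothetical migration that prunes entries from a single column.
    #[derive(Default)]
    pub struct ToV14;

    impl SimpleMigration for ToV14 {
    	// Total number of columns after this migration (unchanged here).
    	fn columns(&self) -> Option<u32> {
    		Some(8)
    	}

    	// Database version recorded once the migration completes.
    	fn version(&self) -> u32 {
    		14
    	}

    	// Only this column is fed through `simple_migrate`; every other column
    	// is copied verbatim by the blanket `Migration` impl from PATCH 2/6.
    	fn migrated_column_index(&self) -> Option<u32> {
    		Some(3)
    	}

    	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
    		// Returning `None` drops the entry; returning the pair keeps it as-is.
    		if key.first() == Some(&0xff) {
    			None
    		} else {
    			Some((key, value))
    		}
    	}
    }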