This repository has been archived by the owner on Nov 6, 2020. It is now read-only.

new blooms database #8712

Merged: 44 commits, Jun 20, 2018
Commits (44)

343b298  new blooms database (debris, May 25, 2018)
b9ac847  Merge branch 'master' into new-blooms-db (debris, May 25, 2018)
2eb233b  fixed conflict in Cargo.lock (debris, May 25, 2018)
f55ee49  removed bloomchain (debris, May 25, 2018)
94dfcd4  cleanup in progress (debris, May 25, 2018)
88aa4ff  Merge branch 'master' into new-blooms-db (debris, May 26, 2018)
0da3c9b  all tests passing in trace db with new blooms-db (debris, May 26, 2018)
0fb7b95  added trace_blooms to BlockChainDB interface, fixed db flushing (debris, May 26, 2018)
c8b30a4  Merge branch 'master' into new-blooms-db (debris, May 30, 2018)
975fdcb  Merge branch 'master' into new-blooms-db (debris, May 31, 2018)
24a6ba2  BlockChainDB no longer exposes RwLock in the interface (debris, May 31, 2018)
c321556  Merge branch 'master' into new-blooms-db (debris, Jun 1, 2018)
7fa05cd  automatically flush blooms-db after every insert (debris, Jun 1, 2018)
979ed33  blooms-db uses io::BufReader to read files, wrap blooms-db into Mutex… (debris, Jun 1, 2018)
77f0af8  fix json_tests (debris, Jun 1, 2018)
6367cea  blooms-db can filter multiple possibilities at the same time (debris, Jun 1, 2018)
6a806a1  removed enum trace/db.rs CacheId (debris, Jun 1, 2018)
65f9f91  lint fixes (debris, Jun 1, 2018)
ffd5a2a  fixed tests (debris, Jun 1, 2018)
1781d4a  Merge branch 'master' into new-blooms-db (debris, Jun 2, 2018)
646957a  Merge branch 'master' into new-blooms-db (debris, Jun 4, 2018)
fd06dca  kvdb-rocksdb uses fs-swap crate (debris, Jun 4, 2018)
bac4638  update Cargo.lock (debris, Jun 4, 2018)
db2740e  use fs::rename (debris, Jun 5, 2018)
50aa947  fixed failing test on linux (debris, Jun 5, 2018)
8f130b5  Merge branch 'master' into fs-swap (debris, Jun 9, 2018)
7edca53  fix tests (debris, Jun 12, 2018)
7e48afd  Merge branch 'master' into fs-swap (debris, Jun 12, 2018)
914aec5  use fs_swap (debris, Jun 12, 2018)
dc2e02a  Merge branch 'master' into fs-swap (debris, Jun 12, 2018)
3bc7a8c  fixed failing test on linux (debris, Jun 12, 2018)
bfc744b  cleanup after swap (debris, Jun 13, 2018)
41e5eb1  Merge branch 'master' into new-blooms-db (debris, Jun 13, 2018)
fdc943a  fix tests (debris, Jun 13, 2018)
249c368  Merge branch 'master' into new-blooms-db (debris, Jun 14, 2018)
f82f599  fixed osx permissions (debris, Jun 14, 2018)
1cc78f6  Merge branch 'fix_osx_permissions' into new-blooms-db (debris, Jun 14, 2018)
59f0fd3  simplify parity database opening functions (debris, Jun 14, 2018)
447950e  added migration to blooms-db (debris, Jun 14, 2018)
9894a16  Merge branch 'master' into new-blooms-db (debris, Jun 20, 2018)
f37f5e7  address @niklasad1 grumbles (debris, Jun 20, 2018)
26c45cd  fix license and authors field of blooms-db Cargo.toml (debris, Jun 20, 2018)
28ad097  Merge branch 'fs-swap' into new-blooms-db (debris, Jun 20, 2018)
f1ead7d  restore blooms-db after snapshot (debris, Jun 20, 2018)
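
Note: the commits above introduce a file-backed bloom database, util/blooms-db, which replaces bloomchain for both header and trace blooms. As orientation only, the sketch below mirrors the call pattern the commit messages describe (flush on every insert, filtering several bloom possibilities in one pass). The method names and signatures (open, insert_blooms, filter) are assumptions for illustration, not copied from the crate.

// Hedged sketch only: `open`, `insert_blooms` and `filter` are assumed names,
// written to mirror the behaviour described in the commit log.
extern crate blooms_db;
extern crate ethbloom;

use std::io;
use std::path::Path;

use ethbloom::Bloom;

fn bloom_roundtrip(dir: &Path) -> io::Result<()> {
	// One database per bloom family (block log blooms vs. trace blooms).
	let mut db = blooms_db::Database::open(dir)?;

	// Store the blooms for blocks 100..=102. Per the commit log, the
	// database flushes itself after every insert, so no explicit flush call.
	let blooms = vec![Bloom::default(), Bloom::default(), Bloom::default()];
	db.insert_blooms(100, blooms.iter())?;

	// Query a block range against several possible blooms at once
	// ("blooms-db can filter multiple possibilities at the same time").
	// The result is the list of block numbers that may contain a match.
	let possibilities = vec![Bloom::default()];
	let candidates: Vec<u64> = db.filter(0, 200, possibilities.iter())?;
	println!("candidate blocks: {:?}", candidates);
	Ok(())
}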
Files changed

24 changes: 19 additions & 5 deletions Cargo.lock (generated file; diff not rendered by default)

1 change: 1 addition & 0 deletions Cargo.toml
@@ -7,6 +7,7 @@ license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
blooms-db = { path = "util/blooms-db" }
log = "0.3"
env_logger = "0.4"
rustc-hex = "1.0"
6 changes: 3 additions & 3 deletions ethcore/Cargo.toml
@@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
ansi_term = "0.10"
bloomchain = { path = "../util/bloomchain" }
blooms-db = { path = "../util/blooms-db" }
bn = { git = "https://github.com/paritytech/bn", default-features = false }
byteorder = "1.0"
common-types = { path = "types" }
@@ -67,11 +67,11 @@ keccak-hash = { path = "../util/hash" }
triehash = { path = "../util/triehash" }
unexpected = { path = "../util/unexpected" }
journaldb = { path = "../util/journaldb" }
tempdir = "0.3"
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }

[dev-dependencies]
tempdir = "0.3"
trie-standardmap = { path = "../util/trie-standardmap" }
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }

[features]
# Display EVM debug traces.
13 changes: 5 additions & 8 deletions ethcore/light/src/client/service.rs
@@ -21,11 +21,10 @@ use std::fmt;
use std::sync::Arc;

use ethcore::client::ClientIoMessage;
use ethcore::db;
use ethcore::{db, BlockChainDB};
use ethcore::error::Error as CoreError;
use ethcore::spec::Spec;
use io::{IoContext, IoError, IoHandler, IoService};
use kvdb::KeyValueDB;

use cache::Cache;
use parking_lot::Mutex;
@@ -65,11 +64,10 @@ pub struct Service<T> {

impl<T: ChainDataFetcher> Service<T> {
/// Start the service: initialize I/O workers and client itself.
pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, db: Arc<KeyValueDB>, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {

pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, db: Arc<BlockChainDB>, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
let io_service = IoService::<ClientIoMessage>::start().map_err(Error::Io)?;
let client = Arc::new(Client::new(config,
db,
db.key_value().clone(),
db::COL_LIGHT_CHAIN,
spec,
fetcher,
@@ -122,12 +120,11 @@ mod tests {
use client::fetch;
use std::time::Duration;
use parking_lot::Mutex;
use kvdb_memorydb;
use ethcore::db::NUM_COLUMNS;
use ethcore::test_helpers;

#[test]
fn it_works() {
let db = Arc::new(kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0)));
let db = test_helpers::new_db();
let spec = Spec::new_test();
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600))));

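Note: the Arc<BlockChainDB> handle passed in above bundles the raw key-value columns with the two bloom databases. The sketch below shows the trait shape implied by this diff and the commit log; key_value() is visible in the db.key_value().clone() call, while the bloom accessors are inferred from the commit "added trace_blooms to BlockChainDB interface" and should be read as an assumption, not the exact ethcore definition.

// Sketch of the BlockChainDB abstraction used in this PR (assumed shape).
// Requires the kvdb and blooms-db crates from this repository.
extern crate blooms_db;
extern crate kvdb;

use std::sync::Arc;

use kvdb::KeyValueDB;

pub trait BlockChainDB: Send + Sync {
	/// Raw key-value columns (headers, bodies, extras, state, ...).
	fn key_value(&self) -> &Arc<KeyValueDB>;
	/// Bloom index over block log blooms.
	fn blooms(&self) -> &blooms_db::Database;
	/// Bloom index over trace blooms.
	fn trace_blooms(&self) -> &blooms_db::Database;
}

The light client only needs the key-value part, hence the db.key_value().clone() call in the diff above.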
3 changes: 2 additions & 1 deletion ethcore/node_filter/src/lib.rs
@@ -115,6 +115,7 @@ mod test {
use ethcore::spec::Spec;
use ethcore::client::{BlockChainClient, Client, ClientConfig};
use ethcore::miner::Miner;
use ethcore::test_helpers;
use network::{ConnectionDirection, ConnectionFilter, NodeId};
use io::IoChannel;
use super::NodeFilter;
@@ -127,7 +128,7 @@
let data = include_bytes!("../res/node_filter.json");
let tempdir = TempDir::new("").unwrap();
let spec = Spec::load(&tempdir.path(), &data[..]).unwrap();
let client_db = Arc::new(::kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
let client_db = test_helpers::new_db();

let client = Client::new(
ClientConfig::default(),
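Note: because blooms-db is file-backed, a test database can no longer be a bare in-memory KeyValueDB. The sketch below shows what a helper like ethcore::test_helpers::new_db() plausibly does, reusing the BlockChainDB trait sketched above; the struct and field names are illustrative and the blooms_db::Database::open signature is assumed.

// Hedged sketch of a test helper: in-memory key-value columns plus
// file-backed bloom databases rooted in fresh temporary directories.
extern crate blooms_db;
extern crate kvdb;
extern crate kvdb_memorydb;
extern crate tempdir;

use std::sync::Arc;

use kvdb::KeyValueDB;
use tempdir::TempDir;

struct TestBlockChainDB {
	// Keep the temporary directories alive as long as the database is used.
	_blooms_dir: TempDir,
	_trace_blooms_dir: TempDir,
	blooms: blooms_db::Database,
	trace_blooms: blooms_db::Database,
	key_value: Arc<KeyValueDB>,
}

impl BlockChainDB for TestBlockChainDB {
	fn key_value(&self) -> &Arc<KeyValueDB> { &self.key_value }
	fn blooms(&self) -> &blooms_db::Database { &self.blooms }
	fn trace_blooms(&self) -> &blooms_db::Database { &self.trace_blooms }
}

/// Create a temporary test database (sketch of test_helpers::new_db).
pub fn new_db() -> Arc<BlockChainDB> {
	let blooms_dir = TempDir::new("").unwrap();
	let trace_blooms_dir = TempDir::new("").unwrap();

	let db = TestBlockChainDB {
		blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(),
		trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
		_blooms_dir: blooms_dir,
		_trace_blooms_dir: trace_blooms_dir,
		// Same column count the old test code used via NUM_COLUMNS.unwrap_or(0).
		key_value: Arc::new(kvdb_memorydb::create(::ethcore::db::NUM_COLUMNS.unwrap_or(0))),
	};

	Arc::new(db)
}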
39 changes: 12 additions & 27 deletions ethcore/service/src/service.rs
@@ -22,10 +22,10 @@ use std::time::Duration;

use ansi_term::Colour;
use io::{IoContext, TimerToken, IoHandler, IoService, IoError};
use kvdb::{KeyValueDB, KeyValueDBHandler};
use stop_guard::StopGuard;

use sync::PrivateTxHandler;
use ethcore::{BlockChainDB, BlockChainDBHandler};
use ethcore::client::{Client, ClientConfig, ChainNotify, ClientIoMessage};
use ethcore::miner::Miner;
use ethcore::snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams};
@@ -69,7 +69,7 @@ pub struct ClientService {
client: Arc<Client>,
snapshot: Arc<SnapshotService>,
private_tx: Arc<PrivateTxService>,
database: Arc<KeyValueDB>,
database: Arc<BlockChainDB>,
_stop_guard: StopGuard,
}

@@ -78,9 +78,9 @@ impl ClientService {
pub fn start(
config: ClientConfig,
spec: &Spec,
client_db: Arc<KeyValueDB>,
blockchain_db: Arc<BlockChainDB>,
snapshot_path: &Path,
restoration_db_handler: Box<KeyValueDBHandler>,
restoration_db_handler: Box<BlockChainDBHandler>,
_ipc_path: &Path,
miner: Arc<Miner>,
account_provider: Arc<AccountProvider>,
@@ -93,7 +93,7 @@ impl ClientService {
info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name()));

let pruning = config.pruning;
let client = Client::new(config, &spec, client_db.clone(), miner.clone(), io_service.channel())?;
let client = Client::new(config, &spec, blockchain_db.clone(), miner.clone(), io_service.channel())?;

let snapshot_params = SnapServiceParams {
engine: spec.engine.clone(),
@@ -131,7 +131,7 @@ impl ClientService {
client: client,
snapshot: snapshot,
private_tx,
database: client_db,
database: blockchain_db,
_stop_guard: stop_guard,
})
}
@@ -167,7 +167,7 @@ impl ClientService {
}

/// Get a handle to the database.
pub fn db(&self) -> Arc<KeyValueDB> { self.database.clone() }
pub fn db(&self) -> Arc<BlockChainDB> { self.database.clone() }

/// Shutdown the Client Service
pub fn shutdown(&self) {
@@ -259,8 +259,8 @@ mod tests {
use ethcore::miner::Miner;
use ethcore::spec::Spec;
use ethcore::db::NUM_COLUMNS;
use kvdb::Error;
use kvdb_rocksdb::{Database, DatabaseConfig, CompactionProfile};
use ethcore::test_helpers;
use kvdb_rocksdb::{DatabaseConfig, CompactionProfile};
use super::*;

use ethcore_private_tx;
@@ -278,24 +278,9 @@
client_db_config.compaction = CompactionProfile::auto(&client_path);
client_db_config.wal = client_config.db_wal;

let client_db = Arc::new(Database::open(
&client_db_config,
&client_path.to_str().expect("DB path could not be converted to string.")
).unwrap());

struct RestorationDBHandler {
config: DatabaseConfig,
}

impl KeyValueDBHandler for RestorationDBHandler {
fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
}
}

let restoration_db_handler = Box::new(RestorationDBHandler {
config: client_db_config,
});
let client_db_handler = test_helpers::restoration_db_handler(client_db_config.clone());
let client_db = client_db_handler.open(&client_path).unwrap();
let restoration_db_handler = test_helpers::restoration_db_handler(client_db_config);

let spec = Spec::new_test();
let service = ClientService::start(
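
Note: the inline RestorationDBHandler removed above moves into ethcore::test_helpers::restoration_db_handler, and the handler now opens a full BlockChainDB rather than a bare KeyValueDB. A sketch of the handler trait implied by this diff follows; the io::Result error type is an assumption.

// Assumed shape of the handler used by snapshot restoration: given a path,
// open the complete database bundle (key-value columns plus bloom databases).
use std::io;
use std::path::Path;
use std::sync::Arc;

pub trait BlockChainDBHandler: Send + Sync {
	fn open(&self, path: &Path) -> io::Result<Arc<BlockChainDB>>;
}

test_helpers::restoration_db_handler(config) then plausibly captures the kvdb-rocksdb DatabaseConfig and, on open, creates the rocksdb-backed key-value store together with blooms and trace-blooms directories under the given path (see commit f1ead7d, "restore blooms-db after snapshot").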