Refactor synchronization #62

Open. Wants to merge 8 commits into base: master.
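
In short, as reconstructed from the diff below: `SharedTensor::new` no longer takes a device and no longer returns a `Result`, and the old `add_device()`/`sync()` pair is replaced by access-intent methods such as `write_only()` and `read_write()`. A minimal sketch of the change (the `demo` function is illustrative; module paths follow `src/lib.rs`, and the exact signatures and error types are assumptions based on this diff):

```rust
extern crate collenchyma as co;

use co::device::DeviceType;
use co::tensor::SharedTensor;

// Illustrative only: shows the before/after calling convention.
fn demo(device: &DeviceType) {
    // Old API (removed by this PR):
    //     let mem = &mut SharedTensor::<u8>::new(device, &1_048_576).unwrap();
    //     mem.add_device(&other_device);
    //     mem.sync(&other_device).unwrap();

    // New API: construction takes only the size and cannot fail; memory is
    // requested per device with an explicit access intent.
    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
    mem.write_only(device).unwrap(); // allocate; previous contents not synced in
    mem.read_write(device).unwrap(); // readable and writable, synced if stale
}
```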
Cargo.toml (4 changes: 2 additions & 2 deletions)
@@ -20,9 +20,9 @@ enum_primitive = "0.1.0"
 byteorder = "0.4"
 num = "0.1"
 lazy_static = "0.1.15"
-linear-map = "0.0.4"
 
 clippy = { version = "0.0.27", optional = true }
+compiletest_rs = { version = "0.1", optional = true }
 
 [dev-dependencies]
 rand = "0.3"
@@ -36,7 +36,7 @@ opencl = []
 unstable_alloc = [] # faster but unstable memory allocation on native machines
 
 dev = []
-unstable = [] # for travis-cargo
+unstable = ["compiletest_rs"] # for travis-cargo
 travis = ["native"]
 lint = ["clippy"]

benches/shared_tensor.rs (16 changes: 8 additions & 8 deletions)
@@ -59,14 +59,14 @@ fn sync_back_and_forth(
 ) {
     b.iter(|| {
         for _ in 0..n {
-            match mem.sync(&cl_device) {
+            match mem.read_write(&cl_device) {
                 Ok(_) => assert!(true),
                 Err(err) => {
                     println!("{:?}", err);
                     assert!(false);
                 }
             }
-            match mem.sync(&nt_device) {
+            match mem.read_write(&nt_device) {
                 Ok(_) => assert!(true),
                 Err(err) => {
                     println!("{:?}", err);
@@ -112,8 +112,8 @@ fn bench_256_sync_1mb_native_opencl(b: &mut Bencher) {
     // if let &DeviceType::OpenCL(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(nt_device, &1_048_576).unwrap();
-    mem.add_device(&cl_device);
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
+    mem.write_only(&cl_device);
     bench_256_sync_1mb_native_opencl_profile(b, nt_device, cl_device, mem);
 }

@@ -133,8 +133,8 @@ fn bench_256_sync_1mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(nt_device, &1_048_576).unwrap();
-    mem.add_device(&cl_device);
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
+    mem.write_only(&cl_device);
     bench_256_sync_1mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }

@@ -154,8 +154,8 @@ fn bench_2_sync_128mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(nt_device, &(128 * 1_048_576)).unwrap();
-    mem.add_device(&cl_device);
+    let mem = &mut SharedTensor::<u8>::new(&(128 * 1_048_576));
+    mem.write_only(&cl_device);
     bench_2_sync_128mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }

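The round trip these benchmarks measure, condensed into one function with the new API (a sketch: setup is elided, parameter names follow the benchmark above, and the exact error type returned by `read_write` is an assumption):

```rust
extern crate collenchyma as co;

use co::device::DeviceType;
use co::tensor::SharedTensor;

// Each iteration makes the tensor current on one device, then on the
// other, forcing a transfer in both directions.
fn round_trip(mem: &mut SharedTensor<u8>,
              cl_device: &DeviceType,
              nt_device: &DeviceType) {
    mem.read_write(cl_device).expect("sync to OpenCL device failed");
    mem.read_write(nt_device).expect("sync back to native failed");
}
```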
src/device.rs (2 changes: 1 addition & 1 deletion)
@@ -64,7 +64,7 @@ pub enum DeviceType {
     Cuda(CudaContext),
 }
 
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 /// Defines a generic set of Memory Errors.
 pub enum Error {
     /// Failures related to the Native framework implementation.
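For illustration, what the added `PartialEq`/`Eq` derives make possible, here and in the framework error types below (a standalone sketch with stand-in variants, not this crate's actual ones):

```rust
// Stand-in error enum with the same derives as in this PR.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum Error {
    InvalidValue, // hypothetical variant
    OutOfMemory,  // hypothetical variant
}

fn main() {
    let first = Error::InvalidValue;
    let second = Error::InvalidValue;
    // Previously errors could only be matched or Debug-formatted; with
    // PartialEq/Eq they can be compared directly, e.g. in test assertions.
    assert_eq!(first, second);
    assert!(first != Error::OutOfMemory);
}
```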
src/frameworks/cuda/api/driver/error.rs (2 changes: 1 addition & 1 deletion)
@@ -2,7 +2,7 @@
 
 use std::{fmt, error};
 
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 /// Defines CUDA driver errors.
 pub enum Error {
     /// Failure with provided value.
src/frameworks/native/error.rs (2 changes: 1 addition & 1 deletion)
@@ -2,7 +2,7 @@
 
 use std::{fmt, error};
 
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 /// Defines the Native Error.
 pub enum Error {
     /// Failure related to allocation, syncing memory
src/frameworks/opencl/api/error.rs (2 changes: 1 addition & 1 deletion)
@@ -2,7 +2,7 @@
 
 use std::{fmt, error};
 
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 /// Defines OpenCL errors.
 pub enum Error {
     /// Failure with provided platform.
src/lib.rs (1 change: 0 additions & 1 deletion)
@@ -157,7 +157,6 @@ extern crate enum_primitive;
 extern crate lazy_static;
 extern crate num;
 extern crate byteorder;
-extern crate linear_map;
 
 pub mod backend;
 pub mod device;
src/plugin.rs (18 changes: 13 additions & 5 deletions)
@@ -30,6 +30,7 @@
 //! [collenchyma-nn]: https://github.com/autumnai/collenchyma-nn
 
 pub use self::numeric_helpers::Float;
+use tensor;
 
 /// Describes numeric types and traits for a Plugin.
 pub mod numeric_helpers {
@@ -39,8 +40,9 @@ pub mod numeric_helpers {
 #[derive(Debug, Copy, Clone)]
 /// Defines a high-level Plugin Error.
 pub enum Error {
-    /// Failure at receiving the correct device memory from the SharedTensor.
-    MissingMemoryForDevice(&'static str),
+    /// Failure related to `SharedTensor`: use of uninitialized memory,
+    /// synchronization error or memory allocation failure.
+    SharedTensor(tensor::Error),
     /// Failure at the execution of the Operation.
     Operation(&'static str),
     /// Failure at the Plugin.
@@ -50,7 +52,7 @@ pub enum Error {
 impl ::std::fmt::Display for Error {
     fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
         match *self {
-            Error::MissingMemoryForDevice(ref err) => write!(f, "MissingMemoryForDevice error: {}", err),
+            Error::SharedTensor(ref err) => write!(f, "SharedTensor error: {}", err),
             Error::Operation(ref err) => write!(f, "Operation error: {}", err),
             Error::Plugin(ref err) => write!(f, "Plugin error: {}", err),
         }
@@ -60,15 +62,15 @@ impl ::std::fmt::Display for Error {
 impl ::std::error::Error for Error {
     fn description(&self) -> &str {
         match *self {
-            Error::MissingMemoryForDevice(ref err) => err,
+            Error::SharedTensor(ref err) => err.description(),
             Error::Operation(ref err) => err,
             Error::Plugin(ref err) => err,
         }
     }
 
     fn cause(&self) -> Option<&::std::error::Error> {
         match *self {
-            Error::MissingMemoryForDevice(_) => None,
+            Error::SharedTensor(ref err) => err.cause(),
             Error::Operation(_) => None,
             Error::Plugin(_) => None,
         }
@@ -80,3 +82,9 @@ impl From<Error> for ::error::Error {
         ::error::Error::Plugin(err)
     }
 }
+
+impl From<tensor::Error> for Error {
+    fn from(err: tensor::Error) -> Error {
+        Error::SharedTensor(err)
+    }
+}
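
Finally, a sketch of what the new `From<tensor::Error>` conversion enables in downstream plugins such as collenchyma-nn: `try!` can now propagate a tensor error directly out of a plugin method, wrapping it into `Error::SharedTensor` automatically (the function and its body are hypothetical; the `co::plugin::Error` path and `read_write`'s error type are assumptions):

```rust
extern crate collenchyma as co;

use co::device::DeviceType;
use co::plugin::Error;
use co::tensor::SharedTensor;

// Hypothetical plugin operation returning the plugin-level Error.
fn scale_in_place(x: &mut SharedTensor<f32>, device: &DeviceType)
                  -> Result<(), Error> {
    // try! converts tensor::Error into Error::SharedTensor via the
    // From impl added in this PR.
    let _mem = try!(x.read_write(device));
    // ... run the actual operation against `_mem` here ...
    Ok(())
}
```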