From c128a9b203636a34f3bfd0e1f9ae72e21a4a0885 Mon Sep 17 00:00:00 2001
From: Alexander Morozov
Date: Sat, 23 Apr 2016 16:18:28 +0300
Subject: [PATCH] refactor/tensor: return `SharedTensor` from `new` instead of
 `Result<..>`

Allocation of `SharedTensor` may fail only on OOM, so returning a
`Result` is redundant.
---
 benches/shared_tensor.rs      |  6 +++---
 src/tensor.rs                 |  8 ++++----
 tests/framework_cuda_specs.rs |  2 +-
 tests/shared_memory_specs.rs  | 18 +++++++++---------
 tests/tensor_specs.rs         |  2 +-
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/benches/shared_tensor.rs b/benches/shared_tensor.rs
index 0f3f7c85..4791d8e0 100644
--- a/benches/shared_tensor.rs
+++ b/benches/shared_tensor.rs
@@ -112,7 +112,7 @@ fn bench_256_sync_1mb_native_opencl(b: &mut Bencher) {
     // if let &DeviceType::OpenCL(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&1_048_576).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
     mem.write_only(&cl_device);
     bench_256_sync_1mb_native_opencl_profile(b, nt_device, cl_device, mem);
 }
@@ -133,7 +133,7 @@ fn bench_256_sync_1mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&1_048_576).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&1_048_576);
     mem.write_only(&cl_device);
     bench_256_sync_1mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }
@@ -154,7 +154,7 @@ fn bench_2_sync_128mb_native_cuda(b: &mut Bencher) {
     // if let &DeviceType::Cuda(ref cl_d) = cl_device {
     //     println!("{:?}", cl_d.hardwares()[0].clone().load_name());
     // }
-    let mem = &mut SharedTensor::<u8>::new(&(128 * 1_048_576)).unwrap();
+    let mem = &mut SharedTensor::<u8>::new(&(128 * 1_048_576));
     mem.write_only(&cl_device);
     bench_2_sync_128mb_native_cuda_profile(b, nt_device, cl_device, mem);
 }
diff --git a/src/tensor.rs b/src/tensor.rs
index cc1234da..93062513 100644
--- a/src/tensor.rs
+++ b/src/tensor.rs
@@ -40,7 +40,7 @@
 //! // allocate memory
 //! let native = Native::new();
 //! let device = native.new_device(native.hardwares()).unwrap();
-//! let shared_data = &mut SharedTensor::<i32>::new(&5).unwrap();
+//! let shared_data = &mut SharedTensor::<i32>::new(&5);
 //! // fill memory with some numbers
 //! let mut mem = shared_data.write_only(&device).unwrap().as_mut_native().unwrap();
 //! mem.as_mut_slice::<i32>().clone_from_slice(&[0, 1, 2, 3, 4]);
@@ -266,13 +266,13 @@ impl<T> fmt::Debug for SharedTensor<T> {
 impl<T> SharedTensor<T> {
     /// Create new Tensor by allocating [Memory][1] on a Device.
     /// [1]: ../memory/index.html
-    pub fn new<D: IntoTensorDesc>(desc: &D) -> Result<SharedTensor<T>, Error> {
-        Ok(SharedTensor {
+    pub fn new<D: IntoTensorDesc>(desc: &D) -> SharedTensor<T> {
+        SharedTensor {
             desc: desc.into(),
             locations: RefCell::new(Vec::new()),
             up_to_date: Cell::new(0),
             phantom: PhantomData,
-        })
+        }
     }
 
     /// Change the shape of the Tensor.
diff --git a/tests/framework_cuda_specs.rs b/tests/framework_cuda_specs.rs
index b52ea360..26e543b9 100644
--- a/tests/framework_cuda_specs.rs
+++ b/tests/framework_cuda_specs.rs
@@ -48,7 +48,7 @@ mod framework_cuda_spec {
         let cuda = Cuda::new();
         let device = cuda.new_device(&cuda.hardwares()[0..1]).unwrap();
         for _ in 0..256 {
-            let x = &mut SharedTensor::<f32>::new(&vec![256, 1024, 128]).unwrap();
+            let mut x = SharedTensor::<f32>::new(&vec![256, 1024, 128]);
             x.write_only(&device).unwrap();
         }
     }
diff --git a/tests/shared_memory_specs.rs b/tests/shared_memory_specs.rs
index 23a89bb1..e263c3ee 100644
--- a/tests/shared_memory_specs.rs
+++ b/tests/shared_memory_specs.rs
@@ -24,7 +24,7 @@
     fn it_creates_new_shared_memory_for_native() {
         let ntv = Native::new();
         let cpu = ntv.new_device(ntv.hardwares()).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&cpu).unwrap() {
             &mut MemoryType::Native(ref dat) => {
                 let data = dat.as_slice::<f32>();
@@ -40,7 +40,7 @@
     fn it_creates_new_shared_memory_for_cuda() {
         let ntv = Cuda::new();
         let device = ntv.new_device(&ntv.hardwares()[0..1]).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&device) {
             Ok(&mut MemoryType::Cuda(_)) => {},
             #[cfg(any(feature = "cuda", feature = "opencl"))]
@@ -53,7 +53,7 @@
     fn it_creates_new_shared_memory_for_opencl() {
         let ntv = OpenCL::new();
         let device = ntv.new_device(&ntv.hardwares()[0..1]).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         match shared_data.write_only(&device) {
             Ok(&mut MemoryType::OpenCL(_)) => {},
             _ => assert!(false),
@@ -65,7 +65,7 @@
     fn it_fails_on_initialized_memory_read() {
         let ntv = Native::new();
         let cpu = ntv.new_device(ntv.hardwares()).unwrap();
-        let shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert_eq!(shared_data.read(&cpu).unwrap_err(),
                    Error::UninitializedMemory);
         assert_eq!(shared_data.read_write(&cpu).unwrap_err(),
@@ -85,7 +85,7 @@
         let nt = Native::new();
         let cu_device = cu.new_device(&cu.hardwares()[0..1]).unwrap();
         let nt_device = nt.new_device(nt.hardwares()).unwrap();
-        let mem = &mut SharedTensor::<f64>::new(&3).unwrap();
+        let mut mem = SharedTensor::<f64>::new(&3);
         write_to_memory(mem.write_only(&nt_device).unwrap(),
                         &[1.0f64, 2.0, 123.456]);
         match mem.read(&cu_device) {
@@ -115,7 +115,7 @@
         let nt = Native::new();
         let cl_device = cl.new_device(&cl.hardwares()[0..1]).unwrap();
         let nt_device = nt.new_device(nt.hardwares()).unwrap();
-        let mem = &mut SharedTensor::<f64>::new(&3).unwrap();
+        let mut mem = SharedTensor::<f64>::new(&3);
         write_to_memory(mem.write_only(&nt_device).unwrap(),
                         &[1.0f64, 2.0, 123.456]);
         match mem.read(&cl_device) {
@@ -127,7 +127,7 @@
         }
         // It has not successfully synced to the device.
         // Not the other way around.
-        mem.drop_device(&nt_device);
+        mem.drop_device(&nt_device).unwrap();
         match mem.read(&nt_device) {
             Ok(m) => assert_eq!(m.as_native().unwrap().as_slice::<f64>(),
                                 [1.0, 2.0, 123.456]),
@@ -140,13 +140,13 @@
 
     #[test]
     fn it_reshapes_correctly() {
-        let mut shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert!(shared_data.reshape(&vec![5, 2]).is_ok());
     }
 
     #[test]
     fn it_returns_err_for_invalid_size_reshape() {
-        let mut shared_data = &mut SharedTensor::<f32>::new(&10).unwrap();
+        let mut shared_data = SharedTensor::<f32>::new(&10);
         assert!(shared_data.reshape(&vec![10, 2]).is_err());
     }
 }
diff --git a/tests/tensor_specs.rs b/tests/tensor_specs.rs
index 61f7c666..8c85e976 100644
--- a/tests/tensor_specs.rs
+++ b/tests/tensor_specs.rs
@@ -31,7 +31,7 @@
 
     #[test]
     fn it_resizes_tensor() {
-        let mut tensor = SharedTensor::<f32>::new(&(10, 20, 30)).unwrap();
+        let mut tensor = SharedTensor::<f32>::new(&(10, 20, 30));
         assert_eq!(tensor.desc(), &[10, 20, 30]);
         tensor.resize(&(2, 3, 4, 5)).unwrap();
         assert_eq!(tensor.desc(), &[2, 3, 4, 5]);
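
For review context, a minimal before/after sketch of the call-site change,
assuming collenchyma's usual module layout (`co::framework::IFramework`,
`co::frameworks::Native`, `co::tensor::SharedTensor`) and a Native device;
the element type `f32` and the shape `&10` are illustrative only, not taken
from the patch:

    extern crate collenchyma as co;

    use co::framework::IFramework;
    use co::frameworks::Native;
    use co::tensor::SharedTensor;

    fn main() {
        let ntv = Native::new();
        let device = ntv.new_device(ntv.hardwares()).unwrap();

        // Before this patch: construction itself returned a Result.
        // let mut x = SharedTensor::<f32>::new(&10).unwrap();

        // After this patch: construction is infallible. No device memory is
        // touched in `new`; only the accessor call below can still fail.
        let mut x = SharedTensor::<f32>::new(&10);
        x.write_only(&device).unwrap();
    }

This keeps the fallible path (`write_only`, `read`, `read_write`) where the
allocation actually happens, which is why every `::new(..).unwrap()` in the
benches and tests above loses its `.unwrap()`.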