From 86f72bb16080f94f2268a3665ab7453cfa949f92 Mon Sep 17 00:00:00 2001 From: Aaron Lun Date: Mon, 13 May 2024 11:31:18 -0700 Subject: [PATCH] Move dimension-related choices out of the class template parameters. (#95) This refers to previously-template parameters controlling the choice of row-major/column-major for dense matrices, CSR/CSC for sparse matrices, whether to combine/subset on the rows or columns, etc. By converting them into regular arguments in the constructor, the class is more ergonomic for users who need to make the choice at runtime. It should also reduce the amount of work that the compiler needs to do, avoiding instantiation of near-redundant classes. This change should not be associated with a performance penalty as the dimension choice is not used within tight loops. At most, these parameters are used to decide the subclass of the Extractor that is created by the various *_row() and *_column() methods, after which the parameters are no longer used. So there is no real downside to converting them into regular arguments. The only exception is that of the delayed isometric unary class, which needs these parameters to be compile-time in order to enable constexpr if's. These allow us to avoid evaluating the untaken path where the methods might not be implemented for a particular Operation_. So, we just leave them be. We also updated the convert_to_* functions so that they accept the output order as a function argument. This allows streamlining of the tests. We removed the overloads that automatically match the order of the output matrix with that of the input matrix, as it is now trivial to do so with the other functions; this is also required to avoid difficult-to-debug problems with overload selection. 
--- include/tatami/base/Matrix.hpp | 2 +- include/tatami/dense/DenseMatrix.hpp | 59 ++++--- include/tatami/dense/convert_to_dense.hpp | 50 +----- .../tatami/isometric/binary/arith_helpers.hpp | 4 +- .../isometric/binary/boolean_helpers.hpp | 4 +- .../isometric/binary/compare_helpers.hpp | 4 +- .../tatami/isometric/unary/arith_helpers.hpp | 6 +- .../isometric/unary/boolean_helpers.hpp | 8 +- .../isometric/unary/compare_helpers.hpp | 6 +- .../tatami/isometric/unary/math_helpers.hpp | 2 - include/tatami/other/DelayedBind.hpp | 86 ++++++---- .../tatami/sparse/CompressedSparseMatrix.hpp | 82 +++++----- .../tatami/sparse/FragmentedSparseMatrix.hpp | 78 +++++---- .../sparse/convert_to_compressed_sparse.hpp | 94 +++++------ .../sparse/convert_to_fragmented_sparse.hpp | 92 ++++------- include/tatami/subset/DelayedSubset.hpp | 26 +-- include/tatami/subset/DelayedSubsetBlock.hpp | 52 ++++-- include/tatami/subset/DelayedSubsetSorted.hpp | 29 ++-- .../subset/DelayedSubsetSortedUnique.hpp | 29 ++-- include/tatami/subset/DelayedSubsetUnique.hpp | 29 ++-- include/tatami/subset/make_DelayedSubset.hpp | 39 +++-- tests/src/dense/DenseMatrix.cpp | 8 +- tests/src/dense/convert_to_dense.cpp | 94 +++-------- .../binary/DelayedBinaryIsometricOp.cpp | 12 +- tests/src/isometric/binary/arith_helpers.cpp | 22 +-- .../src/isometric/binary/boolean_helpers.cpp | 16 +- .../src/isometric/binary/compare_helpers.cpp | 20 +-- .../unary/DelayedUnaryIsometricOp.cpp | 8 +- .../isometric/unary/arith_scalar_helpers.cpp | 18 +-- .../isometric/unary/arith_vector_helpers.cpp | 34 ++-- .../unary/boolean_scalar_helpers.cpp | 14 +- .../unary/boolean_vector_helpers.cpp | 12 +- .../unary/compare_scalar_helpers.cpp | 16 +- .../unary/compare_vector_helpers.cpp | 16 +- tests/src/isometric/unary/math_helpers.cpp | 62 +++---- tests/src/other/DelayedBind.cpp | 2 +- tests/src/other/DelayedCast.cpp | 8 +- tests/src/other/DelayedTranspose.cpp | 6 +- tests/src/sparse/CompressedSparseMatrix.cpp | 6 +- 
.../sparse/convert_to_compressed_sparse.cpp | 152 +++++------------- .../sparse/convert_to_fragmented_sparse.cpp | 138 +++++----------- tests/src/subset/DelayedSubset.cpp | 26 +-- tests/src/subset/DelayedSubsetBlock.cpp | 12 +- tests/src/utils/wrap_shared_ptr.cpp | 2 +- 44 files changed, 654 insertions(+), 831 deletions(-) diff --git a/include/tatami/base/Matrix.hpp b/include/tatami/base/Matrix.hpp index 788bc4a1..f5a041ca 100644 --- a/include/tatami/base/Matrix.hpp +++ b/include/tatami/base/Matrix.hpp @@ -26,7 +26,7 @@ template using VectorPtr = std::shared_ptr >; /** - * @brief Virtual class for a matrix with a defined type. + * @brief Virtual class for a matrix. * * @tparam Value Data value type, should be numeric. * @tparam Index Row/column index type, should be integer. diff --git a/include/tatami/dense/DenseMatrix.hpp b/include/tatami/dense/DenseMatrix.hpp index 21d56f82..4cbf1c20 100644 --- a/include/tatami/dense/DenseMatrix.hpp +++ b/include/tatami/dense/DenseMatrix.hpp @@ -158,8 +158,6 @@ struct SecondaryMyopicIndexDense : public MyopicDenseExtractor { /** * @brief Dense matrix representation. * - * @tparam row_ Whether this is a row-major representation. - * If `false`, a column-major representation is assumed instead. * @tparam Value_ Type of the matrix values. * @tparam Index_ Type of the row/column indices. * @tparam Storage_ Vector class used to store the matrix values internally. @@ -167,25 +165,17 @@ struct SecondaryMyopicIndexDense : public MyopicDenseExtractor { * Methods should be available for `size()`, `begin()`, `end()` and `[]`. * If a method is available for `data()` that returns a `const Value_*`, it will also be used. */ -template > +template > class DenseMatrix : public Matrix { public: /** * @param nr Number of rows. * @param nc Number of columns. - * @param source Vector of values, or length equal to the product of `nr` and `nc`. + * @param vals Vector of values of length equal to the product of `nr` and `nc`. 
+ * @param row Whether `vals` stores the matrix contents in a row-major representation. + * If `false`, a column-major representation is assumed instead. */ - DenseMatrix(Index_ nr, Index_ nc, const Storage_& source) : nrows(nr), ncols(nc), values(source) { - check_dimensions(nr, nc, values.size()); - return; - } - - /** - * @param nr Number of rows. - * @param nc Number of columns. - * @param source Vector of values, or length equal to the product of `nr` and `nc`. - */ - DenseMatrix(Index_ nr, Index_ nc, Storage_&& source) : nrows(nr), ncols(nc), values(source) { + DenseMatrix(Index_ nr, Index_ nc, Storage_ vals, bool row) : nrows(nr), ncols(nc), values(std::move(vals)), row_major(row) { check_dimensions(nr, nc, values.size()); return; } @@ -193,6 +183,7 @@ class DenseMatrix : public Matrix { private: Index_ nrows, ncols; Storage_ values; + bool row_major; static void check_dimensions(size_t nr, size_t nc, size_t expected) { // cast to size_t is deliberate to avoid overflow on Index_ on product. 
if (nr * nc != expected) { @@ -205,7 +196,7 @@ class DenseMatrix : public Matrix { Index_ ncol() const { return ncols; } - bool prefer_rows() const { return row_; } + bool prefer_rows() const { return row_major; } bool uses_oracle(bool) const { return false; } @@ -213,7 +204,7 @@ class DenseMatrix : public Matrix { double sparse_proportion() const { return 0; } - double prefer_rows_proportion() const { return static_cast(row_); } + double prefer_rows_proportion() const { return static_cast(row_major); } using Matrix::dense; @@ -221,7 +212,7 @@ class DenseMatrix : public Matrix { private: Index_ primary() const { - if constexpr(row_) { + if (row_major) { return nrows; } else { return ncols; @@ -229,7 +220,7 @@ class DenseMatrix : public Matrix { } Index_ secondary() const { - if constexpr(row_) { + if (row_major) { return ncols; } else { return nrows; @@ -241,7 +232,7 @@ class DenseMatrix : public Matrix { *****************************/ public: std::unique_ptr > dense(bool row, const Options&) const { - if (row_ == row) { + if (row_major == row) { return std::make_unique >(values, secondary()); } else { return std::make_unique >(values, secondary(), primary()); @@ -249,7 +240,7 @@ class DenseMatrix : public Matrix { } std::unique_ptr > dense(bool row, Index_ block_start, Index_ block_length, const Options&) const { - if (row_ == row) { + if (row_major == row) { return std::make_unique >(values, secondary(), block_start, block_length); } else { return std::make_unique >(values, secondary(), block_start, block_length); @@ -257,7 +248,7 @@ class DenseMatrix : public Matrix { } std::unique_ptr > dense(bool row, VectorPtr indices_ptr, const Options&) const { - if (row_ == row) { + if (row_major == row) { return std::make_unique >(values, secondary(), std::move(indices_ptr)); } else { return std::make_unique >(values, secondary(), std::move(indices_ptr)); @@ -318,15 +309,31 @@ class DenseMatrix : public Matrix { * Column-major matrix. 
* See `tatami::DenseMatrix` for details on the template parameters. */ -template > -using DenseColumnMatrix = DenseMatrix; +template > +class DenseColumnMatrix : public DenseMatrix { +public: + /** + * @param nr Number of rows. + * @param nc Number of columns. + * @param vals Vector of values of length equal to the product of `nr` and `nc`, storing the matrix in column-major format. + */ + DenseColumnMatrix(Index_ nr, Index_ nc, Storage_ vals) : DenseMatrix(nr, nc, std::move(vals), false) {} +}; /** * Row-major matrix. * See `tatami::DenseMatrix` for details on the template parameters. */ -template > -using DenseRowMatrix = DenseMatrix; +template > +class DenseRowMatrix : public DenseMatrix { +public: + /** + * @param nr Number of rows. + * @param nc Number of columns. + * @param vals Vector of values of length equal to the product of `nr` and `nc`, storing the matrix in row-major format. + */ + DenseRowMatrix(Index_ nr, Index_ nc, Storage_ vals) : DenseMatrix(nr, nc, std::move(vals), true) {} +}; } diff --git a/include/tatami/dense/convert_to_dense.hpp b/include/tatami/dense/convert_to_dense.hpp index 22e9e0c5..45dcd41a 100644 --- a/include/tatami/dense/convert_to_dense.hpp +++ b/include/tatami/dense/convert_to_dense.hpp @@ -166,63 +166,19 @@ inline std::shared_ptr > convert_to_dense(const Matrixncol(); std::vector buffer(static_cast(NR) * static_cast(NC)); convert_to_dense(incoming, row, buffer.data(), threads); - if (row) { - return std::shared_ptr >(new DenseMatrix(NR, NC, std::move(buffer))); - } else { - return std::shared_ptr >(new DenseMatrix(NR, NC, std::move(buffer))); - } -} - -/** - * This overload makes it easier to control the desired output order when it is not known at compile time. - * - * @tparam Value_ Type of data values in the output interface. - * @tparam Index Integer type for the indices in the output interface. - * @tparam StoredValue_ Type of data values to be stored in the output. 
- * @tparam Matrix_ Input matrix class, most typically a `tatami::Matrix`. - * - * @param incoming Pointer to a `tatami::Matrix`. - * @param order Ordering of values in the output dense matrix - row-major (0) or column-major (1). - * If set to -1, the ordering is chosen based on `tatami::Matrix::prefer_rows()`. - * @param threads Number of threads to use. - * - * @return A pointer to a new `tatami::DenseMatrix` with the same dimensions and type as the matrix referenced by `incoming`. - */ -template < - typename Value_ = double, - typename Index_ = int, - typename StoredValue_ = Value_, - typename InputValue_, - typename InputIndex_ -> -std::shared_ptr > convert_to_dense(const Matrix* incoming, int order, int threads = 1) { - if (order < 0) { - order = static_cast(!incoming->prefer_rows()); - } - if (order == 0) { - return convert_to_dense(incoming, threads); - } else { - return convert_to_dense(incoming, threads); - } + return std::shared_ptr >(new DenseMatrix(NR, NC, std::move(buffer), row)); } /** * @cond */ // Backwards compatbility. -template +template void convert_to_dense(const Matrix* incoming, StoredValue_* store, int threads = 1) { convert_to_dense(incoming, row_, store, threads); } -template < - bool row_, - typename Value_ = double, - typename Index_ = int, - typename StoredValue_ = Value_, - typename InputValue_, - typename InputIndex_ -> +template inline std::shared_ptr > convert_to_dense(const Matrix* incoming, int threads = 1) { return convert_to_dense(incoming, row_, threads); } diff --git a/include/tatami/isometric/binary/arith_helpers.hpp b/include/tatami/isometric/binary/arith_helpers.hpp index 9e489125..8a6f6759 100644 --- a/include/tatami/isometric/binary/arith_helpers.hpp +++ b/include/tatami/isometric/binary/arith_helpers.hpp @@ -10,8 +10,6 @@ * @file arith_helpers.hpp * * @brief Helper classes for binary arithmetic operations. - * - * Classes defined here should be used as the `OP` in the `DelayedBinaryIsometricOp` class. 
*/ namespace tatami { @@ -19,7 +17,7 @@ namespace tatami { /** * @brief Delayed binary arithmetic. * - * This should be used as the `OP` in the `DelayedBinaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedBinaryIsometricOp` class. * * @tparam op_ The arithmetic operation. */ diff --git a/include/tatami/isometric/binary/boolean_helpers.hpp b/include/tatami/isometric/binary/boolean_helpers.hpp index bf62499b..d54761fa 100644 --- a/include/tatami/isometric/binary/boolean_helpers.hpp +++ b/include/tatami/isometric/binary/boolean_helpers.hpp @@ -8,8 +8,6 @@ * @file boolean_helpers.hpp * * @brief Helper classes for binary boolean operations. - * - * Classes defined here should be used as the `OP` in the `DelayedBinaryIsometricOp` class. */ namespace tatami { @@ -17,7 +15,7 @@ namespace tatami { /** * @brief Delayed binary boolean operations. * - * This should be used as the `OP` in the `DelayedBinaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedBinaryIsometricOp` class. * * @tparam op_ The boolean operation. */ diff --git a/include/tatami/isometric/binary/compare_helpers.hpp b/include/tatami/isometric/binary/compare_helpers.hpp index 5c39dd04..3a05fa96 100644 --- a/include/tatami/isometric/binary/compare_helpers.hpp +++ b/include/tatami/isometric/binary/compare_helpers.hpp @@ -8,8 +8,6 @@ * @file compare_helpers.hpp * * @brief Helper classes for binary comparison operations. - * - * Classes defined here should be used as the `OP` in the `DelayedBinaryIsometricOp` class. */ namespace tatami { @@ -17,7 +15,7 @@ namespace tatami { /** * @brief Delayed binary comparison. * - * This should be used as the `OP` in the `DelayedBinaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedBinaryIsometricOp` class. * * @tparam op_ The comparison operation. 
*/ diff --git a/include/tatami/isometric/unary/arith_helpers.hpp b/include/tatami/isometric/unary/arith_helpers.hpp index 5b7a6d87..ee7cca9d 100644 --- a/include/tatami/isometric/unary/arith_helpers.hpp +++ b/include/tatami/isometric/unary/arith_helpers.hpp @@ -9,8 +9,6 @@ * @file arith_helpers.hpp * * @brief Helper classes for delayed unary arithmetic operations. - * - * Classes defined here should be used as the `OP` in the `DelayedUnaryIsometricOp` class. */ namespace tatami { @@ -66,7 +64,7 @@ Value_ delayed_arith_zero(Scalar_ scalar) { /** * @brief Delayed scalar arithmetic. * - * This should be used as the `OP` in the `DelayedUnaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedUnaryIsometricOp` class. * * @tparam op_ The arithmetic operation. * @tparam right_ Whether the scalar should be on the right hand side of the arithmetic operation. @@ -138,7 +136,7 @@ struct DelayedArithScalarHelper { /** * @brief Delayed vector arithmetic. * - * This should be used as the `OP` in the `DelayedUnaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedUnaryIsometricOp` class. * * @tparam op_ The arithmetic operation. * @tparam right_ Whether the vector's values should be on the right hand side of the arithmetic operation. diff --git a/include/tatami/isometric/unary/boolean_helpers.hpp b/include/tatami/isometric/unary/boolean_helpers.hpp index d62f85ab..08316ee1 100644 --- a/include/tatami/isometric/unary/boolean_helpers.hpp +++ b/include/tatami/isometric/unary/boolean_helpers.hpp @@ -8,8 +8,6 @@ * @file boolean_helpers.hpp * * @brief Helper classes for delayed unary boolean operations. - * - * Classes defined here should be used as the `OP` in the `DelayedUnaryIsometricOp` class. */ namespace tatami { @@ -37,7 +35,7 @@ bool delayed_boolean_actual_sparse(bool scalar) { /** * @brief Delayed scalar boolean operation. * - * This should be used as the `OP` in the `DelayedUnaryIsometricOp` class. 
+ * This should be used as the `Operation_` in the `DelayedUnaryIsometricOp` class. * * @tparam op_ The boolean operation. * @tparam Value_ Type of the data value. @@ -107,7 +105,7 @@ struct DelayedBooleanScalarHelper { /** * @brief Delayed boolean NOT operation. * - * This should be used as the `OP` in the `DelayedUnaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedUnaryIsometricOp` class. * * @tparam Value_ Type of the data value. */ @@ -170,7 +168,7 @@ struct DelayedBooleanNotHelper { /** * @brief Delayed vector boolean operations. * - * This should be used as the `OP` in the `DelayedUnaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedUnaryIsometricOp` class. * * @tparam op_ The boolean operation. * @tparam margin_ Matrix dimension along which the operation is to occur. diff --git a/include/tatami/isometric/unary/compare_helpers.hpp b/include/tatami/isometric/unary/compare_helpers.hpp index abd1a0f0..bce359f5 100644 --- a/include/tatami/isometric/unary/compare_helpers.hpp +++ b/include/tatami/isometric/unary/compare_helpers.hpp @@ -8,8 +8,6 @@ * @file compare_helpers.hpp * * @brief Helper classes for delayed unary comparison operations. - * - * Classes defined here should be used as the `OP` in the `DelayedUnaryIsometricOp` class. */ namespace tatami { @@ -37,7 +35,7 @@ bool delayed_compare_actual_sparse(Scalar_ scalar) { /** * @brief Delayed scalar comparison. * - * This should be used as the `OP` in the `DelayedUnaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedUnaryIsometricOp` class. * * @tparam op_ The comparison operation. * @tparam Value_ Type of the data value. @@ -108,7 +106,7 @@ struct DelayedCompareScalarHelper { /** * @brief Delayed vector comparisons. * - * This should be used as the `OP` in the `DelayedUnaryIsometricOp` class. + * This should be used as the `Operation_` in the `DelayedUnaryIsometricOp` class. * * @tparam op_ The comparison operation. 
* @tparam margin_ Matrix dimension along which the operation is to occur. diff --git a/include/tatami/isometric/unary/math_helpers.hpp b/include/tatami/isometric/unary/math_helpers.hpp index 77a5c626..ec9ed170 100644 --- a/include/tatami/isometric/unary/math_helpers.hpp +++ b/include/tatami/isometric/unary/math_helpers.hpp @@ -5,8 +5,6 @@ * @file math_helpers.hpp * * @brief Helpers for unary math operations. - * - * These classes should be used as the `OP` in the `DelayedUnaryIsometricOp` class. */ #include diff --git a/include/tatami/other/DelayedBind.hpp b/include/tatami/other/DelayedBind.hpp index 4f08551a..90859ff5 100644 --- a/include/tatami/other/DelayedBind.hpp +++ b/include/tatami/other/DelayedBind.hpp @@ -544,26 +544,28 @@ struct OracularPerpendicularSparse : public OracularSparseExtractor +template class DelayedBind : public Matrix { public: /** * @param ps Pointers to the matrices to be combined. - * All matrices to be combined should have the same number of columns (if `margin_ == 0`) or rows (otherwise). + * All matrices to be combined should have the same number of columns (if `row = true`) or rows (otherwise). + * @param row Whether to combine matrices by the rows (i.e., the output matrix has number of rows equal to the sum of the number of rows in `ps`). + * If false, combining is applied by the columns. */ - DelayedBind(std::vector > > ps) : mats(std::move(ps)), cumulative(mats.size()+1) { + DelayedBind(std::vector > > ps, bool row) : + mats(std::move(ps)), by_row(row), cumulative(mats.size()+1) + { size_t sofar = 0; for (size_t i = 0, nmats = mats.size(); i < nmats; ++i) { auto& current = mats[i]; Index_ primary, secondary; - if constexpr(margin_ == 0) { + if (by_row) { primary = current->nrow(); secondary = current->ncol(); } else { @@ -574,7 +576,7 @@ class DelayedBind : public Matrix { if (i == 0) { otherdim = secondary; } else if (otherdim != secondary) { - throw std::runtime_error("all 'mats' should have the same number of " + (margin_ == 0 ? 
std::string("columns") : std::string("rows"))); + throw std::runtime_error("all 'mats' should have the same number of " + (by_row ? std::string("columns") : std::string("rows"))); } // Removing the matrices that don't contribute anything, @@ -597,7 +599,7 @@ class DelayedBind : public Matrix { // hence, using Index_ for the mapping should not overflow. mapping.reserve(cumulative.back()); for (Index_ i = 0, nmats = mats.size(); i < nmats; ++i) { - mapping.insert(mapping.end(), (margin_ == 0 ? mats[i]->nrow() : mats[i]->ncol()), i); + mapping.insert(mapping.end(), (by_row ? mats[i]->nrow() : mats[i]->ncol()), i); } double denom = 0; @@ -625,12 +627,17 @@ class DelayedBind : public Matrix { /** * @param ps Pointers to the matrices to be combined. - * All matrices to be combined should have the same number of columns (if `margin_ == 0`) or rows (otherwise). + * All matrices to be combined should have the same number of columns (if `row = true`) or rows (otherwise). + * @param row Whether to combine matrices by the rows (i.e., the output matrix has number of rows equal to the sum of the number of rows in `ps`). + * If false, combining is applied by the columns. 
*/ - DelayedBind(const std::vector > >& ps) : DelayedBind(std::vector > >(ps.begin(), ps.end())) {} + DelayedBind(const std::vector > >& ps, bool row) : + DelayedBind(std::vector > >(ps.begin(), ps.end()), row) {} private: std::vector > > mats; + bool by_row; + Index_ otherdim = 0; std::vector cumulative; std::vector mapping; @@ -640,7 +647,7 @@ class DelayedBind : public Matrix { public: Index_ nrow() const { - if constexpr(margin_==0) { + if (by_row) { return cumulative.back(); } else { return otherdim; @@ -648,7 +655,7 @@ class DelayedBind : public Matrix { } Index_ ncol() const { - if constexpr(margin_==0) { + if (by_row) { return otherdim; } else { return cumulative.back(); @@ -690,7 +697,7 @@ class DelayedBind : public Matrix { std::unique_ptr > dense(bool row, const Options& opt) const { if (mats.size() == 1) { return mats[0]->dense(row, opt); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, opt); } else { return std::make_unique >(cumulative, mapping, mats, row, false, opt); @@ -700,7 +707,7 @@ class DelayedBind : public Matrix { std::unique_ptr > dense(bool row, Index_ block_start, Index_ block_length, const Options& opt) const { if (mats.size() == 1) { return mats[0]->dense(row, block_start, block_length, opt); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, block_start, block_length, opt); } else { return std::make_unique >(cumulative, mapping, mats, row, false, block_start, block_length, opt); @@ -710,7 +717,7 @@ class DelayedBind : public Matrix { std::unique_ptr > dense(bool row, VectorPtr indices_ptr, const Options& opt) const { if (mats.size() == 1) { return mats[0]->dense(row, std::move(indices_ptr), opt); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(indices_ptr), opt); } else { return std::make_unique 
>(cumulative, mapping, mats, row, false, std::move(indices_ptr), opt); @@ -724,7 +731,7 @@ class DelayedBind : public Matrix { std::unique_ptr > sparse(bool row, const Options& opt) const { if (mats.size() == 1) { return mats[0]->sparse(row, opt); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, opt); } else { return std::make_unique >(cumulative, mapping, mats, row, false, opt); @@ -734,7 +741,7 @@ class DelayedBind : public Matrix { std::unique_ptr > sparse(bool row, Index_ block_start, Index_ block_length, const Options& opt) const { if (mats.size() == 1) { return mats[0]->sparse(row, block_start, block_length, opt); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, block_start, block_length, opt); } else { return std::make_unique >(cumulative, mapping, mats, row, false, block_start, block_length, opt); @@ -744,7 +751,7 @@ class DelayedBind : public Matrix { std::unique_ptr > sparse(bool row, VectorPtr indices_ptr, const Options& opt) const { if (mats.size() == 1) { return mats[0]->sparse(row, std::move(indices_ptr), opt); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(indices_ptr), opt); } else { return std::make_unique >(cumulative, mapping, mats, row, false, std::move(indices_ptr), opt); @@ -760,7 +767,7 @@ class DelayedBind : public Matrix { return mats[0]->dense(row, std::move(oracle), opt); } else if (!stored_uses_oracle[row]) { return std::make_unique >(std::move(oracle), dense(row, opt)); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), opt); } else { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), opt); @@ -772,7 +779,7 @@ class DelayedBind : public Matrix { return mats[0]->dense(row, 
std::move(oracle), block_start, block_length, opt); } else if (!stored_uses_oracle[row]) { return std::make_unique >(std::move(oracle), dense(row, block_start, block_length, opt)); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), block_start, block_length, opt); } else { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), block_start, block_length, opt); @@ -784,7 +791,7 @@ class DelayedBind : public Matrix { return mats[0]->dense(row, std::move(oracle), std::move(indices_ptr), opt); } else if (!stored_uses_oracle[row]) { return std::make_unique >(std::move(oracle), dense(row, std::move(indices_ptr), opt)); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), std::move(indices_ptr), opt); } else { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), std::move(indices_ptr), opt); @@ -800,7 +807,7 @@ class DelayedBind : public Matrix { return mats[0]->sparse(row, std::move(oracle), opt); } else if (!stored_uses_oracle[row]) { return std::make_unique >(std::move(oracle), sparse(row, opt)); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), opt); } else { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), opt); @@ -812,7 +819,7 @@ class DelayedBind : public Matrix { return mats[0]->sparse(row, std::move(oracle), block_start, block_length, opt); } else if (!stored_uses_oracle[row]) { return std::make_unique >(std::move(oracle), sparse(row, block_start, block_length, opt)); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), block_start, block_length, opt); } else { return std::make_unique >(cumulative, mapping, mats, row, 
std::move(oracle), block_start, block_length, opt); @@ -824,7 +831,7 @@ class DelayedBind : public Matrix { return mats[0]->sparse(row, std::move(oracle), std::move(indices_ptr), opt); } else if (!stored_uses_oracle[row]) { return std::make_unique >(std::move(oracle), sparse(row, std::move(indices_ptr), opt)); - } else if (row == (margin_ == 0)) { + } else if (row == by_row) { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), std::move(indices_ptr), opt); } else { return std::make_unique >(cumulative, mapping, mats, row, std::move(oracle), std::move(indices_ptr), opt); @@ -835,26 +842,43 @@ class DelayedBind : public Matrix { /** * A `make_*` helper function to enable partial template deduction of supplied types. * - * @tparam margin_ Dimension along which the combining is to occur. - * If 0, matrices are combined along the rows; if 1, matrices are combined to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Type of index value. * * @param ps Pointers to `Matrix` objects. + * @param row Whether to combine matrices by the rows (i.e., the output matrix has number of rows equal to the sum of the number of rows in `ps`). + * If false, combining is applied by the columns. * * @return A pointer to a `DelayedBind` instance. */ -template -std::shared_ptr > make_DelayedBind(std::vector > > ps) { - return std::shared_ptr >(new DelayedBind(std::move(ps))); +template +std::shared_ptr > make_DelayedBind(std::vector > > ps, bool row) { + return std::shared_ptr >(new DelayedBind(std::move(ps), row)); +} + +/** + * @cond + */ +template +std::shared_ptr > make_DelayedBind(std::vector > > ps, bool row) { + return std::shared_ptr >(new DelayedBind(std::move(ps), row)); } +/** + * @endcond + */ /** * @cond */ +// Back-compatibility. 
+template +std::shared_ptr > make_DelayedBind(std::vector > > ps) { + return make_DelayedBind(std::move(ps), margin_ == 0); +} + template std::shared_ptr > make_DelayedBind(std::vector > > ps) { - return std::shared_ptr >(new DelayedBind(std::move(ps))); + return make_DelayedBind(std::move(ps), margin_ == 0); } /** * @endcond diff --git a/include/tatami/sparse/CompressedSparseMatrix.hpp b/include/tatami/sparse/CompressedSparseMatrix.hpp index be95f023..e2f34276 100644 --- a/include/tatami/sparse/CompressedSparseMatrix.hpp +++ b/include/tatami/sparse/CompressedSparseMatrix.hpp @@ -19,8 +19,6 @@ * @file CompressedSparseMatrix.hpp * * @brief Compressed sparse matrix representation. - * - * `typedef`s are provided for the usual row and column formats. */ namespace tatami { @@ -391,8 +389,6 @@ struct SecondaryMyopicIndexSparse : public MyopicSparseExtractor /** * @brief Compressed sparse matrix representation. * - * @tparam row_ Whether this is a compressed sparse row representation. - * If `false`, a compressed sparse column representation is expected instead. * @tparam Value_ Type of the matrix values. * @tparam Index_ Type of the row/column indices. * @tparam ValueStorage_ Vector class used to store the matrix values internally. @@ -407,7 +403,6 @@ struct SecondaryMyopicIndexSparse : public MyopicSparseExtractor * Methods should be available for `size()`, `begin()`, `end()` and `[]`. */ template< - bool row_, typename Value_, typename Index_, class ValueStorage_ = std::vector, @@ -420,25 +415,26 @@ class CompressedSparseMatrix : public Matrix { * @param nr Number of rows. * @param nc Number of columns. * @param vals Vector of non-zero elements. - * @param idx Vector of row indices (if `row_ = false`) or column indices (if `row_ = true`) for the non-zero elements. + * @param idx Vector of row indices (if `row = false`) or column indices (if `row = true`) for the non-zero elements. * @param ptr Vector of index pointers. 
+ * @param row Whether this is a compressed sparse row representation. * @param check Should the input vectors be checked for validity? * * If `check=true`, the constructor will check that `vals` and `idx` have the same length, equal to the number of structural non-zero elements; - * `ptr` has length equal to the number of rows (if `row_ = true`) or columns (otherwise) plus one; + * `ptr` has length equal to the number of rows (if `row = true`) or columns (otherwise) plus one; * `ptr` is non-decreasing with first and last values set to 0 and the number of structural non-zeroes, respectively; * `idx` is strictly increasing within each interval defined by successive elements of `ptr`; - * and all values of `idx` are non-negative and less than the number of columns (if `row_ = true`) or rows (otherwise). + * and all values of `idx` are non-negative and less than the number of columns (if `row = true`) or rows (otherwise). */ - CompressedSparseMatrix(Index_ nr, Index_ nc, ValueStorage_ vals, IndexStorage_ idx, PointerStorage_ ptr, bool check=true) : - nrows(nr), ncols(nc), values(std::move(vals)), indices(std::move(idx)), indptrs(std::move(ptr)) + CompressedSparseMatrix(Index_ nr, Index_ nc, ValueStorage_ vals, IndexStorage_ idx, PointerStorage_ ptr, bool row, bool check=true) : + nrows(nr), ncols(nc), values(std::move(vals)), indices(std::move(idx)), indptrs(std::move(ptr)), csr(row) { if (check) { if (values.size() != indices.size()) { throw std::runtime_error("'values' and 'indices' should be of the same length"); } - if constexpr(row_) { + if (csr) { if (indptrs.size() != static_cast(nrows) + 1){ throw std::runtime_error("length of 'indptrs' should be equal to 'nrows + 1'"); } @@ -457,7 +453,7 @@ class CompressedSparseMatrix : public Matrix { throw std::runtime_error("last element of 'indptrs' should be equal to length of 'indices'"); } - ElementType max_index = (row_ ? ncols : nrows); + ElementType max_index = (csr ? 
ncols : nrows); for (size_t i = 1; i < indptrs.size(); ++i) { auto start = indptrs[i- 1], end = indptrs[i]; if (end < start || end > last) { @@ -466,21 +462,13 @@ class CompressedSparseMatrix : public Matrix { for (auto x = start; x < end; ++x) { if (indices[x] < 0 || indices[x] >= max_index) { - if constexpr(row_) { - throw std::runtime_error("'indices' should contain non-negative integers less than the number of rows"); - } else { - throw std::runtime_error("'indices' should contain non-negative integers less than the number of columns"); - } + throw std::runtime_error("'indices' should contain non-negative integers less than the number of " + (csr ? std::string("columns") : std::string("rows"))); } } for (size_t j = start + 1; j < end; ++j) { if (indices[j] <= indices[j - 1]) { - if constexpr(row_) { - throw std::runtime_error("'indices' should be strictly increasing within each row"); - } else { - throw std::runtime_error("'indices' should be strictly increasing within each column"); - } + throw std::runtime_error("'indices' should be strictly increasing within each " + (csr ? std::string("row") : std::string("column"))); } } } @@ -492,6 +480,7 @@ class CompressedSparseMatrix : public Matrix { ValueStorage_ values; IndexStorage_ indices; PointerStorage_ indptrs; + bool csr; public: Index_ nrow() const { return nrows; } @@ -502,12 +491,9 @@ class CompressedSparseMatrix : public Matrix { double sparse_proportion() const { return 1; } - /** - * @return `true` if `row_ = true` (for `CompressedSparseRowMatrix` objects), otherwise returns `false` (for `CompressedSparseColumnMatrix` objects). 
- */ - bool prefer_rows() const { return row_; } + bool prefer_rows() const { return csr; } - double prefer_rows_proportion() const { return static_cast(row_); } + double prefer_rows_proportion() const { return static_cast(csr); } bool uses_oracle(bool) const { return false; } @@ -521,7 +507,7 @@ class CompressedSparseMatrix : public Matrix { private: Index_ secondary() const { - if constexpr(row_) { + if (csr) { return ncols; } else { return nrows; @@ -533,7 +519,7 @@ class CompressedSparseMatrix : public Matrix { *****************************/ public: std::unique_ptr > dense(bool row, const Options&) const { - if (row_ == row) { + if (csr == row) { return std::make_unique >(values, indices, indptrs, secondary()); } else { return std::make_unique >(values, indices, indptrs, secondary()); @@ -541,7 +527,7 @@ class CompressedSparseMatrix : public Matrix { } std::unique_ptr > dense(bool row, Index_ block_start, Index_ block_end, const Options&) const { - if (row_ == row) { + if (csr == row) { return std::make_unique >(values, indices, indptrs, secondary(), block_start, block_end); } else { return std::make_unique >(values, indices, indptrs, secondary(), block_start, block_end); @@ -549,7 +535,7 @@ class CompressedSparseMatrix : public Matrix { } std::unique_ptr > dense(bool row, VectorPtr subset_ptr, const Options&) const { - if (row_ == row) { + if (csr == row) { return std::make_unique >(values, indices, indptrs, secondary(), std::move(subset_ptr)); } else { return std::make_unique >(values, indices, indptrs, secondary(), std::move(subset_ptr)); @@ -561,7 +547,7 @@ class CompressedSparseMatrix : public Matrix { ******************************/ public: std::unique_ptr > sparse(bool row, const Options& opt) const { - if (row_ == row) { + if (csr == row) { return std::make_unique >(values, indices, indptrs, secondary(), opt); } else { return std::make_unique >(values, indices, indptrs, secondary(), opt); @@ -569,7 +555,7 @@ class CompressedSparseMatrix : public Matrix 
{ } std::unique_ptr > sparse(bool row, Index_ block_start, Index_ block_end, const Options& opt) const { - if (row_ == row) { + if (csr == row) { return std::make_unique >(values, indices, indptrs, secondary(), block_start, block_end, opt); } else { return std::make_unique >(values, indices, indptrs, secondary(), block_start, block_end, opt); @@ -577,7 +563,7 @@ class CompressedSparseMatrix : public Matrix { } std::unique_ptr > sparse(bool row, VectorPtr subset_ptr, const Options& opt) const { - if (row_ == row) { + if (csr == row) { return std::make_unique >(values, indices, indptrs, secondary(), std::move(subset_ptr), opt); } else { return std::make_unique >(values, indices, indptrs, secondary(), std::move(subset_ptr), opt); @@ -622,14 +608,38 @@ class CompressedSparseMatrix : public Matrix { * See `tatami::CompressedSparseMatrix` for details on the template parameters. */ template, class IndexStorage_ = std::vector, class PointerStorage_ = std::vector > -using CompressedSparseColumnMatrix = CompressedSparseMatrix; +class CompressedSparseColumnMatrix : public CompressedSparseMatrix { +public: + /** + * @param nr Number of rows. + * @param nc Number of columns. + * @param vals Vector of non-zero elements. + * @param idx Vector of row indices for the non-zero elements. + * @param ptr Vector of index pointers, of length equal to the number of columns plus 1. + * @param check Should the input vectors be checked for validity? + */ + CompressedSparseColumnMatrix(Index_ nr, Index_ nc, ValueStorage_ vals, IndexStorage_ idx, PointerStorage_ ptr, bool check = true) : + CompressedSparseMatrix(nr, nc, std::move(vals), std::move(idx), std::move(ptr), false, check) {} +}; /** * Compressed sparse row matrix. * See `tatami::CompressedSparseMatrix` for details on the template parameters. 
 */ template, class IndexStorage_ = std::vector, class PointerStorage_ = std::vector > -using CompressedSparseRowMatrix = CompressedSparseMatrix; +class CompressedSparseRowMatrix : public CompressedSparseMatrix { +public: + /** + * @param nr Number of rows. + * @param nc Number of columns. + * @param vals Vector of non-zero elements. + * @param idx Vector of column indices for the non-zero elements. + * @param ptr Vector of index pointers, of length equal to the number of rows plus 1. + * @param check Should the input vectors be checked for validity? + */ + CompressedSparseRowMatrix(Index_ nr, Index_ nc, ValueStorage_ vals, IndexStorage_ idx, PointerStorage_ ptr, bool check = true) : + CompressedSparseMatrix(nr, nc, std::move(vals), std::move(idx), std::move(ptr), true, check) {} +}; } diff --git a/include/tatami/sparse/FragmentedSparseMatrix.hpp b/include/tatami/sparse/FragmentedSparseMatrix.hpp index 28fd7466..e00e3562 100644 --- a/include/tatami/sparse/FragmentedSparseMatrix.hpp +++ b/include/tatami/sparse/FragmentedSparseMatrix.hpp @@ -17,8 +17,6 @@ * @file FragmentedSparseMatrix.hpp * * @brief Fragmented sparse matrix representation. - * - * `typedef`s are provided for the usual row and column formats. */ namespace tatami { @@ -381,16 +379,14 @@ struct SecondaryMyopicIndexSparse : public MyopicSparseExtractor * @endcond */ - /** * @brief Fragmented sparse matrix representation. * * In a fragmented sparse matrix, each element of the primary dimension has its own vector of indices and data values. * This differs from a compressed sparse matrix (see `CompressedSparseMatrix`) where the index/value vectors are concatenated across all elements. * For row sparse matrices, the rows are the primary dimension, while for column sparse matrices, the columns are the primary dimension. + * This representation is equivalent to SciPy's list-of-lists sparse matrix (Python), or SparseArray's SVT_SparseMatrix class (R/Bioconductor).
* - * @tparam row_ Whether this is a row sparse representation. - * If `false`, a column sparse representation is assumed instead. * @tparam Value_ Type of the matrix values. * @tparam Index_ Type of the row/column indices. * @tparam ValueVectorStorage_ Vector class used to store the matrix value vectors. @@ -405,7 +401,6 @@ struct SecondaryMyopicIndexSparse : public MyopicSparseExtractor * The inner vector does not necessarily have to contain `Index_`, as long as the type is convertible to `Index_`. */ template< - bool row_, typename Value_, typename Index_, class ValueVectorStorage_ = std::vector >, @@ -417,22 +412,24 @@ class FragmentedSparseMatrix : public Matrix { * @param nr Number of rows. * @param nc Number of columns. * @param vals Vector of vectors of non-zero elements. - * @param idx Vector of vectors of row indices (if `ROW=false`) or column indices (if `ROW=true`) for the non-zero elements. + * @param idx Vector of vectors of row indices (if `row = false`) or column indices (if `row = true`) for the non-zero elements. + * @param row Whether this is a row sparse representation. + * If `false`, a column sparse representation is assumed instead. * @param check Should the input vectors be checked for validity? * - * If `check=true`, the constructor will check that `vals` and `idx` have the same length that is equal to the number of rows (for `row_ = true`) or columns (otherwise); + * If `check=true`, the constructor will check that `vals` and `idx` have the same length that is equal to the number of rows (for `row = true`) or columns (otherwise); * that corresponding elements of `vals` and `idx` also have the same length; - * and that each `idx` is ordered and contains non-negative values less than `nc` (for `row_ = true`) or `nr` (for `row_ = false`). + * and that each `idx` is ordered and contains non-negative values less than `nc` (for `row = true`) or `nr` (for `row = false`). 
*/ - FragmentedSparseMatrix(Index_ nr, Index_ nc, ValueVectorStorage_ vals, IndexVectorStorage_ idx, bool check=true) : - nrows(nr), ncols(nc), values(std::move(vals)), indices(std::move(idx)) + FragmentedSparseMatrix(Index_ nr, Index_ nc, ValueVectorStorage_ vals, IndexVectorStorage_ idx, bool row, bool check = true) : + nrows(nr), ncols(nc), values(std::move(vals)), indices(std::move(idx)), row_based(row) { if (check) { if (values.size() != indices.size()) { throw std::runtime_error("'values' and 'indices' should be of the same length"); } - if (row_) { + if (row_based) { if (indices.size() != static_cast(nrows)) { throw std::runtime_error("length of 'indices' should be equal to number of rows'"); } @@ -442,7 +439,7 @@ class FragmentedSparseMatrix : public Matrix { } } - ElementType > max_index = (row_ ? ncols : nrows); + ElementType > max_index = (row_based ? ncols : nrows); for (size_t i = 0, end = indices.size(); i < end; ++i) { const auto& curv = values[i]; const auto& curi = indices[i]; @@ -452,11 +449,7 @@ class FragmentedSparseMatrix : public Matrix { for (auto x : curi) { if (x < 0 || x >= max_index) { - if constexpr(row_) { - throw std::runtime_error("'indices' should contain non-negative integers less than the number of rows"); - } else { - throw std::runtime_error("'indices' should contain non-negative integers less than the number of columns"); - } + throw std::runtime_error("'indices' should contain non-negative integers less than the number of " + (row_based ? 
std::string("columns") : std::string("rows"))); } } @@ -473,6 +466,7 @@ class FragmentedSparseMatrix : public Matrix { Index_ nrows, ncols; ValueVectorStorage_ values; IndexVectorStorage_ indices; + bool row_based; public: Index_ nrow() const { return nrows; } @@ -483,12 +477,9 @@ class FragmentedSparseMatrix : public Matrix { double sparse_proportion() const { return 1; } - /** - * @return `true` if `row_ = true` (for `FragmentedSparseRowMatrix` objects), otherwise returns `false` (for `FragmentedSparseColumnMatrix` objects). - */ - bool prefer_rows() const { return row_; } + bool prefer_rows() const { return row_based; } - double prefer_rows_proportion() const { return static_cast(row_); } + double prefer_rows_proportion() const { return static_cast(row_based); } bool uses_oracle(bool) const { return false; } @@ -502,7 +493,7 @@ class FragmentedSparseMatrix : public Matrix { private: Index_ secondary() const { - if constexpr(row_) { + if (row_based) { return ncols; } else { return nrows; @@ -514,7 +505,7 @@ class FragmentedSparseMatrix : public Matrix { *****************************/ private: std::unique_ptr > dense(bool row, const Options&) const { - if (row_ == row) { + if (row_based == row) { return std::make_unique >(values, indices, secondary()); } else { return std::make_unique >(values, indices, secondary()); @@ -522,7 +513,7 @@ class FragmentedSparseMatrix : public Matrix { } std::unique_ptr > dense(bool row, Index_ block_start, Index_ block_end, const Options&) const { - if (row_ == row) { + if (row_based == row) { return std::make_unique >(values, indices, secondary(), block_start, block_end); } else { return std::make_unique >(values, indices, secondary(), block_start, block_end); @@ -530,7 +521,7 @@ class FragmentedSparseMatrix : public Matrix { } std::unique_ptr > dense(bool row, VectorPtr subset_ptr, const Options&) const { - if (row_ == row) { + if (row_based == row) { return std::make_unique >(values, indices, secondary(), std::move(subset_ptr)); 
} else { return std::make_unique >(values, indices, secondary(), std::move(subset_ptr)); @@ -542,7 +533,7 @@ class FragmentedSparseMatrix : public Matrix { ******************************/ private: std::unique_ptr > sparse(bool row, const Options& opt) const { - if (row_ == row) { + if (row_based == row) { return std::make_unique >(values, indices, secondary(), opt); } else { return std::make_unique >(values, indices, secondary(), opt); @@ -550,7 +541,7 @@ class FragmentedSparseMatrix : public Matrix { } std::unique_ptr > sparse(bool row, Index_ block_start, Index_ block_end, const Options& opt) const { - if (row_ == row) { + if (row_based == row) { return std::make_unique >(values, indices, secondary(), block_start, block_end, opt); } else { return std::make_unique >(values, indices, secondary(), block_start, block_end, opt); @@ -558,7 +549,7 @@ class FragmentedSparseMatrix : public Matrix { } std::unique_ptr > sparse(bool row, VectorPtr subset_ptr, const Options& opt) const { - if (row_ == row) { + if (row_based == row) { return std::make_unique >(values, indices, secondary(), std::move(subset_ptr), opt); } else { return std::make_unique >(values, indices, secondary(), std::move(subset_ptr), opt); @@ -603,14 +594,37 @@ class FragmentedSparseMatrix : public Matrix { * See `tatami::FragmentedSparseMatrix` for details on the template parameters. */ template >, class IndexVectorStorage_ = std::vector > > -using FragmentedSparseColumnMatrix = FragmentedSparseMatrix; +class FragmentedSparseColumnMatrix : public FragmentedSparseMatrix { +public: + /** + * @param nr Number of rows. + * @param nc Number of columns. + * @param vals Vector of vectors of non-zero elements. + * @param idx Vector of vectors of row indices for the non-zero elements. + * @param check Should the input vectors be checked for validity? 
+ */ + FragmentedSparseColumnMatrix(Index_ nr, Index_ nc, ValueVectorStorage_ vals, IndexVectorStorage_ idx, bool check = true) : + FragmentedSparseMatrix(nr, nc, std::move(vals), std::move(idx), false, check) {} +}; /** * Fragmented sparse row matrix. * See `tatami::FragmentedSparseMatrix` for details on the template parameters. */ template >, class IndexVectorStorage_ = std::vector > > -using FragmentedSparseRowMatrix = FragmentedSparseMatrix; +class FragmentedSparseRowMatrix : public FragmentedSparseMatrix { +public: + /** + * @param nr Number of rows. + * @param nc Number of columns. + * @param vals Vector of vectors of non-zero elements. + * @param idx Vector of vectors of column indices for the non-zero elements. + * @param check Should the input vectors be checked for validity? + */ + FragmentedSparseRowMatrix(Index_ nr, Index_ nc, ValueVectorStorage_ vals, IndexVectorStorage_ idx, bool check = true) : + FragmentedSparseMatrix(nr, nc, std::move(vals), std::move(idx), true, check) {} +}; + } diff --git a/include/tatami/sparse/convert_to_compressed_sparse.hpp b/include/tatami/sparse/convert_to_compressed_sparse.hpp index 9afd84f3..fa09fc00 100644 --- a/include/tatami/sparse/convert_to_compressed_sparse.hpp +++ b/include/tatami/sparse/convert_to_compressed_sparse.hpp @@ -44,36 +44,34 @@ struct CompressedSparseContents { }; /** - * @tparam row_ Whether to retrieve contents by row. - * @tparam Value_ Type of data values in the output interface. - * @tparam Index_ Integer type for the indices in the output interface. * @tparam StoredValue_ Type of data values to be stored in the output. * @tparam StoredIndex_ Integer type for storing the indices in the output. * @tparam InputValue_ Type of data values in the input interface. * @tparam InputIndex_ Integer type for indices in the input interface. * * @param incoming Pointer to a `tatami::Matrix`. + * @param row Whether to retrieve the contents of `incoming` by row, i.e., the output is a compressed sparse row matrix. 
* @param two_pass Whether to perform the retrieval in two passes. * This requires another pass through `incoming` but is more memory-efficient. * @param threads Number of threads to use. * - * @return Contents of the sparse matrix in compressed form, see `FragmentedSparseContents`. + * @return Contents of the sparse matrix in compressed form, see `CompressedSparseContents`. */ -template -CompressedSparseContents retrieve_compressed_sparse_contents(const Matrix* incoming, bool two_pass, int threads = 1) { - CompressedSparseContents output; +template +CompressedSparseContents retrieve_compressed_sparse_contents(const Matrix* incoming, bool row, bool two_pass, int threads = 1) { + CompressedSparseContents output; auto& output_v = output.value; auto& output_i = output.index; auto& output_p = output.pointers; InputIndex_ NR = incoming->nrow(); InputIndex_ NC = incoming->ncol(); - InputIndex_ primary = (row_ ? NR : NC); - InputIndex_ secondary = (row_ ? NC : NR); + InputIndex_ primary = (row ? NR : NC); + InputIndex_ secondary = (row ? NC : NR); if (!two_pass) { // Doing a single fragmented run and then concatenating everything together. - auto frag = retrieve_fragmented_sparse_contents(incoming, threads); + auto frag = retrieve_fragmented_sparse_contents(incoming, row, threads); const auto& store_v = frag.value; const auto& store_i = frag.index; @@ -89,7 +87,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con output_i.insert(output_i.end(), store_i[p].begin(), store_i[p].end()); } - } else if (row_ == incoming->prefer_rows()) { + } else if (row == incoming->prefer_rows()) { // First pass to figure out how many non-zeros there are. 
output_p.resize(static_cast(primary) + 1); @@ -100,7 +98,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con opt.sparse_ordered_index = false; parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { - auto wrk = consecutive_extractor(incoming, row_, start, length, opt); + auto wrk = consecutive_extractor(incoming, row, start, length, opt); for (InputIndex_ x = 0; x < length; ++x) { auto range = wrk->fetch(NULL, NULL); output_p[start + x + 1] = range.number; @@ -110,7 +108,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con } else { parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(secondary); - auto wrk = consecutive_extractor(incoming, row_, start, length); + auto wrk = consecutive_extractor(incoming, row, start, length); for (InputIndex_ p = start, pe = start + length; p < pe; ++p) { auto ptr = wrk->fetch(buffer_v.data()); size_t count = 0; @@ -136,7 +134,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(secondary); std::vector buffer_i(secondary); - auto wrk = consecutive_extractor(incoming, row_, start, length, opt); + auto wrk = consecutive_extractor(incoming, row, start, length, opt); for (InputIndex_ p = start, pe = start + length; p < pe; ++p) { // Resist the urge to `fetch()` straight into 'output_p' @@ -154,7 +152,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con } else { parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(secondary); - auto wrk = consecutive_extractor(incoming, row_, start, length); + auto wrk = consecutive_extractor(incoming, row, start, length); for (InputIndex_ p = start, pe = start + length; p < pe; ++p) { auto ptr = wrk->fetch(buffer_v.data()); @@ -184,7 +182,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con parallelize([&](size_t t, InputIndex_ 
start, InputIndex_ length) -> void { std::vector buffer_i(primary); - auto wrk = consecutive_extractor(incoming, !row_, start, length, opt); + auto wrk = consecutive_extractor(incoming, !row, start, length, opt); auto& my_counts = nz_counts[t]; for (InputIndex_ x = 0; x < length; ++x) { @@ -197,7 +195,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con } else { parallelize([&](size_t t, InputIndex_ start, InputIndex_ length) -> void { - auto wrk = consecutive_extractor(incoming, !row_, start, length); + auto wrk = consecutive_extractor(incoming, !row, start, length); std::vector buffer_v(primary); auto& my_counts = nz_counts[t]; @@ -236,7 +234,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(length); std::vector buffer_i(length); - auto wrk = consecutive_extractor(incoming, !row_, static_cast(0), secondary, start, length, opt); + auto wrk = consecutive_extractor(incoming, !row, static_cast(0), secondary, start, length, opt); std::vector offset_copy(output_p.begin() + start, output_p.begin() + start + length); for (InputIndex_ x = 0; x < secondary; ++x) { @@ -253,7 +251,7 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con } else { parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(length); - auto wrk = consecutive_extractor(incoming, !row_, static_cast(0), secondary, start, length); + auto wrk = consecutive_extractor(incoming, !row, static_cast(0), secondary, start, length); std::vector offset_copy(output_p.begin() + start, output_p.begin() + start + length); for (InputIndex_ x = 0; x < secondary; ++x) { @@ -275,7 +273,6 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con } /** - * @tparam row_ Whether to return a compressed sparse row matrix. * @tparam Value_ Type of data values in the output interface. 
* @tparam Index_ Integer type for the indices in the output interface. * @tparam StoredValue_ Type of data values to be stored in the output. @@ -284,13 +281,14 @@ CompressedSparseContents retrieve_compressed_sparse_contents(con * @tparam InputIndex_ Integer type for indices in the input interface. * * @param incoming Pointer to a `tatami::Matrix`, possibly containing delayed operations. + * @param row Whether to return a compressed sparse row matrix. * @param two_pass Whether to use a two-pass strategy that reduces peak memory usage at the cost of speed. * @param threads Number of threads to use. * * @return A pointer to a new `tatami::CompressedSparseMatrix`, with the same dimensions and type as the matrix referenced by `incoming`. - * If `row_ = true`, the matrix is compressed sparse row, otherwise it is compressed sparse column. + * If `row = true`, the matrix is in compressed sparse row format, otherwise it is compressed sparse column. */ -template -std::shared_ptr > convert_to_compressed_sparse(const Matrix* incoming, bool two_pass = false, int threads = 1) { - auto comp = retrieve_compressed_sparse_contents(incoming, two_pass, threads); +std::shared_ptr > convert_to_compressed_sparse(const Matrix* incoming, bool row, bool two_pass = false, int threads = 1) { + auto comp = retrieve_compressed_sparse_contents(incoming, row, two_pass, threads); return std::shared_ptr >( new CompressedSparseMatrix< - row_, Value_, Index_, std::vector, @@ -314,47 +311,28 @@ std::shared_ptr > convert_to_compressed_sparse(const Matr std::move(comp.value), std::move(comp.index), std::move(comp.pointers), + row, false // no need for checks, as we guarantee correctness. ) ); } /** - * This overload makes it easier to control the desired output order when it is not known at compile time. - * - * @tparam Value_ Type of data values in the output interface. - * @tparam Index_ Integer type for the indices in the output interface. 
- * @tparam StoredValue_ Type of data values to be stored in the output. - * @tparam StoredIndex_ Integer type for storing the indices in the output. - * @tparam InputValue_ Type of data values in the input interface. - * @tparam InputIndex_ Integer type for indices in the input interface. - * - * @param incoming Pointer to a `tatami::Matrix`. - * @param order Ordering of values in the output matrix - compressed sparse row (0) or column (1). - * If set to -1, the ordering is chosen based on `tatami::Matrix::prefer_rows()`. - * @param two_pass Whether to use a two-pass strategy that reduces peak memory usage at the cost of speed. - * @param threads Number of threads to use. - * - * @return A pointer to a new `tatami::CompressedSparseMatrix`, with the same dimensions and type as the matrix referenced by `incoming`. + * @cond */ -template < - typename Value_ = double, - typename Index_ = int, - typename StoredValue_ = Value_, - typename StoredIndex_ = Index_, - typename InputValue_, - typename InputIndex_ -> -std::shared_ptr > convert_to_compressed_sparse(const Matrix* incoming, int order, bool two_pass = false, int threads = 1) { - if (order < 0) { - order = static_cast(!incoming->prefer_rows()); - } - if (order == 0) { - return convert_to_compressed_sparse(incoming, two_pass, threads); - } else { - return convert_to_compressed_sparse(incoming, two_pass, threads); - } +// Backwards compatibility.
+template +CompressedSparseContents retrieve_compressed_sparse_contents(const Matrix* incoming, bool two_pass, int threads = 1) { + return retrieve_compressed_sparse_contents(incoming, row_, two_pass, threads); +} + +template +std::shared_ptr > convert_to_compressed_sparse(const Matrix* incoming, bool two_pass = false, int threads = 1) { + return convert_to_compressed_sparse(incoming, row_, two_pass, threads); } +/** + * @endcond + */ } diff --git a/include/tatami/sparse/convert_to_fragmented_sparse.hpp b/include/tatami/sparse/convert_to_fragmented_sparse.hpp index e6689066..3a25679c 100644 --- a/include/tatami/sparse/convert_to_fragmented_sparse.hpp +++ b/include/tatami/sparse/convert_to_fragmented_sparse.hpp @@ -49,34 +49,34 @@ struct FragmentedSparseContents { }; /** - * @tparam row_ Whether to retrieve contents by row. - * @tparam Value_ Type of data values in the output interface. - * @tparam Index_ Integer type for the indices in the output interface. + * @tparam StoredValue_ Type of data values to be stored in the output. + * @tparam StoredIndex_ Integer type for storing the indices in the output. * @tparam InputValue_ Type of data values in the input interface. * @tparam InputIndex_ Integer type for indices in the input interface. * * @param incoming Pointer to a `tatami::Matrix`. + * @param row Whether to retrieve the contents of `incoming` by row, i.e., the output is a fragmented sparse row matrix. * @param threads Number of threads to use. * * @return Contents of the sparse matrix in fragmented form, see `FragmentedSparseContents`. */ -template -FragmentedSparseContents retrieve_fragmented_sparse_contents(const Matrix* incoming, int threads = 1) { +template +FragmentedSparseContents retrieve_fragmented_sparse_contents(const Matrix* incoming, bool row, int threads = 1) { InputIndex_ NR = incoming->nrow(); InputIndex_ NC = incoming->ncol(); - InputIndex_ primary = (row_ ? NR : NC); - InputIndex_ secondary = (row_ ? NC : NR); + InputIndex_ primary = (row ? 
NR : NC); + InputIndex_ secondary = (row ? NC : NR); - FragmentedSparseContents output(primary); + FragmentedSparseContents output(primary); auto& store_v = output.value; auto& store_i = output.index; - if (row_ == incoming->prefer_rows()) { + if (row == incoming->prefer_rows()) { if (incoming->sparse()) { parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(secondary); std::vector buffer_i(secondary); - auto wrk = consecutive_extractor(incoming, row_, start, length); + auto wrk = consecutive_extractor(incoming, row, start, length); for (InputIndex_ p = start, pe = start + length; p < pe; ++p) { auto range = wrk->fetch(buffer_v.data(), buffer_i.data()); @@ -97,7 +97,7 @@ FragmentedSparseContents retrieve_fragmented_sparse_contents(con } else { parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(secondary); - auto wrk = consecutive_extractor(incoming, row_, start, length); + auto wrk = consecutive_extractor(incoming, row, start, length); // Special conversion from dense to save ourselves from having to make // indices that we aren't really interested in. 
@@ -126,7 +126,7 @@ FragmentedSparseContents retrieve_fragmented_sparse_contents(con parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { std::vector buffer_v(primary); std::vector buffer_i(primary); - auto wrk = consecutive_extractor(incoming, !row_, static_cast(0), secondary, start, length); + auto wrk = consecutive_extractor(incoming, !row, static_cast(0), secondary, start, length); for (InputIndex_ x = 0; x < secondary; ++x) { auto range = wrk->fetch(buffer_v.data(), buffer_i.data()); @@ -140,8 +140,8 @@ FragmentedSparseContents retrieve_fragmented_sparse_contents(con }, primary, threads); } else { - parallelize([&](size_t, Index_ start, Index_ length) -> void { - auto wrk = consecutive_extractor(incoming, !row_, static_cast(0), secondary, start, length); + parallelize([&](size_t, InputIndex_ start, InputIndex_ length) -> void { + auto wrk = consecutive_extractor(incoming, !row, static_cast(0), secondary, start, length); std::vector buffer_v(length); for (InputIndex_ x = 0; x < secondary; ++x) { @@ -161,7 +161,6 @@ FragmentedSparseContents retrieve_fragmented_sparse_contents(con } /** - * @tparam row_ Whether to return a fragmented sparse row matrix. * @tparam Value_ Type of data values in the output interface. * @tparam Index_ Integer type for the indices in the output interface. * @tparam StoredValue_ Type of data values to be stored in the output. @@ -170,24 +169,17 @@ FragmentedSparseContents retrieve_fragmented_sparse_contents(con * @tparam InputIndex_ Integer type for indices in the input interface. * * @param incoming Pointer to a `tatami::Matrix`, possibly containing delayed operations. + * @param row Whether to return a fragmented sparse row matrix. * @param threads Number of threads to use. * * @return A pointer to a new `tatami::FragmentedSparseMatrix`, with the same dimensions and type as the matrix referenced by `incoming`. - * If `row_ = true`, the matrix is fragmented sparse row, otherwise it is fragmented sparse column. 
+ * If `row = true`, the matrix is in fragmented sparse row format, otherwise it is fragmented sparse column. */ -template -std::shared_ptr > convert_to_fragmented_sparse(const Matrix* incoming, int threads = 1) { - auto frag = retrieve_fragmented_sparse_contents(incoming, threads); +template +std::shared_ptr > convert_to_fragmented_sparse(const Matrix* incoming, bool row, int threads = 1) { + auto frag = retrieve_fragmented_sparse_contents(incoming, row, threads); return std::shared_ptr >( new FragmentedSparseMatrix< - row_, Value_, Index_, std::vector >, @@ -197,46 +189,28 @@ std::shared_ptr > convert_to_fragmented_sparse(const Matr incoming->ncol(), std::move(frag.value), std::move(frag.index), + row, false // no need for checks, as we guarantee correctness. ) ); } /** - * This overload makes it easier to control the desired output order when it is not known at compile time. - * - * @tparam Value_ Type of data values in the output interface. - * @tparam Index_ Integer type for the indices in the output interface. - * @tparam StoredValue_ Type of data values to be stored in the output. - * @tparam StoredIndex_ Integer type for storing the indices in the output. - * @tparam InputValue_ Type of data values in the input interface. - * @tparam InputIndex_ Integer type for indices in the input interface. - * - * @param incoming Pointer to a `tatami::Matrix`. - * @param order Ordering of values in the output matrix - fragmented sparse row (0) or column (1). - * If set to -1, the ordering is chosen based on `tatami::Matrix::prefer_rows()`. - * @param threads Number of threads to use. - * - * @return A pointer to a new `tatami::FragmentedSparseMatrix`, with the same dimensions and type as the matrix referenced by `incoming`. 
+ * @cond */ -template < - typename Value_ = double, - typename Index_ = int, - typename StoredValue_ = Value_, - typename StoredIndex_ = Index_, - typename InputValue_, - typename InputIndex_ -> -std::shared_ptr > convert_to_fragmented_sparse(const Matrix* incoming, int order, int threads = 1) { - if (order < 0) { - order = static_cast(!incoming->prefer_rows()); - } - if (order == 0) { - return convert_to_fragmented_sparse(incoming, threads); - } else { - return convert_to_fragmented_sparse(incoming, threads); - } +// Backwards compatibility. +template +FragmentedSparseContents retrieve_fragmented_sparse_contents(const Matrix* incoming, int threads = 1) { + return retrieve_fragmented_sparse_contents(incoming, row_, threads); +} + +template +std::shared_ptr > convert_to_fragmented_sparse(const Matrix* incoming, int threads = 1) { + return convert_to_fragmented_sparse(incoming, row_, threads); } +/** + * @endcond + */ } diff --git a/include/tatami/subset/DelayedSubset.hpp b/include/tatami/subset/DelayedSubset.hpp index 1ec2a33f..2d39fe61 100644 --- a/include/tatami/subset/DelayedSubset.hpp +++ b/include/tatami/subset/DelayedSubset.hpp @@ -369,32 +369,34 @@ struct ParallelSparse : public SparseExtractor { * @brief delayed subsetting of a matrix with general indices. * * Implements delayed subsetting (i.e., slicing) on the rows or columns of a matrix, given a vector of arbitrary indices. - * This operation is "delayed" in that it is only evaluated on request, e.g., with `row()` or friends. + * This operation is "delayed" in that it is only evaluated when rows or columns are requested from the matrix. * - * @tparam margin_ Dimension along which the subsetting is to occur. - * If 0, the subset is applied to the rows; if 1, the subset is applied to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Type of index value. * @tparam IndexStorage_ Vector containing the subset indices.
* Any class implementing `[`, `size()`, `begin()` and `end()` can be used here. */ -template +template class DelayedSubset : public Matrix { public: /** * @param p Pointer to the underlying (pre-subset) matrix. - * @param idx Vector of 0-based indices to use for subsetting on the rows (if `margin_ = 0`) or columns (if `margin_ = 1`). + * @param idx Vector of 0-based indices to use for subsetting on the rows (if `row = true`) or columns (otherwise). * These may be duplicated and/or unsorted. + * @param row Whether to apply the subset to the rows. + * If false, the subset is applied to the columns. */ - DelayedSubset(std::shared_ptr > p, IndexStorage_ idx) : mat(std::move(p)), indices(std::move(idx)) {} + DelayedSubset(std::shared_ptr > p, IndexStorage_ idx, bool row) : + mat(std::move(p)), indices(std::move(idx)), by_row(row) {} private: std::shared_ptr > mat; IndexStorage_ indices; + bool by_row; public: Index_ nrow() const { - if constexpr(margin_==0) { + if (by_row) { return indices.size(); } else { return mat->nrow(); @@ -402,7 +404,7 @@ class DelayedSubset : public Matrix { } Index_ ncol() const { - if constexpr(margin_==0) { + if (by_row) { return mat->ncol(); } else { return indices.size(); @@ -443,7 +445,7 @@ class DelayedSubset : public Matrix { private: template std::unique_ptr > populate_myopic_dense(bool row, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, false, std::forward(args)...); @@ -469,7 +471,7 @@ class DelayedSubset : public Matrix { private: template std::unique_ptr > populate_myopic_sparse(bool row, Args_&& ... 
args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, false, std::forward(args)...); @@ -495,7 +497,7 @@ class DelayedSubset : public Matrix { private: template std::unique_ptr > populate_oracular_dense(bool row, std::shared_ptr > oracle, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); @@ -521,7 +523,7 @@ class DelayedSubset : public Matrix { private: template std::unique_ptr > populate_oracular_sparse(bool row, std::shared_ptr > oracle, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); diff --git a/include/tatami/subset/DelayedSubsetBlock.hpp b/include/tatami/subset/DelayedSubsetBlock.hpp index 3c6515bb..e5cf4bdf 100644 --- a/include/tatami/subset/DelayedSubsetBlock.hpp +++ b/include/tatami/subset/DelayedSubsetBlock.hpp @@ -157,28 +157,30 @@ struct AcrossSparse : public SparseExtractor { * This is a specialized implementation that is more efficient than the `tatami::DelayedSubset` class. * This operation is "delayed" in that it is only evaluated on request, e.g., with `row()` or friends. * - * @tparam margin_ Dimension along which the subsetting is to occur. - * If 0, the subset is applied to the rows; if 1, the subset is applied to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Integer type for the row/column indices. 
*/ -template +template class DelayedSubsetBlock : public Matrix { public: /** * @param p Pointer to the underlying (pre-subset) matrix. - * @param s Index of the start of the block. This should be a row index if `margin_ = 0` and a column index otherwise. - * @param l Length of the block, in terms of the number of rows (if `margin_ = 0`) or columns (otherwise). + * @param s Index of the start of the block. This should be a row index if `row = true` and a column index otherwise. + * @param l Length of the block, in terms of the number of rows (if `row = true`) or columns (otherwise). + * @param r Whether to apply the subset to the rows. + * If false, the subset is applied to the columns. */ - DelayedSubsetBlock(std::shared_ptr > p, Index_ s, Index_ l) : mat(std::move(p)), block_start(s), block_length(l) {} + DelayedSubsetBlock(std::shared_ptr > p, Index_ s, Index_ l, bool r) : + mat(std::move(p)), block_start(s), block_length(l), by_row(r) {} private: std::shared_ptr > mat; Index_ block_start, block_length; + bool by_row; public: Index_ nrow() const { - if constexpr(margin_==0) { + if (by_row) { return block_length; } else { return mat->nrow(); @@ -186,7 +188,7 @@ class DelayedSubsetBlock : public Matrix { } Index_ ncol() const { - if constexpr(margin_==0) { + if (by_row) { return mat->ncol(); } else { return block_length; @@ -227,7 +229,7 @@ class DelayedSubsetBlock : public Matrix { private: template std::unique_ptr > dense_internal(bool row, Args_&&... args) const { - if (row != (margin_ == 0)) { + if (row != by_row) { return std::make_unique >(mat.get(), block_start, block_length, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), block_start, row, std::forward(args)...); @@ -253,7 +255,7 @@ class DelayedSubsetBlock : public Matrix { private: template std::unique_ptr > sparse_internal(bool row, Args_&&... 
args) const { - if (row != (margin_ == 0)) { + if (row != by_row) { return std::make_unique >(mat.get(), block_start, block_length, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), block_start, row, std::forward(args)...); @@ -309,28 +311,44 @@ class DelayedSubsetBlock : public Matrix { /** * A `make_*` helper function to enable partial template deduction of supplied types. * - * @tparam margin_ Dimension along which the addition is to occur. - * If 0, the subset is applied to the rows; if 1, the subset is applied to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Integer type for the row/column indices. * * @param p Pointer to the underlying (pre-subset) `Matrix`. - * @param f Index of the start of the block. This should be a row index if `margin_ = 0` and a column index otherwise. + * @param f Index of the start of the block. This should be a row index if `r = true` and a column index otherwise. * @param l Index of the one-past-the-end of the block. + * @param r Whether to apply the subset to the rows. + * If false, the subset is applied to the columns. * * @return A pointer to a `DelayedSubsetBlock` instance. 
*/ -template -std::shared_ptr > make_DelayedSubsetBlock(std::shared_ptr > p, Index_ f, Index_ l) { - return std::shared_ptr >(new DelayedSubsetBlock(std::move(p), f, l)); +template +std::shared_ptr > make_DelayedSubsetBlock(std::shared_ptr > p, Index_ f, Index_ l, bool r) { + return std::shared_ptr >(new DelayedSubsetBlock(std::move(p), f, l, r)); } /** * @cond */ +template +std::shared_ptr > make_DelayedSubsetBlock(std::shared_ptr > p, Index_ f, Index_ l, bool r) { + return std::shared_ptr >(new DelayedSubsetBlock(std::move(p), f, l, r)); +} +/** + * @endcond + */ + +/** + * @cond + */ +template +std::shared_ptr > make_DelayedSubsetBlock(std::shared_ptr > p, Index_ f, Index_ l) { + return make_DelayedSubsetBlock(std::move(p), f, l, margin_ == 0); +} + template std::shared_ptr > make_DelayedSubsetBlock(std::shared_ptr > p, Index_ f, Index_ l) { - return std::shared_ptr >(new DelayedSubsetBlock(std::move(p), f, l)); + return make_DelayedSubsetBlock(std::move(p), f, l, margin_ == 0); } /** * @endcond diff --git a/include/tatami/subset/DelayedSubsetSorted.hpp b/include/tatami/subset/DelayedSubsetSorted.hpp index 1b5c43a6..e466fde8 100644 --- a/include/tatami/subset/DelayedSubsetSorted.hpp +++ b/include/tatami/subset/DelayedSubsetSorted.hpp @@ -315,25 +315,27 @@ struct ParallelIndexSparse : public SparseExtractor, pu * @brief Delayed subsetting of a matrix with sorted indices. * * Implements delayed subsetting (i.e., slicing) on the rows or columns of a matrix, given a vector of sorted indices. - * This operation is "delayed" in that it is only evaluated on request, e.g., with `row()` or friends. + * This operation is "delayed" in that it is only evaluated when rows or columns are requested from the matrix. * - * @tparam margin_ Dimension along which the subsetting is to occur. - * If 0, the subset is applied to the rows; if 1, the subset is applied to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Type of index value. 
* @tparam IndexStorage_ Vector containing the subset indices. * Any class implementing `[`, `size()`, `begin()` and `end()` can be used here. */ -template +template class DelayedSubsetSorted : public Matrix { public: /** * @param p Pointer to the underlying (pre-subset) matrix. - * @param idx Vector of 0-based indices to use for subsetting on the rows (if `margin_ = 0`) or columns (if `margin_ = 1`). + * @param idx Vector of 0-based indices to use for subsetting on the rows (if `row = true`) or columns (otherwise). * This should be sorted, but may be duplicated. + * @param row Whether to apply the subset to the rows. + * If false, the subset is applied to the columns. * @param check Whether to check `idx` for sorted values. */ - DelayedSubsetSorted(std::shared_ptr > p, IndexStorage_ idx, bool check = true) : mat(std::move(p)), indices(std::move(idx)) { + DelayedSubsetSorted(std::shared_ptr > p, IndexStorage_ idx, bool row, bool check = true) : + mat(std::move(p)), indices(std::move(idx)), by_row(row) + { if (check) { for (Index_ i = 1, end = indices.size(); i < end; ++i) { if (indices[i] < indices[i-1]) { @@ -346,9 +348,10 @@ class DelayedSubsetSorted : public Matrix { private: std::shared_ptr > mat; IndexStorage_ indices; + bool by_row; Index_ get_mapping_dim() const { - if constexpr(margin_ == 0) { + if (by_row) { return mat->nrow(); } else { return mat->ncol(); @@ -357,7 +360,7 @@ class DelayedSubsetSorted : public Matrix { public: Index_ nrow() const { - if constexpr(margin_==0) { + if (by_row) { return indices.size(); } else { return mat->nrow(); @@ -365,7 +368,7 @@ class DelayedSubsetSorted : public Matrix { } Index_ ncol() const { - if constexpr(margin_==0) { + if (by_row) { return mat->ncol(); } else { return indices.size(); @@ -406,7 +409,7 @@ class DelayedSubsetSorted : public Matrix { private: template std::unique_ptr > populate_myopic_dense(bool row, Args_&& ... 
args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, false, std::forward(args)...); @@ -443,7 +446,7 @@ class DelayedSubsetSorted : public Matrix { template std::unique_ptr > populate_myopic_sparse(bool row, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return populate_sparse(row, false, std::forward(args)...); @@ -469,7 +472,7 @@ class DelayedSubsetSorted : public Matrix { private: template std::unique_ptr > populate_oracular_dense(bool row, std::shared_ptr > oracle, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); @@ -495,7 +498,7 @@ class DelayedSubsetSorted : public Matrix { private: template std::unique_ptr > populate_oracular_sparse(bool row, std::shared_ptr > oracle, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return populate_sparse(row, std::move(oracle), std::forward(args)...); diff --git a/include/tatami/subset/DelayedSubsetSortedUnique.hpp b/include/tatami/subset/DelayedSubsetSortedUnique.hpp index 407d8aef..4514bd5a 100644 --- a/include/tatami/subset/DelayedSubsetSortedUnique.hpp +++ b/include/tatami/subset/DelayedSubsetSortedUnique.hpp @@ -109,25 +109,25 @@ struct ParallelSparse : public SparseExtractor { * @brief Delayed subsetting of a matrix with sorted, unique indices. * * Implements delayed subsetting (i.e., slicing) on the rows or columns of a matrix, given a vector of sorted and unique indices. 
- * This operation is "delayed" in that it is only evaluated on request, e.g., with `row()` or friends. + * This operation is "delayed" in that it is only evaluated when rows or columns are requested from the matrix. * - * @tparam margin_ Dimension along which the subsetting is to occur. - * If 0, the subset is applied to the rows; if 1, the subset is applied to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Type of index value. * @tparam IndexStorage_ Vector containing the subset indices. */ -template +template class DelayedSubsetSortedUnique : public Matrix { public: /** * @param p Pointer to the underlying (pre-subset) matrix. - * @param idx Vector of 0-based indices to use for subsetting on the rows (if `margin_ = 0`) or columns (if `margin_ = 1`). + * @param idx Vector of 0-based indices to use for subsetting on the rows (if `row = true`) or columns (otherwise). * This should be sorted and unique. + * @param row Whether to apply the subset to the rows. + * If false, the subset is applied to the columns. * @param check Whether to check `idx` for sorted and unique values. */ - DelayedSubsetSortedUnique(std::shared_ptr > p, IndexStorage_ idx, bool check = true) : - mat(std::move(p)), indices(std::move(idx)) + DelayedSubsetSortedUnique(std::shared_ptr > p, IndexStorage_ idx, bool row, bool check = true) : + mat(std::move(p)), indices(std::move(idx)), by_row(row) { if (check) { for (Index_ i = 1, end = indices.size(); i < end; ++i) { @@ -137,7 +137,7 @@ class DelayedSubsetSortedUnique : public Matrix { } } - Index_ mapping_dim = margin_ == 0 ? mat->nrow() : mat->ncol(); + Index_ mapping_dim = by_row ? 
mat->nrow() : mat->ncol(); mapping_single.resize(mapping_dim); for (Index_ i = 0, end = indices.size(); i < end; ++i) { mapping_single[indices[i]] = i; @@ -147,11 +147,12 @@ class DelayedSubsetSortedUnique : public Matrix { private: std::shared_ptr > mat; IndexStorage_ indices; + bool by_row; std::vector mapping_single; public: Index_ nrow() const { - if constexpr(margin_==0) { + if (by_row) { return indices.size(); } else { return mat->nrow(); @@ -159,7 +160,7 @@ class DelayedSubsetSortedUnique : public Matrix { } Index_ ncol() const { - if constexpr(margin_==0) { + if (by_row) { return mat->ncol(); } else { return indices.size(); @@ -200,7 +201,7 @@ class DelayedSubsetSortedUnique : public Matrix { private: template std::unique_ptr > populate_myopic_dense(bool row, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, false, std::forward(args)...); @@ -226,7 +227,7 @@ class DelayedSubsetSortedUnique : public Matrix { private: template std::unique_ptr > populate_myopic_sparse(bool row, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, mapping_single, row, false, std::forward(args)...); @@ -252,7 +253,7 @@ class DelayedSubsetSortedUnique : public Matrix { private: template std::unique_ptr > populate_oracular_dense(bool row, std::shared_ptr > oracle, Args_&& ... 
args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); @@ -278,7 +279,7 @@ class DelayedSubsetSortedUnique : public Matrix { private: template std::unique_ptr > populate_oracular_sparse(bool row, std::shared_ptr > oracle, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, mapping_single, row, std::move(oracle), std::forward(args)...); diff --git a/include/tatami/subset/DelayedSubsetUnique.hpp b/include/tatami/subset/DelayedSubsetUnique.hpp index 2d2ab042..84e38d21 100644 --- a/include/tatami/subset/DelayedSubsetUnique.hpp +++ b/include/tatami/subset/DelayedSubsetUnique.hpp @@ -227,26 +227,28 @@ struct ParallelSparse : public SparseExtractor { * @brief Delayed subsetting of a matrix with unique indices. * * Implements delayed subsetting (i.e., slicing) on the rows or columns of a matrix, given a vector of unique indices. - * This operation is "delayed" in that it is only evaluated on request, e.g., with `row()` or friends. + * This operation is "delayed" in that it is only evaluated when rows or columns are requested from the matrix. * - * @tparam margin_ Dimension along which the subsetting is to occur. - * If 0, the subset is applied to the rows; if 1, the subset is applied to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Integer type of index value. * @tparam IndexStorage_ Vector containing the subset indices. * Any class implementing `[`, `size()`, `begin()` and `end()` can be used here. */ -template +template class DelayedSubsetUnique : public Matrix { public: /** * @param p Pointer to the underlying (pre-subset) matrix. 
- * @param idx Vector of 0-based indices to use for subsetting on the rows (if `margin_ = 0`) or columns (if `margin_ = 1`). + * @param idx Vector of 0-based indices to use for subsetting on the rows (if `row = true`) or columns (otherwise). * This should be unique, but may be unsorted. + * @param row Whether to apply the subset to the rows. + * If false, the subset is applied to the columns. * @param check Whether to check `idx` for unique values. */ - DelayedSubsetUnique(std::shared_ptr > p, IndexStorage_ idx, bool check = true) : mat(std::move(p)), indices(std::move(idx)) { - Index_ fulldim = margin_ == 0 ? mat->nrow() : mat->ncol(); + DelayedSubsetUnique(std::shared_ptr > p, IndexStorage_ idx, bool row, bool check = true) : + mat(std::move(p)), indices(std::move(idx)), by_row(row) + { + Index_ fulldim = by_row ? mat->nrow() : mat->ncol(); if (check) { std::vector checks(fulldim); @@ -268,11 +270,12 @@ class DelayedSubsetUnique : public Matrix { private: std::shared_ptr > mat; IndexStorage_ indices; + bool by_row; std::vector mapping_single; public: Index_ nrow() const { - if constexpr(margin_==0) { + if (by_row) { return indices.size(); } else { return mat->nrow(); @@ -280,7 +283,7 @@ class DelayedSubsetUnique : public Matrix { } Index_ ncol() const { - if constexpr(margin_==0) { + if (by_row) { return mat->ncol(); } else { return indices.size(); @@ -321,7 +324,7 @@ class DelayedSubsetUnique : public Matrix { private: template std::unique_ptr > populate_myopic_dense(bool row, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, false, std::forward(args)...); @@ -347,7 +350,7 @@ class DelayedSubsetUnique : public Matrix { private: template std::unique_ptr > populate_myopic_sparse(bool row, Args_&& ... 
args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, mapping_single, row, false, std::forward(args)...); @@ -373,7 +376,7 @@ class DelayedSubsetUnique : public Matrix { private: template std::unique_ptr > populate_oracular_dense(bool row, std::shared_ptr > oracle, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); @@ -399,7 +402,7 @@ class DelayedSubsetUnique : public Matrix { private: template std::unique_ptr > populate_oracular_sparse(bool row, std::shared_ptr > oracle, Args_&& ... args) const { - if (row == (margin_ == 0)) { + if (row == by_row) { return std::make_unique >(mat.get(), indices, row, std::move(oracle), std::forward(args)...); } else { return std::make_unique >(mat.get(), indices, mapping_single, row, std::move(oracle), std::forward(args)...); diff --git a/include/tatami/subset/make_DelayedSubset.hpp b/include/tatami/subset/make_DelayedSubset.hpp index 8ef87a61..22926633 100644 --- a/include/tatami/subset/make_DelayedSubset.hpp +++ b/include/tatami/subset/make_DelayedSubset.hpp @@ -23,8 +23,6 @@ namespace tatami { * A `make_*` helper function to enable partial template deduction of supplied types. * This will automatically dispatch to `DelayedSubsetSortedUnique`, `DelayedSubsetUnique`, `DelayedSubsetSorted` or `DelayedSubset`, depending on the values in `idx`. * - * @tparam margin_ Dimension along which the subsetting is to occur. - * If 0, the subset is applied to the rows; if 1, the subset is applied to the columns. * @tparam Value_ Type of matrix value. * @tparam Index_ Integer type of the row/column indices. 
* @tparam IndexStorage_ Vector containing the subset indices, to be automatically deduced. @@ -32,11 +30,13 @@ namespace tatami { * * @param p Pointer to a (possibly `const`) `Matrix`. * @param idx Instance of the index vector. + * @param row Whether to apply the subset to the rows. + * If false, the subset is applied to the columns. * * @return A pointer to a `DelayedSubset` instance. */ -template -std::shared_ptr > make_DelayedSubset(std::shared_ptr > p, IndexStorage_ idx) { +template +std::shared_ptr > make_DelayedSubset(std::shared_ptr > p, IndexStorage_ idx, bool row) { bool is_unsorted = false; for (Index_ i = 0, end = idx.size(); i < end; ++i) { if (i) { @@ -72,22 +72,22 @@ std::shared_ptr > make_DelayedSubset(std::shared_ptr >( - new DelayedSubsetBlock(std::move(p), start, idx.size()) + new DelayedSubsetBlock(std::move(p), start, idx.size(), row) ); } else { return std::shared_ptr >( - new DelayedSubsetSortedUnique(std::move(p), std::move(idx), false) + new DelayedSubsetSortedUnique(std::move(p), std::move(idx), row, false) ); } } else { return std::shared_ptr >( - new DelayedSubsetSorted(std::move(p), std::move(idx), false) + new DelayedSubsetSorted(std::move(p), std::move(idx), row, false) ); } } bool has_duplicates = false; - std::vector accumulated(margin_ == 0 ? p->nrow() : p->ncol()); + std::vector accumulated(row ? 
p->nrow() : p->ncol()); for (Index_ i = 0, end = idx.size(); i < end; ++i) { auto& found = accumulated[idx[i]]; if (found) { @@ -100,11 +100,11 @@ std::shared_ptr > make_DelayedSubset(std::shared_ptr >( - new DelayedSubsetUnique(std::move(p), std::move(idx), false) + new DelayedSubsetUnique(std::move(p), std::move(idx), row, false) ); } else { return std::shared_ptr >( - new DelayedSubset(std::move(p), std::move(idx)) + new DelayedSubset(std::move(p), std::move(idx), row) ); } } @@ -112,9 +112,26 @@ std::shared_ptr > make_DelayedSubset(std::shared_ptr +std::shared_ptr > make_DelayedSubset(std::shared_ptr > p, IndexStorage_ idx, bool row) { + return make_DelayedSubset(std::shared_ptr >(std::move(p)), std::move(idx), row); +} +/** + * @endcond + */ + +/** + * @cond + */ +// Back-compatibility only. +template +std::shared_ptr > make_DelayedSubset(std::shared_ptr > p, IndexStorage_ idx) { + return make_DelayedSubset(std::move(p), std::move(idx), margin_ == 0); +} + template std::shared_ptr > make_DelayedSubset(std::shared_ptr > p, IndexStorage_ idx) { - return make_DelayedSubset(std::shared_ptr >(std::move(p)), std::move(idx)); + return make_DelayedSubset(std::move(p), std::move(idx), margin_ == 0); } /** * @endcond diff --git a/tests/src/dense/DenseMatrix.cpp b/tests/src/dense/DenseMatrix.cpp index b31f00bf..6b336a0a 100644 --- a/tests/src/dense/DenseMatrix.cpp +++ b/tests/src/dense/DenseMatrix.cpp @@ -12,7 +12,7 @@ TEST(DenseMatrix, Basic) { double counter = -105; for (auto& i : contents) { i = counter++; } - tatami::DenseColumnMatrix mat(10, 20, contents); + tatami::DenseColumnMatrix mat(10, 20, contents); EXPECT_FALSE(mat.sparse()); EXPECT_EQ(mat.sparse_proportion(), 0); @@ -50,7 +50,7 @@ TEST(DenseMatrix, Basic) { TEST(DenseMatrix, Errors) { std::vector contents; tatami_test::throws_error([&]() { - tatami::DenseColumnMatrix mat(10, 20, contents); + tatami::DenseColumnMatrix mat(10, 20, contents); }, "length of 'values' should be equal"); } @@ -208,7 +208,7 @@ 
TEST(DenseMatrix, IndexTypeOverflow) { double counter = -105; for (auto& i : contents) { i = counter++; } - tatami::DenseColumnMatrix ref(100, 200, contents); + tatami::DenseColumnMatrix ref(100, 200, contents); tatami::DenseColumnMatrix limited(100, 200, contents); EXPECT_EQ(limited.nrow(), 100); @@ -287,7 +287,7 @@ TEST(DenseMatrix, DifferentValueType) { double counter = -105; for (auto& i : contents) { i = counter++; } - tatami::DenseColumnMatrix ref(100, 200, std::vector(contents.begin(), contents.end())); + tatami::DenseColumnMatrix ref(100, 200, std::vector(contents.begin(), contents.end())); tatami::DenseColumnMatrix vstore(100, 200, contents); EXPECT_EQ(vstore.nrow(), 100); diff --git a/tests/src/dense/convert_to_dense.cpp b/tests/src/dense/convert_to_dense.cpp index 52566edb..efd234c6 100644 --- a/tests/src/dense/convert_to_dense.cpp +++ b/tests/src/dense/convert_to_dense.cpp @@ -6,104 +6,55 @@ #include "tatami_test/tatami_test.hpp" -class ConvertToDenseTest : public ::testing::TestWithParam > { +class ConvertToDenseTest : public ::testing::TestWithParam > { protected: size_t NR, NC; - bool row; + bool from_row, to_row; int threads; template void assemble(const Param& param) { NR = std::get<0>(param); NC = std::get<1>(param); - row = std::get<2>(param); - threads = std::get<3>(param); + from_row = std::get<2>(param); + to_row = std::get<3>(param); + threads = std::get<4>(param); } }; -TEST_P(ConvertToDenseTest, FromDenseRowMajor) { +TEST_P(ConvertToDenseTest, FromDense) { assemble(GetParam()); auto vec = tatami_test::simulate_dense_vector(NR * NC); - auto mat = std::make_shared >(NR, NC, vec); + auto mat = std::make_shared >(NR, NC, vec, from_row); - auto converted = tatami::convert_to_dense(mat.get(), row, threads); - EXPECT_EQ(converted->prefer_rows(), row); + auto converted = tatami::convert_to_dense(mat.get(), to_row, threads); + EXPECT_EQ(converted->prefer_rows(), to_row); EXPECT_FALSE(converted->sparse()); 
tatami_test::test_simple_row_access(converted.get(), mat.get()); tatami_test::test_simple_column_access(converted.get(), mat.get()); - auto converted2 = tatami::convert_to_dense(mat.get(), row, threads); // works for a different type. - EXPECT_EQ(converted2->prefer_rows(), row); + auto converted2 = tatami::convert_to_dense(mat.get(), to_row, threads); // works for a different type. + EXPECT_EQ(converted2->prefer_rows(), to_row); EXPECT_FALSE(converted2->sparse()); + auto old = mat->dense_row(); + std::vector buffer(NC); auto wrk2 = converted2->dense_row(); for (size_t i = 0; i < NR; ++i) { - auto start = vec.begin() + i * NC; - std::vector expected2(start, start + NC); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected2); + auto ptr = old->fetch(i, buffer.data()); + std::vector expected(ptr, ptr + NC); + EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected); } } -TEST_P(ConvertToDenseTest, FromColumnRowMajor) { +TEST_P(ConvertToDenseTest, FromSparse) { assemble(GetParam()); - auto vec = tatami_test::simulate_dense_vector(NR * NC); - auto mat = std::make_shared >(NR, NC, vec); - - auto converted = tatami::convert_to_dense(mat.get(), row, threads); - EXPECT_EQ(converted->prefer_rows(), row); - EXPECT_FALSE(converted->sparse()); - - tatami_test::test_simple_row_access(converted.get(), mat.get()); - tatami_test::test_simple_column_access(converted.get(), mat.get()); - - auto converted2 = tatami::convert_to_dense(mat.get(), row, threads); // works for a different type. 
- EXPECT_EQ(converted2->prefer_rows(), row); - EXPECT_FALSE(converted2->sparse()); - - auto wrk2 = converted2->dense_column(); - for (size_t i = 0; i < NC; ++i) { - auto start = vec.begin() + i * NR; - std::vector expected2(start, start + NR); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NR), expected2); - } -} - -TEST_P(ConvertToDenseTest, Automatic) { - assemble(GetParam()); - auto vec = tatami_test::simulate_dense_vector(NR * NC); - - if (row) { - tatami::DenseMatrix mat(NR, NC, vec); - auto converted = tatami::convert_to_dense(&mat, -1, threads); - EXPECT_TRUE(converted->prefer_rows()); - tatami_test::test_simple_row_access(converted.get(), &mat); - } else { - tatami::DenseMatrix mat(NR, NC, vec); - auto converted = tatami::convert_to_dense(&mat, -1, threads); - EXPECT_FALSE(converted->prefer_rows()); - tatami_test::test_simple_column_access(converted.get(), &mat); - } -} - -TEST_P(ConvertToDenseTest, FromSparseRowMajor) { - assemble(GetParam()); - auto vec = tatami_test::simulate_sparse_compressed(NR, NC, 0.2); - tatami::CompressedSparseRowMatrix smat(NR, NC, std::move(vec.value), std::move(vec.index), std::move(vec.ptr)); - - auto converted = tatami::convert_to_dense(&smat, row, threads); - EXPECT_EQ(converted->prefer_rows(), row); - EXPECT_FALSE(converted->sparse()); - tatami_test::test_simple_row_access(converted.get(), &smat); - tatami_test::test_simple_column_access(converted.get(), &smat); -} - -TEST_P(ConvertToDenseTest, FromSparseColumnMajor) { - auto vec = tatami_test::simulate_sparse_compressed(NC, NR, 0.2); - tatami::CompressedSparseColumnMatrix smat(NR, NC, std::move(vec.value), std::move(vec.index), std::move(vec.ptr)); + auto vec = tatami_test::simulate_sparse_compressed((from_row ? NR : NC), (from_row ? 
NC : NR), 0.2); + tatami::CompressedSparseMatrix smat(NR, NC, std::move(vec.value), std::move(vec.index), std::move(vec.ptr), from_row); - auto converted = tatami::convert_to_dense(&smat, row, threads); - EXPECT_EQ(converted->prefer_rows(), row); + auto converted = tatami::convert_to_dense(&smat, to_row, threads); + EXPECT_EQ(converted->prefer_rows(), to_row); EXPECT_FALSE(converted->sparse()); tatami_test::test_simple_row_access(converted.get(), &smat); tatami_test::test_simple_column_access(converted.get(), &smat); @@ -115,7 +66,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Combine( ::testing::Values(10, 50, 100), // number of rows ::testing::Values(10, 50, 100), // number of columns - ::testing::Values(true, false), // row major? + ::testing::Values(true, false), // from row major? + ::testing::Values(true, false), // to row major? ::testing::Values(1, 3) // number of threads ) ); diff --git a/tests/src/isometric/binary/DelayedBinaryIsometricOp.cpp b/tests/src/isometric/binary/DelayedBinaryIsometricOp.cpp index bbf6de9e..0b505c5f 100644 --- a/tests/src/isometric/binary/DelayedBinaryIsometricOp.cpp +++ b/tests/src/isometric/binary/DelayedBinaryIsometricOp.cpp @@ -14,7 +14,7 @@ TEST(DelayedBinaryIsometricOp, ConstOverload) { int nrow = 23, ncol = 42; auto simulated = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1); - auto dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, std::move(simulated))); + auto dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, std::move(simulated))); auto op = tatami::make_DelayedBinaryAddHelper(); auto mat = tatami::make_DelayedBinaryIsometricOp(dense, dense, std::move(op)); @@ -26,8 +26,8 @@ TEST(DelayedBinaryIsometricOp, ConstOverload) { TEST(DelayedBinaryIsometricOp, Misshappen) { std::vector src(200); - auto dense = std::shared_ptr(new tatami::DenseRowMatrix(10, 20, src)); - auto dense2 = std::shared_ptr(new tatami::DenseRowMatrix(20, 10, src)); + auto dense = std::shared_ptr(new tatami::DenseRowMatrix(10, 
20, src)); + auto dense2 = std::shared_ptr(new tatami::DenseRowMatrix(20, 10, src)); tatami_test::throws_error([&]() { tatami::make_DelayedBinaryIsometricOp(dense, dense2, tatami::DelayedBinaryBasicMockHelper()); }, "should be the same"); @@ -41,12 +41,12 @@ class DelayedBinaryIsometricOpMockTest : public ::testing::TestWithParam(nrow * ncol, 0.1); - dense.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(simulated))); - sparse = tatami::convert_to_compressed_sparse(dense.get()); + dense.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(simulated))); + sparse = tatami::convert_to_compressed_sparse(dense.get()); bdense = tatami::make_DelayedBinaryIsometricOp(dense, dense, tatami::DelayedBinaryBasicMockHelper()); bsparse = tatami::make_DelayedBinaryIsometricOp(sparse, sparse, tatami::DelayedBinaryAdvancedMockHelper()); - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::vector(nrow * ncol))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::vector(nrow * ncol))); } }; diff --git a/tests/src/isometric/binary/arith_helpers.cpp b/tests/src/isometric/binary/arith_helpers.cpp index 7853c835..9ef310ad 100644 --- a/tests/src/isometric/binary/arith_helpers.cpp +++ b/tests/src/isometric/binary/arith_helpers.cpp @@ -24,12 +24,12 @@ class BinaryArithUtils { } simulated_left = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1, /* lower = */ -5, /* upper = */ 5, /* seed */ 12345); - dense_left = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_left)); - sparse_left = tatami::convert_to_compressed_sparse(dense_left.get()); // column major. + dense_left = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_left)); + sparse_left = tatami::convert_to_compressed_sparse(dense_left.get()); // column major. 
simulated_right = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1, /* lower = */ -5, /* upper = */ 5, /* seed */ 67890); - dense_right = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_right)); - sparse_right = tatami::convert_to_compressed_sparse(dense_right.get()); // column major. + dense_right = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_right)); + sparse_right = tatami::convert_to_compressed_sparse(dense_right.get()); // column major. return; } }; @@ -162,7 +162,7 @@ class BinaryArithAdditionUtils : public BinaryArithUtils { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] += simulated_right[i]; } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -217,7 +217,7 @@ class BinaryArithSubtractionUtils : public BinaryArithUtils { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] -= simulated_right[i]; } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -262,7 +262,7 @@ class BinaryArithMultiplicationUtils : public BinaryArithUtils { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] *= simulated_right[i]; } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -399,7 +399,7 @@ class BinaryArithDivisionUtils : public BinaryArithUtils { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = careful_division(refvec[i], simulated_right[i]); } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -449,7 +449,7 @@ class BinaryArithPowerUtils : public BinaryArithUtils { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = std::pow(std::abs(refvec[i]), std::abs(simulated_right[i])); } - ref.reset(new 
tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -491,7 +491,7 @@ class BinaryArithModuloUtils : public BinaryArithUtils { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = std::fmod(refvec[i], simulated_right[i]); } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -536,7 +536,7 @@ class BinaryArithIntegerDivisionUtils : public BinaryArithUtils { // x == (x %% y) + y * (x %/% y) refvec[i] = std::floor(refvec[i] / simulated_right[i]); } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; diff --git a/tests/src/isometric/binary/boolean_helpers.cpp b/tests/src/isometric/binary/boolean_helpers.cpp index 5d81391c..a37e5b92 100644 --- a/tests/src/isometric/binary/boolean_helpers.cpp +++ b/tests/src/isometric/binary/boolean_helpers.cpp @@ -19,12 +19,12 @@ class BinaryBooleanTest : public ::testing::Test { static void SetUpTestSuite() { simulated_left = tatami_test::simulate_sparse_vector(nrow * ncol, 0.2, /* lower = */ -10, /* upper = */ 10, /* seed */ 12345); - dense_left = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_left)); - sparse_left = tatami::convert_to_compressed_sparse(dense_left.get()); // column major. + dense_left = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_left)); + sparse_left = tatami::convert_to_compressed_sparse(dense_left.get()); // column major. simulated_right = tatami_test::simulate_sparse_vector(nrow * ncol, 0.2, /* lower = */ -10, /* upper = */ 10, /* seed */ 67890); - dense_right = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_right)); - sparse_right = tatami::convert_to_compressed_sparse(dense_right.get()); // column major. 
+ dense_right = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_right)); + sparse_right = tatami::convert_to_compressed_sparse(dense_right.get()); // column major. return; } }; @@ -44,7 +44,7 @@ TEST_F(BinaryBooleanTest, EQUAL) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = static_cast(simulated_left[i]) == static_cast(simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -65,7 +65,7 @@ TEST_F(BinaryBooleanTest, AND) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = static_cast(simulated_left[i]) && static_cast(simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -86,7 +86,7 @@ TEST_F(BinaryBooleanTest, OR) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = static_cast(simulated_left[i]) || static_cast(simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -107,7 +107,7 @@ TEST_F(BinaryBooleanTest, XOR) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = static_cast(simulated_left[i]) != static_cast(simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); diff --git a/tests/src/isometric/binary/compare_helpers.cpp b/tests/src/isometric/binary/compare_helpers.cpp index 0c823002..13031712 100644 --- a/tests/src/isometric/binary/compare_helpers.cpp +++ b/tests/src/isometric/binary/compare_helpers.cpp @@ -20,13 +20,13 @@ class BinaryCompareTest 
: public ::testing::Test { static void SetUpTestSuite() { simulated_left = tatami_test::simulate_sparse_vector(nrow * ncol, 0.2, /* lower = */ 1, /* upper = */ 4, /* seed */ 12345); for (auto& x : simulated_left) { x = std::round(x); } // Rounding for easier tests of exact equality. - dense_left = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_left)); - sparse_left = tatami::convert_to_compressed_sparse(dense_left.get()); // column major. + dense_left = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_left)); + sparse_left = tatami::convert_to_compressed_sparse(dense_left.get()); // column major. simulated_right = tatami_test::simulate_sparse_vector(nrow * ncol, 0.2, /* lower = */ 1, /* upper = */ 4, /* seed */ 67890); for (auto& x : simulated_right) { x = std::round(x); } // Rounding for easier tests of exact equality. - dense_right = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_right)); - sparse_right = tatami::convert_to_compressed_sparse(dense_right.get()); // column major. + dense_right = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_right)); + sparse_right = tatami::convert_to_compressed_sparse(dense_right.get()); // column major. 
return; } }; @@ -46,7 +46,7 @@ TEST_F(BinaryCompareTest, Equal) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = (simulated_left[i] == simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -67,7 +67,7 @@ TEST_F(BinaryCompareTest, GreaterThan) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = (simulated_left[i] > simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -88,7 +88,7 @@ TEST_F(BinaryCompareTest, LessThan) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = (simulated_left[i] < simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -109,7 +109,7 @@ TEST_F(BinaryCompareTest, GreaterThanOrEqual) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = (simulated_left[i] >= simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -130,7 +130,7 @@ TEST_F(BinaryCompareTest, LessThanOrEqual) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = (simulated_left[i] <= simulated_right[i]); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -151,7 +151,7 @@ TEST_F(BinaryCompareTest, NotEqual) { for (size_t i = 0; i < refvec.size(); ++i) { refvec[i] = (simulated_left[i] != simulated_right[i]); } - 
tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); diff --git a/tests/src/isometric/unary/DelayedUnaryIsometricOp.cpp b/tests/src/isometric/unary/DelayedUnaryIsometricOp.cpp index 23c2d608..920b1495 100644 --- a/tests/src/isometric/unary/DelayedUnaryIsometricOp.cpp +++ b/tests/src/isometric/unary/DelayedUnaryIsometricOp.cpp @@ -14,7 +14,7 @@ TEST(DelayedUnaryIsometricOp, ConstOverload) { int nrow = 23, ncol = 42; auto simulated = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1); - auto dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + auto dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); auto vec = std::vector(nrow); auto op = tatami::make_DelayedAddVectorHelper<0>(vec); @@ -33,12 +33,12 @@ class DelayedUnaryIsometricOpMockTest : public ::testing::TestWithParam(nrow * ncol, 0.1); - dense.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(simulated))); - sparse = tatami::convert_to_compressed_sparse(dense.get()); + dense.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(simulated))); + sparse = tatami::convert_to_compressed_sparse(dense.get()); udense = tatami::make_DelayedUnaryIsometricOp(dense, tatami::DelayedUnaryBasicMockHelper()); usparse = tatami::make_DelayedUnaryIsometricOp(sparse, tatami::DelayedUnaryAdvancedMockHelper()); - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::vector(nrow * ncol))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::vector(nrow * ncol))); } }; diff --git a/tests/src/isometric/unary/arith_scalar_helpers.cpp b/tests/src/isometric/unary/arith_scalar_helpers.cpp index 69d93715..fe6866c8 100644 --- a/tests/src/isometric/unary/arith_scalar_helpers.cpp +++ b/tests/src/isometric/unary/arith_scalar_helpers.cpp @@ -23,8 +23,8 @@ class ArithScalarUtils { return; } simulated = tatami_test::simulate_sparse_vector(nrow 
* ncol, 0.1); - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. } }; @@ -63,7 +63,7 @@ TEST_P(ArithCommutativeScalarTest, Addition) { for (auto& r : refvec) { r += val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -83,7 +83,7 @@ TEST_P(ArithCommutativeScalarTest, Multiplication) { for (auto& r : refvec) { r *= val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -140,7 +140,7 @@ TEST_P(ArithNonCommutativeScalarTest, Subtraction) { r = val - r; } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -179,7 +179,7 @@ TEST_P(ArithNonCommutativeScalarTest, Division) { r = careful_division(val, r); } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref, /* has_nan = */ (val == 0)); quick_test_all(sparse_mod.get(), &ref, /* has_nan = */ (val == 0)); @@ -224,7 +224,7 @@ TEST_P(ArithNonCommutativeScalarTest, Power) { r = std::pow(val, std::abs(r)); } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -264,7 +264,7 @@ TEST_P(ArithNonCommutativeScalarTest, Modulo) { r = std::fmod(val, r); } } 
- tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref, /* has_nan = */ !(on_right && val)); quick_test_all(sparse_mod.get(), &ref, /* has_nan = */ !(on_right && val)); @@ -305,7 +305,7 @@ TEST_P(ArithNonCommutativeScalarTest, IntegerDivision) { r = std::floor(careful_division(val, r)); } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref, /* has_nan = */ !(on_right && val)); quick_test_all(sparse_mod.get(), &ref, /* has_nan = */ !(on_right && val)); diff --git a/tests/src/isometric/unary/arith_vector_helpers.cpp b/tests/src/isometric/unary/arith_vector_helpers.cpp index e74ecd6d..fbac3512 100644 --- a/tests/src/isometric/unary/arith_vector_helpers.cpp +++ b/tests/src/isometric/unary/arith_vector_helpers.cpp @@ -22,8 +22,8 @@ class ArithVectorUtils { return; } simulated = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1); - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. return; } @@ -214,7 +214,7 @@ class ArithVectorAdditionUtils : public ArithVectorUtils { refvec[r * ncol + c] += vec[row ? 
r : c]; } } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -337,7 +337,7 @@ class ArithVectorSubtractionUtils : public ArithVectorUtils { } } } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -386,7 +386,7 @@ TEST_P(ArithVectorSubtractionZeroedTest, Basic) { for (auto& x : copy) { x *= -1; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); tatami_test::test_simple_column_access(dense_z.get(), &ref); tatami_test::test_simple_column_access(sparse_z.get(), &ref); @@ -451,7 +451,7 @@ class ArithVectorMultiplicationUtils : public ArithVectorUtils { refvec[r * ncol + c] *= vec[row ? r : c]; } } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -487,7 +487,7 @@ TEST_P(ArithVectorMultiplicationZeroedTest, Basic) { std::shared_ptr dense_z, sparse_z; ArithVectorMultiplicationUtils::apply_operation(row, zeroed, dense_z, sparse_z); - tatami::DenseRowMatrix ref(nrow, ncol, std::vector(nrow * ncol)); + tatami::DenseRowMatrix ref(nrow, ncol, std::vector(nrow * ncol)); tatami_test::test_simple_column_access(dense_z.get(), &ref); tatami_test::test_simple_column_access(sparse_z.get(), &ref); @@ -578,7 +578,7 @@ class ArithVectorDivisionUtils : public ArithVectorUtils { } } } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -630,7 +630,7 @@ TEST_P(ArithVectorDivisionZeroedTest, AllZero) { x = careful_division(0.0, x); } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); EXPECT_FALSE(dense_z->sparse()); EXPECT_FALSE(sparse_z->sparse()); @@ 
-682,7 +682,7 @@ TEST_P(ArithVectorDivisionZeroedTest, OneZero) { } } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); EXPECT_FALSE(dense_z->sparse()); EXPECT_FALSE(sparse_z->sparse()); @@ -775,7 +775,7 @@ class ArithVectorPowerUtils : public ArithVectorUtils { } } } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -827,7 +827,7 @@ TEST_P(ArithVectorPowerZeroedTest, AllZero) { x = std::pow(0.0, std::abs(x)); } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); EXPECT_FALSE(dense_z->sparse()); EXPECT_FALSE(sparse_z->sparse()); @@ -882,7 +882,7 @@ TEST_P(ArithVectorPowerZeroedTest, OneZero) { } } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); EXPECT_FALSE(dense_z->sparse()); EXPECT_FALSE(sparse_z->sparse()); @@ -1068,7 +1068,7 @@ class ArithVectorModuloUtils : public ArithVectorUtils { } } } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -1120,7 +1120,7 @@ TEST_P(ArithVectorModuloZeroedTest, AllZero) { x = std::fmod(0.0, x); } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); EXPECT_FALSE(dense_z->sparse()); EXPECT_FALSE(sparse_z->sparse()); @@ -1210,7 +1210,7 @@ class ArithVectorIntegerDivisionUtils : public ArithVectorUtils { } } } - ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); + ref.reset(new tatami::DenseRowMatrix(nrow, ncol, std::move(refvec))); } }; @@ -1263,7 +1263,7 @@ TEST_P(ArithVectorIntegerDivisionZeroedTest, AllZero) { x = std::floor(careful_division(0.0, x)); } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(copy)); + tatami::DenseRowMatrix 
ref(nrow, ncol, std::move(copy)); EXPECT_FALSE(dense_z->sparse()); EXPECT_FALSE(sparse_z->sparse()); diff --git a/tests/src/isometric/unary/boolean_scalar_helpers.cpp b/tests/src/isometric/unary/boolean_scalar_helpers.cpp index 3052c156..40cf9c07 100644 --- a/tests/src/isometric/unary/boolean_scalar_helpers.cpp +++ b/tests/src/isometric/unary/boolean_scalar_helpers.cpp @@ -22,8 +22,8 @@ class BooleanScalarUtils { return; } simulated = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1, -2, 2); - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. } }; @@ -53,7 +53,7 @@ TEST_P(BooleanScalarTest, AND) { for (auto& r : refvec) { r = r && other; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -83,7 +83,7 @@ TEST_P(BooleanScalarTest, OR) { for (auto& r : refvec) { r = r || other; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -113,7 +113,7 @@ TEST_P(BooleanScalarTest, XOR) { for (auto& r : refvec) { r = static_cast(r) != other; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -143,7 +143,7 @@ TEST_P(BooleanScalarTest, EQUAL) { for (auto& r : refvec) { r = static_cast(r) == other; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); 
quick_test_all(sparse_mod.get(), &ref); @@ -179,7 +179,7 @@ TEST_F(BooleanNotTest, Basic) { for (auto& r : refvec) { r = !static_cast(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); diff --git a/tests/src/isometric/unary/boolean_vector_helpers.cpp b/tests/src/isometric/unary/boolean_vector_helpers.cpp index 9372e664..503ff55c 100644 --- a/tests/src/isometric/unary/boolean_vector_helpers.cpp +++ b/tests/src/isometric/unary/boolean_vector_helpers.cpp @@ -18,8 +18,8 @@ class BooleanVectorTest : public ::testing::TestWithParam static void SetUpTestSuite() { simulated = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1, -3, 3); - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. 
} static void fill_default_vector(std::vector& vec) { @@ -70,7 +70,7 @@ TEST_P(BooleanVectorTest, AND) { } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -116,7 +116,7 @@ TEST_P(BooleanVectorTest, OR) { } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -162,7 +162,7 @@ TEST_P(BooleanVectorTest, XOR) { } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -210,7 +210,7 @@ TEST_P(BooleanVectorTest, EQUAL) { } } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } diff --git a/tests/src/isometric/unary/compare_scalar_helpers.cpp b/tests/src/isometric/unary/compare_scalar_helpers.cpp index f3e7feee..7572e832 100644 --- a/tests/src/isometric/unary/compare_scalar_helpers.cpp +++ b/tests/src/isometric/unary/compare_scalar_helpers.cpp @@ -29,8 +29,8 @@ class CompareScalarTest : public ::testing::TestWithParam { } } - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. 
} }; @@ -58,7 +58,7 @@ TEST_P(CompareScalarTest, Equal) { for (auto& r : refvec) { r = r == val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -88,7 +88,7 @@ TEST_P(CompareScalarTest, GreaterThan) { for (auto& r : refvec) { r = r > val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -118,7 +118,7 @@ TEST_P(CompareScalarTest, LessThan) { for (auto& r : refvec) { r = r < val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -148,7 +148,7 @@ TEST_P(CompareScalarTest, GreaterThanOrEqual) { for (auto& r : refvec) { r = r >= val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -178,7 +178,7 @@ TEST_P(CompareScalarTest, LessThanOrEqual) { for (auto& r : refvec) { r = r <= val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); @@ -208,7 +208,7 @@ TEST_P(CompareScalarTest, NotEqual) { for (auto& r : refvec) { r = r != val; } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); diff --git a/tests/src/isometric/unary/compare_vector_helpers.cpp b/tests/src/isometric/unary/compare_vector_helpers.cpp index 011ee397..e4034f48 100644 --- 
a/tests/src/isometric/unary/compare_vector_helpers.cpp +++ b/tests/src/isometric/unary/compare_vector_helpers.cpp @@ -28,8 +28,8 @@ class CompareVectorTest : public ::testing::TestWithParam } } - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. } static void fill_default_vector(std::vector& vec) { @@ -89,7 +89,7 @@ TEST_P(CompareVectorTest, Equal) { } } - tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -141,7 +141,7 @@ TEST_P(CompareVectorTest, GreaterThan) { } } - tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -193,7 +193,7 @@ TEST_P(CompareVectorTest, LessThan) { } } - tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -245,7 +245,7 @@ TEST_P(CompareVectorTest, GreaterThanOrEqual) { } } - tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } @@ -297,7 +297,7 @@ TEST_P(CompareVectorTest, LessThanOrEqual) { } } - tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); 
quick_test_all(sparse_mod.get(), &ref); } @@ -343,7 +343,7 @@ TEST_P(CompareVectorTest, NotEqual) { } } - tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(this->nrow, this->ncol, std::move(refvec)); quick_test_all(dense_mod.get(), &ref); quick_test_all(sparse_mod.get(), &ref); } diff --git a/tests/src/isometric/unary/math_helpers.cpp b/tests/src/isometric/unary/math_helpers.cpp index ad37422b..9ed8578a 100644 --- a/tests/src/isometric/unary/math_helpers.cpp +++ b/tests/src/isometric/unary/math_helpers.cpp @@ -21,14 +21,14 @@ class MathTest : public ::testing::Test { static void SetUpTestSuite() { simulated = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1, /* lower = */ -10, /* upper = */ 10); - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column major. // Use a tighter range to get most values inside the domain of [-1, 1] // (but not all; we still leave a few outside for NaN testing purposes). simulated_unit = tatami_test::simulate_sparse_vector(nrow * ncol, 0.1, /* lower = */ -1.1, /* upper = */ 1.1); - dense_unit = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_unit)); - sparse_unit = tatami::convert_to_compressed_sparse(dense_unit.get()); // column major. + dense_unit = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated_unit)); + sparse_unit = tatami::convert_to_compressed_sparse(dense_unit.get()); // column major. 
} }; @@ -46,7 +46,7 @@ TEST_F(MathTest, Abs) { for (auto& r : refvec) { r = std::abs(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Toughest tests are handled by the Vector case; they would // be kind of redundant here, so we'll just do something simple @@ -69,7 +69,7 @@ TEST_F(MathTest, Sign) { for (auto& r : refvec) { r = (0 < r) - (r < 0); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -90,7 +90,7 @@ TEST_F(MathTest, Sqrt) { for (auto& r : refvec) { r = std::sqrt(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests; we assume that we have IEEE floats so sqrt(-1) => NaN. quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -113,7 +113,7 @@ TEST_F(MathTest, Log) { for (auto& r : refvec) { r = std::log(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Doing some light tests, assuming that log(-1) => NaN and log(0) => Inf. quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -135,7 +135,7 @@ TEST_F(MathTest, Log) { for (auto& r : refvec) { r = std::log(r) / std::log(2); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -159,7 +159,7 @@ TEST_F(MathTest, Log1pBy) { for (auto& r : refvec) { r = std::log1p(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Doing some light tests, assuming that log1p(-2) => NaN and log1p(-1) => Inf. 
quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -181,7 +181,7 @@ TEST_F(MathTest, Log1pBy) { for (auto& r : refvec) { r = std::log1p(r) / std::log(2); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -203,7 +203,7 @@ TEST_F(MathTest, Exp) { for (auto& r : refvec) { r = std::exp(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -224,7 +224,7 @@ TEST_F(MathTest, Expm1) { for (auto& r : refvec) { r = std::expm1(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -245,7 +245,7 @@ TEST_F(MathTest, Round) { for (auto& r : refvec) { r = std::round(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -266,7 +266,7 @@ TEST_F(MathTest, Ceiling) { for (auto& r : refvec) { r = std::ceil(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -287,7 +287,7 @@ TEST_F(MathTest, Floor) { for (auto& r : refvec) { r = std::floor(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. 
quick_test_all(dense_mod.get(), &ref); @@ -308,7 +308,7 @@ TEST_F(MathTest, Trunc) { for (auto& r : refvec) { r = std::trunc(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -329,7 +329,7 @@ TEST_F(MathTest, Sin) { for (auto& r : refvec) { r = std::sin(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -350,7 +350,7 @@ TEST_F(MathTest, Cos) { for (auto& r : refvec) { r = std::cos(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -371,7 +371,7 @@ TEST_F(MathTest, Tan) { for (auto& r : refvec) { r = std::tan(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -393,7 +393,7 @@ TEST_F(MathTest, Asin) { for (auto& r : refvec) { r = std::asin(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. We assume that asin(2) => NaN. quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -414,7 +414,7 @@ TEST_F(MathTest, Acos) { for (auto& r : refvec) { r = std::acos(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. We assume that acos(-2) => NaN. 
quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -435,7 +435,7 @@ TEST_F(MathTest, Atan) { for (auto& r : refvec) { r = std::atan(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -456,7 +456,7 @@ TEST_F(MathTest, Sinh) { for (auto& r : refvec) { r = std::sinh(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -477,7 +477,7 @@ TEST_F(MathTest, Cosh) { for (auto& r : refvec) { r = std::cosh(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -498,7 +498,7 @@ TEST_F(MathTest, Tanh) { for (auto& r : refvec) { r = std::tanh(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -519,7 +519,7 @@ TEST_F(MathTest, Asinh) { for (auto& r : refvec) { r = std::asinh(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. quick_test_all(dense_mod.get(), &ref); @@ -540,7 +540,7 @@ TEST_F(MathTest, Acosh) { for (auto& r : refvec) { r = std::acosh(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. We assume that acosh(-1) => NaN. 
quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -561,7 +561,7 @@ TEST_F(MathTest, Atanh) { for (auto& r : refvec) { r = std::atanh(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. We assume that atanh(2) => NaN. quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -582,7 +582,7 @@ TEST_F(MathTest, Gamma) { for (auto& r : refvec) { r = std::tgamma(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. We assume that gamma(-1) => NaN. quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); @@ -603,7 +603,7 @@ TEST_F(MathTest, Lgamma) { for (auto& r : refvec) { r = std::lgamma(r); } - tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); + tatami::DenseRowMatrix ref(nrow, ncol, std::move(refvec)); // Again, doing some light tests. We assume that lgamma(-1) => NaN. 
quick_test_all(dense_mod.get(), &ref, /* has_nan = */ true); diff --git a/tests/src/other/DelayedBind.cpp b/tests/src/other/DelayedBind.cpp index 70ef6b2b..f34403d0 100644 --- a/tests/src/other/DelayedBind.cpp +++ b/tests/src/other/DelayedBind.cpp @@ -57,7 +57,7 @@ class DelayedBindUtils { } else { collected_dense.emplace_back(new tatami::DenseColumnMatrix(otherdim, lengths[i], to_add)); } - collected_sparse.push_back(tatami::convert_to_compressed_sparse(collected_dense.back().get())); // always CSC + collected_sparse.push_back(tatami::convert_to_compressed_sparse(collected_dense.back().get())); // always CSC forced_collected_dense.emplace_back(std::make_shared >(collected_dense.back())); forced_collected_sparse.emplace_back(std::make_shared >(collected_sparse.back())); diff --git a/tests/src/other/DelayedCast.cpp b/tests/src/other/DelayedCast.cpp index 12010e66..10a1b0fa 100644 --- a/tests/src/other/DelayedCast.cpp +++ b/tests/src/other/DelayedCast.cpp @@ -30,8 +30,8 @@ class CastUtils { } auto sparse_matrix = tatami_test::simulate_sparse_vector(nrow * ncol, 0.08); - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, sparse_matrix)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, sparse_matrix)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. // Both the value and indices are changed in type. std::vector fsparse_matrix(sparse_matrix.begin(), sparse_matrix.end()); @@ -41,8 +41,8 @@ class CastUtils { // Reference with reduced precision, for comparison with double->float->double casts. { std::vector dsparse_matrix(fsparse_matrix.begin(), fsparse_matrix.end()); - fdense_ref = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, std::move(dsparse_matrix))); - fsparse_ref = tatami::convert_to_compressed_sparse(fdense_ref.get()); // column-major. 
+ fdense_ref = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, std::move(dsparse_matrix))); + fsparse_ref = tatami::convert_to_compressed_sparse(fdense_ref.get()); // column-major. } // Only the value is changed in type. diff --git a/tests/src/other/DelayedTranspose.cpp b/tests/src/other/DelayedTranspose.cpp index 306598bb..543d0cd6 100644 --- a/tests/src/other/DelayedTranspose.cpp +++ b/tests/src/other/DelayedTranspose.cpp @@ -20,8 +20,8 @@ class TransposeUtils { } auto simulated = tatami_test::simulate_sparse_vector(nrow * ncol, 0.05); - dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(nrow, ncol, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. tdense = tatami::make_DelayedTranspose(dense); tsparse = tatami::make_DelayedTranspose(sparse); @@ -31,7 +31,7 @@ class TransposeUtils { refvec[c * nrow + r] = simulated[r * ncol + c]; } } - ref.reset(new tatami::DenseRowMatrix(ncol, nrow, refvec)); + ref.reset(new tatami::DenseRowMatrix(ncol, nrow, refvec)); } }; diff --git a/tests/src/sparse/CompressedSparseMatrix.cpp b/tests/src/sparse/CompressedSparseMatrix.cpp index f8f6ba6f..41f8ce7d 100644 --- a/tests/src/sparse/CompressedSparseMatrix.cpp +++ b/tests/src/sparse/CompressedSparseMatrix.cpp @@ -100,8 +100,8 @@ class SparseUtils { return; } dense.reset(new tatami::DenseRowMatrix(nrow, ncol, tatami_test::simulate_sparse_vector(nrow * ncol, 0.05))); - sparse_row = tatami::convert_to_compressed_sparse(dense.get()); - sparse_column = tatami::convert_to_compressed_sparse(dense.get()); + sparse_row = tatami::convert_to_compressed_sparse(dense.get()); + sparse_column = tatami::convert_to_compressed_sparse(dense.get()); } }; @@ -266,7 +266,7 @@ TEST(CompressedSparseMatrix, SecondarySkip) { // Make a column-sparse compressed matrix, so that we can check // that secondary 
extraction correctly skips the all-zero rows. - auto sparse_column = tatami::convert_to_compressed_sparse(&dense); + auto sparse_column = tatami::convert_to_compressed_sparse(&dense); tatami_test::TestAccessParameters param; param.use_row = true; param.order = tatami_test::FORWARD; diff --git a/tests/src/sparse/convert_to_compressed_sparse.cpp b/tests/src/sparse/convert_to_compressed_sparse.cpp index dbbe9539..9ed8eb3d 100644 --- a/tests/src/sparse/convert_to_compressed_sparse.cpp +++ b/tests/src/sparse/convert_to_compressed_sparse.cpp @@ -5,116 +5,67 @@ #include "tatami_test/tatami_test.hpp" -class ConvertToCompressedSparseTest : public ::testing::TestWithParam > {}; - -TEST_P(ConvertToCompressedSparseTest, RowToRow) { - auto param = GetParam(); - auto two_pass = std::get<0>(param); - auto nthreads = std::get<1>(param); - - size_t NR = 50, NC = 20; - auto vec = tatami_test::simulate_sparse_vector(NR * NC, 0.1); - auto mat = std::make_shared >(NR, NC, vec); - - auto converted = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); - EXPECT_EQ(converted->nrow(), NR); - EXPECT_EQ(converted->ncol(), NC); - EXPECT_TRUE(converted->sparse()); - EXPECT_TRUE(converted->prefer_rows()); - tatami_test::test_simple_row_access(converted.get(), mat.get()); - tatami_test::test_simple_column_access(converted.get(), mat.get()); - - auto converted2 = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); // works for a different type. 
- EXPECT_TRUE(converted2->sparse()); - EXPECT_TRUE(converted2->prefer_rows()); - - auto wrk2 = converted2->dense_row(); - for (size_t i = 0; i < NR; ++i) { - auto start = vec.begin() + i * NC; - std::vector expected2(start, start + NC); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected2); - } -} - -TEST_P(ConvertToCompressedSparseTest, ColumnToColumn) { - auto param = GetParam(); - auto two_pass = std::get<0>(param); - auto nthreads = std::get<1>(param); - - size_t NR = 30, NC = 50; - auto trip = tatami_test::simulate_sparse_compressed(NC, NR, 0.1); // check sparse->sparse conversion with matching preferred dimension. - auto mat = std::make_shared >(NR, NC, trip.value, trip.index, trip.ptr); - - auto converted = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); - EXPECT_EQ(converted->nrow(), NR); - EXPECT_EQ(converted->ncol(), NC); - EXPECT_TRUE(converted->sparse()); - EXPECT_FALSE(converted->prefer_rows()); - tatami_test::test_simple_row_access(converted.get(), mat.get()); - tatami_test::test_simple_column_access(converted.get(), mat.get()); - - auto converted2 = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); // works for a different type. 
- EXPECT_TRUE(converted2->sparse()); - EXPECT_FALSE(converted2->prefer_rows()); - - auto wrk = mat->dense_column(); - auto wrk2 = converted2->dense_column(); - for (size_t i = 0; i < NC; ++i) { - auto expected = tatami_test::fetch(wrk.get(), static_cast(i), NR); - std::vector expected2(expected.begin(), expected.end()); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NR), expected2); +class ConvertToCompressedSparseTest : public ::testing::TestWithParam > { +protected: + size_t NR, NC; + bool from_row, to_row; + bool two_pass; + int nthreads; + + template + void assemble(const Param& param) { + NR = std::get<0>(param); + NC = std::get<1>(param); + from_row = std::get<2>(param); + to_row = std::get<3>(param); + two_pass = std::get<4>(param); + nthreads = std::get<5>(param); } -} - -TEST_P(ConvertToCompressedSparseTest, RowToColumn) { - auto param = GetParam(); - auto two_pass = std::get<0>(param); - auto nthreads = std::get<1>(param); +}; - size_t NR = 70, NC = 50; - auto vec = tatami_test::simulate_sparse_vector(NR * NC, 0.15); - auto mat = std::make_shared >(NR, NC, vec); +TEST_P(ConvertToCompressedSparseTest, FromDense) { + assemble(GetParam()); + auto vec = tatami_test::simulate_sparse_vector(NR * NC, 0.1); + auto mat = std::make_shared >(NR, NC, vec, from_row); - auto converted = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); + auto converted = tatami::convert_to_compressed_sparse(mat.get(), to_row, two_pass, nthreads); EXPECT_EQ(converted->nrow(), NR); EXPECT_EQ(converted->ncol(), NC); EXPECT_TRUE(converted->sparse()); - EXPECT_FALSE(converted->prefer_rows()); + EXPECT_EQ(converted->prefer_rows(), to_row); tatami_test::test_simple_row_access(converted.get(), mat.get()); tatami_test::test_simple_column_access(converted.get(), mat.get()); - auto converted2 = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); // works for a different type. 
+ auto converted2 = tatami::convert_to_compressed_sparse(mat.get(), to_row, two_pass, nthreads); // works for a different type. EXPECT_TRUE(converted2->sparse()); - EXPECT_FALSE(converted2->prefer_rows()); + EXPECT_EQ(converted2->prefer_rows(), to_row); + auto old = mat->dense_row(); + std::vector buffer(NC); auto wrk2 = converted2->dense_row(); for (size_t i = 0; i < NR; ++i) { - auto start = vec.begin() + i * NC; - std::vector expected2(start, start + NC); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected2); + auto ptr = old->fetch(i, buffer.data()); + std::vector expected(ptr, ptr + NC); + EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected); } } -TEST_P(ConvertToCompressedSparseTest, ColumnToRow) { - auto param = GetParam(); - auto two_pass = std::get<0>(param); - auto nthreads = std::get<1>(param); +TEST_P(ConvertToCompressedSparseTest, FromSparse) { + assemble(GetParam()); + auto trip = tatami_test::simulate_sparse_compressed((from_row ? NR : NC), (from_row ? NC : NR), 0.15); + auto mat = std::make_shared >(NR, NC, trip.value, trip.index, trip.ptr, from_row); - size_t NR = 20, NC = 50; - auto trip = tatami_test::simulate_sparse_compressed(NC, NR, 0.15); // check sparse->sparse conversion with non-matching preferred dimension. - auto mat = std::make_shared >(NR, NC, trip.value, trip.index, trip.ptr); - - auto converted = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); + auto converted = tatami::convert_to_compressed_sparse(mat.get(), to_row, two_pass, nthreads); EXPECT_EQ(converted->nrow(), NR); EXPECT_EQ(converted->ncol(), NC); EXPECT_TRUE(converted->sparse()); - EXPECT_TRUE(converted->prefer_rows()); + EXPECT_EQ(converted->prefer_rows(), to_row); tatami_test::test_simple_row_access(converted.get(), mat.get()); tatami_test::test_simple_column_access(converted.get(), mat.get()); - auto converted2 = tatami::convert_to_compressed_sparse(mat.get(), two_pass, nthreads); // works for a different type. 
+ auto converted2 = tatami::convert_to_compressed_sparse(mat.get(), to_row, two_pass, nthreads); // works for a different type. EXPECT_TRUE(converted2->sparse()); - EXPECT_TRUE(converted2->prefer_rows()); + EXPECT_EQ(converted2->prefer_rows(), to_row); auto wrk = mat->dense_column(); auto wrk2 = converted2->dense_column(); @@ -125,32 +76,15 @@ TEST_P(ConvertToCompressedSparseTest, ColumnToRow) { } } -TEST_P(ConvertToCompressedSparseTest, Automatic) { - auto param = GetParam(); - auto two_pass = std::get<0>(param); - auto nthreads = std::get<1>(param); - - size_t NR = 70, NC = 50; - auto vec = tatami_test::simulate_sparse_vector(NR * NC, 0.23); - - { - tatami::DenseMatrix mat(NR, NC, vec); - auto converted = tatami::convert_to_compressed_sparse(&mat, -1, two_pass, nthreads); - EXPECT_FALSE(converted->prefer_rows()); - } - - { - tatami::DenseMatrix mat(NR, NC, vec); - auto converted = tatami::convert_to_compressed_sparse(&mat, -1, two_pass, nthreads); - EXPECT_TRUE(converted->prefer_rows()); - } -} - INSTANTIATE_TEST_SUITE_P( ConvertToCompressedSparse, ConvertToCompressedSparseTest, ::testing::Combine( - ::testing::Values(false, true), - ::testing::Values(1, 3) + ::testing::Values(10, 50, 100), // number of rows + ::testing::Values(10, 50, 100), // number of columns + ::testing::Values(true, false), // from row major? + ::testing::Values(true, false), // to row major? + ::testing::Values(true, false), // two-pass? 
+ ::testing::Values(1, 3) // number of threads ) ); diff --git a/tests/src/sparse/convert_to_fragmented_sparse.cpp b/tests/src/sparse/convert_to_fragmented_sparse.cpp index 6da94cd1..f74783d6 100644 --- a/tests/src/sparse/convert_to_fragmented_sparse.cpp +++ b/tests/src/sparse/convert_to_fragmented_sparse.cpp @@ -6,108 +6,65 @@ #include "tatami_test/tatami_test.hpp" -class ConvertToFragmentedSparseTest : public ::testing::TestWithParam {}; - -TEST_P(ConvertToFragmentedSparseTest, RowToRow) { - auto nthreads = GetParam(); +class ConvertToFragmentedSparseTest : public ::testing::TestWithParam > { +protected: + size_t NR, NC; + bool from_row, to_row; + int nthreads; + + template + void assemble(const Param& param) { + NR = std::get<0>(param); + NC = std::get<1>(param); + from_row = std::get<2>(param); + to_row = std::get<3>(param); + nthreads = std::get<4>(param); + } +}; - size_t NR = 50, NC = 20; +TEST_P(ConvertToFragmentedSparseTest, FromDense) { + assemble(GetParam()); auto vec = tatami_test::simulate_sparse_vector(NR * NC, 0.1); - auto mat = std::make_shared >(NR, NC, vec); + auto mat = std::make_shared >(NR, NC, vec, from_row); - auto converted = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); + auto converted = tatami::convert_to_fragmented_sparse(mat.get(), to_row, nthreads); EXPECT_EQ(converted->nrow(), NR); EXPECT_EQ(converted->ncol(), NC); EXPECT_TRUE(converted->sparse()); - EXPECT_TRUE(converted->prefer_rows()); + EXPECT_EQ(converted->prefer_rows(), to_row); tatami_test::test_simple_row_access(converted.get(), mat.get()); tatami_test::test_simple_column_access(converted.get(), mat.get()); - auto converted2 = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); // works for a different type. + auto converted2 = tatami::convert_to_fragmented_sparse(mat.get(), to_row, nthreads); // works for a different type. 
EXPECT_TRUE(converted2->sparse()); - EXPECT_TRUE(converted2->prefer_rows()); + EXPECT_EQ(converted2->prefer_rows(), to_row); + auto old = mat->dense_row(); + std::vector buffer(NC); auto wrk2 = converted2->dense_row(); for (size_t i = 0; i < NR; ++i) { - auto start = vec.begin() + i * NC; - std::vector expected2(start, start + NC); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected2); + auto ptr = old->fetch(i, buffer.data()); + std::vector expected(ptr, ptr + NC); + EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected); } } TEST_P(ConvertToFragmentedSparseTest, ColumnToColumn) { - auto nthreads = GetParam(); - - size_t NR = 30, NC = 50; - auto trip = tatami_test::simulate_sparse_compressed(NC, NR, 0.1); // check sparse->sparse conversion with matching preferred dimension. - auto mat = std::make_shared >(NR, NC, trip.value, trip.index, trip.ptr); - - auto converted = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); - EXPECT_EQ(converted->nrow(), NR); - EXPECT_EQ(converted->ncol(), NC); - EXPECT_TRUE(converted->sparse()); - EXPECT_FALSE(converted->prefer_rows()); - tatami_test::test_simple_row_access(converted.get(), mat.get()); - tatami_test::test_simple_column_access(converted.get(), mat.get()); - - auto converted2 = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); // works for a different type. 
- EXPECT_TRUE(converted2->sparse()); - EXPECT_FALSE(converted2->prefer_rows()); - - auto wrk = mat->dense_column(); - auto wrk2 = converted2->dense_column(); - for (size_t i = 0; i < NC; ++i) { - auto expected = tatami_test::fetch(wrk.get(), static_cast(i), NR); - std::vector expected2(expected.begin(), expected.end()); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NR), expected2); - } -} - -TEST_P(ConvertToFragmentedSparseTest, RowToColumn) { - auto nthreads = GetParam(); - - size_t NR = 70, NC = 50; - auto vec = tatami_test::simulate_sparse_vector(NR * NC, 0.15); - auto mat = std::make_shared >(NR, NC, vec); + assemble(GetParam()); + auto trip = tatami_test::simulate_sparse_compressed((from_row ? NR : NC), (from_row ? NC : NR), 0.1); + auto mat = std::make_shared >(NR, NC, trip.value, trip.index, trip.ptr, from_row); - auto converted = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); + auto converted = tatami::convert_to_fragmented_sparse(mat.get(), to_row, nthreads); EXPECT_EQ(converted->nrow(), NR); EXPECT_EQ(converted->ncol(), NC); EXPECT_TRUE(converted->sparse()); - EXPECT_FALSE(converted->prefer_rows()); + EXPECT_EQ(converted->prefer_rows(), to_row); tatami_test::test_simple_row_access(converted.get(), mat.get()); tatami_test::test_simple_column_access(converted.get(), mat.get()); - auto converted2 = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); // works for a different type. + auto converted2 = tatami::convert_to_fragmented_sparse(mat.get(), to_row, nthreads); // works for a different type. 
EXPECT_TRUE(converted2->sparse()); - EXPECT_FALSE(converted2->prefer_rows()); - - auto wrk2 = converted2->dense_row(); - for (size_t i = 0; i < NR; ++i) { - auto start = vec.begin() + i * NC; - std::vector expected2(start, start + NC); - EXPECT_EQ(tatami_test::fetch(wrk2.get(), i, NC), expected2); - } -} - -TEST_P(ConvertToFragmentedSparseTest, ColumnToRow) { - auto nthreads = GetParam(); - - size_t NR = 20, NC = 50; - auto trip = tatami_test::simulate_sparse_compressed(NC, NR, 0.15); // check sparse->sparse conversion with non-matching preferred dimension. - auto mat = std::make_shared >(NR, NC, trip.value, trip.index, trip.ptr); - - auto converted = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); - EXPECT_EQ(converted->nrow(), NR); - EXPECT_EQ(converted->ncol(), NC); - EXPECT_TRUE(converted->sparse()); - EXPECT_TRUE(converted->prefer_rows()); - tatami_test::test_simple_row_access(converted.get(), mat.get()); - tatami_test::test_simple_column_access(converted.get(), mat.get()); - - auto converted2 = tatami::convert_to_fragmented_sparse(mat.get(), nthreads); // works for a different type. 
- EXPECT_TRUE(converted2->sparse()); - EXPECT_TRUE(converted2->prefer_rows()); + EXPECT_EQ(converted2->prefer_rows(), to_row); auto wrk = mat->dense_column(); auto wrk2 = converted2->dense_column(); @@ -118,27 +75,14 @@ TEST_P(ConvertToFragmentedSparseTest, ColumnToRow) { } } -TEST_P(ConvertToFragmentedSparseTest, Automatic) { - auto nthreads = GetParam(); - - size_t NR = 70, NC = 50; - auto vec = tatami_test::simulate_sparse_vector(NR * NC, 0.23); - - { - tatami::DenseMatrix mat(NR, NC, vec); - auto converted = tatami::convert_to_fragmented_sparse(&mat, -1, nthreads); - EXPECT_FALSE(converted->prefer_rows()); - } - - { - tatami::DenseMatrix mat(NR, NC, vec); - auto converted = tatami::convert_to_fragmented_sparse(&mat, -1, nthreads); - EXPECT_TRUE(converted->prefer_rows()); - } -} - INSTANTIATE_TEST_SUITE_P( ConvertToFragmentedSparse, ConvertToFragmentedSparseTest, - ::testing::Values(1, 3) + ::testing::Combine( + ::testing::Values(10, 50, 100), // number of rows + ::testing::Values(10, 50, 100), // number of columns + ::testing::Values(true, false), // from row major? + ::testing::Values(true, false), // to row major? + ::testing::Values(1, 3) // number of threads + ) ); diff --git a/tests/src/subset/DelayedSubset.cpp b/tests/src/subset/DelayedSubset.cpp index 32be914a..c4bc87c4 100644 --- a/tests/src/subset/DelayedSubset.cpp +++ b/tests/src/subset/DelayedSubset.cpp @@ -20,8 +20,8 @@ class SubsetCoreUtils { if (dense) { return; } - dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. 
} protected: @@ -61,7 +61,7 @@ class SubsetCoreUtils { ptr += NC; } - return std::shared_ptr(new tatami::DenseRowMatrix(sub.size(), NC, std::move(reference))); + return std::shared_ptr(new tatami::DenseRowMatrix(sub.size(), NC, std::move(reference))); } template @@ -79,7 +79,7 @@ class SubsetCoreUtils { } } - return std::shared_ptr(new tatami::DenseRowMatrix(NR, sub.size(), std::move(reference))); + return std::shared_ptr(new tatami::DenseRowMatrix(NR, sub.size(), std::move(reference))); } }; @@ -277,13 +277,13 @@ TEST_P(SubsetConstructorTest, SortedUnique) { auto sub = SubsetCoreUtils::spawn_indices(true, 5, duplicate, sorted); if (sorted && !duplicate) { - tatami::DelayedSubsetSortedUnique<0, double, int, decltype(sub)> manual(dense, sub); + tatami::DelayedSubsetSortedUnique manual(dense, sub, true); auto ref = SubsetCoreUtils::reference_on_rows(dense.get(), sub); tatami_test::test_simple_row_access(&manual, ref.get()); tatami_test::test_simple_column_access(&manual, ref.get()); } else { tatami_test::throws_error([&]() { - tatami::DelayedSubsetSortedUnique<0, double, int, decltype(sub)> manual(dense, sub); + tatami::DelayedSubsetSortedUnique manual(dense, sub, true); }, "unique"); } } @@ -295,13 +295,13 @@ TEST_P(SubsetConstructorTest, Sorted) { auto sub = SubsetCoreUtils::spawn_indices(true, 5, duplicate, sorted); if (sorted) { - tatami::DelayedSubsetSorted<0, double, int, decltype(sub)> manual(dense, sub); + tatami::DelayedSubsetSorted manual(dense, sub, true); auto ref = SubsetCoreUtils::reference_on_rows(dense.get(), sub); tatami_test::test_simple_row_access(&manual, ref.get()); tatami_test::test_simple_column_access(&manual, ref.get()); } else { tatami_test::throws_error([&]() { - tatami::DelayedSubsetSorted<0, double, int, decltype(sub)> manual(dense, sub); + tatami::DelayedSubsetSorted manual(dense, sub, true); }, "sorted"); } } @@ -313,13 +313,13 @@ TEST_P(SubsetConstructorTest, Unique) { auto sub = SubsetCoreUtils::spawn_indices(true, 5, duplicate, 
sorted); if (!duplicate) { - tatami::DelayedSubsetUnique<0, double, int, decltype(sub)> manual(dense, sub); + tatami::DelayedSubsetUnique manual(dense, sub, true); auto ref = SubsetCoreUtils::reference_on_rows(dense.get(), sub); tatami_test::test_simple_row_access(&manual, ref.get()); tatami_test::test_simple_column_access(&manual, ref.get()); } else { tatami_test::throws_error([&]() { - tatami::DelayedSubsetUnique<0, double, int, decltype(sub)> manual(dense, sub); + tatami::DelayedSubsetUnique manual(dense, sub, true); }, "unique"); } } @@ -330,7 +330,7 @@ TEST_P(SubsetConstructorTest, Any) { bool sorted = std::get<1>(param); auto sub = SubsetCoreUtils::spawn_indices(true, 5, duplicate, sorted); - tatami::DelayedSubset<0, double, int, decltype(sub)> manual(dense, sub); + tatami::DelayedSubset manual(dense, sub, true); auto ref = reference_on_rows(dense.get(), sub); tatami_test::test_simple_row_access(&manual, ref.get()); tatami_test::test_simple_column_access(&manual, ref.get()); @@ -350,7 +350,7 @@ INSTANTIATE_TEST_SUITE_P( TEST(DelayedSubset, ConstOverload) { int NR = 9, NC = 7; - auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); + auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); std::vector subset{ 1, 3, 5 }; auto sub = tatami::make_DelayedSubset<0>(dense, subset); @@ -360,7 +360,7 @@ TEST(DelayedSubset, ConstOverload) { TEST(DelayedSubset, ArrayView) { int NR = 9, NC = 7; - auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); + auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); std::vector subset{ 1, 3, 5 }; tatami::ArrayView aview(subset.data(), subset.size()); diff --git a/tests/src/subset/DelayedSubsetBlock.cpp b/tests/src/subset/DelayedSubsetBlock.cpp index 5713212f..109b08c7 100644 --- 
a/tests/src/subset/DelayedSubsetBlock.cpp +++ b/tests/src/subset/DelayedSubsetBlock.cpp @@ -21,8 +21,8 @@ class SubsetBlockUtils { return; } simulated = tatami_test::simulate_sparse_vector(NR * NC, 0.2); - dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, simulated)); - sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. + dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, simulated)); + sparse = tatami::convert_to_compressed_sparse(dense.get()); // column-major. } public: @@ -61,7 +61,7 @@ class SubsetBlockUtils { if (bind_rows) { std::vector sub(simulated.data() + first * NC, simulated.data() + last * NC); - ref.reset(new tatami::DenseRowMatrix(block_length, NC, std::move(sub))); + ref.reset(new tatami::DenseRowMatrix(block_length, NC, std::move(sub))); dense_block = tatami::make_DelayedSubsetBlock<0>(dense, first, block_length); sparse_block = tatami::make_DelayedSubsetBlock<0>(sparse, first, block_length); } else { @@ -71,7 +71,7 @@ class SubsetBlockUtils { auto row = simulated.data() + r * NC; sub.insert(sub.end(), row + first, row + last); } - ref.reset(new tatami::DenseRowMatrix(NR, block_length, std::move(sub))); + ref.reset(new tatami::DenseRowMatrix(NR, block_length, std::move(sub))); dense_block = tatami::make_DelayedSubsetBlock<1>(dense, first, block_length); sparse_block = tatami::make_DelayedSubsetBlock<1>(sparse, first, block_length); } @@ -229,7 +229,7 @@ INSTANTIATE_TEST_SUITE_P( TEST(DelayedSubsetBlock, ConstOverload) { int NR = 9, NC = 7; - auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); + auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_sparse_vector(NR * NC, 0.1))); auto sub = tatami::make_DelayedSubsetBlock<1>(dense, static_cast(5), static_cast(3)); EXPECT_EQ(sub->ncol(), 3); EXPECT_EQ(sub->nrow(), NR); @@ -237,7 +237,7 @@ TEST(DelayedSubsetBlock, ConstOverload) { TEST(DelayedSubsetBlock, 
CorrectMaker) { int NR = 90, NC = 50; - auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_dense_vector(NR * NC))); + auto dense = std::shared_ptr(new tatami::DenseRowMatrix(NR, NC, tatami_test::simulate_dense_vector(NR * NC))); // Checking that the make function dispatches correctly to the block subset class. { diff --git a/tests/src/utils/wrap_shared_ptr.cpp b/tests/src/utils/wrap_shared_ptr.cpp index 14700236..cb92ce82 100644 --- a/tests/src/utils/wrap_shared_ptr.cpp +++ b/tests/src/utils/wrap_shared_ptr.cpp @@ -8,7 +8,7 @@ TEST(WrapSharedPtrTest, Simple) { std::vector contents(200); double counter = -105; for (auto& i : contents) { i = counter++; } - tatami::DenseColumnMatrix mat(10, 20, contents); + tatami::DenseColumnMatrix mat(10, 20, contents); EXPECT_EQ(mat.nrow(), 10); EXPECT_EQ(mat.ncol(), 20);