diff --git a/stan/math/fwd/arr/fun/log_sum_exp.hpp b/stan/math/fwd/arr/fun/log_sum_exp.hpp
index 69982fb27f1..fa6e3a11fa0 100644
--- a/stan/math/fwd/arr/fun/log_sum_exp.hpp
+++ b/stan/math/fwd/arr/fun/log_sum_exp.hpp
@@ -12,8 +12,9 @@ template <typename T>
 fvar<T> log_sum_exp(const std::vector<fvar<T> >& v) {
   using std::exp;
   std::vector<T> vals(v.size());
-  for (size_t i = 0; i < v.size(); ++i)
+  for (size_t i = 0; i < v.size(); ++i) {
     vals[i] = v[i].val_;
+  }
   T deriv(0.0);
   T denominator(0.0);
   for (size_t i = 0; i < v.size(); ++i) {
diff --git a/stan/math/fwd/arr/fun/sum.hpp b/stan/math/fwd/arr/fun/sum.hpp
index 4514a37ba82..eedbf61f772 100644
--- a/stan/math/fwd/arr/fun/sum.hpp
+++ b/stan/math/fwd/arr/fun/sum.hpp
@@ -18,8 +18,9 @@ namespace math {
  */
 template <typename T>
 inline fvar<T> sum(const std::vector<fvar<T> >& m) {
-  if (m.size() == 0)
+  if (m.size() == 0) {
     return 0.0;
+  }
   std::vector<T> vals(m.size());
   std::vector<T> tans(m.size());
   for (size_t i = 0; i < m.size(); ++i) {
diff --git a/stan/math/fwd/arr/fun/to_fvar.hpp b/stan/math/fwd/arr/fun/to_fvar.hpp
index 75d6179725c..fcfa6d88bcc 100644
--- a/stan/math/fwd/arr/fun/to_fvar.hpp
+++ b/stan/math/fwd/arr/fun/to_fvar.hpp
@@ -11,8 +11,9 @@ namespace math {
 template <typename T>
 inline std::vector<fvar<T>> to_fvar(const std::vector<T>& v) {
   std::vector<fvar<T>> x(v.size());
-  for (size_t i = 0; i < v.size(); ++i)
+  for (size_t i = 0; i < v.size(); ++i) {
     x[i] = T(v[i]);
+  }
   return x;
 }
 
@@ -20,8 +21,9 @@ template <typename T>
 inline std::vector<fvar<T>> to_fvar(const std::vector<T>& v,
                                     const std::vector<T>& d) {
   std::vector<fvar<T>> x(v.size());
-  for (size_t i = 0; i < v.size(); ++i)
+  for (size_t i = 0; i < v.size(); ++i) {
     x[i] = fvar<T>(v[i], d[i]);
+  }
   return x;
 }
diff --git a/stan/math/fwd/core/fvar.hpp b/stan/math/fwd/core/fvar.hpp
index 78ea4607d4e..9e52d047427 100644
--- a/stan/math/fwd/core/fvar.hpp
+++ b/stan/math/fwd/core/fvar.hpp
@@ -85,8 +85,9 @@ struct fvar {
    * @param[in] v value
    */
   fvar(const T& v) : val_(v), d_(0.0) {  // NOLINT(runtime/explicit)
-    if (unlikely(is_nan(v)))
+    if (unlikely(is_nan(v))) {
       d_ = v;
+    }
   }
 
   /**
@@ -101,10 +102,12 @@ struct fvar {
    */
   template <typename V>
   fvar(const V& v,
-       typename std::enable_if<ad_promotable<V, T>::value>::type* dummy = 0)
+       typename std::enable_if<ad_promotable<V, T>::value>::type* dummy
+       = nullptr)
       : val_(v), d_(0.0) {
-    if (unlikely(is_nan(v)))
+    if (unlikely(is_nan(v))) {
       d_ = v;
+    }
   }
 
   /**
@@ -120,8 +123,9 @@ struct fvar {
    */
   template <typename V, typename D>
   fvar(const V& v, const D& d) : val_(v), d_(d) {
-    if (unlikely(is_nan(v)))
+    if (unlikely(is_nan(v))) {
       d_ = v;
+    }
   }
 
   /**
diff --git a/stan/math/fwd/mat/fun/Eigen_NumTraits.hpp b/stan/math/fwd/mat/fun/Eigen_NumTraits.hpp
index 51cfe4f9ada..df7c2546ca8 100644
--- a/stan/math/fwd/mat/fun/Eigen_NumTraits.hpp
+++ b/stan/math/fwd/mat/fun/Eigen_NumTraits.hpp
@@ -58,7 +58,7 @@ struct NumTraits<stan::math::fvar<T>> : GenericNumTraits<stan::math::fvar<T>> {
  */
 template <typename T, typename BinaryOp>
 struct ScalarBinaryOpTraits<stan::math::fvar<T>, double, BinaryOp> {
-  typedef stan::math::fvar<T> ReturnType;
+  using ReturnType = stan::math::fvar<T>;
 };
 
 /**
@@ -67,7 +67,7 @@ struct ScalarBinaryOpTraits<stan::math::fvar<T>, double, BinaryOp> {
  */
 template <typename T, typename BinaryOp>
 struct ScalarBinaryOpTraits<double, stan::math::fvar<T>, BinaryOp> {
-  typedef stan::math::fvar<T> ReturnType;
+  using ReturnType = stan::math::fvar<T>;
 };
 
 }  // namespace Eigen
diff --git a/stan/math/fwd/mat/fun/crossprod.hpp b/stan/math/fwd/mat/fun/crossprod.hpp
index 39e20f65ebf..5d62efb7eaf 100644
--- a/stan/math/fwd/mat/fun/crossprod.hpp
+++ b/stan/math/fwd/mat/fun/crossprod.hpp
@@ -11,8 +11,9 @@ namespace math {
 template <typename T, int R, int C>
 inline Eigen::Matrix<fvar<T>, C, C> crossprod(
    const Eigen::Matrix<fvar<T>, R, C>& m) {
-  if (m.rows() == 0)
+  if (m.rows() == 0) {
    return Eigen::Matrix<fvar<T>, C, C>(0, 0);
+  }
   return multiply(transpose(m), m);
 }
 
diff
--git a/stan/math/fwd/mat/fun/divide.hpp b/stan/math/fwd/mat/fun/divide.hpp index 721941697ff..c53d820535a 100644 --- a/stan/math/fwd/mat/fun/divide.hpp +++ b/stan/math/fwd/mat/fun/divide.hpp @@ -12,8 +12,9 @@ inline Eigen::Matrix, R, C> divide( const Eigen::Matrix, R, C>& v, const fvar& c) { Eigen::Matrix, R, C> res(v.rows(), v.cols()); for (int i = 0; i < v.rows(); i++) { - for (int j = 0; j < v.cols(); j++) + for (int j = 0; j < v.cols(); j++) { res(i, j) = v(i, j) / c; + } } return res; } @@ -23,8 +24,9 @@ inline Eigen::Matrix, R, C> divide( const Eigen::Matrix, R, C>& v, double c) { Eigen::Matrix, R, C> res(v.rows(), v.cols()); for (int i = 0; i < v.rows(); i++) { - for (int j = 0; j < v.cols(); j++) + for (int j = 0; j < v.cols(); j++) { res(i, j) = v(i, j) / c; + } } return res; } @@ -34,8 +36,9 @@ inline Eigen::Matrix, R, C> divide(const Eigen::Matrix& v, const fvar& c) { Eigen::Matrix, R, C> res(v.rows(), v.cols()); for (int i = 0; i < v.rows(); i++) { - for (int j = 0; j < v.cols(); j++) + for (int j = 0; j < v.cols(); j++) { res(i, j) = v(i, j) / c; + } } return res; } diff --git a/stan/math/fwd/mat/fun/dot_product.hpp b/stan/math/fwd/mat/fun/dot_product.hpp index 44777f259c2..153fc18d1f2 100644 --- a/stan/math/fwd/mat/fun/dot_product.hpp +++ b/stan/math/fwd/mat/fun/dot_product.hpp @@ -18,8 +18,9 @@ inline fvar dot_product(const Eigen::Matrix, R1, C1>& v1, check_matching_sizes("dot_product", "v1", v1, "v2", v2); fvar ret(0, 0); - for (size_type i = 0; i < v1.size(); i++) + for (size_type i = 0; i < v1.size(); i++) { ret += v1(i) * v2(i); + } return ret; } @@ -31,8 +32,9 @@ inline fvar dot_product(const Eigen::Matrix, R1, C1>& v1, check_matching_sizes("dot_product", "v1", v1, "v2", v2); fvar ret(0, 0); - for (size_type i = 0; i < v1.size(); i++) + for (size_type i = 0; i < v1.size(); i++) { ret += v1(i) * v2(i); + } return ret; } @@ -44,8 +46,9 @@ inline fvar dot_product(const Eigen::Matrix& v1, check_matching_sizes("dot_product", "v1", v1, "v2", v2); fvar ret(0, 0); - for (size_type i = 0; i < v1.size(); i++) + for (size_type i = 0; i < v1.size(); i++) { ret += v1(i) * v2(i); + } return ret; } @@ -57,8 +60,9 @@ inline fvar dot_product(const Eigen::Matrix, R1, C1>& v1, check_vector("dot_product", "v2", v2); fvar ret(0, 0); - for (size_type i = 0; i < length; i++) + for (size_type i = 0; i < length; i++) { ret += v1(i) * v2(i); + } return ret; } @@ -70,8 +74,9 @@ inline fvar dot_product(const Eigen::Matrix, R1, C1>& v1, check_vector("dot_product", "v2", v2); fvar ret(0, 0); - for (size_type i = 0; i < length; i++) + for (size_type i = 0; i < length; i++) { ret += v1(i) * v2(i); + } return ret; } @@ -83,8 +88,9 @@ inline fvar dot_product(const Eigen::Matrix& v1, check_vector("dot_product", "v2", v2); fvar ret(0, 0); - for (size_type i = 0; i < length; i++) + for (size_type i = 0; i < length; i++) { ret += v1(i) * v2(i); + } return ret; } @@ -93,8 +99,9 @@ inline fvar dot_product(const std::vector >& v1, const std::vector >& v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); fvar ret(0, 0); - for (size_t i = 0; i < v1.size(); i++) + for (size_t i = 0; i < v1.size(); i++) { ret += v1.at(i) * v2.at(i); + } return ret; } @@ -103,8 +110,9 @@ inline fvar dot_product(const std::vector& v1, const std::vector >& v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); fvar ret(0, 0); - for (size_t i = 0; i < v1.size(); i++) + for (size_t i = 0; i < v1.size(); i++) { ret += v1.at(i) * v2.at(i); + } return ret; } @@ -113,8 +121,9 @@ inline fvar dot_product(const std::vector 
>& v1, const std::vector& v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); fvar ret(0, 0); - for (size_t i = 0; i < v1.size(); i++) + for (size_t i = 0; i < v1.size(); i++) { ret += v1.at(i) * v2.at(i); + } return ret; } @@ -122,8 +131,9 @@ template inline fvar dot_product(const std::vector >& v1, const std::vector >& v2, size_type& length) { fvar ret(0, 0); - for (size_type i = 0; i < length; i++) + for (size_type i = 0; i < length; i++) { ret += v1.at(i) * v2.at(i); + } return ret; } @@ -131,8 +141,9 @@ template inline fvar dot_product(const std::vector& v1, const std::vector >& v2, size_type& length) { fvar ret(0, 0); - for (size_type i = 0; i < length; i++) + for (size_type i = 0; i < length; i++) { ret += v1.at(i) * v2.at(i); + } return ret; } @@ -140,8 +151,9 @@ template inline fvar dot_product(const std::vector >& v1, const std::vector& v2, size_type& length) { fvar ret(0, 0); - for (size_type i = 0; i < length; i++) + for (size_type i = 0; i < length; i++) { ret += v1.at(i) * v2.at(i); + } return ret; } diff --git a/stan/math/fwd/mat/fun/log_softmax.hpp b/stan/math/fwd/mat/fun/log_softmax.hpp index 398b676899e..711fa220b4e 100644 --- a/stan/math/fwd/mat/fun/log_softmax.hpp +++ b/stan/math/fwd/mat/fun/log_softmax.hpp @@ -17,8 +17,9 @@ inline Eigen::Matrix, Eigen::Dynamic, 1> log_softmax( using Eigen::Matrix; Matrix alpha_t(alpha.size()); - for (int k = 0; k < alpha.size(); ++k) + for (int k = 0; k < alpha.size(); ++k) { alpha_t(k) = alpha(k).val_; + } Matrix softmax_alpha_t = softmax(alpha_t); Matrix log_softmax_alpha_t = log_softmax(alpha_t); @@ -33,11 +34,12 @@ inline Eigen::Matrix, Eigen::Dynamic, 1> log_softmax( T negative_alpha_m_d_times_softmax_alpha_t_m = -alpha(m).d_ * softmax_alpha_t(m); for (int k = 0; k < alpha.size(); ++k) { - if (m == k) + if (m == k) { log_softmax_alpha(k).d_ += alpha(m).d_ + negative_alpha_m_d_times_softmax_alpha_t_m; - else + } else { log_softmax_alpha(k).d_ += negative_alpha_m_d_times_softmax_alpha_t_m; + } } } diff --git a/stan/math/fwd/mat/fun/mdivide_left_ldlt.hpp b/stan/math/fwd/mat/fun/mdivide_left_ldlt.hpp index b6c67f7097e..7b5b6cb1a83 100644 --- a/stan/math/fwd/mat/fun/mdivide_left_ldlt.hpp +++ b/stan/math/fwd/mat/fun/mdivide_left_ldlt.hpp @@ -26,11 +26,12 @@ inline Eigen::Matrix, R1, C2> mdivide_left_ldlt( Eigen::Matrix b_val(b.rows(), b.cols()); Eigen::Matrix b_der(b.rows(), b.cols()); - for (int i = 0; i < b.rows(); i++) + for (int i = 0; i < b.rows(); i++) { for (int j = 0; j < b.cols(); j++) { b_val(i, j) = b(i, j).val_; b_der(i, j) = b(i, j).d_; } + } return to_fvar(mdivide_left_ldlt(A, b_val), mdivide_left_ldlt(A, b_der)); } diff --git a/stan/math/fwd/mat/fun/multiply.hpp b/stan/math/fwd/mat/fun/multiply.hpp index b82ba59384b..7e3243fc183 100644 --- a/stan/math/fwd/mat/fun/multiply.hpp +++ b/stan/math/fwd/mat/fun/multiply.hpp @@ -15,8 +15,9 @@ inline Eigen::Matrix, R1, C1> multiply( const Eigen::Matrix, R1, C1>& m, const fvar& c) { Eigen::Matrix, R1, C1> res(m.rows(), m.cols()); for (int i = 0; i < m.rows(); i++) { - for (int j = 0; j < m.cols(); j++) + for (int j = 0; j < m.cols(); j++) { res(i, j) = c * m(i, j); + } } return res; } @@ -26,8 +27,9 @@ inline Eigen::Matrix, R2, C2> multiply( const Eigen::Matrix, R2, C2>& m, double c) { Eigen::Matrix, R2, C2> res(m.rows(), m.cols()); for (int i = 0; i < m.rows(); i++) { - for (int j = 0; j < m.cols(); j++) + for (int j = 0; j < m.cols(); j++) { res(i, j) = c * m(i, j); + } } return res; } @@ -37,8 +39,9 @@ inline Eigen::Matrix, R1, C1> multiply( const Eigen::Matrix& m, 
const fvar& c) { Eigen::Matrix, R1, C1> res(m.rows(), m.cols()); for (int i = 0; i < m.rows(); i++) { - for (int j = 0; j < m.cols(); j++) + for (int j = 0; j < m.cols(); j++) { res(i, j) = c * m(i, j); + } } return res; } diff --git a/stan/math/fwd/mat/fun/multiply_lower_tri_self_transpose.hpp b/stan/math/fwd/mat/fun/multiply_lower_tri_self_transpose.hpp index 6388a4831f4..8b2a00e64ec 100644 --- a/stan/math/fwd/mat/fun/multiply_lower_tri_self_transpose.hpp +++ b/stan/math/fwd/mat/fun/multiply_lower_tri_self_transpose.hpp @@ -12,14 +12,16 @@ namespace math { template inline Eigen::Matrix, R, R> multiply_lower_tri_self_transpose( const Eigen::Matrix, R, C>& m) { - if (m.rows() == 0) + if (m.rows() == 0) { return Eigen::Matrix, R, R>(0, 0); + } Eigen::Matrix, R, C> L(m.rows(), m.cols()); L.setZero(); for (size_type i = 0; i < m.rows(); i++) { - for (size_type j = 0; (j < i + 1) && (j < m.cols()); j++) + for (size_type j = 0; (j < i + 1) && (j < m.cols()); j++) { L(i, j) = m(i, j); + } } return multiply(L, transpose(L)); } diff --git a/stan/math/fwd/mat/fun/qr_Q.hpp b/stan/math/fwd/mat/fun/qr_Q.hpp index 25f8c896ca6..dd18a44a3cb 100644 --- a/stan/math/fwd/mat/fun/qr_Q.hpp +++ b/stan/math/fwd/mat/fun/qr_Q.hpp @@ -12,15 +12,17 @@ namespace math { template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> qr_Q( const Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>& m) { - typedef Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> matrix_fwd_t; + using matrix_fwd_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; check_nonzero_size("qr_Q", "m", m); check_greater_or_equal("qr_Q", "m.rows()", m.rows(), m.cols()); Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); matrix_fwd_t Q = qr.householderQ(); - for (int i = 0; i < m.cols(); i++) - if (qr.matrixQR()(i, i) < 0.0) + for (int i = 0; i < m.cols(); i++) { + if (qr.matrixQR()(i, i) < 0.0) { Q.col(i) *= -1.0; + } + } return Q; } diff --git a/stan/math/fwd/mat/fun/qr_R.hpp b/stan/math/fwd/mat/fun/qr_R.hpp index 873891aef20..3fef24e31ab 100644 --- a/stan/math/fwd/mat/fun/qr_R.hpp +++ b/stan/math/fwd/mat/fun/qr_R.hpp @@ -12,17 +12,19 @@ namespace math { template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> qr_R( const Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>& m) { - typedef Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> matrix_fwd_t; + using matrix_fwd_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; check_nonzero_size("qr_R", "m", m); check_greater_or_equal("qr_R", "m.rows()", m.rows(), m.cols()); Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); matrix_fwd_t R = qr.matrixQR().topLeftCorner(m.rows(), m.cols()); for (int i = 0; i < R.rows(); i++) { - for (int j = 0; j < i; j++) + for (int j = 0; j < i; j++) { R(i, j) = 0.0; - if (i < R.cols() && R(i, i) < 0.0) + } + if (i < R.cols() && R(i, i) < 0.0) { R.row(i) *= -1.0; + } } return R; } diff --git a/stan/math/fwd/mat/fun/softmax.hpp b/stan/math/fwd/mat/fun/softmax.hpp index 5db7ab0b44f..79daa4c5fda 100644 --- a/stan/math/fwd/mat/fun/softmax.hpp +++ b/stan/math/fwd/mat/fun/softmax.hpp @@ -15,8 +15,9 @@ inline Eigen::Matrix, Eigen::Dynamic, 1> softmax( using Eigen::Matrix; Matrix alpha_t(alpha.size()); - for (int k = 0; k < alpha.size(); ++k) + for (int k = 0; k < alpha.size(); ++k) { alpha_t(k) = alpha(k).val_; + } Matrix softmax_alpha_t = softmax(alpha_t); diff --git a/stan/math/fwd/mat/fun/sum.hpp b/stan/math/fwd/mat/fun/sum.hpp index ccb21a7ee77..9025972664a 100644 --- a/stan/math/fwd/mat/fun/sum.hpp +++ b/stan/math/fwd/mat/fun/sum.hpp @@ -19,8 +19,9 @@ namespace math { */ 
template inline fvar sum(const Eigen::Matrix, R, C>& m) { - if (m.size() == 0) + if (m.size() == 0) { return 0.0; + } Eigen::Matrix vals(m.size()); Eigen::Matrix tans(m.size()); for (int i = 0; i < m.size(); ++i) { diff --git a/stan/math/fwd/mat/fun/tcrossprod.hpp b/stan/math/fwd/mat/fun/tcrossprod.hpp index 8a347f5cbbe..7e079cadc47 100644 --- a/stan/math/fwd/mat/fun/tcrossprod.hpp +++ b/stan/math/fwd/mat/fun/tcrossprod.hpp @@ -11,8 +11,9 @@ namespace math { template inline Eigen::Matrix, R, R> tcrossprod( const Eigen::Matrix, R, C>& m) { - if (m.rows() == 0) + if (m.rows() == 0) { return Eigen::Matrix, R, R>(0, 0); + } return multiply(m, transpose(m)); } diff --git a/stan/math/fwd/mat/fun/to_fvar.hpp b/stan/math/fwd/mat/fun/to_fvar.hpp index afa0de5a6f6..de25628bb75 100644 --- a/stan/math/fwd/mat/fun/to_fvar.hpp +++ b/stan/math/fwd/mat/fun/to_fvar.hpp @@ -37,8 +37,9 @@ template inline Eigen::Matrix, R, C> to_fvar( const Eigen::Matrix& m) { Eigen::Matrix, R, C> m_fd(m.rows(), m.cols()); - for (int i = 0; i < m.size(); ++i) + for (int i = 0; i < m.size(); ++i) { m_fd(i) = m(i); + } return m_fd; } diff --git a/stan/math/fwd/mat/fun/typedefs.hpp b/stan/math/fwd/mat/fun/typedefs.hpp index f3ed71864c3..21a943f67a9 100644 --- a/stan/math/fwd/mat/fun/typedefs.hpp +++ b/stan/math/fwd/mat/fun/typedefs.hpp @@ -10,18 +10,18 @@ namespace math { typedef Eigen::Matrix::Index size_type; -typedef Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> matrix_fd; +using matrix_fd = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; typedef Eigen::Matrix >, Eigen::Dynamic, Eigen::Dynamic> matrix_ffd; -typedef Eigen::Matrix, Eigen::Dynamic, 1> vector_fd; +using vector_fd = Eigen::Matrix, Eigen::Dynamic, 1>; -typedef Eigen::Matrix >, Eigen::Dynamic, 1> vector_ffd; +using vector_ffd = Eigen::Matrix >, Eigen::Dynamic, 1>; -typedef Eigen::Matrix, 1, Eigen::Dynamic> row_vector_fd; +using row_vector_fd = Eigen::Matrix, 1, Eigen::Dynamic>; -typedef Eigen::Matrix >, 1, Eigen::Dynamic> row_vector_ffd; +using row_vector_ffd = Eigen::Matrix >, 1, Eigen::Dynamic>; } // namespace math } // namespace stan diff --git a/stan/math/fwd/mat/fun/unit_vector_constrain.hpp b/stan/math/fwd/mat/fun/unit_vector_constrain.hpp index 9c5a6a53824..e8ee586b6e8 100644 --- a/stan/math/fwd/mat/fun/unit_vector_constrain.hpp +++ b/stan/math/fwd/mat/fun/unit_vector_constrain.hpp @@ -22,13 +22,15 @@ inline Eigen::Matrix, R, C> unit_vector_constrain( using std::sqrt; Matrix y_t(y.size()); - for (int k = 0; k < y.size(); ++k) + for (int k = 0; k < y.size(); ++k) { y_t.coeffRef(k) = y.coeff(k).val_; + } Matrix unit_vector_y_t = unit_vector_constrain(y_t); Matrix, R, C> unit_vector_y(y.size()); - for (int k = 0; k < y.size(); ++k) + for (int k = 0; k < y.size(); ++k) { unit_vector_y.coeffRef(k).val_ = unit_vector_y_t.coeff(k); + } T squared_norm = dot_self(y_t); T norm = sqrt(squared_norm); @@ -38,8 +40,9 @@ inline Eigen::Matrix, R, C> unit_vector_constrain( for (int m = 0; m < y.size(); ++m) { J.coeffRef(m, m) += inv_norm; - for (int k = 0; k < y.size(); ++k) + for (int k = 0; k < y.size(); ++k) { unit_vector_y.coeffRef(k).d_ = J.coeff(k, m); + } } return unit_vector_y; } diff --git a/stan/math/fwd/mat/functor/gradient.hpp b/stan/math/fwd/mat/functor/gradient.hpp index c1f84155d57..608254c2436 100644 --- a/stan/math/fwd/mat/functor/gradient.hpp +++ b/stan/math/fwd/mat/functor/gradient.hpp @@ -41,11 +41,13 @@ void gradient(const F& f, const Eigen::Matrix& x, T& fx, Eigen::Matrix, Eigen::Dynamic, 1> x_fvar(x.size()); grad_fx.resize(x.size()); for (int i = 0; i 
< x.size(); ++i) { - for (int k = 0; k < x.size(); ++k) + for (int k = 0; k < x.size(); ++k) { x_fvar(k) = fvar(x(k), k == i); + } fvar fx_fvar = f(x_fvar); - if (i == 0) + if (i == 0) { fx = fx_fvar.val_; + } grad_fx(i) = fx_fvar.d_; } } diff --git a/stan/math/fwd/mat/functor/hessian.hpp b/stan/math/fwd/mat/functor/hessian.hpp index cea6db21c3b..c7b3a5d3907 100644 --- a/stan/math/fwd/mat/functor/hessian.hpp +++ b/stan/math/fwd/mat/functor/hessian.hpp @@ -51,13 +51,16 @@ void hessian(const F& f, const Eigen::Matrix& x, T& fx, Eigen::Matrix >, Eigen::Dynamic, 1> x_fvar(x.size()); for (int i = 0; i < x.size(); ++i) { for (int j = i; j < x.size(); ++j) { - for (int k = 0; k < x.size(); ++k) + for (int k = 0; k < x.size(); ++k) { x_fvar(k) = fvar >(fvar(x(k), j == k), fvar(i == k, 0)); + } fvar > fx_fvar = f(x_fvar); - if (j == 0) + if (j == 0) { fx = fx_fvar.val_.val_; - if (i == j) + } + if (i == j) { grad(i) = fx_fvar.d_.val_; + } H(i, j) = fx_fvar.d_.d_; H(j, i) = H(i, j); } diff --git a/stan/math/fwd/mat/meta/operands_and_partials.hpp b/stan/math/fwd/mat/meta/operands_and_partials.hpp index 4161f97463f..6e6e0e7a2cb 100644 --- a/stan/math/fwd/mat/meta/operands_and_partials.hpp +++ b/stan/math/fwd/mat/meta/operands_and_partials.hpp @@ -12,10 +12,10 @@ namespace math { namespace internal { // Vectorized Univariate template -class ops_partials_edge > > { +class ops_partials_edge>> { public: - typedef std::vector > Op; - typedef Eigen::Matrix partials_t; + using Op = std::vector>; + using partials_t = Eigen::Matrix; partials_t partials_; // For univariate use-cases broadcast_array partials_vec_; // For multivariate explicit ops_partials_edge(const Op& ops) @@ -38,10 +38,10 @@ class ops_partials_edge > > { }; template -class ops_partials_edge, R, C> > { +class ops_partials_edge, R, C>> { public: - typedef Eigen::Matrix partials_t; - typedef Eigen::Matrix, R, C> Op; + using partials_t = Eigen::Matrix; + using Op = Eigen::Matrix, R, C>; partials_t partials_; // For univariate use-cases broadcast_array partials_vec_; // For multivariate explicit ops_partials_edge(const Op& ops) @@ -65,10 +65,10 @@ class ops_partials_edge, R, C> > { // Multivariate; vectors of eigen types template -class ops_partials_edge, R, C> > > { +class ops_partials_edge, R, C>>> { public: - typedef std::vector, R, C> > Op; - typedef Eigen::Matrix partial_t; + using Op = std::vector, R, C>>; + using partial_t = Eigen::Matrix; std::vector partials_vec_; explicit ops_partials_edge(const Op& ops) : partials_vec_(ops.size()), operands_(ops) { @@ -94,10 +94,10 @@ class ops_partials_edge, R, C> > > { }; template -class ops_partials_edge > > > { +class ops_partials_edge>>> { public: - typedef std::vector > > Op; - typedef std::vector partial_t; + using Op = std::vector>>; + using partial_t = std::vector; std::vector partials_vec_; explicit ops_partials_edge(const Op& ops) : partials_vec_(length(ops)), operands_(ops) { diff --git a/stan/math/fwd/mat/vectorize/apply_scalar_unary.hpp b/stan/math/fwd/mat/vectorize/apply_scalar_unary.hpp index 3d96fe52758..6e39db6c73e 100644 --- a/stan/math/fwd/mat/vectorize/apply_scalar_unary.hpp +++ b/stan/math/fwd/mat/vectorize/apply_scalar_unary.hpp @@ -23,7 +23,7 @@ struct apply_scalar_unary > { * Function return type, which is same as the argument type for * the function, fvar<T>. */ - typedef fvar return_t; + using return_t = fvar; /** * Apply the function specified by F to the specified argument. 
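
The forward-mode hunks above (log_sum_exp, sum, the gradient/Hessian functors, operands_and_partials) only add braces around single-statement bodies, so behavior is unchanged; the pattern they preserve is the val_/d_ dual-number propagation of fvar. As a reading aid, here is a minimal stand-alone sketch of the log_sum_exp case using a hand-rolled dual struct rather than the real stan::math::fvar; the names dual, log_sum_exp, and main below are illustrative only, not part of the patch or the Stan API.

#include <cmath>
#include <cstdio>
#include <vector>

struct dual {    // analogue of fvar<double>: value plus directional derivative
  double val_;
  double d_;
};

dual log_sum_exp(const std::vector<dual>& v) {
  double max = v[0].val_;
  for (const dual& x : v) {
    if (x.val_ > max) {
      max = x.val_;  // subtract the max for numerical stability
    }
  }
  double denominator = 0.0;
  double deriv = 0.0;
  for (const dual& x : v) {
    double e = std::exp(x.val_ - max);
    denominator += e;
    deriv += x.d_ * e;  // tangent weighted by exp(x_i - max)
  }
  return {max + std::log(denominator), deriv / denominator};
}

int main() {
  // Seed the tangent of the first coordinate: the result's d_ is then
  // dLSE/dx_0 = exp(x_0) / sum_j exp(x_j), i.e. softmax(x)_0.
  std::vector<dual> x = {{1.0, 1.0}, {2.0, 0.0}, {3.0, 0.0}};
  dual y = log_sum_exp(x);
  std::printf("value = %f, dLSE/dx0 = %f\n", y.val_, y.d_);
  return 0;
}

Seeding d_ = 1 on one input recovers that coordinate of the softmax, which is exactly what the deriv / denominator expression in the log_sum_exp hunk at the top of this diff computes.
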
diff --git a/stan/math/fwd/scal/fun/abs.hpp b/stan/math/fwd/scal/fun/abs.hpp index b8ad3bd5690..72dadf9553b 100644 --- a/stan/math/fwd/scal/fun/abs.hpp +++ b/stan/math/fwd/scal/fun/abs.hpp @@ -12,14 +12,15 @@ namespace math { template inline fvar abs(const fvar& x) { - if (x.val_ > 0.0) + if (x.val_ > 0.0) { return x; - else if (x.val_ < 0.0) + } else if (x.val_ < 0.0) { return fvar(-x.val_, -x.d_); - else if (x.val_ == 0.0) + } else if (x.val_ == 0.0) { return fvar(0, 0); - else + } else { return fvar(abs(x.val_), NOT_A_NUMBER); + } } } // namespace math diff --git a/stan/math/fwd/scal/fun/binary_log_loss.hpp b/stan/math/fwd/scal/fun/binary_log_loss.hpp index ef6015e24ff..9c1924602b3 100644 --- a/stan/math/fwd/scal/fun/binary_log_loss.hpp +++ b/stan/math/fwd/scal/fun/binary_log_loss.hpp @@ -10,11 +10,12 @@ namespace math { template inline fvar binary_log_loss(int y, const fvar& y_hat) { - if (y) + if (y) { return fvar(binary_log_loss(y, y_hat.val_), -y_hat.d_ / y_hat.val_); - else + } else { return fvar(binary_log_loss(y, y_hat.val_), y_hat.d_ / (1.0 - y_hat.val_)); + } } } // namespace math } // namespace stan diff --git a/stan/math/fwd/scal/fun/fabs.hpp b/stan/math/fwd/scal/fun/fabs.hpp index e5b2a63030a..09347509872 100644 --- a/stan/math/fwd/scal/fun/fabs.hpp +++ b/stan/math/fwd/scal/fun/fabs.hpp @@ -15,14 +15,15 @@ template inline fvar fabs(const fvar& x) { using std::fabs; - if (unlikely(is_nan(value_of(x.val_)))) + if (unlikely(is_nan(value_of(x.val_)))) { return fvar(fabs(x.val_), NOT_A_NUMBER); - else if (x.val_ > 0.0) + } else if (x.val_ > 0.0) { return x; - else if (x.val_ < 0.0) + } else if (x.val_ < 0.0) { return fvar(-x.val_, -x.d_); - else + } else { return fvar(0, 0); + } } } // namespace math diff --git a/stan/math/fwd/scal/fun/fdim.hpp b/stan/math/fwd/scal/fun/fdim.hpp index ab59f91aa7c..6a26aef9c9e 100644 --- a/stan/math/fwd/scal/fun/fdim.hpp +++ b/stan/math/fwd/scal/fun/fdim.hpp @@ -19,10 +19,11 @@ namespace math { */ template inline fvar fdim(const fvar& x, const fvar& y) { - if (x.val_ < y.val_) + if (x.val_ < y.val_) { return fvar(fdim(x.val_, y.val_), 0); - else + } else { return fvar(fdim(x.val_, y.val_), x.d_ - y.d_); + } } /** @@ -36,10 +37,11 @@ inline fvar fdim(const fvar& x, const fvar& y) { */ template inline fvar fdim(const fvar& x, double y) { - if (x.val_ < y) + if (x.val_ < y) { return fvar(fdim(x.val_, y), 0); - else + } else { return fvar(fdim(x.val_, y), x.d_); + } } /** @@ -53,10 +55,11 @@ inline fvar fdim(const fvar& x, double y) { */ template inline fvar fdim(double x, const fvar& y) { - if (x < y.val_) + if (x < y.val_) { return fvar(fdim(x, y.val_), 0); - else + } else { return fvar(fdim(x, y.val_), -y.d_); + } } } // namespace math diff --git a/stan/math/fwd/scal/fun/fmax.hpp b/stan/math/fwd/scal/fun/fmax.hpp index ec0164ce8a4..4b7941dd8d4 100644 --- a/stan/math/fwd/scal/fun/fmax.hpp +++ b/stan/math/fwd/scal/fun/fmax.hpp @@ -21,10 +21,11 @@ namespace math { template inline fvar fmax(const fvar& x1, const fvar& x2) { if (unlikely(is_nan(x1.val_))) { - if (is_nan(x2.val_)) + if (is_nan(x2.val_)) { return fvar(fmax(x1.val_, x2.val_), NOT_A_NUMBER); - else + } else { return fvar(x2.val_, x2.d_); + } } else if (unlikely(is_nan(x2.val_))) { return fvar(x1.val_, x1.d_); } else if (x1.val_ > x2.val_) { @@ -47,10 +48,11 @@ inline fvar fmax(const fvar& x1, const fvar& x2) { template inline fvar fmax(double x1, const fvar& x2) { if (unlikely(is_nan(x1))) { - if (is_nan(x2.val_)) + if (is_nan(x2.val_)) { return fvar(fmax(x1, x2.val_), NOT_A_NUMBER); - else 
+ } else { return fvar(x2.val_, x2.d_); + } } else if (unlikely(is_nan(x2.val_))) { return fvar(x1, 0.0); } else if (x1 > x2.val_) { @@ -73,10 +75,11 @@ inline fvar fmax(double x1, const fvar& x2) { template inline fvar fmax(const fvar& x1, double x2) { if (unlikely(is_nan(x1.val_))) { - if (is_nan(x2)) + if (is_nan(x2)) { return fvar(fmax(x1.val_, x2), NOT_A_NUMBER); - else + } else { return fvar(x2, 0.0); + } } else if (unlikely(is_nan(x2))) { return fvar(x1.val_, x1.d_); } else if (x1.val_ > x2) { diff --git a/stan/math/fwd/scal/fun/fmin.hpp b/stan/math/fwd/scal/fun/fmin.hpp index ec115a1980e..4f2efd90cda 100644 --- a/stan/math/fwd/scal/fun/fmin.hpp +++ b/stan/math/fwd/scal/fun/fmin.hpp @@ -13,10 +13,11 @@ namespace math { template inline fvar fmin(const fvar& x1, const fvar& x2) { if (unlikely(is_nan(x1.val_))) { - if (is_nan(x2.val_)) + if (is_nan(x2.val_)) { return fvar(fmin(x1.val_, x2.val_), NOT_A_NUMBER); - else + } else { return fvar(x2.val_, x2.d_); + } } else if (unlikely(is_nan(x2.val_))) { return fvar(x1.val_, x1.d_); } else if (x1.val_ < x2.val_) { @@ -31,10 +32,11 @@ inline fvar fmin(const fvar& x1, const fvar& x2) { template inline fvar fmin(double x1, const fvar& x2) { if (unlikely(is_nan(x1))) { - if (is_nan(x2.val_)) + if (is_nan(x2.val_)) { return fvar(fmin(x1, x2.val_), NOT_A_NUMBER); - else + } else { return fvar(x2.val_, x2.d_); + } } else if (unlikely(is_nan(x2.val_))) { return fvar(x1, 0.0); } else if (x1 < x2.val_) { @@ -49,10 +51,11 @@ inline fvar fmin(double x1, const fvar& x2) { template inline fvar fmin(const fvar& x1, double x2) { if (unlikely(is_nan(x1.val_))) { - if (is_nan(x2)) + if (is_nan(x2)) { return fvar(fmin(x1.val_, x2), NOT_A_NUMBER); - else + } else { return fvar(x2, 0.0); + } } else if (unlikely(is_nan(x2))) { return fvar(x1.val_, x1.d_); } else if (x1.val_ < x2) { diff --git a/stan/math/fwd/scal/fun/fmod.hpp b/stan/math/fwd/scal/fun/fmod.hpp index 7aab65e9829..5ffb731b717 100644 --- a/stan/math/fwd/scal/fun/fmod.hpp +++ b/stan/math/fwd/scal/fun/fmod.hpp @@ -21,10 +21,11 @@ inline fvar fmod(const fvar& x1, const fvar& x2) { template inline fvar fmod(const fvar& x1, double x2) { using std::fmod; - if (unlikely(is_any_nan(value_of(x1.val_), x2))) + if (unlikely(is_any_nan(value_of(x1.val_), x2))) { return fvar(fmod(x1.val_, x2), NOT_A_NUMBER); - else + } else { return fvar(fmod(x1.val_, x2), x1.d_ / x2); + } } template diff --git a/stan/math/fwd/scal/fun/gamma_p.hpp b/stan/math/fwd/scal/fun/gamma_p.hpp index 28013017bf1..2ba6a67c99d 100644 --- a/stan/math/fwd/scal/fun/gamma_p.hpp +++ b/stan/math/fwd/scal/fun/gamma_p.hpp @@ -17,10 +17,12 @@ inline fvar gamma_p(const fvar &x1, const fvar &x2) { using std::log; T u = gamma_p(x1.val_, x2.val_); - if (is_inf(x1.val_)) + if (is_inf(x1.val_)) { return fvar(u, std::numeric_limits::quiet_NaN()); - if (is_inf(x2.val_)) + } + if (is_inf(x2.val_)) { return fvar(u, std::numeric_limits::quiet_NaN()); + } T der1 = grad_reg_lower_inc_gamma(x1.val_, x2.val_, 1.0e-10); T der2 = exp(-x2.val_ + (x1.val_ - 1.0) * log(x2.val_) - lgamma(x1.val_)); @@ -31,10 +33,12 @@ inline fvar gamma_p(const fvar &x1, const fvar &x2) { template inline fvar gamma_p(const fvar &x1, double x2) { T u = gamma_p(x1.val_, x2); - if (is_inf(x1.val_)) + if (is_inf(x1.val_)) { return fvar(u, std::numeric_limits::quiet_NaN()); - if (is_inf(x2)) + } + if (is_inf(x2)) { return fvar(u, std::numeric_limits::quiet_NaN()); + } T der1 = grad_reg_lower_inc_gamma(x1.val_, x2, 1.0e-10); @@ -47,8 +51,9 @@ inline fvar gamma_p(double x1, const fvar &x2) { 
using std::log; T u = gamma_p(x1, x2.val_); - if (is_inf(x1)) + if (is_inf(x1)) { return fvar(u, std::numeric_limits::quiet_NaN()); + } T der2 = exp(-x2.val_ + (x1 - 1.0) * log(x2.val_) - lgamma(x1)); diff --git a/stan/math/fwd/scal/fun/grad_inc_beta.hpp b/stan/math/fwd/scal/fun/grad_inc_beta.hpp index 3ec57a57481..8452aab0bcb 100644 --- a/stan/math/fwd/scal/fun/grad_inc_beta.hpp +++ b/stan/math/fwd/scal/fun/grad_inc_beta.hpp @@ -41,8 +41,9 @@ void grad_inc_beta(fvar& g1, fvar& g2, fvar a, fvar b, fvar z) { fvar dF1 = 0; fvar dF2 = 0; - if (value_of(value_of(C))) + if (value_of(value_of(C))) { grad_2F1(dF1, dF2, a + b, fvar(1.0), a + 1, z); + } g1 = (c1 - 1.0 / a) * c3 + C * (dF1 + dF2); g2 = c2 * c3 + C * dF1; diff --git a/stan/math/fwd/scal/fun/lmgamma.hpp b/stan/math/fwd/scal/fun/lmgamma.hpp index 81076734cde..efa846055ad 100644 --- a/stan/math/fwd/scal/fun/lmgamma.hpp +++ b/stan/math/fwd/scal/fun/lmgamma.hpp @@ -14,8 +14,9 @@ inline fvar::type> lmgamma( int x1, const fvar& x2) { using std::log; T deriv = 0; - for (int count = 1; count < x1 + 1; count++) + for (int count = 1; count < x1 + 1; count++) { deriv += x2.d_ * digamma(x2.val_ + (1.0 - count) / 2.0); + } return fvar::type>(lmgamma(x1, x2.val_), deriv); } diff --git a/stan/math/fwd/scal/fun/log.hpp b/stan/math/fwd/scal/fun/log.hpp index a36c94d1f9d..2ee0983f8cd 100644 --- a/stan/math/fwd/scal/fun/log.hpp +++ b/stan/math/fwd/scal/fun/log.hpp @@ -11,10 +11,11 @@ namespace math { template inline fvar log(const fvar& x) { using std::log; - if (x.val_ < 0.0) + if (x.val_ < 0.0) { return fvar(NOT_A_NUMBER, NOT_A_NUMBER); - else + } else { return fvar(log(x.val_), x.d_ / x.val_); + } } } // namespace math } // namespace stan diff --git a/stan/math/fwd/scal/fun/log10.hpp b/stan/math/fwd/scal/fun/log10.hpp index 58e4bb9e702..ec8cab8e0cb 100644 --- a/stan/math/fwd/scal/fun/log10.hpp +++ b/stan/math/fwd/scal/fun/log10.hpp @@ -12,10 +12,11 @@ template inline fvar log10(const fvar& x) { using std::log; using std::log10; - if (x.val_ < 0.0) + if (x.val_ < 0.0) { return fvar(NOT_A_NUMBER, NOT_A_NUMBER); - else + } else { return fvar(log10(x.val_), x.d_ / (x.val_ * LOG_10)); + } } } // namespace math diff --git a/stan/math/fwd/scal/fun/log1m_exp.hpp b/stan/math/fwd/scal/fun/log1m_exp.hpp index 1231286dc99..6d4fb7e7b81 100644 --- a/stan/math/fwd/scal/fun/log1m_exp.hpp +++ b/stan/math/fwd/scal/fun/log1m_exp.hpp @@ -21,8 +21,9 @@ namespace math { */ template inline fvar log1m_exp(const fvar& x) { - if (x.val_ >= 0) + if (x.val_ >= 0) { return fvar(NOT_A_NUMBER); + } return fvar(log1m_exp(x.val_), x.d_ / -expm1(-x.val_)); } diff --git a/stan/math/fwd/scal/fun/log2.hpp b/stan/math/fwd/scal/fun/log2.hpp index ac1c8a82bd7..9294b13a19f 100644 --- a/stan/math/fwd/scal/fun/log2.hpp +++ b/stan/math/fwd/scal/fun/log2.hpp @@ -18,10 +18,11 @@ namespace math { */ template inline fvar log2(const fvar& x) { - if (x.val_ < 0.0) + if (x.val_ < 0.0) { return fvar(NOT_A_NUMBER, NOT_A_NUMBER); - else + } else { return fvar(log2(x.val_), x.d_ / (x.val_ * LOG_2)); + } } } // namespace math diff --git a/stan/math/fwd/scal/fun/log_diff_exp.hpp b/stan/math/fwd/scal/fun/log_diff_exp.hpp index 4a428db9ce8..9afb604126c 100644 --- a/stan/math/fwd/scal/fun/log_diff_exp.hpp +++ b/stan/math/fwd/scal/fun/log_diff_exp.hpp @@ -13,8 +13,9 @@ namespace math { template inline fvar log_diff_exp(const fvar& x1, const fvar& x2) { if (x1.val_ <= x2.val_) { - if (x1.val_ < INFTY && x1.val_ == x2.val_) + if (x1.val_ < INFTY && x1.val_ == x2.val_) { return fvar(NEGATIVE_INFTY, NOT_A_NUMBER); 
+ } return fvar(NOT_A_NUMBER, NOT_A_NUMBER); } return fvar( @@ -25,8 +26,9 @@ inline fvar log_diff_exp(const fvar& x1, const fvar& x2) { template inline fvar log_diff_exp(const T1& x1, const fvar& x2) { if (x1 <= x2.val_) { - if (x1 < INFTY && x1 == x2.val_) + if (x1 < INFTY && x1 == x2.val_) { return fvar(NEGATIVE_INFTY, x2.d_ * NEGATIVE_INFTY); + } return fvar(NOT_A_NUMBER, NOT_A_NUMBER); } return fvar(log_diff_exp(x1, x2.val_), -x2.d_ / expm1(x1 - x2.val_)); @@ -36,8 +38,9 @@ template inline fvar log_diff_exp(const fvar& x1, const T2& x2) { if (x1.val_ <= x2) { if (x1.val_ < INFTY && x1.val_ == x2) { - if (x2 == NEGATIVE_INFTY) + if (x2 == NEGATIVE_INFTY) { return fvar(NEGATIVE_INFTY, x1.d_); + } return fvar(NEGATIVE_INFTY, x1.d_ * INFTY); } return fvar(NOT_A_NUMBER, NOT_A_NUMBER); diff --git a/stan/math/fwd/scal/fun/log_sum_exp.hpp b/stan/math/fwd/scal/fun/log_sum_exp.hpp index f1e0ab40d73..e9dea3b7789 100644 --- a/stan/math/fwd/scal/fun/log_sum_exp.hpp +++ b/stan/math/fwd/scal/fun/log_sum_exp.hpp @@ -20,16 +20,18 @@ inline fvar log_sum_exp(const fvar& x1, const fvar& x2) { template inline fvar log_sum_exp(double x1, const fvar& x2) { using std::exp; - if (x1 == NEGATIVE_INFTY) + if (x1 == NEGATIVE_INFTY) { return fvar(x2.val_, x2.d_); + } return fvar(log_sum_exp(x1, x2.val_), x2.d_ / (exp(x1 - x2.val_) + 1)); } template inline fvar log_sum_exp(const fvar& x1, double x2) { using std::exp; - if (x2 == NEGATIVE_INFTY) + if (x2 == NEGATIVE_INFTY) { return fvar(x1.val_, x1.d_); + } return fvar(log_sum_exp(x1.val_, x2), x1.d_ / (1 + exp(x2 - x1.val_))); } diff --git a/stan/math/fwd/scal/fun/logit.hpp b/stan/math/fwd/scal/fun/logit.hpp index 6a1553daf87..08a1fb8ed5f 100644 --- a/stan/math/fwd/scal/fun/logit.hpp +++ b/stan/math/fwd/scal/fun/logit.hpp @@ -12,10 +12,11 @@ namespace math { template inline fvar logit(const fvar& x) { - if (x.val_ > 1 || x.val_ < 0) + if (x.val_ > 1 || x.val_ < 0) { return fvar(NOT_A_NUMBER, NOT_A_NUMBER); - else + } else { return fvar(logit(x.val_), x.d_ / (x.val_ - square(x.val_))); + } } } // namespace math } // namespace stan diff --git a/stan/math/fwd/scal/fun/pow.hpp b/stan/math/fwd/scal/fun/pow.hpp index 8290a3a3c06..b413f8f404a 100644 --- a/stan/math/fwd/scal/fun/pow.hpp +++ b/stan/math/fwd/scal/fun/pow.hpp @@ -34,18 +34,24 @@ inline fvar pow(const fvar& x1, double x2) { using std::pow; using std::sqrt; - if (x2 == -2) + if (x2 == -2) { return inv_square(x1); - if (x2 == -1) + } + if (x2 == -1) { return inv(x1); - if (x2 == -0.5) + } + if (x2 == -0.5) { return inv_sqrt(x1); - if (x2 == 0.5) + } + if (x2 == 0.5) { return sqrt(x1); - if (x2 == 1.0) + } + if (x2 == 1.0) { return x1; - if (x2 == 2.0) + } + if (x2 == 2.0) { return square(x1); + } return fvar(pow(x1.val_, x2), x1.d_ * x2 * pow(x1.val_, x2 - 1)); } diff --git a/stan/math/fwd/scal/meta/operands_and_partials.hpp b/stan/math/fwd/scal/meta/operands_and_partials.hpp index 807a7858330..ffb9a51ad95 100644 --- a/stan/math/fwd/scal/meta/operands_and_partials.hpp +++ b/stan/math/fwd/scal/meta/operands_and_partials.hpp @@ -11,7 +11,7 @@ namespace internal { template class ops_partials_edge > { public: - typedef fvar Op; + using Op = fvar; Dx partial_; broadcast_array partials_; explicit ops_partials_edge(const Op& op) @@ -71,7 +71,7 @@ class operands_and_partials > { internal::ops_partials_edge edge3_; internal::ops_partials_edge edge4_; internal::ops_partials_edge edge5_; - typedef fvar T_return_type; + using T_return_type = fvar; explicit operands_and_partials(const Op1& o1) : edge1_(o1) {} 
operands_and_partials(const Op1& o1, const Op2& o2) : edge1_(o1), edge2_(o2) {} diff --git a/stan/math/fwd/scal/meta/partials_type.hpp b/stan/math/fwd/scal/meta/partials_type.hpp index fa92c4d4e07..50e7d112ee4 100644 --- a/stan/math/fwd/scal/meta/partials_type.hpp +++ b/stan/math/fwd/scal/meta/partials_type.hpp @@ -8,7 +8,7 @@ namespace stan { template struct partials_type > { - typedef T type; + using type = T; }; } // namespace stan diff --git a/stan/math/memory/stack_alloc.hpp b/stan/math/memory/stack_alloc.hpp index d832ff3915a..97b1f721145 100644 --- a/stan/math/memory/stack_alloc.hpp +++ b/stan/math/memory/stack_alloc.hpp @@ -37,8 +37,9 @@ const size_t DEFAULT_INITIAL_NBYTES = 1 << 16; // 64KB // big fun to inline, but only called twice inline char* eight_byte_aligned_malloc(size_t size) { char* ptr = static_cast(malloc(size)); - if (!ptr) + if (!ptr) { return ptr; // malloc failed to alloc + } if (!is_aligned(ptr, 8U)) { std::stringstream s; s << "invalid alignment to 8 bytes, ptr=" @@ -94,17 +95,20 @@ class stack_alloc { char* result; ++cur_block_; // Find the next block (if any) containing at least len bytes. - while ((cur_block_ < blocks_.size()) && (sizes_[cur_block_] < len)) + while ((cur_block_ < blocks_.size()) && (sizes_[cur_block_] < len)) { ++cur_block_; + } // Allocate a new block if necessary. if (unlikely(cur_block_ >= blocks_.size())) { // New block should be max(2*size of last block, len) bytes. size_t newsize = sizes_.back() * 2; - if (newsize < len) + if (newsize < len) { newsize = len; + } blocks_.push_back(internal::eight_byte_aligned_malloc(newsize)); - if (!blocks_.back()) + if (!blocks_.back()) { throw std::bad_alloc(); + } sizes_.push_back(newsize); } result = blocks_[cur_block_]; @@ -130,8 +134,9 @@ class stack_alloc { cur_block_(0), cur_block_end_(blocks_[0] + initial_nbytes), next_loc_(blocks_[0]) { - if (!blocks_[0]) + if (!blocks_[0]) { throw std::bad_alloc(); // no msg allowed in bad_alloc ctor + } } /** @@ -142,9 +147,11 @@ class stack_alloc { */ ~stack_alloc() { // free ALL blocks - for (auto& block : blocks_) - if (block) + for (auto& block : blocks_) { + if (block) { free(block); + } + } } /** @@ -164,8 +171,9 @@ class stack_alloc { char* result = next_loc_; next_loc_ += len; // Occasionally, we have to switch blocks. - if (unlikely(next_loc_ >= cur_block_end_)) + if (unlikely(next_loc_ >= cur_block_end_)) { result = move_to_next_block(len); + } return reinterpret_cast(result); } @@ -208,8 +216,9 @@ class stack_alloc { * recover memory back to the last start_nested call. */ inline void recover_nested() { - if (unlikely(nested_cur_blocks_.empty())) + if (unlikely(nested_cur_blocks_.empty())) { recover_all(); + } cur_block_ = nested_cur_blocks_.back(); nested_cur_blocks_.pop_back(); @@ -228,9 +237,11 @@ class stack_alloc { */ inline void free_all() { // frees all BUT the first (index 0) block - for (size_t i = 1; i < blocks_.size(); ++i) - if (blocks_[i]) + for (size_t i = 1; i < blocks_.size(); ++i) { + if (blocks_[i]) { free(blocks_[i]); + } + } sizes_.resize(1); blocks_.resize(1); recover_all(); @@ -263,11 +274,14 @@ class stack_alloc { * false otherwise. 
*/ inline bool in_stack(const void* ptr) const { - for (size_t i = 0; i < cur_block_; ++i) - if (ptr >= blocks_[i] && ptr < blocks_[i] + sizes_[i]) + for (size_t i = 0; i < cur_block_; ++i) { + if (ptr >= blocks_[i] && ptr < blocks_[i] + sizes_[i]) { return true; - if (ptr >= blocks_[cur_block_] && ptr < next_loc_) + } + } + if (ptr >= blocks_[cur_block_] && ptr < next_loc_) { return true; + } return false; } }; diff --git a/stan/math/mix/mat/fun/typedefs.hpp b/stan/math/mix/mat/fun/typedefs.hpp index 0a331df27c6..2ec20ca3687 100644 --- a/stan/math/mix/mat/fun/typedefs.hpp +++ b/stan/math/mix/mat/fun/typedefs.hpp @@ -9,18 +9,18 @@ namespace stan { namespace math { -typedef Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> matrix_fv; +using matrix_fv = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; typedef Eigen::Matrix >, Eigen::Dynamic, Eigen::Dynamic> matrix_ffv; -typedef Eigen::Matrix, Eigen::Dynamic, 1> vector_fv; +using vector_fv = Eigen::Matrix, Eigen::Dynamic, 1>; -typedef Eigen::Matrix >, Eigen::Dynamic, 1> vector_ffv; +using vector_ffv = Eigen::Matrix >, Eigen::Dynamic, 1>; -typedef Eigen::Matrix, 1, Eigen::Dynamic> row_vector_fv; +using row_vector_fv = Eigen::Matrix, 1, Eigen::Dynamic>; -typedef Eigen::Matrix >, 1, Eigen::Dynamic> row_vector_ffv; +using row_vector_ffv = Eigen::Matrix >, 1, Eigen::Dynamic>; } // namespace math } // namespace stan diff --git a/stan/math/mix/mat/functor/grad_hessian.hpp b/stan/math/mix/mat/functor/grad_hessian.hpp index 813b50fa419..328fb5eecf1 100644 --- a/stan/math/mix/mat/functor/grad_hessian.hpp +++ b/stan/math/mix/mat/functor/grad_hessian.hpp @@ -55,9 +55,10 @@ void grad_hessian( for (int j = i; j < d; ++j) { start_nested(); Matrix >, Dynamic, 1> x_ffvar(d); - for (int k = 0; k < d; ++k) + for (int k = 0; k < d; ++k) { x_ffvar(k) = fvar >(fvar(x(k), i == k), fvar(j == k, 0)); + } fvar > fx_ffvar = f(x_ffvar); H(i, j) = fx_ffvar.d_.d_.val(); H(j, i) = H(i, j); diff --git a/stan/math/mix/mat/functor/grad_tr_mat_times_hessian.hpp b/stan/math/mix/mat/functor/grad_tr_mat_times_hessian.hpp index 109a69e9370..79fe4b4a497 100644 --- a/stan/math/mix/mat/functor/grad_tr_mat_times_hessian.hpp +++ b/stan/math/mix/mat/functor/grad_tr_mat_times_hessian.hpp @@ -23,18 +23,21 @@ void grad_tr_mat_times_hessian( grad_tr_MH.resize(x.size()); Matrix x_var(x.size()); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { x_var(i) = x(i); + } Matrix, Dynamic, 1> x_fvar(x.size()); var sum(0.0); Matrix M_n(x.size()); for (int n = 0; n < x.size(); ++n) { - for (int k = 0; k < x.size(); ++k) + for (int k = 0; k < x.size(); ++k) { M_n(k) = M(n, k); - for (int k = 0; k < x.size(); ++k) + } + for (int k = 0; k < x.size(); ++k) { x_fvar(k) = fvar(x_var(k), k == n); + } fvar fx; fvar grad_fx_dot_v; gradient_dot_vector, double>(f, x_fvar, M_n, fx, grad_fx_dot_v); @@ -42,8 +45,9 @@ void grad_tr_mat_times_hessian( } grad(sum.vi_); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { grad_tr_MH(i) = x_var(i).adj(); + } } catch (const std::exception& e) { recover_memory_nested(); throw; diff --git a/stan/math/mix/mat/functor/gradient_dot_vector.hpp b/stan/math/mix/mat/functor/gradient_dot_vector.hpp index d6db5e14de1..f25d257f15b 100644 --- a/stan/math/mix/mat/functor/gradient_dot_vector.hpp +++ b/stan/math/mix/mat/functor/gradient_dot_vector.hpp @@ -16,8 +16,9 @@ void gradient_dot_vector(const F& f, T1& grad_fx_dot_v) { using Eigen::Matrix; Matrix, Eigen::Dynamic, 1> x_fvar(x.size()); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; 
i < x.size(); ++i) { x_fvar(i) = fvar(x(i), v(i)); + } fvar fx_fvar = f(x_fvar); fx = fx_fvar.val_; grad_fx_dot_v = fx_fvar.d_; diff --git a/stan/math/mix/mat/functor/hessian.hpp b/stan/math/mix/mat/functor/hessian.hpp index e8fbe4d79c7..951899ad276 100644 --- a/stan/math/mix/mat/functor/hessian.hpp +++ b/stan/math/mix/mat/functor/hessian.hpp @@ -54,15 +54,18 @@ void hessian(const F& f, const Eigen::Matrix& x, for (int i = 0; i < x.size(); ++i) { start_nested(); Eigen::Matrix, Eigen::Dynamic, 1> x_fvar(x.size()); - for (int j = 0; j < x.size(); ++j) + for (int j = 0; j < x.size(); ++j) { x_fvar(j) = fvar(x(j), i == j); + } fvar fx_fvar = f(x_fvar); grad(i) = fx_fvar.d_.val(); - if (i == 0) + if (i == 0) { fx = fx_fvar.val_.val(); + } stan::math::grad(fx_fvar.d_.vi_); - for (int j = 0; j < x.size(); ++j) + for (int j = 0; j < x.size(); ++j) { H(i, j) = x_fvar(j).val_.adj(); + } recover_memory_nested(); } } catch (const std::exception& e) { diff --git a/stan/math/mix/mat/functor/hessian_times_vector.hpp b/stan/math/mix/mat/functor/hessian_times_vector.hpp index abe915292f7..a6d3546103a 100644 --- a/stan/math/mix/mat/functor/hessian_times_vector.hpp +++ b/stan/math/mix/mat/functor/hessian_times_vector.hpp @@ -20,16 +20,18 @@ void hessian_times_vector(const F& f, start_nested(); try { Matrix x_var(x.size()); - for (int i = 0; i < x_var.size(); ++i) + for (int i = 0; i < x_var.size(); ++i) { x_var(i) = x(i); + } var fx_var; var grad_fx_var_dot_v; gradient_dot_vector(f, x_var, v, fx_var, grad_fx_var_dot_v); fx = fx_var.val(); grad(grad_fx_var_dot_v.vi_); Hv.resize(x.size()); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { Hv(i) = x_var(i).adj(); + } } catch (const std::exception& e) { recover_memory_nested(); throw; diff --git a/stan/math/mix/mat/functor/partial_derivative.hpp b/stan/math/mix/mat/functor/partial_derivative.hpp index 7033b6ae94e..09c7044d181 100644 --- a/stan/math/mix/mat/functor/partial_derivative.hpp +++ b/stan/math/mix/mat/functor/partial_derivative.hpp @@ -26,8 +26,9 @@ void partial_derivative(const F& f, const Eigen::Matrix& x, int n, T& fx, T& dfx_dxn) { Eigen::Matrix, Eigen::Dynamic, 1> x_fvar(x.size()); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { x_fvar(i) = fvar(x(i), i == n); + } fvar fx_fvar = f(x_fvar); fx = fx_fvar.val_; dfx_dxn = fx_fvar.d_; diff --git a/stan/math/opencl/cholesky_decompose.hpp b/stan/math/opencl/cholesky_decompose.hpp index add056c137e..87275cddd1a 100644 --- a/stan/math/opencl/cholesky_decompose.hpp +++ b/stan/math/opencl/cholesky_decompose.hpp @@ -43,8 +43,9 @@ namespace math { */ template > inline void cholesky_decompose(matrix_cl& A) { - if (A.rows() == 0) + if (A.rows() == 0) { return; + } // Repeats the blocked cholesky decomposition until the size of the remaining // submatrix is smaller or equal to the minimum blocks size // or a heuristic of 100. 
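
The hessian, grad_hessian, and related functors touched above work by nesting forward-mode types (e.g. fvar<fvar<T> >): the inner tangent is seeded for one coordinate and the outer tangent for another, so the tangent-of-tangent carries a second derivative. The braces-only edits leave that machinery alone. A rough, self-contained sketch of the nesting idea with a hypothetical dual<> type (not the Stan API; the names dual and f are made up for illustration):

#include <cstdio>

template <typename T>
struct dual {
  T val;
  T d;
};

// product rule and sum rule on duals
template <typename T>
dual<T> operator*(const dual<T>& a, const dual<T>& b) {
  return {a.val * b.val, a.val * b.d + a.d * b.val};
}
template <typename T>
dual<T> operator+(const dual<T>& a, const dual<T>& b) {
  return {a.val + b.val, a.d + b.d};
}

// f(x) = x^3 + x, written generically so it accepts nested duals
template <typename T>
T f(const T& x) {
  return x * x * x + x;
}

int main() {
  double x = 2.0;
  // seed: the inner tangent tracks d/dx, the outer tangent tracks d/dx again
  dual<dual<double>> xx{{x, 1.0}, {1.0, 0.0}};
  dual<dual<double>> y = f(xx);
  std::printf("f   = %f\n", y.val.val);  // x^3 + x      = 10
  std::printf("f'  = %f\n", y.val.d);    // 3x^2 + 1     = 13
  std::printf("f'' = %f\n", y.d.d);      // 6x           = 12
  return 0;
}

Seeding different coordinates in the inner and outer tangents yields a mixed partial instead of f''; that is how the loops over i and j in the hessian functors fill in H(i, j) one entry at a time.
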
diff --git a/stan/math/opencl/copy.hpp b/stan/math/opencl/copy.hpp index c6b3cdbdf36..c6203cac016 100644 --- a/stan/math/opencl/copy.hpp +++ b/stan/math/opencl/copy.hpp @@ -162,7 +162,7 @@ inline matrix_cl packed_copy(const std::vector& src, int rows) { cl::Event packed_event; const cl::CommandQueue queue = opencl_context.queue(); queue.enqueueWriteBuffer(packed.buffer(), CL_FALSE, 0, - sizeof(T) * packed_size, src.data(), NULL, + sizeof(T) * packed_size, src.data(), nullptr, &packed_event); packed.add_write_event(packed_event); stan::math::opencl_kernels::unpack(cl::NDRange(dst.rows(), dst.rows()), dst, diff --git a/stan/math/opencl/diagonal_multiply.hpp b/stan/math/opencl/diagonal_multiply.hpp index 4831eec1532..f7e232b3adf 100644 --- a/stan/math/opencl/diagonal_multiply.hpp +++ b/stan/math/opencl/diagonal_multiply.hpp @@ -21,12 +21,14 @@ template > inline matrix_cl> diagonal_multiply( const matrix_cl& A, const T2 scalar) { matrix_cl> B(A); - if (B.size() == 0) + if (B.size() == 0) { return B; + } // For rectangular matrices int min_dim = B.rows(); - if (B.cols() < min_dim) + if (B.cols() < min_dim) { min_dim = B.cols(); + } try { opencl_kernels::scalar_mul_diagonal(cl::NDRange(min_dim), B, scalar, B.rows(), min_dim); diff --git a/stan/math/opencl/err/check_diagonal_zeros.hpp b/stan/math/opencl/err/check_diagonal_zeros.hpp index 986f1a3b55c..722782a7772 100644 --- a/stan/math/opencl/err/check_diagonal_zeros.hpp +++ b/stan/math/opencl/err/check_diagonal_zeros.hpp @@ -24,8 +24,9 @@ namespace math { template > inline void check_diagonal_zeros(const char* function, const char* name, const matrix_cl& y) { - if (y.size() == 0) + if (y.size() == 0) { return; + } cl::CommandQueue cmd_queue = opencl_context.queue(); cl::Context ctx = opencl_context.context(); try { diff --git a/stan/math/opencl/err/check_nan.hpp b/stan/math/opencl/err/check_nan.hpp index 0e7ae5ede60..e1d2fbe9917 100644 --- a/stan/math/opencl/err/check_nan.hpp +++ b/stan/math/opencl/err/check_nan.hpp @@ -25,8 +25,9 @@ namespace math { template > inline void check_nan(const char* function, const char* name, const matrix_cl& y) { - if (y.size() == 0) + if (y.size() == 0) { return; + } try { int nan_flag = 0; matrix_cl nan_chk(1, 1); diff --git a/stan/math/opencl/err/check_symmetric.hpp b/stan/math/opencl/err/check_symmetric.hpp index eddf9a7e0ab..c4ed0297946 100644 --- a/stan/math/opencl/err/check_symmetric.hpp +++ b/stan/math/opencl/err/check_symmetric.hpp @@ -25,8 +25,9 @@ namespace math { template > inline void check_symmetric(const char* function, const char* name, const matrix_cl& y) { - if (y.size() == 0) + if (y.size() == 0) { return; + } check_square(function, name, y); try { int symmetric_flag = 1; diff --git a/stan/math/opencl/err/check_vector.hpp b/stan/math/opencl/err/check_vector.hpp index cf6b777a2f4..a1ae69836b5 100644 --- a/stan/math/opencl/err/check_vector.hpp +++ b/stan/math/opencl/err/check_vector.hpp @@ -25,12 +25,15 @@ namespace math { template inline void check_vector(const char* function, const char* name, const matrix_cl& x) { - if (x.rows() == 1) + if (x.rows() == 1) { return; - if (x.cols() == 1) + } + if (x.cols() == 1) { return; - if (x.rows() == 1 || x.cols() == 1) + } + if (x.rows() == 1 || x.cols() == 1) { return; + } std::ostringstream msg; msg << ") has " << x.rows() << " rows and " << x.cols() diff --git a/stan/math/opencl/matrix_cl.hpp b/stan/math/opencl/matrix_cl.hpp index 993b96a6147..6da08142983 100644 --- a/stan/math/opencl/matrix_cl.hpp +++ b/stan/math/opencl/matrix_cl.hpp @@ -43,7 +43,7 @@ 
class matrix_cl> { mutable std::vector read_events_; // Tracks reads public: - typedef T type; + using type = T; // Forward declare the methods that work in place on the matrix template void zeros(); @@ -195,8 +195,9 @@ class matrix_cl> { matrix_cl(const matrix_cl& A) : rows_(A.rows()), cols_(A.cols()), view_(A.view()) { - if (A.size() == 0) + if (A.size() == 0) { return; + } this->wait_for_read_write_events(); cl::Context& ctx = opencl_context.context(); cl::CommandQueue queue = opencl_context.queue(); @@ -237,8 +238,9 @@ class matrix_cl> { explicit matrix_cl(const std::vector>& A) try : rows_(A.empty() ? 0 : A[0].size()), cols_(A.size()) { - if (this->size() == 0) + if (this->size() == 0) { return; + } cl::Context& ctx = opencl_context.context(); cl::CommandQueue& queue = opencl_context.queue(); // creates the OpenCL buffer to copy the Eigen @@ -258,7 +260,7 @@ class matrix_cl> { cl::Event write_event; queue.enqueueWriteBuffer( buffer_cl_, CL_FALSE, sizeof(double) * offset_size, - sizeof(double) * rows_, A[i].data(), NULL, &write_event); + sizeof(double) * rows_, A[i].data(), nullptr, &write_event); this->add_write_event(write_event); } } catch (const cl::Error& e) { @@ -320,7 +322,7 @@ class matrix_cl> { buffer_cl_ = cl::Buffer(ctx, CL_MEM_READ_WRITE, sizeof(T) * A.size()); cl::Event transfer_event; queue.enqueueWriteBuffer(buffer_cl_, CL_FALSE, 0, sizeof(T) * A.size(), - A.data(), NULL, &transfer_event); + A.data(), nullptr, &transfer_event); this->add_write_event(transfer_event); } catch (const cl::Error& e) { check_opencl_error("matrix constructor", e); @@ -347,7 +349,7 @@ class matrix_cl> { buffer_cl_ = cl::Buffer(ctx, CL_MEM_READ_WRITE, sizeof(T) * A.size()); cl::Event transfer_event; queue.enqueueWriteBuffer(buffer_cl_, CL_FALSE, 0, sizeof(T) * A.size(), - A.data(), NULL, &transfer_event); + A.data(), nullptr, &transfer_event); this->add_write_event(transfer_event); } catch (const cl::Error& e) { check_opencl_error("matrix constructor", e); @@ -432,8 +434,9 @@ class matrix_cl> { a.rows(), "destination.rows()", rows()); check_size_match("assignment of (OpenCL) matrices", "source.cols()", a.cols(), "destination.cols()", cols()); - if (a.size() == 0) + if (a.size() == 0) { return *this; + } view_ = a.view(); this->wait_for_read_write_events(); cl::CommandQueue queue = opencl_context.queue(); diff --git a/stan/math/opencl/matrix_cl_view.hpp b/stan/math/opencl/matrix_cl_view.hpp index 3930cbe816b..da16fcce7fa 100644 --- a/stan/math/opencl/matrix_cl_view.hpp +++ b/stan/math/opencl/matrix_cl_view.hpp @@ -18,7 +18,7 @@ enum class matrix_cl_view { Diagonal = 0, Lower = 1, Upper = 2, Entire = 3 }; */ inline const matrix_cl_view either(const matrix_cl_view left_view, const matrix_cl_view right_view) { - typedef typename std::underlying_type::type underlying; + using underlying = typename std::underlying_type::type; return static_cast(static_cast(left_view) | static_cast(right_view)); } @@ -31,7 +31,7 @@ inline const matrix_cl_view either(const matrix_cl_view left_view, */ inline const matrix_cl_view both(const matrix_cl_view left_view, const matrix_cl_view right_view) { - typedef typename std::underlying_type::type underlying; + using underlying = typename std::underlying_type::type; return static_cast(static_cast(left_view) & static_cast(right_view)); } @@ -69,7 +69,7 @@ inline const matrix_cl_view transpose(const matrix_cl_view view) { * @return inverted view */ inline const matrix_cl_view invert(const matrix_cl_view view) { - typedef typename std::underlying_type::type underlying; + using 
underlying = typename std::underlying_type::type; return static_cast( static_cast(matrix_cl_view::Entire) & ~static_cast(view)); diff --git a/stan/math/opencl/multiply.hpp b/stan/math/opencl/multiply.hpp index 15b44e5d743..fb085f1c8bc 100644 --- a/stan/math/opencl/multiply.hpp +++ b/stan/math/opencl/multiply.hpp @@ -115,8 +115,9 @@ template > inline matrix_cl> multiply(const matrix_cl& A, const T2 scalar) { matrix_cl> temp(A.rows(), A.cols(), A.view()); - if (A.size() == 0) + if (A.size() == 0) { return temp; + } try { opencl_kernels::scalar_mul(cl::NDRange(A.rows(), A.cols()), temp, A, scalar, A.rows(), A.cols(), A.view()); diff --git a/stan/math/opencl/multiply_transpose.hpp b/stan/math/opencl/multiply_transpose.hpp index 5abe4fbdb11..8025043c5a8 100644 --- a/stan/math/opencl/multiply_transpose.hpp +++ b/stan/math/opencl/multiply_transpose.hpp @@ -29,8 +29,9 @@ inline matrix_cl multiply_transpose(const matrix_cl& A) { ? matrix_cl_view::Diagonal : matrix_cl_view::Entire); - if (A.size() == 0) + if (A.size() == 0) { return temp; + } // padding the matrices so the dimensions are divisible with local // improves performance becasuse we can omit if statements in the // multiply kernel diff --git a/stan/math/opencl/opencl_context.hpp b/stan/math/opencl/opencl_context.hpp index 07831a100f0..45586d5c0b7 100644 --- a/stan/math/opencl/opencl_context.hpp +++ b/stan/math/opencl/opencl_context.hpp @@ -56,8 +56,9 @@ inline cl::size_t to_size_t(const size_t (&values)[N]) { template <> inline cl::size_t<3> to_size_t(const size_t (&values)[3]) { cl::size_t<3> s; - for (size_t i = 0; i < 3; i++) + for (size_t i = 0; i < 3; i++) { s[i] = values[i]; + } return s; } } // namespace opencl @@ -173,7 +174,7 @@ class opencl_context_base { // the device // Holds Default parameter values for each Kernel. 
- typedef std::map map_base_opts; + using map_base_opts = std::map; map_base_opts base_opts_ = {{"LOWER", static_cast(matrix_cl_view::Lower)}, {"UPPER", static_cast(matrix_cl_view::Upper)}, diff --git a/stan/math/opencl/transpose.hpp b/stan/math/opencl/transpose.hpp index da4737d8e23..0716f33a38e 100644 --- a/stan/math/opencl/transpose.hpp +++ b/stan/math/opencl/transpose.hpp @@ -21,8 +21,9 @@ namespace math { template > inline matrix_cl transpose(const matrix_cl& src) { matrix_cl dst(src.cols(), src.rows(), transpose(src.view())); - if (dst.size() == 0) + if (dst.size() == 0) { return dst; + } try { opencl_kernels::transpose(cl::NDRange(src.rows(), src.cols()), dst, src, src.rows(), src.cols()); diff --git a/stan/math/opencl/zeros.hpp b/stan/math/opencl/zeros.hpp index 9104b95e3ad..72a21b46cc8 100644 --- a/stan/math/opencl/zeros.hpp +++ b/stan/math/opencl/zeros.hpp @@ -27,8 +27,9 @@ namespace math { template template inline void matrix_cl>::zeros() try { - if (size() == 0) + if (size() == 0) { return; + } this->view_ = both(this->view_, invert(matrix_view)); cl::CommandQueue cmdQueue = opencl_context.queue(); opencl_kernels::zeros(cl::NDRange(this->rows(), this->cols()), *this, diff --git a/stan/math/prim/arr/err/check_nonzero_size.hpp b/stan/math/prim/arr/err/check_nonzero_size.hpp index c94d1da3eb7..a6957711759 100644 --- a/stan/math/prim/arr/err/check_nonzero_size.hpp +++ b/stan/math/prim/arr/err/check_nonzero_size.hpp @@ -21,8 +21,9 @@ namespace math { template inline void check_nonzero_size(const char* function, const char* name, const T_y& y) { - if (y.size() > 0) + if (y.size() > 0) { return; + } invalid_argument(function, name, 0, "has size ", ", but must have a non-zero size"); } diff --git a/stan/math/prim/arr/err/is_ordered.hpp b/stan/math/prim/arr/err/is_ordered.hpp index 9920b0c8420..f6f053c87e4 100644 --- a/stan/math/prim/arr/err/is_ordered.hpp +++ b/stan/math/prim/arr/err/is_ordered.hpp @@ -18,8 +18,9 @@ namespace math { template inline bool is_ordered(const std::vector& y) { for (size_t n = 1; n < y.size(); ++n) { - if (!(y[n] > y[n - 1])) + if (!(y[n] > y[n - 1])) { return false; + } } return true; } diff --git a/stan/math/prim/arr/fun/common_type.hpp b/stan/math/prim/arr/fun/common_type.hpp index 350b5197237..9e7debe1c4f 100644 --- a/stan/math/prim/arr/fun/common_type.hpp +++ b/stan/math/prim/arr/fun/common_type.hpp @@ -18,7 +18,7 @@ namespace math { */ template struct common_type, std::vector > { - typedef std::vector::type> type; + using type = std::vector::type>; }; } // namespace math diff --git a/stan/math/prim/arr/fun/dot.hpp b/stan/math/prim/arr/fun/dot.hpp index 2a7c9997cb5..196452680b6 100644 --- a/stan/math/prim/arr/fun/dot.hpp +++ b/stan/math/prim/arr/fun/dot.hpp @@ -10,8 +10,9 @@ namespace math { inline double dot(const std::vector& x, const std::vector& y) { double sum = 0.0; - for (size_t i = 0; i < x.size(); ++i) + for (size_t i = 0; i < x.size(); ++i) { sum += x[i] * y[i]; + } return sum; } diff --git a/stan/math/prim/arr/fun/dot_self.hpp b/stan/math/prim/arr/fun/dot_self.hpp index 2ae969c1dd6..9b47bbaca5d 100644 --- a/stan/math/prim/arr/fun/dot_self.hpp +++ b/stan/math/prim/arr/fun/dot_self.hpp @@ -10,8 +10,9 @@ namespace math { inline double dot_self(const std::vector& x) { double sum = 0.0; - for (double i : x) + for (double i : x) { sum += i * i; + } return sum; } diff --git a/stan/math/prim/arr/fun/fill.hpp b/stan/math/prim/arr/fun/fill.hpp index a3dfb010226..210132be7c8 100644 --- a/stan/math/prim/arr/fun/fill.hpp +++ 
b/stan/math/prim/arr/fun/fill.hpp @@ -21,8 +21,9 @@ namespace math { */ template void fill(std::vector& x, const S& y) { - for (typename std::vector::size_type i = 0; i < x.size(); ++i) + for (typename std::vector::size_type i = 0; i < x.size(); ++i) { fill(x[i], y); + } } } // namespace math diff --git a/stan/math/prim/arr/fun/inverse_softmax.hpp b/stan/math/prim/arr/fun/inverse_softmax.hpp index 18f3e98124a..9a844fd279c 100644 --- a/stan/math/prim/arr/fun/inverse_softmax.hpp +++ b/stan/math/prim/arr/fun/inverse_softmax.hpp @@ -35,8 +35,9 @@ template void inverse_softmax(const Vector& simplex, Vector& y) { using std::log; check_matching_sizes("inverse_softmax", "simplex", simplex, "y", y); - for (size_t i = 0; i < simplex.size(); ++i) + for (size_t i = 0; i < simplex.size(); ++i) { y[i] = log(simplex[i]); + } } } // namespace math diff --git a/stan/math/prim/arr/fun/log_sum_exp.hpp b/stan/math/prim/arr/fun/log_sum_exp.hpp index 45729af2f09..1b7f7f2ddbe 100644 --- a/stan/math/prim/arr/fun/log_sum_exp.hpp +++ b/stan/math/prim/arr/fun/log_sum_exp.hpp @@ -28,14 +28,18 @@ inline double log_sum_exp(const std::vector& x) { using std::log; using std::numeric_limits; double max = -numeric_limits::infinity(); - for (double xx : x) - if (xx > max) + for (double xx : x) { + if (xx > max) { max = xx; + } + } double sum = 0.0; - for (size_t ii = 0; ii < x.size(); ii++) - if (x[ii] != -numeric_limits::infinity()) + for (size_t ii = 0; ii < x.size(); ii++) { + if (x[ii] != -numeric_limits::infinity()) { sum += exp(x[ii] - max); + } + } return max + log(sum); } diff --git a/stan/math/prim/arr/fun/promote_elements.hpp b/stan/math/prim/arr/fun/promote_elements.hpp index 523516fc659..bec98a40165 100644 --- a/stan/math/prim/arr/fun/promote_elements.hpp +++ b/stan/math/prim/arr/fun/promote_elements.hpp @@ -29,8 +29,9 @@ struct promote_elements, std::vector > { inline static std::vector promote(const std::vector& u) { std::vector t; t.reserve(u.size()); - for (size_t i = 0; i < u.size(); ++i) + for (size_t i = 0; i < u.size(); ++i) { t.push_back(promote_elements::promote(u[i])); + } return t; } }; diff --git a/stan/math/prim/arr/fun/promote_scalar.hpp b/stan/math/prim/arr/fun/promote_scalar.hpp index 899a18f8504..6e7b4c9706f 100644 --- a/stan/math/prim/arr/fun/promote_scalar.hpp +++ b/stan/math/prim/arr/fun/promote_scalar.hpp @@ -30,11 +30,12 @@ struct promote_scalar_struct > { */ static std::vector::type> apply( const std::vector& x) { - typedef std::vector::type> return_t; - typedef typename index_type::type idx_t; + using return_t = std::vector::type>; + using idx_t = typename index_type::type; return_t y(x.size()); - for (idx_t i = 0; i < x.size(); ++i) + for (idx_t i = 0; i < x.size(); ++i) { y[i] = promote_scalar_struct::apply(x[i]); + } return y; } }; diff --git a/stan/math/prim/arr/fun/promote_scalar_type.hpp b/stan/math/prim/arr/fun/promote_scalar_type.hpp index 3c0cbad45db..6105adac4fc 100644 --- a/stan/math/prim/arr/fun/promote_scalar_type.hpp +++ b/stan/math/prim/arr/fun/promote_scalar_type.hpp @@ -21,7 +21,7 @@ struct promote_scalar_type > { /** * The promoted type. 
*/ - typedef std::vector::type> type; + using type = std::vector::type>; }; } // namespace math diff --git a/stan/math/prim/arr/fun/scaled_add.hpp b/stan/math/prim/arr/fun/scaled_add.hpp index 6429b33387d..63646447e76 100644 --- a/stan/math/prim/arr/fun/scaled_add.hpp +++ b/stan/math/prim/arr/fun/scaled_add.hpp @@ -10,8 +10,9 @@ namespace math { inline void scaled_add(std::vector& x, const std::vector& y, double lambda) { - for (size_t i = 0; i < x.size(); ++i) + for (size_t i = 0; i < x.size(); ++i) { x[i] += lambda * y[i]; + } } } // namespace math diff --git a/stan/math/prim/arr/fun/sub.hpp b/stan/math/prim/arr/fun/sub.hpp index 275e099a593..2b075e782d4 100644 --- a/stan/math/prim/arr/fun/sub.hpp +++ b/stan/math/prim/arr/fun/sub.hpp @@ -11,8 +11,9 @@ namespace math { inline void sub(std::vector& x, std::vector& y, std::vector& result) { result.resize(x.size()); - for (size_t i = 0; i < x.size(); ++i) + for (size_t i = 0; i < x.size(); ++i) { result[i] = x[i] - y[i]; + } } } // namespace math diff --git a/stan/math/prim/arr/fun/value_of.hpp b/stan/math/prim/arr/fun/value_of.hpp index 5607f2184c0..a3fafa86dcb 100644 --- a/stan/math/prim/arr/fun/value_of.hpp +++ b/stan/math/prim/arr/fun/value_of.hpp @@ -22,8 +22,9 @@ inline std::vector::type> value_of( const std::vector& x) { size_t size = x.size(); std::vector::type> result(size); - for (size_t i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) { result[i] = value_of(x[i]); + } return result; } diff --git a/stan/math/prim/arr/fun/value_of_rec.hpp b/stan/math/prim/arr/fun/value_of_rec.hpp index 4cb06501742..eb04293df90 100644 --- a/stan/math/prim/arr/fun/value_of_rec.hpp +++ b/stan/math/prim/arr/fun/value_of_rec.hpp @@ -23,8 +23,9 @@ template inline std::vector value_of_rec(const std::vector& x) { size_t size = x.size(); std::vector result(size); - for (size_t i = 0; i < size; i++) + for (size_t i = 0; i < size; i++) { result[i] = value_of_rec(x[i]); + } return result; } diff --git a/stan/math/prim/arr/functor/coupled_ode_observer.hpp b/stan/math/prim/arr/functor/coupled_ode_observer.hpp index b11a0501c60..f2a113c8ead 100644 --- a/stan/math/prim/arr/functor/coupled_ode_observer.hpp +++ b/stan/math/prim/arr/functor/coupled_ode_observer.hpp @@ -28,7 +28,7 @@ namespace math { */ template struct coupled_ode_observer { - typedef return_type_t return_t; + using return_t = return_type_t; typedef operands_and_partials, std::vector, T_t0, T_ts> ops_partials_t; @@ -122,14 +122,16 @@ struct coupled_ode_observer { for (size_t j = 0; j < N_; j++) { // iterate over parameters for each equation if (!is_constant_all::value) { - for (std::size_t k = 0; k < N_; k++) + for (std::size_t k = 0; k < N_; k++) { ops_partials.edge1_.partials_[k] = coupled_state[N_ + N_ * k + j]; + } } if (!is_constant_all::value) { - for (std::size_t k = 0; k < M_; k++) + for (std::size_t k = 0; k < M_; k++) { ops_partials.edge2_.partials_[k] = coupled_state[N_ + index_offset_theta_ + N_ * k + j]; + } } if (!is_constant_all::value) { diff --git a/stan/math/prim/arr/functor/coupled_ode_system.hpp b/stan/math/prim/arr/functor/coupled_ode_system.hpp index 60c24f03999..752d92b10c4 100644 --- a/stan/math/prim/arr/functor/coupled_ode_system.hpp +++ b/stan/math/prim/arr/functor/coupled_ode_system.hpp @@ -114,8 +114,9 @@ class coupled_ode_system { */ std::vector initial_state() const { std::vector state(size_, 0.0); - for (size_t n = 0; n < N_; n++) + for (size_t n = 0; n < N_; n++) { state[n] = y0_dbl_[n]; + } return state; } }; diff --git 
a/stan/math/prim/arr/functor/integrate_1d.hpp b/stan/math/prim/arr/functor/integrate_1d.hpp index 3ec6cb96430..ed6f3788c4f 100644 --- a/stan/math/prim/arr/functor/integrate_1d.hpp +++ b/stan/math/prim/arr/functor/integrate_1d.hpp @@ -202,8 +202,9 @@ inline double integrate_1d( check_less_or_equal(function, "lower limit", a, b); if (a == b) { - if (std::isinf(a)) + if (std::isinf(a)) { domain_error(function, "Integration endpoints are both", a, "", ""); + } return 0.0; } else { return integrate( diff --git a/stan/math/prim/arr/functor/integrate_ode_rk45.hpp b/stan/math/prim/arr/functor/integrate_ode_rk45.hpp index bf65d6becae..6fa74daa79d 100644 --- a/stan/math/prim/arr/functor/integrate_ode_rk45.hpp +++ b/stan/math/prim/arr/functor/integrate_ode_rk45.hpp @@ -96,15 +96,18 @@ std::vector>> integrate_ode_rk45( check_ordered("integrate_ode_rk45", "times", ts_dbl); check_less("integrate_ode_rk45", "initial time", t0_dbl, ts_dbl[0]); - if (relative_tolerance <= 0) + if (relative_tolerance <= 0) { invalid_argument("integrate_ode_rk45", "relative_tolerance,", relative_tolerance, "", ", must be greater than 0"); - if (absolute_tolerance <= 0) + } + if (absolute_tolerance <= 0) { invalid_argument("integrate_ode_rk45", "absolute_tolerance,", absolute_tolerance, "", ", must be greater than 0"); - if (max_num_steps <= 0) + } + if (max_num_steps <= 0) { invalid_argument("integrate_ode_rk45", "max_num_steps,", max_num_steps, "", ", must be greater than 0"); + } // creates basic or coupled system by template specializations coupled_ode_system coupled_system(f, y0, theta, x, x_int, msgs); diff --git a/stan/math/prim/arr/meta/VectorBuilderHelper.hpp b/stan/math/prim/arr/meta/VectorBuilderHelper.hpp index bf95072fff2..16e1bc44014 100644 --- a/stan/math/prim/arr/meta/VectorBuilderHelper.hpp +++ b/stan/math/prim/arr/meta/VectorBuilderHelper.hpp @@ -18,7 +18,7 @@ class VectorBuilderHelper { public: explicit VectorBuilderHelper(size_t n) : x_(n) {} - typedef std::vector type; + using type = std::vector; T1& operator[](size_t i) { return x_[i]; } diff --git a/stan/math/prim/arr/meta/index_type.hpp b/stan/math/prim/arr/meta/index_type.hpp index 0dc9cd4bac1..5df00a63c8c 100644 --- a/stan/math/prim/arr/meta/index_type.hpp +++ b/stan/math/prim/arr/meta/index_type.hpp @@ -18,7 +18,7 @@ struct index_type > { /** * Typedef for index of standard vectors. 
*/ - typedef typename std::vector::size_type type; + using type = typename std::vector::size_type; }; } // namespace math diff --git a/stan/math/prim/arr/meta/is_vector.hpp b/stan/math/prim/arr/meta/is_vector.hpp index 599eec52a88..cc8dbf09682 100644 --- a/stan/math/prim/arr/meta/is_vector.hpp +++ b/stan/math/prim/arr/meta/is_vector.hpp @@ -12,12 +12,12 @@ namespace stan { template struct is_vector { enum { value = is_vector::value }; - typedef T type; + using type = T; }; template struct is_vector > { enum { value = 1 }; - typedef T type; + using type = T; }; } // namespace stan #endif diff --git a/stan/math/prim/arr/meta/scalar_type.hpp b/stan/math/prim/arr/meta/scalar_type.hpp index 19457d85e1d..7b576f86108 100644 --- a/stan/math/prim/arr/meta/scalar_type.hpp +++ b/stan/math/prim/arr/meta/scalar_type.hpp @@ -7,22 +7,22 @@ namespace stan { template struct scalar_type > { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; template struct scalar_type > { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; template struct scalar_type&> { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; template struct scalar_type&> { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; } // namespace stan #endif diff --git a/stan/math/prim/arr/meta/value_type.hpp b/stan/math/prim/arr/meta/value_type.hpp index 118047f8877..8bf9f14f082 100644 --- a/stan/math/prim/arr/meta/value_type.hpp +++ b/stan/math/prim/arr/meta/value_type.hpp @@ -19,7 +19,7 @@ struct value_type > { * Type of value stored in a standard vector with type * T entries. */ - typedef T type; + using type = T; }; } // namespace math diff --git a/stan/math/prim/mat/err/check_cholesky_factor.hpp b/stan/math/prim/mat/err/check_cholesky_factor.hpp index 122487798f5..f8c6b21e3ce 100644 --- a/stan/math/prim/mat/err/check_cholesky_factor.hpp +++ b/stan/math/prim/mat/err/check_cholesky_factor.hpp @@ -32,9 +32,10 @@ inline void check_cholesky_factor( y.rows()); check_positive(function, "columns of Cholesky factor", y.cols()); check_lower_triangular(function, name, y); - for (int i = 0; i < y.cols(); ++i) + for (int i = 0; i < y.cols(); ++i) { // FIXME: should report row check_positive(function, name, y(i, i)); + } } } // namespace math diff --git a/stan/math/prim/mat/err/check_cholesky_factor_corr.hpp b/stan/math/prim/mat/err/check_cholesky_factor_corr.hpp index 86425864ddb..2c7334039f4 100644 --- a/stan/math/prim/mat/err/check_cholesky_factor_corr.hpp +++ b/stan/math/prim/mat/err/check_cholesky_factor_corr.hpp @@ -33,8 +33,9 @@ void check_cholesky_factor_corr( const Eigen::Matrix& y) { check_square(function, name, y); check_lower_triangular(function, name, y); - for (int i = 0; i < y.rows(); ++i) + for (int i = 0; i < y.rows(); ++i) { check_positive(function, name, y(i, i)); + } for (int i = 0; i < y.rows(); ++i) { Eigen::Matrix y_i = y.row(i).transpose(); check_unit_vector(function, name, y_i); diff --git a/stan/math/prim/mat/err/check_column_index.hpp b/stan/math/prim/mat/err/check_column_index.hpp index 1e0bc9934d3..545617c4473 100644 --- a/stan/math/prim/mat/err/check_column_index.hpp +++ b/stan/math/prim/mat/err/check_column_index.hpp @@ -30,8 +30,9 @@ template inline void check_column_index(const char* function, const char* name, const Eigen::Matrix& y, size_t i) { if (i >= stan::error_index::value - && i < static_cast(y.cols()) + stan::error_index::value) + && i < static_cast(y.cols()) + 
stan::error_index::value) { return; + } std::stringstream msg; msg << " for columns of " << name; diff --git a/stan/math/prim/mat/err/check_consistent_size_mvt.hpp b/stan/math/prim/mat/err/check_consistent_size_mvt.hpp index 45414d1e318..e9ced647479 100644 --- a/stan/math/prim/mat/err/check_consistent_size_mvt.hpp +++ b/stan/math/prim/mat/err/check_consistent_size_mvt.hpp @@ -28,17 +28,19 @@ inline void check_consistent_size_mvt(const char* function, const char* name, if (length(x) == 0) { size_x = 0; - if (expected_size == 0) + if (expected_size == 0) { return; + } } else { size_t size_x = stan::length_mvt(x); bool x_contains_vectors = is_vector< typename std::remove_reference::type>::value; - if (!x_contains_vectors) + if (!x_contains_vectors) { return; - else if (expected_size == size_x) + } else if (expected_size == size_x) { return; + } } std::stringstream msg; diff --git a/stan/math/prim/mat/err/check_finite.hpp b/stan/math/prim/mat/err/check_finite.hpp index 8ed76bbfe18..3dee01bdd11 100644 --- a/stan/math/prim/mat/err/check_finite.hpp +++ b/stan/math/prim/mat/err/check_finite.hpp @@ -29,9 +29,10 @@ struct finite, true> { const Eigen::Matrix& y) { if (!value_of(y).allFinite()) { for (int n = 0; n < y.size(); ++n) { - if (!(boost::math::isfinite)(y(n))) + if (!(boost::math::isfinite)(y(n))) { domain_error_vec(function, name, y, n, "is ", ", but must be finite!"); + } } } } diff --git a/stan/math/prim/mat/err/check_pos_definite.hpp b/stan/math/prim/mat/err/check_pos_definite.hpp index 08411a4094b..1687bd056e2 100644 --- a/stan/math/prim/mat/err/check_pos_definite.hpp +++ b/stan/math/prim/mat/err/check_pos_definite.hpp @@ -29,13 +29,15 @@ inline void check_pos_definite(const char* function, const char* name, const Eigen::Matrix& y) { check_symmetric(function, name, y); check_positive(function, name, "rows", y.rows()); - if (y.rows() == 1 && !(y(0, 0) > CONSTRAINT_TOLERANCE)) + if (y.rows() == 1 && !(y(0, 0) > CONSTRAINT_TOLERANCE)) { domain_error(function, name, "is not positive definite.", ""); + } Eigen::LDLT cholesky = value_of_rec(y).ldlt(); if (cholesky.info() != Eigen::Success || !cholesky.isPositive() - || (cholesky.vectorD().array() <= 0.0).any()) + || (cholesky.vectorD().array() <= 0.0).any()) { domain_error(function, name, "is not positive definite.", ""); + } check_not_nan(function, name, y); } @@ -53,8 +55,9 @@ template inline void check_pos_definite(const char* function, const char* name, const Eigen::LDLT& cholesky) { if (cholesky.info() != Eigen::Success || !cholesky.isPositive() - || !(cholesky.vectorD().array() > 0.0).all()) + || !(cholesky.vectorD().array() > 0.0).all()) { domain_error(function, "LDLT decomposition of", " failed", name); + } } /** @@ -72,8 +75,9 @@ template inline void check_pos_definite(const char* function, const char* name, const Eigen::LLT& cholesky) { if (cholesky.info() != Eigen::Success - || !(cholesky.matrixLLT().diagonal().array() > 0.0).all()) + || !(cholesky.matrixLLT().diagonal().array() > 0.0).all()) { domain_error(function, "Matrix", " is not positive definite", name); + } } } // namespace math diff --git a/stan/math/prim/mat/err/check_pos_semidefinite.hpp b/stan/math/prim/mat/err/check_pos_semidefinite.hpp index fe79123b904..261d98cb1f7 100644 --- a/stan/math/prim/mat/err/check_pos_semidefinite.hpp +++ b/stan/math/prim/mat/err/check_pos_semidefinite.hpp @@ -32,16 +32,18 @@ inline void check_pos_semidefinite( check_symmetric(function, name, y); check_positive(function, name, "rows", y.rows()); - if (y.rows() == 1 && !(y(0, 0) >= 0.0)) + 
if (y.rows() == 1 && !(y(0, 0) >= 0.0)) { domain_error(function, name, "is not positive semi-definite.", ""); + } using Eigen::Dynamic; using Eigen::LDLT; using Eigen::Matrix; LDLT > cholesky = value_of_rec(y).ldlt(); if (cholesky.info() != Eigen::Success - || (cholesky.vectorD().array() < 0.0).any()) + || (cholesky.vectorD().array() < 0.0).any()) { domain_error(function, name, "is not positive semi-definite.", ""); + } check_not_nan(function, name, y); } @@ -59,8 +61,9 @@ template inline void check_pos_semidefinite(const char* function, const char* name, const Eigen::LDLT& cholesky) { if (cholesky.info() != Eigen::Success - || (cholesky.vectorD().array() < 0.0).any()) + || (cholesky.vectorD().array() < 0.0).any()) { domain_error(function, name, "is not positive semi-definite.", ""); + } } } // namespace math diff --git a/stan/math/prim/mat/err/check_positive_ordered.hpp b/stan/math/prim/mat/err/check_positive_ordered.hpp index bac2b82f710..dfbba3a3484 100644 --- a/stan/math/prim/mat/err/check_positive_ordered.hpp +++ b/stan/math/prim/mat/err/check_positive_ordered.hpp @@ -27,8 +27,9 @@ void check_positive_ordered(const char* function, const char* name, using Eigen::Dynamic; using Eigen::Matrix; - if (y.size() == 0) + if (y.size() == 0) { return; + } if (y[0] < 0) { std::ostringstream msg; diff --git a/stan/math/prim/mat/err/check_range.hpp b/stan/math/prim/mat/err/check_range.hpp index ffd2497404d..eace472160a 100644 --- a/stan/math/prim/mat/err/check_range.hpp +++ b/stan/math/prim/mat/err/check_range.hpp @@ -24,8 +24,9 @@ namespace math { inline void check_range(const char* function, const char* name, int max, int index, int nested_level, const char* error_msg) { if ((index >= stan::error_index::value) - && (index < max + stan::error_index::value)) + && (index < max + stan::error_index::value)) { return; + } std::stringstream msg; msg << "; index position = " << nested_level; @@ -48,8 +49,9 @@ inline void check_range(const char* function, const char* name, int max, inline void check_range(const char* function, const char* name, int max, int index, const char* error_msg) { if ((index >= stan::error_index::value) - && (index < max + stan::error_index::value)) + && (index < max + stan::error_index::value)) { return; + } out_of_range(function, max, index, error_msg); } @@ -67,8 +69,9 @@ inline void check_range(const char* function, const char* name, int max, inline void check_range(const char* function, const char* name, int max, int index) { if ((index >= stan::error_index::value) - && (index < max + stan::error_index::value)) + && (index < max + stan::error_index::value)) { return; + } out_of_range(function, max, index); } diff --git a/stan/math/prim/mat/err/check_row_index.hpp b/stan/math/prim/mat/err/check_row_index.hpp index d6dd8aa8afa..b4d803dbb41 100644 --- a/stan/math/prim/mat/err/check_row_index.hpp +++ b/stan/math/prim/mat/err/check_row_index.hpp @@ -27,8 +27,9 @@ template inline void check_row_index(const char* function, const char* name, const Eigen::Matrix& y, size_t i) { if (i >= stan::error_index::value - && i < static_cast(y.rows()) + stan::error_index::value) + && i < static_cast(y.rows()) + stan::error_index::value) { return; + } std::stringstream msg; msg << " for rows of " << name; diff --git a/stan/math/prim/mat/err/check_std_vector_index.hpp b/stan/math/prim/mat/err/check_std_vector_index.hpp index 3358891b8f6..86de406710b 100644 --- a/stan/math/prim/mat/err/check_std_vector_index.hpp +++ b/stan/math/prim/mat/err/check_std_vector_index.hpp @@ -26,8 +26,9 @@ template 
inline void check_std_vector_index(const char* function, const char* name, const std::vector& y, int i) { if (i >= static_cast(stan::error_index::value) - && i < static_cast(y.size() + stan::error_index::value)) + && i < static_cast(y.size() + stan::error_index::value)) { return; + } std::stringstream msg; msg << " for " << name; diff --git a/stan/math/prim/mat/err/check_symmetric.hpp b/stan/math/prim/mat/err/check_symmetric.hpp index 87ee12cc984..60f5a34c3c4 100644 --- a/stan/math/prim/mat/err/check_symmetric.hpp +++ b/stan/math/prim/mat/err/check_symmetric.hpp @@ -35,8 +35,9 @@ inline void check_symmetric( Eigen::Matrix >::type size_type; size_type k = y.rows(); - if (k == 1) + if (k == 1) { return; + } for (size_type m = 0; m < k; ++m) { for (size_type n = m + 1; n < k; ++n) { if (!(fabs(value_of(y(m, n)) - value_of(y(n, m))) diff --git a/stan/math/prim/mat/err/check_vector.hpp b/stan/math/prim/mat/err/check_vector.hpp index 3b6be93a6f5..8bf88d65ebb 100644 --- a/stan/math/prim/mat/err/check_vector.hpp +++ b/stan/math/prim/mat/err/check_vector.hpp @@ -27,12 +27,15 @@ namespace math { template inline void check_vector(const char* function, const char* name, const Eigen::Matrix& x) { - if (R == 1) + if (R == 1) { return; - if (C == 1) + } + if (C == 1) { return; - if (x.rows() == 1 || x.cols() == 1) + } + if (x.rows() == 1 || x.cols() == 1) { return; + } std::ostringstream msg; msg << ") has " << x.rows() << " rows and " << x.cols() diff --git a/stan/math/prim/mat/err/is_cholesky_factor_corr.hpp b/stan/math/prim/mat/err/is_cholesky_factor_corr.hpp index 9996e3ed6fe..86f7545e5e9 100644 --- a/stan/math/prim/mat/err/is_cholesky_factor_corr.hpp +++ b/stan/math/prim/mat/err/is_cholesky_factor_corr.hpp @@ -25,12 +25,14 @@ namespace math { template inline bool is_cholesky_factor_corr( const Eigen::Matrix& y) { - if (!is_cholesky_factor(y)) + if (!is_cholesky_factor(y)) { return false; + } for (int i = 0; i < y.rows(); ++i) { Eigen::Matrix y_i = y.row(i).transpose(); - if (!is_unit_vector(y_i)) + if (!is_unit_vector(y_i)) { return false; + } } return true; } diff --git a/stan/math/prim/mat/err/is_corr_matrix.hpp b/stan/math/prim/mat/err/is_corr_matrix.hpp index 962cc25b81c..a05869f7fed 100644 --- a/stan/math/prim/mat/err/is_corr_matrix.hpp +++ b/stan/math/prim/mat/err/is_corr_matrix.hpp @@ -31,16 +31,20 @@ inline bool is_corr_matrix( typedef typename index_type< Eigen::Matrix >::type size_t; - if (!is_size_match(y.rows(), y.cols())) + if (!is_size_match(y.rows(), y.cols())) { return false; - if (!is_positive(y.rows())) + } + if (!is_positive(y.rows())) { return false; - if (!is_pos_definite(y)) + } + if (!is_pos_definite(y)) { return false; + } if (is_symmetric(y)) { for (size_t k = 0; k < y.rows(); ++k) { - if (!(fabs(y(k, k) - 1.0) <= CONSTRAINT_TOLERANCE)) + if (!(fabs(y(k, k) - 1.0) <= CONSTRAINT_TOLERANCE)) { return false; + } } } return true; diff --git a/stan/math/prim/mat/err/is_pos_definite.hpp b/stan/math/prim/mat/err/is_pos_definite.hpp index 1a79d32dd62..febc7c14178 100644 --- a/stan/math/prim/mat/err/is_pos_definite.hpp +++ b/stan/math/prim/mat/err/is_pos_definite.hpp @@ -25,16 +25,20 @@ namespace math { */ template inline bool is_pos_definite(const Eigen::Matrix& y) { - if (!is_symmetric(y)) + if (!is_symmetric(y)) { return false; - if (!is_positive(y.rows())) + } + if (!is_positive(y.rows())) { return false; - if (y.rows() == 1 && !(y(0, 0) > CONSTRAINT_TOLERANCE)) + } + if (y.rows() == 1 && !(y(0, 0) > CONSTRAINT_TOLERANCE)) { return false; + } Eigen::LDLT cholesky = 
value_of_rec(y).ldlt(); if (cholesky.info() != Eigen::Success || !cholesky.isPositive() - || (cholesky.vectorD().array() <= 0.0).any()) + || (cholesky.vectorD().array() <= 0.0).any()) { return false; + } return is_not_nan(y); } diff --git a/stan/math/prim/mat/err/is_symmetric.hpp b/stan/math/prim/mat/err/is_symmetric.hpp index fa7e395ceb3..bde27c7847a 100644 --- a/stan/math/prim/mat/err/is_symmetric.hpp +++ b/stan/math/prim/mat/err/is_symmetric.hpp @@ -20,20 +20,23 @@ namespace math { template inline bool is_symmetric( const Eigen::Matrix& y) { - if (!is_square(y)) + if (!is_square(y)) { return false; + } typedef typename index_type< Eigen::Matrix>::type size_type; size_type k = y.rows(); - if (k == 1) + if (k == 1) { return true; + } for (size_type m = 0; m < k; ++m) { for (size_type n = m + 1; n < k; ++n) { if (!(fabs(value_of(y(m, n)) - value_of(y(n, m))) - <= CONSTRAINT_TOLERANCE)) + <= CONSTRAINT_TOLERANCE)) { return false; + } } } return true; diff --git a/stan/math/prim/mat/fun/LDLT_factor.hpp b/stan/math/prim/mat/fun/LDLT_factor.hpp index 455513dbc72..76400a38170 100644 --- a/stan/math/prim/mat/fun/LDLT_factor.hpp +++ b/stan/math/prim/mat/fun/LDLT_factor.hpp @@ -62,11 +62,11 @@ namespace math { template class LDLT_factor { public: - typedef Eigen::Matrix vector_t; - typedef Eigen::Matrix matrix_t; - typedef Eigen::LDLT ldlt_t; - typedef size_t size_type; - typedef double value_type; + using vector_t = Eigen::Matrix; + using matrix_t = Eigen::Matrix; + using ldlt_t = Eigen::LDLT; + using size_type = size_t; + using value_type = double; LDLT_factor() : N_(0), ldltP_(new ldlt_t()) {} @@ -81,14 +81,18 @@ class LDLT_factor { } inline bool success() const { - if (ldltP_->info() != Eigen::Success) + if (ldltP_->info() != Eigen::Success) { return false; - if (!(ldltP_->isPositive())) + } + if (!(ldltP_->isPositive())) { return false; + } vector_t ldltP_diag(ldltP_->vectorD()); - for (int i = 0; i < ldltP_diag.size(); ++i) - if (ldltP_diag(i) <= 0 || is_nan(ldltP_diag(i))) + for (int i = 0; i < ldltP_diag.size(); ++i) { + if (ldltP_diag(i) <= 0 || is_nan(ldltP_diag(i))) { return false; + } + } return true; } diff --git a/stan/math/prim/mat/fun/MatrixExponential.h b/stan/math/prim/mat/fun/MatrixExponential.h index 766077f0919..35f7aa12479 100644 --- a/stan/math/prim/mat/fun/MatrixExponential.h +++ b/stan/math/prim/mat/fun/MatrixExponential.h @@ -38,7 +38,7 @@ namespace Eigen { return ldexp(x, -m_squarings); } - typedef std::complex ComplexScalar; + using ComplexScalar = std::complex; /** \brief Scale a matrix coefficient. 
* @@ -63,8 +63,9 @@ namespace Eigen { template void matrix_exp_pade3(const MatrixType &A, MatrixType &U, MatrixType &V) { - using Eigen::internal::traits; - typedef typename Eigen::NumTraits::Scalar>::Real RealScalar; + using Eigen::internal::traits; + using RealScalar = + typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {120.L, 60.L, 12.L, 1.L}; const MatrixType A2 = A * A; const MatrixType tmp = b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); @@ -79,8 +80,9 @@ namespace Eigen { */ template void matrix_exp_pade5(const MatrixType &A, MatrixType &U, MatrixType &V) - { - typedef typename Eigen::NumTraits::Scalar>::Real RealScalar; + { + using RealScalar = typename Eigen::NumTraits< + typename Eigen::internal::traits::Scalar>::Real; const RealScalar b[] = {30240.L, 15120.L, 3360.L, 420.L, 30.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; @@ -97,8 +99,9 @@ namespace Eigen { template void matrix_exp_pade7(const MatrixType &A, MatrixType &U, MatrixType &V) { - using Eigen::internal::traits; - typedef typename Eigen::NumTraits::Scalar>::Real RealScalar; + using Eigen::internal::traits; + using RealScalar = + typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {17297280.L, 8648640.L, 1995840.L, 277200.L, 25200.L, 1512.L, 56.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; @@ -117,8 +120,9 @@ namespace Eigen { template void matrix_exp_pade9(const MatrixType &A, MatrixType &U, MatrixType &V) { - using Eigen::internal::traits; - typedef typename Eigen::NumTraits::Scalar>::Real RealScalar; + using Eigen::internal::traits; + using RealScalar = + typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {17643225600.L, 8821612800.L, 2075673600.L, 302702400.L, 30270240.L, 2162160.L, 110880.L, 3960.L, 90.L, 1.L}; const MatrixType A2 = A * A; @@ -139,8 +143,9 @@ namespace Eigen { template void matrix_exp_pade13(const MatrixType &A, MatrixType &U, MatrixType &V) { - using Eigen::internal::traits; - typedef typename Eigen::NumTraits::Scalar>::Real RealScalar; + using Eigen::internal::traits; + using RealScalar = + typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {64764752532480000.L, 32382376266240000.L, 7771770303897600.L, 1187353796428800.L, 129060195264000.L, 10559470521600.L, 670442572800.L, 33522128640.L, 1323241920.L, 40840800.L, 960960.L, 16380.L, 182.L, 1.L}; @@ -197,7 +202,9 @@ namespace Eigen { } else { const double maxnorm = 5.371920351148152; frexp(value_of_rec(l1norm) / value_of_rec(maxnorm), &squarings); - if (squarings < 0) squarings = 0; + if (squarings < 0) { + squarings = 0; + } MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); matrix_exp_pade13(A, U, V); } diff --git a/stan/math/prim/mat/fun/accumulator.hpp b/stan/math/prim/mat/fun/accumulator.hpp index 6171fba55e8..5629812bf02 100644 --- a/stan/math/prim/mat/fun/accumulator.hpp +++ b/stan/math/prim/mat/fun/accumulator.hpp @@ -80,8 +80,9 @@ class accumulator { */ template void add(const Eigen::Matrix& m) { - for (int i = 0; i < m.size(); ++i) + for (int i = 0; i < m.size(); ++i) { this->add(m(i)); + } } /** @@ -95,8 +96,9 @@ class accumulator { */ template void add(const std::vector& xs) { - for (size_t i = 0; i < xs.size(); ++i) + for (size_t i = 0; i < xs.size(); ++i) { this->add(xs[i]); + } } /** diff --git a/stan/math/prim/mat/fun/append_array.hpp b/stan/math/prim/mat/fun/append_array.hpp index eff75999824..c0f610eddb5 100644 --- a/stan/math/prim/mat/fun/append_array.hpp +++ 
b/stan/math/prim/mat/fun/append_array.hpp @@ -37,10 +37,12 @@ append_array(const std::vector& x, const std::vector& y) { zdims[0] += y.size(); } resize(z, zdims); - for (size_t i = 0; i < x.size(); ++i) + for (size_t i = 0; i < x.size(); ++i) { assign(z[i], x[i]); - for (size_t i = 0; i < y.size(); ++i) + } + for (size_t i = 0; i < y.size(); ++i) { assign(z[i + x.size()], y[i]); + } return z; } diff --git a/stan/math/prim/mat/fun/append_col.hpp b/stan/math/prim/mat/fun/append_col.hpp index 46eb4cd1f47..eee2d3fc417 100644 --- a/stan/math/prim/mat/fun/append_col.hpp +++ b/stan/math/prim/mat/fun/append_col.hpp @@ -46,13 +46,17 @@ append_col(const Eigen::Matrix& A, check_size_match("append_col", "rows of A", Arows, "rows of B", Brows); Matrix, Dynamic, Dynamic> result(Arows, Acols + Bcols); - for (int j = 0; j < Acols; j++) - for (int i = 0; i < Arows; i++) + for (int j = 0; j < Acols; j++) { + for (int i = 0; i < Arows; i++) { result(i, j) = A(i, j); + } + } - for (int j = Acols, k = 0; k < Bcols; j++, k++) - for (int i = 0; i < Arows; i++) + for (int j = Acols, k = 0; k < Bcols; j++, k++) { + for (int i = 0; i < Arows; i++) { result(i, j) = B(i, k); + } + } return result; } @@ -82,10 +86,12 @@ inline Eigen::Matrix, 1, Eigen::Dynamic> append_col( int Asize = A.size(); int Bsize = B.size(); Matrix, 1, Dynamic> result(Asize + Bsize); - for (int i = 0; i < Asize; i++) + for (int i = 0; i < Asize; i++) { result(i) = A(i); - for (int i = 0, j = Asize; i < Bsize; i++, j++) + } + for (int i = 0, j = Asize; i < Bsize; i++, j++) { result(j) = B(i); + } return result; } @@ -172,7 +178,7 @@ inline Eigen::Matrix, 1, Eigen::Dynamic> append_col( const T1& A, const Eigen::Matrix& B) { using Eigen::Dynamic; using Eigen::Matrix; - typedef return_type_t return_type; + using return_type = return_type_t; Matrix result(B.size() + 1); result << A, B.template cast(); @@ -198,7 +204,7 @@ inline Eigen::Matrix, 1, Eigen::Dynamic> append_col( const Eigen::Matrix& A, const T2& B) { using Eigen::Dynamic; using Eigen::Matrix; - typedef return_type_t return_type; + using return_type = return_type_t; Matrix result(A.size() + 1); result << A.template cast(), B; diff --git a/stan/math/prim/mat/fun/append_row.hpp b/stan/math/prim/mat/fun/append_row.hpp index 63adcbfaa7f..91261943fa3 100644 --- a/stan/math/prim/mat/fun/append_row.hpp +++ b/stan/math/prim/mat/fun/append_row.hpp @@ -45,10 +45,12 @@ append_row(const Eigen::Matrix& A, Matrix, Dynamic, Dynamic> result(Arows + Brows, Acols); for (int j = 0; j < Acols; j++) { - for (int i = 0; i < Arows; i++) + for (int i = 0; i < Arows; i++) { result(i, j) = A(i, j); - for (int i = Arows, k = 0; k < Brows; i++, k++) + } + for (int i = Arows, k = 0; k < Brows; i++, k++) { result(i, j) = B(k, j); + } } return result; } @@ -77,10 +79,12 @@ inline Eigen::Matrix, Eigen::Dynamic, 1> append_row( int Asize = A.size(); int Bsize = B.size(); Matrix, 1, Dynamic> result(Asize + Bsize); - for (int i = 0; i < Asize; i++) + for (int i = 0; i < Asize; i++) { result(i) = A(i); - for (int i = 0, j = Asize; i < Bsize; i++, j++) + } + for (int i = 0, j = Asize; i < Bsize; i++, j++) { result(j) = B(i); + } return result; } @@ -165,7 +169,7 @@ inline Eigen::Matrix, Eigen::Dynamic, 1> append_row( const T1& A, const Eigen::Matrix& B) { using Eigen::Dynamic; using Eigen::Matrix; - typedef return_type_t return_type; + using return_type = return_type_t; Matrix result(B.size() + 1); result << A, B.template cast(); @@ -190,7 +194,7 @@ inline Eigen::Matrix, Eigen::Dynamic, 1> append_row( const Eigen::Matrix& A, 
const T2& B) { using Eigen::Dynamic; using Eigen::Matrix; - typedef return_type_t return_type; + using return_type = return_type_t; Matrix result(A.size() + 1); result << A.template cast(), B; diff --git a/stan/math/prim/mat/fun/assign.hpp b/stan/math/prim/mat/fun/assign.hpp index 637f04663dd..d8b2f017cbb 100644 --- a/stan/math/prim/mat/fun/assign.hpp +++ b/stan/math/prim/mat/fun/assign.hpp @@ -23,10 +23,11 @@ namespace math { */ template inline void print_mat_size(std::ostream& o) { - if (N == Eigen::Dynamic) + if (N == Eigen::Dynamic) { o << "dynamically sized"; - else + } else { o << N; + } } /** @@ -107,8 +108,9 @@ template inline void assign(Eigen::Matrix& x, const Eigen::Matrix& y) { check_matching_dims("assign", "left-hand-side", x, "right-hand-side", y); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { assign(x(i), y(i)); + } } /** @@ -140,9 +142,11 @@ inline void assign(Eigen::Block x, const Eigen::Matrix& y) { "right-hand side rows", y.rows()); check_size_match("assign", "left-hand side cols", x.cols(), "right-hand side cols", y.cols()); - for (int n = 0; n < y.cols(); ++n) - for (int m = 0; m < y.rows(); ++m) + for (int n = 0; n < y.cols(); ++n) { + for (int m = 0; m < y.rows(); ++m) { assign(x(m, n), y(m, n)); + } + } } /** @@ -167,8 +171,9 @@ inline void assign(Eigen::Block x, const Eigen::Matrix& y) { template inline void assign(std::vector& x, const std::vector& y) { check_matching_sizes("assign", "left-hand side", x, "right-hand side", y); - for (size_t i = 0; i < x.size(); ++i) + for (size_t i = 0; i < x.size(); ++i) { assign(x[i], y[i]); + } } } // namespace math diff --git a/stan/math/prim/mat/fun/autocorrelation.hpp b/stan/math/prim/mat/fun/autocorrelation.hpp index 9e8951e8e3c..dfa1c603f0f 100644 --- a/stan/math/prim/mat/fun/autocorrelation.hpp +++ b/stan/math/prim/mat/fun/autocorrelation.hpp @@ -15,18 +15,23 @@ namespace internal { * a minimum number of zeros are padded. 
*/ inline size_t fft_next_good_size(size_t N) { - if (N <= 2) + if (N <= 2) { return 2; + } while (true) { size_t m = N; - while ((m % 2) == 0) + while ((m % 2) == 0) { m /= 2; - while ((m % 3) == 0) + } + while ((m % 3) == 0) { m /= 3; - while ((m % 5) == 0) + } + while ((m % 5) == 0) { m /= 5; - if (m <= 1) + } + if (m <= 1) { return N; + } N++; } } @@ -68,12 +73,14 @@ void autocorrelation(const std::vector& y, std::vector& ac, vector centered_signal(y); centered_signal.insert(centered_signal.end(), Mt2 - N, 0.0); T mean = stan::math::mean(y); - for (size_t i = 0; i < N; i++) + for (size_t i = 0; i < N; i++) { centered_signal[i] -= mean; + } fft.fwd(freqvec, centered_signal); - for (size_t i = 0; i < Mt2; ++i) + for (size_t i = 0; i < Mt2; ++i) { freqvec[i] = complex(norm(freqvec[i]), 0.0); + } fft.inv(ac, freqvec); ac.resize(N); @@ -82,8 +89,9 @@ void autocorrelation(const std::vector& y, std::vector& ac, ac[i] /= (N - i); } T var = ac[0]; - for (size_t i = 0; i < N; ++i) + for (size_t i = 0; i < N; ++i) { ac[i] /= var; + } } /** @@ -128,8 +136,9 @@ void autocorrelation(const Eigen::MatrixBase& y, fft.inv(ac_tmp, freqvec); fft.ClearFlag(fft.HalfSpectrum); - for (size_t i = 0; i < N; ++i) + for (size_t i = 0; i < N; ++i) { ac_tmp(i) /= (N - i); + } ac = ac_tmp.head(N).array() / ac_tmp(0); } diff --git a/stan/math/prim/mat/fun/chol2inv.hpp b/stan/math/prim/mat/fun/chol2inv.hpp index f217396580a..929e2dd34da 100644 --- a/stan/math/prim/mat/fun/chol2inv.hpp +++ b/stan/math/prim/mat/fun/chol2inv.hpp @@ -26,9 +26,10 @@ Eigen::Matrix chol2inv( check_square("chol2inv", "L", L); check_lower_triangular("chol2inv", "L", L); int K = L.rows(); - typedef Eigen::Matrix matrix_t; - if (K == 0) + using matrix_t = Eigen::Matrix; + if (K == 0) { return L; + } if (K == 1) { matrix_t X(1, 1); X.coeffRef(0) = inv_square(L.coeff(0)); diff --git a/stan/math/prim/mat/fun/cholesky_corr_constrain.hpp b/stan/math/prim/mat/fun/cholesky_corr_constrain.hpp index 6f0fb6fdd2b..b1f2d2a8ff8 100644 --- a/stan/math/prim/mat/fun/cholesky_corr_constrain.hpp +++ b/stan/math/prim/mat/fun/cholesky_corr_constrain.hpp @@ -21,11 +21,13 @@ Eigen::Matrix cholesky_corr_constrain( check_size_match("cholesky_corr_constrain", "y.size()", y.size(), "k_choose_2", k_choose_2); Matrix z(k_choose_2); - for (int i = 0; i < k_choose_2; ++i) + for (int i = 0; i < k_choose_2; ++i) { z(i) = corr_constrain(y(i)); + } Matrix x(K, K); - if (K == 0) + if (K == 0) { return x; + } x.setZero(); x(0, 0) = 1; int k = 0; @@ -52,11 +54,13 @@ Eigen::Matrix cholesky_corr_constrain( check_size_match("cholesky_corr_constrain", "y.size()", y.size(), "k_choose_2", k_choose_2); Matrix z(k_choose_2); - for (int i = 0; i < k_choose_2; ++i) + for (int i = 0; i < k_choose_2; ++i) { z(i) = corr_constrain(y(i), lp); + } Matrix x(K, K); - if (K == 0) + if (K == 0) { return x; + } x.setZero(); x(0, 0) = 1; int k = 0; diff --git a/stan/math/prim/mat/fun/cholesky_factor_constrain.hpp b/stan/math/prim/mat/fun/cholesky_factor_constrain.hpp index 37f4f5aa314..27f6c5732ed 100644 --- a/stan/math/prim/mat/fun/cholesky_factor_constrain.hpp +++ b/stan/math/prim/mat/fun/cholesky_factor_constrain.hpp @@ -38,16 +38,20 @@ Eigen::Matrix cholesky_factor_constrain( int pos = 0; for (int m = 0; m < N; ++m) { - for (int n = 0; n < m; ++n) + for (int n = 0; n < m; ++n) { y(m, n) = x(pos++); + } y(m, m) = exp(x(pos++)); - for (int n = m + 1; n < N; ++n) + for (int n = m + 1; n < N; ++n) { y(m, n) = zero; + } } - for (int m = N; m < M; ++m) - for (int n = 0; n < N; ++n) + for (int m = N; m < 
M; ++m) { + for (int n = 0; n < N; ++n) { y(m, n) = x(pos++); + } + } return y; } diff --git a/stan/math/prim/mat/fun/cholesky_factor_free.hpp b/stan/math/prim/mat/fun/cholesky_factor_free.hpp index e959a5c3bf2..5b0fd68be9a 100644 --- a/stan/math/prim/mat/fun/cholesky_factor_free.hpp +++ b/stan/math/prim/mat/fun/cholesky_factor_free.hpp @@ -29,14 +29,17 @@ Eigen::Matrix cholesky_factor_free( int pos = 0; for (int m = 0; m < N; ++m) { - for (int n = 0; n < m; ++n) + for (int n = 0; n < m; ++n) { x(pos++) = y(m, n); + } x(pos++) = log(y(m, m)); } - for (int m = N; m < M; ++m) - for (int n = 0; n < N; ++n) + for (int m = N; m < M; ++m) { + for (int n = 0; n < N; ++n) { x(pos++) = y(m, n); + } + } return x; } diff --git a/stan/math/prim/mat/fun/common_type.hpp b/stan/math/prim/mat/fun/common_type.hpp index 5b136197ed2..34596758da3 100644 --- a/stan/math/prim/mat/fun/common_type.hpp +++ b/stan/math/prim/mat/fun/common_type.hpp @@ -18,7 +18,7 @@ namespace math { */ template struct common_type, Eigen::Matrix > { - typedef Eigen::Matrix::type, R, C> type; + using type = Eigen::Matrix::type, R, C>; }; } // namespace math diff --git a/stan/math/prim/mat/fun/corr_matrix_constrain.hpp b/stan/math/prim/mat/fun/corr_matrix_constrain.hpp index 73f9cfd8698..167fbe53796 100644 --- a/stan/math/prim/mat/fun/corr_matrix_constrain.hpp +++ b/stan/math/prim/mat/fun/corr_matrix_constrain.hpp @@ -38,17 +38,18 @@ namespace math { template Eigen::Matrix corr_matrix_constrain( const Eigen::Matrix& x, - typename math::index_type >::type k) { + typename math::index_type>::type k) { using Eigen::Dynamic; using Eigen::Matrix; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; size_type k_choose_2 = (k * (k - 1)) / 2; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "k_choose_2", k_choose_2); Eigen::Array cpcs(k_choose_2); - for (size_type i = 0; i < k_choose_2; ++i) + for (size_type i = 0; i < k_choose_2; ++i) { cpcs[i] = corr_constrain(x[i]); + } return read_corr_matrix(cpcs, k); } @@ -74,19 +75,20 @@ Eigen::Matrix corr_matrix_constrain( template Eigen::Matrix corr_matrix_constrain( const Eigen::Matrix& x, - typename math::index_type >::type k, + typename math::index_type>::type k, T& lp) { using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; size_type k_choose_2 = (k * (k - 1)) / 2; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "k_choose_2", k_choose_2); Array cpcs(k_choose_2); - for (size_type i = 0; i < k_choose_2; ++i) + for (size_type i = 0; i < k_choose_2; ++i) { cpcs[i] = corr_constrain(x[i], lp); + } return read_corr_matrix(cpcs, k, lp); } diff --git a/stan/math/prim/mat/fun/corr_matrix_free.hpp b/stan/math/prim/mat/fun/corr_matrix_free.hpp index 92cb8178603..6b2bd991972 100644 --- a/stan/math/prim/mat/fun/corr_matrix_free.hpp +++ b/stan/math/prim/mat/fun/corr_matrix_free.hpp @@ -42,15 +42,16 @@ Eigen::Matrix corr_matrix_free( using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; size_type k = y.rows(); size_type k_choose_2 = (k * (k - 1)) / 2; Array x(k_choose_2); Array sds(k); bool successful = factor_cov_matrix(y, x, sds); - if (!successful) + if (!successful) { domain_error("corr_matrix_free", "factor_cov_matrix failed on y", y, ""); + } for (size_type i = 0; i < k; ++i) { check_bounded("corr_matrix_free", "log(sd)", 
sds[i], -CONSTRAINT_TOLERANCE, CONSTRAINT_TOLERANCE); diff --git a/stan/math/prim/mat/fun/cov_matrix_constrain.hpp b/stan/math/prim/mat/fun/cov_matrix_constrain.hpp index 71be3e59fd3..69556c60211 100644 --- a/stan/math/prim/mat/fun/cov_matrix_constrain.hpp +++ b/stan/math/prim/mat/fun/cov_matrix_constrain.hpp @@ -27,22 +27,24 @@ namespace math { template Eigen::Matrix cov_matrix_constrain( const Eigen::Matrix& x, - typename math::index_type >::type K) { + typename math::index_type>::type K) { using Eigen::Dynamic; using Eigen::Matrix; using std::exp; - typedef typename index_type >::type index_t; + using index_t = typename index_type>::type; Matrix L(K, K); check_size_match("cov_matrix_constrain", "x.size()", x.size(), "K + (K choose 2)", (K * (K + 1)) / 2); int i = 0; for (index_t m = 0; m < K; ++m) { - for (int n = 0; n < m; ++n) + for (int n = 0; n < m; ++n) { L(m, n) = x(i++); + } L(m, m) = exp(x(i++)); - for (index_t n = m + 1; n < K; ++n) + for (index_t n = m + 1; n < K; ++n) { L(m, n) = 0.0; + } } return multiply_lower_tri_self_transpose(L); } @@ -63,28 +65,31 @@ template Eigen::Matrix cov_matrix_constrain( const Eigen::Matrix& x, typename math::index_type< - Eigen::Matrix >::type K, + Eigen::Matrix>::type K, T& lp) { using Eigen::Dynamic; using Eigen::Matrix; using std::exp; using std::log; - typedef typename index_type >::type index_t; + using index_t = typename index_type>::type; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "K + (K choose 2)", (K * (K + 1)) / 2); Matrix L(K, K); int i = 0; for (index_t m = 0; m < K; ++m) { - for (index_t n = 0; n < m; ++n) + for (index_t n = 0; n < m; ++n) { L(m, n) = x(i++); + } L(m, m) = exp(x(i++)); - for (index_t n = m + 1; n < K; ++n) + for (index_t n = m + 1; n < K; ++n) { L(m, n) = 0.0; + } } // Jacobian for complete transform, including exp() above lp += (K * LOG_2); // needless constant; want propto - for (index_t k = 0; k < K; ++k) + for (index_t k = 0; k < K; ++k) { lp += (K - k + 1) * log(L(k, k)); // only +1 because index from 0 + } return multiply_lower_tri_self_transpose(L); } diff --git a/stan/math/prim/mat/fun/cov_matrix_constrain_lkj.hpp b/stan/math/prim/mat/fun/cov_matrix_constrain_lkj.hpp index 7032e3834bb..eb5101e389c 100644 --- a/stan/math/prim/mat/fun/cov_matrix_constrain_lkj.hpp +++ b/stan/math/prim/mat/fun/cov_matrix_constrain_lkj.hpp @@ -34,11 +34,13 @@ Eigen::Matrix cov_matrix_constrain_lkj( size_t k_choose_2 = (k * (k - 1)) / 2; Eigen::Array cpcs(k_choose_2); int pos = 0; - for (size_t i = 0; i < k_choose_2; ++i) + for (size_t i = 0; i < k_choose_2; ++i) { cpcs[i] = corr_constrain(x[pos++]); + } Eigen::Array sds(k); - for (size_t i = 0; i < k; ++i) + for (size_t i = 0; i < k; ++i) { sds[i] = positive_constrain(x[pos++]); + } return read_cov_matrix(cpcs, sds); } @@ -72,11 +74,13 @@ Eigen::Matrix cov_matrix_constrain_lkj( size_t k_choose_2 = (k * (k - 1)) / 2; Eigen::Array cpcs(k_choose_2); int pos = 0; - for (size_t i = 0; i < k_choose_2; ++i) + for (size_t i = 0; i < k_choose_2; ++i) { cpcs[i] = corr_constrain(x[pos++], lp); + } Eigen::Array sds(k); - for (size_t i = 0; i < k; ++i) + for (size_t i = 0; i < k; ++i) { sds[i] = positive_constrain(x[pos++], lp); + } return read_cov_matrix(cpcs, sds, lp); } diff --git a/stan/math/prim/mat/fun/cov_matrix_free.hpp b/stan/math/prim/mat/fun/cov_matrix_free.hpp index 5ff3ed646ea..efb5206c4c3 100644 --- a/stan/math/prim/mat/fun/cov_matrix_free.hpp +++ b/stan/math/prim/mat/fun/cov_matrix_free.hpp @@ -41,8 +41,9 @@ Eigen::Matrix cov_matrix_free( using std::log; 
int K = y.rows(); - for (int k = 0; k < K; ++k) + for (int k = 0; k < K; ++k) { check_positive("cov_matrix_free", "y", y(k, k)); + } Eigen::Matrix x((K * (K + 1)) / 2); // FIXME: see Eigen LDLT for rank-revealing version -- use that // even if less efficient? @@ -51,8 +52,9 @@ Eigen::Matrix cov_matrix_free( Eigen::Matrix L = llt.matrixL(); int i = 0; for (int m = 0; m < K; ++m) { - for (int n = 0; n < m; ++n) + for (int n = 0; n < m; ++n) { x(i++) = L(m, n); + } x(i++) = log(L(m, m)); } return x; diff --git a/stan/math/prim/mat/fun/cov_matrix_free_lkj.hpp b/stan/math/prim/mat/fun/cov_matrix_free_lkj.hpp index 725af0df6ff..1a6c547cc3c 100644 --- a/stan/math/prim/mat/fun/cov_matrix_free_lkj.hpp +++ b/stan/math/prim/mat/fun/cov_matrix_free_lkj.hpp @@ -34,7 +34,7 @@ Eigen::Matrix cov_matrix_free_lkj( using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; check_nonzero_size("cov_matrix_free_lkj", "y", y); check_square("cov_matrix_free_lkj", "y", y); @@ -43,15 +43,18 @@ Eigen::Matrix cov_matrix_free_lkj( Array cpcs(k_choose_2); Array sds(k); bool successful = factor_cov_matrix(y, cpcs, sds); - if (!successful) + if (!successful) { domain_error("cov_matrix_free_lkj", "factor_cov_matrix failed on y", "", ""); + } Matrix x(k_choose_2 + k); size_type pos = 0; - for (size_type i = 0; i < k_choose_2; ++i) + for (size_type i = 0; i < k_choose_2; ++i) { x[pos++] = cpcs[i]; - for (size_type i = 0; i < k; ++i) + } + for (size_type i = 0; i < k; ++i) { x[pos++] = sds[i]; + } return x; } diff --git a/stan/math/prim/mat/fun/csr_extract_u.hpp b/stan/math/prim/mat/fun/csr_extract_u.hpp index a22cce3c765..517c1b6a30f 100644 --- a/stan/math/prim/mat/fun/csr_extract_u.hpp +++ b/stan/math/prim/mat/fun/csr_extract_u.hpp @@ -24,8 +24,9 @@ template const std::vector csr_extract_u( const Eigen::SparseMatrix& A) { std::vector u(A.outerSize() + 1); // last entry is garbage. 
- for (int nze = 0; nze <= A.outerSize(); ++nze) + for (int nze = 0; nze <= A.outerSize(); ++nze) { u[nze] = *(A.outerIndexPtr() + nze) + stan::error_index::value; + } return u; } diff --git a/stan/math/prim/mat/fun/csr_extract_v.hpp b/stan/math/prim/mat/fun/csr_extract_v.hpp index 7b0834760a3..66208dcb75e 100644 --- a/stan/math/prim/mat/fun/csr_extract_v.hpp +++ b/stan/math/prim/mat/fun/csr_extract_v.hpp @@ -25,8 +25,9 @@ template const std::vector csr_extract_v( const Eigen::SparseMatrix& A) { std::vector v(A.nonZeros()); - for (int nze = 0; nze < A.nonZeros(); ++nze) + for (int nze = 0; nze < A.nonZeros(); ++nze) { v[nze] = *(A.innerIndexPtr() + nze) + stan::error_index::value; + } return v; } diff --git a/stan/math/prim/mat/fun/csr_extract_w.hpp b/stan/math/prim/mat/fun/csr_extract_w.hpp index 5d8d67d4d46..07d270dd542 100644 --- a/stan/math/prim/mat/fun/csr_extract_w.hpp +++ b/stan/math/prim/mat/fun/csr_extract_w.hpp @@ -21,8 +21,9 @@ const Eigen::Matrix csr_extract_w( const Eigen::SparseMatrix& A) { Eigen::Matrix w(A.nonZeros()); w.setZero(); - for (int nze = 0; nze < A.nonZeros(); ++nze) + for (int nze = 0; nze < A.nonZeros(); ++nze) { w[nze] = *(A.valuePtr() + nze); + } return w; } diff --git a/stan/math/prim/mat/fun/csr_matrix_times_vector.hpp b/stan/math/prim/mat/fun/csr_matrix_times_vector.hpp index adb6572527f..957014936f5 100644 --- a/stan/math/prim/mat/fun/csr_matrix_times_vector.hpp +++ b/stan/math/prim/mat/fun/csr_matrix_times_vector.hpp @@ -78,7 +78,7 @@ csr_matrix_times_vector(int m, int n, const Eigen::Matrix& w, const std::vector& v, const std::vector& u, const Eigen::Matrix& b) { - typedef return_type_t result_t; + using result_t = return_type_t; check_positive("csr_matrix_times_vector", "m", m); check_positive("csr_matrix_times_vector", "n", n); @@ -87,8 +87,9 @@ csr_matrix_times_vector(int m, int n, check_size_match("csr_matrix_times_vector", "w", w.size(), "v", v.size()); check_size_match("csr_matrix_times_vector", "u/z", u[m - 1] + csr_u_to_z(u, m - 1) - 1, "v", v.size()); - for (int i : v) + for (int i : v) { check_range("csr_matrix_times_vector", "v[]", n, i); + } Eigen::Matrix result(m); result.setZero(); diff --git a/stan/math/prim/mat/fun/csr_to_dense_matrix.hpp b/stan/math/prim/mat/fun/csr_to_dense_matrix.hpp index ecd3ee618ba..eca795ab404 100644 --- a/stan/math/prim/mat/fun/csr_to_dense_matrix.hpp +++ b/stan/math/prim/mat/fun/csr_to_dense_matrix.hpp @@ -45,8 +45,9 @@ inline Eigen::Matrix csr_to_dense_matrix( check_size_match("csr_to_dense_matrix", "w", w.size(), "v", v.size()); check_size_match("csr_to_dense_matrix", "u/z", u[m - 1] + csr_u_to_z(u, m - 1) - 1, "v", v.size()); - for (int i : v) + for (int i : v) { check_range("csr_to_dense_matrix", "v[]", n, i); + } Matrix result(m, n); result.setZero(); diff --git a/stan/math/prim/mat/fun/cumulative_sum.hpp b/stan/math/prim/mat/fun/cumulative_sum.hpp index 94d11558438..c7e281d037c 100644 --- a/stan/math/prim/mat/fun/cumulative_sum.hpp +++ b/stan/math/prim/mat/fun/cumulative_sum.hpp @@ -23,8 +23,9 @@ namespace math { template inline std::vector cumulative_sum(const std::vector& x) { std::vector result(x.size()); - if (x.size() == 0) + if (x.size() == 0) { return result; + } std::partial_sum(x.begin(), x.end(), result.begin(), std::plus()); return result; } @@ -46,8 +47,9 @@ inline std::vector cumulative_sum(const std::vector& x) { template inline Eigen::Matrix cumulative_sum(const Eigen::Matrix& m) { Eigen::Matrix result(m.rows(), m.cols()); - if (m.size() == 0) + if (m.size() == 0) { return result; + } 
std::partial_sum(m.data(), m.data() + m.size(), result.data(), std::plus()); return result; diff --git a/stan/math/prim/mat/fun/dims.hpp b/stan/math/prim/mat/fun/dims.hpp index 174f965269d..283f4b81ad3 100644 --- a/stan/math/prim/mat/fun/dims.hpp +++ b/stan/math/prim/mat/fun/dims.hpp @@ -19,8 +19,9 @@ inline void dims(const Eigen::Matrix& x, std::vector& result) { template inline void dims(const std::vector& x, std::vector& result) { result.push_back(x.size()); - if (x.size() > 0) + if (x.size() > 0) { dims(x[0], result); + } } template diff --git a/stan/math/prim/mat/fun/dot_product.hpp b/stan/math/prim/mat/fun/dot_product.hpp index 34f62068834..4beecd59371 100644 --- a/stan/math/prim/mat/fun/dot_product.hpp +++ b/stan/math/prim/mat/fun/dot_product.hpp @@ -33,8 +33,9 @@ inline double dot_product(const Eigen::Matrix &v1, */ inline double dot_product(const double *v1, const double *v2, size_t length) { double result = 0; - for (size_t i = 0; i < length; i++) + for (size_t i = 0; i < length; i++) { result += v1[i] * v2[i]; + } return result; } /** diff --git a/stan/math/prim/mat/fun/factor_cov_matrix.hpp b/stan/math/prim/mat/fun/factor_cov_matrix.hpp index 5d5c1f65802..0326c44968c 100644 --- a/stan/math/prim/mat/fun/factor_cov_matrix.hpp +++ b/stan/math/prim/mat/fun/factor_cov_matrix.hpp @@ -28,8 +28,9 @@ bool factor_cov_matrix( size_t K = sds.rows(); sds = Sigma.diagonal().array(); - if ((sds <= 0.0).any()) + if ((sds <= 0.0).any()) { return false; + } sds = sds.sqrt(); Eigen::DiagonalMatrix D(K); @@ -41,8 +42,9 @@ bool factor_cov_matrix( R.diagonal().setOnes(); Eigen::LDLT > ldlt; ldlt = R.ldlt(); - if (!ldlt.isPositive()) + if (!ldlt.isPositive()) { return false; + } Eigen::Matrix U = ldlt.matrixU(); factor_U(U, CPCs); return true; diff --git a/stan/math/prim/mat/fun/gp_dot_prod_cov.hpp b/stan/math/prim/mat/fun/gp_dot_prod_cov.hpp index fe80d993c2c..fe10a2cc45b 100644 --- a/stan/math/prim/mat/fun/gp_dot_prod_cov.hpp +++ b/stan/math/prim/mat/fun/gp_dot_prod_cov.hpp @@ -51,8 +51,9 @@ gp_dot_prod_cov(const std::vector> &x, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } T_sigma sigma_sq = square(sigma); @@ -102,8 +103,9 @@ gp_dot_prod_cov(const std::vector &x, const T_sigma &sigma) { Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } T_sigma sigma_sq = square(sigma); @@ -164,8 +166,9 @@ gp_dot_prod_cov(const std::vector> &x1, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } T_sigma sigma_sq = square(sigma); @@ -219,8 +222,9 @@ gp_dot_prod_cov(const std::vector &x1, const std::vector &x2, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } T_sigma sigma_sq = square(sigma); diff --git a/stan/math/prim/mat/fun/gp_exp_quad_cov.hpp b/stan/math/prim/mat/fun/gp_exp_quad_cov.hpp index 2220d7a7c4b..10e57959d45 100644 --- a/stan/math/prim/mat/fun/gp_exp_quad_cov.hpp +++ b/stan/math/prim/mat/fun/gp_exp_quad_cov.hpp @@ -187,11 +187,13 @@ gp_exp_quad_cov(const std::vector &x, const T_sigma &sigma, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan("gp_exp_quad_cov", "x", x[n]); + } cov = internal::gp_exp_quad_cov(x, square(sigma), -0.5 / square(length_scale)); @@ -224,8 
+226,9 @@ gp_exp_quad_cov(const std::vector> &x, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } check_size_match("gp_exp_quad_cov", "x dimension", x[0].size(), "number of length scales", length_scale.size()); @@ -268,13 +271,16 @@ gp_exp_quad_cov(const std::vector &x1, const std::vector &x2, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } - for (size_t i = 0; i < x1_size; ++i) + for (size_t i = 0; i < x1_size; ++i) { check_not_nan(function_name, "x1", x1[i]); - for (size_t i = 0; i < x2_size; ++i) + } + for (size_t i = 0; i < x2_size; ++i) { check_not_nan(function_name, "x2", x2[i]); + } cov = internal::gp_exp_quad_cov(x1, x2, square(sigma), -0.5 / square(length_scale)); @@ -314,14 +320,17 @@ gp_exp_quad_cov(const std::vector> &x1, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } const char *function_name = "gp_exp_quad_cov"; - for (size_t i = 0; i < x1_size; ++i) + for (size_t i = 0; i < x1_size; ++i) { check_not_nan(function_name, "x1", x1[i]); - for (size_t i = 0; i < x2_size; ++i) + } + for (size_t i = 0; i < x2_size; ++i) { check_not_nan(function_name, "x2", x2[i]); + } check_positive_finite(function_name, "magnitude", sigma); check_positive_finite(function_name, "length scale", length_scale); check_size_match(function_name, "x dimension", x1[0].size(), @@ -355,12 +364,14 @@ inline Eigen::MatrixXd gp_exp_quad_cov(const std::vector &x, const auto x_size = x.size(); Eigen::MatrixXd cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } const auto total_size = x_size + cov.size(); if (total_size < opencl_context.tuning_opts().gp_exp_quad_cov_simple) { - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan("gp_exp_quad_cov", "x", x[n]); + } cov = internal::gp_exp_quad_cov(x, square(sigma), -0.5 / square(length_scale)); @@ -397,14 +408,16 @@ inline Eigen::MatrixXd gp_exp_quad_cov(const std::vector &x, const size_t x_size = x.size(); Eigen::MatrixXd cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } const size_t inner_x1_size = x[0].size(); const auto total_size = x_size * inner_x1_size + cov.size(); if (total_size < opencl_context.tuning_opts().gp_exp_quad_cov_complex) { - for (size_t i = 0; i < x_size; ++i) + for (size_t i = 0; i < x_size; ++i) { check_not_nan("gp_exp_quad_cov", "x", x[i]); + } cov = internal::gp_exp_quad_cov(x, square(sigma), -0.5 / square(length_scale)); return cov; @@ -439,8 +452,9 @@ inline Eigen::MatrixXd gp_exp_quad_cov( const size_t x_size = x.size(); Eigen::MatrixXd cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } const size_t inner_x1_size = x[0].size(); check_size_match(function_name, "x dimension", inner_x1_size, @@ -484,14 +498,17 @@ inline typename Eigen::MatrixXd gp_exp_quad_cov(const std::vector &x1, check_positive_finite(function_name, "length scale", length_scale); Eigen::MatrixXd cov(x1.size(), x2.size()); - if (x1.size() == 0 || x1.size() == 0) + if (x1.size() == 0 || x1.size() == 0) { return cov; + } const auto total_size = x1.size() + x2.size() + cov.size(); if (total_size < opencl_context.tuning_opts().gp_exp_quad_cov_simple) { - for (size_t i = 0; i < x1.size(); ++i) + for (size_t i = 0; i < x1.size(); ++i) { check_not_nan(function_name, "x1", x1[i]); - for (size_t i = 0; i 
< x2.size(); ++i) + } + for (size_t i = 0; i < x2.size(); ++i) { check_not_nan(function_name, "x2", x2[i]); + } cov = internal::gp_exp_quad_cov(x1, x2, square(sigma), -0.5 / square(length_scale)); @@ -534,18 +551,21 @@ inline typename Eigen::MatrixXd gp_exp_quad_cov( check_positive_finite(function_name, "length scale", length_scale); Eigen::MatrixXd cov(x1.size(), x2.size()); - if (x1.size() == 0 || x1.size() == 0) + if (x1.size() == 0 || x1.size() == 0) { return cov; + } const int x1_inner_size = x1[0].size(); const int x2_inner_size = x1[0].size(); const auto total_size = x1_size * x1_inner_size + x2_size * x2_inner_size + cov.size(); if (total_size < opencl_context.tuning_opts().gp_exp_quad_cov_complex) { - for (size_t i = 0; i < x1.size(); ++i) + for (size_t i = 0; i < x1.size(); ++i) { check_not_nan(function_name, "x1", x1[i]); - for (size_t i = 0; i < x2.size(); ++i) + } + for (size_t i = 0; i < x2.size(); ++i) { check_not_nan(function_name, "x2", x2[i]); + } cov = internal::gp_exp_quad_cov(x1, x2, square(sigma), -0.5 / square(length_scale)); @@ -585,8 +605,9 @@ inline typename Eigen::MatrixXd gp_exp_quad_cov( size_t l_size = length_scale.size(); Eigen::MatrixXd cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } const int x1_inner_size = x1[0].size(); const int x2_inner_size = x1[0].size(); @@ -600,10 +621,12 @@ inline typename Eigen::MatrixXd gp_exp_quad_cov( const auto total_size = x1_size * x1_inner_size + x2_size * x2_inner_size + l_size + cov.size(); if (total_size < opencl_context.tuning_opts().gp_exp_quad_cov_complex) { - for (size_t i = 0; i < x1_size; ++i) + for (size_t i = 0; i < x1_size; ++i) { check_not_nan(function_name, "x1", x1[i]); - for (size_t i = 0; i < x2_size; ++i) + } + for (size_t i = 0; i < x2_size; ++i) { check_not_nan(function_name, "x1", x2[i]); + } cov = internal::gp_exp_quad_cov(divide_columns(x1, length_scale), divide_columns(x2, length_scale), square(sigma)); diff --git a/stan/math/prim/mat/fun/gp_exponential_cov.hpp b/stan/math/prim/mat/fun/gp_exponential_cov.hpp index 15db9444ff9..a3ca2efe711 100644 --- a/stan/math/prim/mat/fun/gp_exponential_cov.hpp +++ b/stan/math/prim/mat/fun/gp_exponential_cov.hpp @@ -42,17 +42,20 @@ gp_exponential_cov(const std::vector &x, const T_s &sigma, size_t x_size = size_of(x); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } const char *function = "gp_exponential_cov"; size_t x_obs_size = size_of(x[0]); - for (size_t i = 0; i < x_size; ++i) + for (size_t i = 0; i < x_size; ++i) { check_size_match(function, "x row", x_obs_size, "x's other row", size_of(x[i])); + } - for (size_t i = 0; i < x_size; ++i) + for (size_t i = 0; i < x_size; ++i) { check_not_nan(function, "x", x[i]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); @@ -97,12 +100,14 @@ gp_exponential_cov(const std::vector> &x, size_t x_size = size_of(x); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } const char *function = "gp_exponential_cov"; - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "x", x[n]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); @@ -159,22 +164,27 @@ gp_exponential_cov(const std::vector &x1, const std::vector &x2, Eigen::Matrix, 
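
The gp_exp_quad_cov overloads patched above all hand an internal helper square(sigma) and -0.5 / square(length_scale), i.e. the squared-exponential (RBF) kernel k(x, x') = sigma^2 * exp(-0.5 * (x - x')^2 / l^2). As a point of reference only, here is a minimal double-only sketch of that kernel; the name is illustrative and not a library function, and the real overloads additionally handle autodiff scalars, vector inputs with per-dimension length scales, and the OpenCL path.

#include <cmath>
#include <vector>
#include <Eigen/Dense>

// Illustrative sketch only: scalar inputs, no argument checks, no autodiff.
inline Eigen::MatrixXd exp_quad_cov_sketch(const std::vector<double>& x,
                                           double sigma, double length_scale) {
  const double sigma_sq = sigma * sigma;
  const double neg_half_inv_l_sq = -0.5 / (length_scale * length_scale);
  const Eigen::Index n = static_cast<Eigen::Index>(x.size());
  Eigen::MatrixXd cov(n, n);
  for (Eigen::Index j = 0; j < n; ++j) {
    cov(j, j) = sigma_sq;  // zero distance on the diagonal
    for (Eigen::Index i = j + 1; i < n; ++i) {
      const double d = x[i] - x[j];
      cov(i, j) = sigma_sq * std::exp(neg_half_inv_l_sq * d * d);
      cov(j, i) = cov(i, j);  // the kernel is symmetric
    }
  }
  return cov;
}
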
Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } const char *function = "gp_exponential_cov"; size_t x1_obs_size = size_of(x1[0]); - for (size_t i = 0; i < x1_size; ++i) + for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x1's other row", size_of(x1[i])); - for (size_t i = 0; i < x2_size; ++i) + } + for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x2's other row", size_of(x2[i])); + } - for (size_t n = 0; n < x1_size; ++n) + for (size_t n = 0; n < x1_size; ++n) { check_not_nan(function, "x1", x1[n]); - for (size_t n = 0; n < x2_size; ++n) + } + for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); @@ -225,24 +235,29 @@ gp_exponential_cov(const std::vector> &x1, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } const char *function = "gp_exponential_cov"; - for (size_t n = 0; n < x1_size; ++n) + for (size_t n = 0; n < x1_size; ++n) { check_not_nan(function, "x1", x1[n]); - for (size_t n = 0; n < x2_size; ++n) + } + for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); - for (size_t i = 0; i < x1_size; ++i) + for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", size_of(x1[i]), "number of length scales", l_size); - for (size_t i = 0; i < x2_size; ++i) + } + for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x2's row", size_of(x2[i]), "number of length scales", l_size); + } T_s sigma_sq = square(sigma); T_l temp; diff --git a/stan/math/prim/mat/fun/gp_matern32_cov.hpp b/stan/math/prim/mat/fun/gp_matern32_cov.hpp index 1324e8c5560..8e6d3c2398a 100644 --- a/stan/math/prim/mat/fun/gp_matern32_cov.hpp +++ b/stan/math/prim/mat/fun/gp_matern32_cov.hpp @@ -48,17 +48,20 @@ gp_matern32_cov(const std::vector &x, const T_s &sigma, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } const char *function = "gp_matern32_cov"; size_t x_obs_size = size_of(x[0]); - for (size_t i = 0; i < x_size; ++i) + for (size_t i = 0; i < x_size; ++i) { check_size_match(function, "x row", x_obs_size, "x's other row", size_of(x[i])); + } - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "x", x[n]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); @@ -108,18 +111,21 @@ gp_matern32_cov(const std::vector> &x, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } const char *function = "gp_matern32_cov"; size_t l_size = length_scale.size(); - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "x", x[n]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "length scale", length_scale[n]); + } check_size_match(function, "x dimension", size_of(x[0]), 
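
gp_exponential_cov, whose hunks appear above, is the Matern nu = 1/2 kernel, usually written k(x, x') = sigma^2 * exp(-d(x, x') / l) with d the distance between inputs (absolute difference for scalars). A hedged cross-covariance sketch under that assumption follows; the name is illustrative and all validation is omitted.

#include <cmath>
#include <vector>
#include <Eigen/Dense>

// Illustrative exponential-kernel cross covariance for scalar inputs.
inline Eigen::MatrixXd exponential_cov_sketch(const std::vector<double>& x1,
                                              const std::vector<double>& x2,
                                              double sigma,
                                              double length_scale) {
  const double sigma_sq = sigma * sigma;
  const double neg_inv_l = -1.0 / length_scale;
  Eigen::MatrixXd cov(x1.size(), x2.size());
  for (Eigen::Index i = 0; i < cov.rows(); ++i) {
    for (Eigen::Index j = 0; j < cov.cols(); ++j) {
      cov(i, j) = sigma_sq * std::exp(neg_inv_l * std::abs(x1[i] - x2[j]));
    }
  }
  return cov;
}
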
"number of length scales", l_size); @@ -178,22 +184,27 @@ gp_matern32_cov(const std::vector &x1, const std::vector &x2, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } const char *function = "gp_matern32_cov"; size_t x1_obs_size = size_of(x1[0]); - for (size_t i = 0; i < x1_size; ++i) + for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x1's other row", size_of(x1[i])); - for (size_t i = 0; i < x2_size; ++i) + } + for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x2's other row", size_of(x2[i])); + } - for (size_t n = 0; n < x1_size; ++n) + for (size_t n = 0; n < x1_size; ++n) { check_not_nan(function, "x1", x1[n]); - for (size_t n = 0; n < x2_size; ++n) + } + for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); @@ -250,28 +261,34 @@ gp_matern32_cov(const std::vector> &x1, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } const char *function = "gp_matern_32_cov"; - for (size_t n = 0; n < x1_size; ++n) + for (size_t n = 0; n < x1_size; ++n) { check_not_nan(function, "x1", x1[n]); - for (size_t n = 0; n < x2_size; ++n) + } + for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); + } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); size_t l_size = length_scale.size(); - for (size_t n = 0; n < l_size; ++n) + for (size_t n = 0; n < l_size; ++n) { check_not_nan(function, "length scale", length_scale[n]); + } - for (size_t i = 0; i < x1_size; ++i) + for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", size_of(x1[i]), "number of length scales", l_size); - for (size_t i = 0; i < x2_size; ++i) + } + for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x2's row", size_of(x2[i]), "number of length scales", l_size); + } T_s sigma_sq = square(sigma); double root_3 = sqrt(3.0); diff --git a/stan/math/prim/mat/fun/gp_matern52_cov.hpp b/stan/math/prim/mat/fun/gp_matern52_cov.hpp index 4cbbf2d5203..5f1fa5d310c 100644 --- a/stan/math/prim/mat/fun/gp_matern52_cov.hpp +++ b/stan/math/prim/mat/fun/gp_matern52_cov.hpp @@ -49,11 +49,13 @@ gp_matern52_cov(const std::vector &x, const T_s &sigma, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan("gp_matern52_cov", "x", x[n]); + } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); @@ -110,12 +112,14 @@ gp_matern52_cov(const std::vector> &x, size_t x_size = x.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); - if (x_size == 0) + if (x_size == 0) { return cov; + } size_t l_size = length_scale.size(); - for (size_t n = 0; n < x_size; ++n) + for (size_t n = 0; n < x_size; ++n) { check_not_nan("gp_matern52_cov", "x", x[n]); + } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); @@ -179,13 +183,16 @@ gp_matern52_cov(const std::vector &x1, const std::vector &x2, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 
0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } - for (size_t n = 0; n < x1_size; ++n) + for (size_t n = 0; n < x1_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x1[n]); - for (size_t n = 0; n < x2_size; ++n) + } + for (size_t n = 0; n < x2_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x2[n]); + } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); @@ -247,15 +254,18 @@ gp_matern52_cov(const std::vector> &x1, Eigen::Dynamic> cov(x1_size, x2_size); - if (x1_size == 0 || x2_size == 0) + if (x1_size == 0 || x2_size == 0) { return cov; + } size_t l_size = length_scale.size(); - for (size_t n = 0; n < x1_size; ++n) + for (size_t n = 0; n < x1_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x1[n]); - for (size_t n = 0; n < x2_size; ++n) + } + for (size_t n = 0; n < x2_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x2[n]); + } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); diff --git a/stan/math/prim/mat/fun/gp_periodic_cov.hpp b/stan/math/prim/mat/fun/gp_periodic_cov.hpp index 7693e0b74b6..f9e2a4f2d7e 100644 --- a/stan/math/prim/mat/fun/gp_periodic_cov.hpp +++ b/stan/math/prim/mat/fun/gp_periodic_cov.hpp @@ -55,16 +55,18 @@ gp_periodic_cov(const std::vector &x, const T_sigma &sigma, const T_l &l, check_positive(fun, "signal standard deviation", sigma); check_positive(fun, "length-scale", l); check_positive(fun, "period", p); - for (size_t n = 0; n < x.size(); ++n) + for (size_t n = 0; n < x.size(); ++n) { check_not_nan(fun, "element of x", x[n]); + } Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x.size(), x.size()); size_t x_size = x.size(); - if (x_size == 0) + if (x_size == 0) { return cov; + } T_sigma sigma_sq = square(sigma); T_l neg_two_inv_l_sq = -2.0 * inv_square(l); @@ -124,16 +126,19 @@ gp_periodic_cov(const std::vector &x1, const std::vector &x2, check_positive(fun, "signal standard deviation", sigma); check_positive(fun, "length-scale", l); check_positive(fun, "period", p); - for (size_t n = 0; n < x1.size(); ++n) + for (size_t n = 0; n < x1.size(); ++n) { check_not_nan(fun, "element of x1", x1[n]); - for (size_t n = 0; n < x2.size(); ++n) + } + for (size_t n = 0; n < x2.size(); ++n) { check_not_nan(fun, "element of x2", x2[n]); + } Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1.size(), x2.size()); - if (x1.size() == 0 || x2.size() == 0) + if (x1.size() == 0 || x2.size() == 0) { return cov; + } T_sigma sigma_sq = square(sigma); T_l neg_two_inv_l_sq = -2.0 * inv_square(l); diff --git a/stan/math/prim/mat/fun/head.hpp b/stan/math/prim/mat/fun/head.hpp index bc3562e018d..8138793a467 100644 --- a/stan/math/prim/mat/fun/head.hpp +++ b/stan/math/prim/mat/fun/head.hpp @@ -23,8 +23,9 @@ namespace math { template inline Eigen::Matrix head( const Eigen::Matrix& v, size_t n) { - if (n != 0) + if (n != 0) { check_row_index("head", "n", v, n); + } return v.head(n); } @@ -41,8 +42,9 @@ inline Eigen::Matrix head( template inline Eigen::Matrix head( const Eigen::Matrix& rv, size_t n) { - if (n != 0) + if (n != 0) { check_column_index("head", "n", rv, n); + } return rv.head(n); } @@ -58,12 +60,14 @@ inline Eigen::Matrix head( */ template std::vector head(const std::vector& sv, size_t n) { - if (n != 0) + if (n != 0) { check_std_vector_index("head", "n", sv, n); + } std::vector s; - for (size_t i = 0; i < n; ++i) + for (size_t i = 0; i < n; ++i) { s.push_back(sv[i]); + } return s; 
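
In the gp_periodic_cov hunks above, neg_two_inv_l_sq = -2.0 * inv_square(l) is the exponent scale of the periodic kernel k(x, x') = sigma^2 * exp(-2 * sin^2(pi * |x - x'| / p) / l^2). A scalar sketch under that reading (illustrative name, no checks or autodiff):

#include <cmath>

// Periodic covariance for scalar inputs; p is the period, l the length scale.
inline double periodic_cov_sketch(double x1, double x2, double sigma, double l,
                                  double p) {
  const double pi = std::acos(-1.0);
  const double s = std::sin(pi * std::abs(x1 - x2) / p);
  return sigma * sigma * std::exp(-2.0 * s * s / (l * l));
}
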
} diff --git a/stan/math/prim/mat/fun/initialize.hpp b/stan/math/prim/mat/fun/initialize.hpp index fe545469108..998f17450b5 100644 --- a/stan/math/prim/mat/fun/initialize.hpp +++ b/stan/math/prim/mat/fun/initialize.hpp @@ -22,13 +22,15 @@ inline void initialize(T& x, V v) { } template inline void initialize(Eigen::Matrix& x, const V& v) { - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { initialize(x(i), v); + } } template inline void initialize(std::vector& x, const V& v) { - for (size_t i = 0; i < x.size(); ++i) + for (size_t i = 0; i < x.size(); ++i) { initialize(x[i], v); + } } } // namespace math diff --git a/stan/math/prim/mat/fun/inverse_spd.hpp b/stan/math/prim/mat/fun/inverse_spd.hpp index d5c982a3463..8647d1db1e2 100644 --- a/stan/math/prim/mat/fun/inverse_spd.hpp +++ b/stan/math/prim/mat/fun/inverse_spd.hpp @@ -24,14 +24,18 @@ inline Eigen::Matrix inverse_spd( check_symmetric("inverse_spd", "m", m); Matrix mmt = T(0.5) * (m + m.transpose()); LDLT > ldlt(mmt); - if (ldlt.info() != Eigen::Success) + if (ldlt.info() != Eigen::Success) { domain_error("invese_spd", "LDLT factor failed", "", ""); - if (!ldlt.isPositive()) + } + if (!ldlt.isPositive()) { domain_error("invese_spd", "matrix not positive definite", "", ""); + } Matrix diag_ldlt = ldlt.vectorD(); - for (int i = 0; i < diag_ldlt.size(); ++i) - if (diag_ldlt(i) <= 0) + for (int i = 0; i < diag_ldlt.size(); ++i) { + if (diag_ldlt(i) <= 0) { domain_error("invese_spd", "matrix not positive definite", "", ""); + } + } return ldlt.solve(Eigen::Matrix::Identity( m.rows(), m.cols())); } diff --git a/stan/math/prim/mat/fun/log_mix.hpp b/stan/math/prim/mat/fun/log_mix.hpp index e9a48d65c47..2ef3dede108 100644 --- a/stan/math/prim/mat/fun/log_mix.hpp +++ b/stan/math/prim/mat/fun/log_mix.hpp @@ -41,9 +41,9 @@ template return_type_t log_mix(const T_theta& theta, const T_lam& lambda) { static const char* function = "log_mix"; - typedef partials_return_type_t T_partials_return; + using T_partials_return = partials_return_type_t; - typedef typename Eigen::Matrix T_partials_vec; + using T_partials_vec = typename Eigen::Matrix; const int N = length(theta); @@ -56,13 +56,15 @@ return_type_t log_mix(const T_theta& theta, scalar_seq_view theta_vec(theta); T_partials_vec theta_dbl(N); - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { theta_dbl[n] = value_of(theta_vec[n]); + } scalar_seq_view lam_vec(lambda); T_partials_vec lam_dbl(N); - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { lam_dbl[n] = value_of(lam_vec[n]); + } T_partials_return logp = log_sum_exp((log(theta_dbl) + lam_dbl).eval()); @@ -73,13 +75,15 @@ return_type_t log_mix(const T_theta& theta, operands_and_partials ops_partials(theta, lambda); if (!is_constant_all::value) { - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { ops_partials.edge1_.partials_[n] = theta_deriv[n]; + } } if (!is_constant_all::value) { - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { ops_partials.edge2_.partials_[n] = lam_deriv[n]; + } } return ops_partials.build(logp); @@ -113,19 +117,18 @@ return_type_t log_mix(const T_theta& theta, * @return log mixture of densities in specified proportion */ template -return_type_t > > log_mix( +return_type_t>> log_mix( const T_theta& theta, - const std::vector >& lambda) { + const std::vector>& lambda) { static const char* function = "log_mix"; typedef typename stan::partials_return_type< - T_theta, std::vector > >::type - T_partials_return; + T_theta, std::vector>>::type T_partials_return; - 
typedef typename Eigen::Matrix T_partials_vec; + using T_partials_vec = typename Eigen::Matrix; - typedef typename Eigen::Matrix T_partials_mat; + using T_partials_mat = typename Eigen::Matrix; - typedef typename std::vector > T_lamvec_type; + using T_lamvec_type = typename std::vector>; const int N = length(lambda); const int M = theta.size(); @@ -141,20 +144,24 @@ return_type_t > > log_mix( scalar_seq_view theta_vec(theta); T_partials_vec theta_dbl(M); - for (int m = 0; m < M; ++m) + for (int m = 0; m < M; ++m) { theta_dbl[m] = value_of(theta_vec[m]); + } T_partials_mat lam_dbl(M, N); vector_seq_view lam_vec(lambda); - for (int n = 0; n < N; ++n) - for (int m = 0; m < M; ++m) + for (int n = 0; n < N; ++n) { + for (int m = 0; m < M; ++m) { lam_dbl(m, n) = value_of(lam_vec[n][m]); + } + } T_partials_mat logp_tmp = log(theta_dbl).replicate(1, N) + lam_dbl; T_partials_vec logp(N); - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { logp[n] = log_sum_exp(logp_tmp.col(n).eval()); + } operands_and_partials ops_partials(theta, lambda); @@ -163,14 +170,16 @@ return_type_t > > log_mix( = (lam_dbl - logp.transpose().replicate(M, 1)) .unaryExpr([](T_partials_return x) { return exp(x); }); if (!is_constant_all::value) { - for (int m = 0; m < M; ++m) + for (int m = 0; m < M; ++m) { ops_partials.edge1_.partials_[m] = derivs.row(m).sum(); + } } if (!is_constant_all::value) { - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { ops_partials.edge2_.partials_vec_[n] = derivs.col(n).cwiseProduct(theta_dbl); + } } } return ops_partials.build(logp.sum()); @@ -203,17 +212,17 @@ return_type_t > > log_mix( * @return log mixture of densities in specified proportion */ template -return_type_t > > log_mix( - const T_theta& theta, const std::vector >& lambda) { +return_type_t>> log_mix( + const T_theta& theta, const std::vector>& lambda) { static const char* function = "log_mix"; typedef typename stan::partials_return_type< - T_theta, std::vector > >::type T_partials_return; + T_theta, std::vector>>::type T_partials_return; - typedef typename Eigen::Matrix T_partials_vec; + using T_partials_vec = typename Eigen::Matrix; - typedef typename Eigen::Matrix T_partials_mat; + using T_partials_mat = typename Eigen::Matrix; - typedef typename std::vector > T_lamvec_type; + using T_lamvec_type = typename std::vector>; const int N = length(lambda); const int M = theta.size(); @@ -229,38 +238,46 @@ return_type_t > > log_mix( scalar_seq_view theta_vec(theta); T_partials_vec theta_dbl(M); - for (int m = 0; m < M; ++m) + for (int m = 0; m < M; ++m) { theta_dbl[m] = value_of(theta_vec[m]); + } T_partials_mat lam_dbl(M, N); - for (int n = 0; n < N; ++n) - for (int m = 0; m < M; ++m) + for (int n = 0; n < N; ++n) { + for (int m = 0; m < M; ++m) { lam_dbl(m, n) = value_of(lambda[n][m]); + } + } T_partials_mat logp_tmp = log(theta_dbl).replicate(1, N) + lam_dbl; T_partials_vec logp(N); - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { logp[n] = log_sum_exp(logp_tmp.col(n).eval()); + } T_partials_mat derivs = (lam_dbl - logp.transpose().replicate(M, 1)) .unaryExpr([](T_partials_return x) { return exp(x); }); T_partials_mat lam_deriv(M, N); - for (int n = 0; n < N; ++n) + for (int n = 0; n < N; ++n) { lam_deriv.col(n) = derivs.col(n).cwiseProduct(theta_dbl); + } operands_and_partials ops_partials(theta, lambda); if (!is_constant_all::value) { - for (int m = 0; m < M; ++m) + for (int m = 0; m < M; ++m) { ops_partials.edge1_.partials_[m] = derivs.row(m).sum(); + } } if (!is_constant_all::value) { - for (int 
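
The vectorized log_mix above relies on the identity log(sum_k theta_k * exp(lambda_k)) = log_sum_exp(log(theta) + lambda), staying on the log scale for numerical stability. The sketch below shows only the naive quantity being computed (illustrative name; the library deliberately evaluates it via log_sum_exp instead, precisely to avoid overflow in exp(lambda_k)):

#include <cmath>
#include <cstddef>
#include <vector>

// Direct, non-stabilized log mixture density: theta holds mixing proportions
// (assumed to sum to 1) and lambda the component log densities.
inline double log_mix_naive(const std::vector<double>& theta,
                            const std::vector<double>& lambda) {
  double mix = 0.0;
  for (std::size_t k = 0; k < theta.size(); ++k) {
    mix += theta[k] * std::exp(lambda[k]);
  }
  return std::log(mix);
}
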
n = 0; n < N; ++n) - for (int m = 0; m < M; ++m) + for (int n = 0; n < N; ++n) { + for (int m = 0; m < M; ++m) { ops_partials.edge2_.partials_vec_[n][m] = lam_deriv(m, n); + } + } } return ops_partials.build(logp.sum()); } diff --git a/stan/math/prim/mat/fun/log_sum_exp.hpp b/stan/math/prim/mat/fun/log_sum_exp.hpp index 988d99321b6..f8fd1c05fdc 100644 --- a/stan/math/prim/mat/fun/log_sum_exp.hpp +++ b/stan/math/prim/mat/fun/log_sum_exp.hpp @@ -25,8 +25,9 @@ namespace math { template double log_sum_exp(const Eigen::Matrix& x) { const double max = x.maxCoeff(); - if (!std::isfinite(max)) + if (!std::isfinite(max)) { return max; + } return max + std::log((x.array() - max).exp().sum()); } diff --git a/stan/math/prim/mat/fun/make_nu.hpp b/stan/math/prim/mat/fun/make_nu.hpp index 7ccfd5ca8c8..6b4db295bcf 100644 --- a/stan/math/prim/mat/fun/make_nu.hpp +++ b/stan/math/prim/mat/fun/make_nu.hpp @@ -19,21 +19,23 @@ Eigen::Array make_nu(const T& eta, size_t K) { using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; // Best (1978) implies nu = 2 * alpha for the dof in a t // distribution that generates a beta variate on (-1, 1) Array nu(K * (K - 1) / 2); T alpha = eta + 0.5 * (K - 2.0); // from Lewandowski et. al. T alpha2 = 2.0 * alpha; - for (size_type j = 0; j < (K - 1); ++j) + for (size_type j = 0; j < (K - 1); ++j) { nu(j) = alpha2; + } size_t counter = K - 1; for (size_type i = 1; i < (K - 1); ++i) { alpha -= 0.5; alpha2 = 2.0 * alpha; - for (size_type j = i + 1; j < K; ++j, ++counter) + for (size_type j = i + 1; j < K; ++j, ++counter) { nu(counter) = alpha2; + } } return nu; } diff --git a/stan/math/prim/mat/fun/matrix_exp_action_handler.hpp b/stan/math/prim/mat/fun/matrix_exp_action_handler.hpp index c435e5750d0..c3dcb0a0acc 100644 --- a/stan/math/prim/mat/fun/matrix_exp_action_handler.hpp +++ b/stan/math/prim/mat/fun/matrix_exp_action_handler.hpp @@ -79,8 +79,9 @@ class matrix_exp_action_handler { } F *= eta; B = F; - if (conv) + if (conv) { break; + } } res.col(col) = F; } // loop b columns diff --git a/stan/math/prim/mat/fun/matrix_exp_pade.hpp b/stan/math/prim/mat/fun/matrix_exp_pade.hpp index 001ddd6938d..752305d9592 100644 --- a/stan/math/prim/mat/fun/matrix_exp_pade.hpp +++ b/stan/math/prim/mat/fun/matrix_exp_pade.hpp @@ -26,9 +26,10 @@ MatrixType matrix_exp_pade(const MatrixType& arg) { MatrixType numer = U + V; MatrixType denom = -U + V; MatrixType pade_approximation = denom.partialPivLu().solve(numer); - for (int i = 0; i < squarings; ++i) + for (int i = 0; i < squarings; ++i) { pade_approximation *= pade_approximation; // undo scaling by - // repeated squaring + } + // repeated squaring return pade_approximation; } } // namespace math diff --git a/stan/math/prim/mat/fun/max.hpp b/stan/math/prim/mat/fun/max.hpp index 7fe6eb865b8..edc8bcf3289 100644 --- a/stan/math/prim/mat/fun/max.hpp +++ b/stan/math/prim/mat/fun/max.hpp @@ -33,8 +33,9 @@ inline int max(const std::vector& x) { */ template inline T max(const std::vector& x) { - if (x.size() == 0) + if (x.size() == 0) { return -std::numeric_limits::infinity(); + } Eigen::Map> m(&x[0], x.size()); return m.maxCoeff(); } @@ -47,8 +48,9 @@ inline T max(const std::vector& x) { */ template inline T max(const Eigen::Matrix& m) { - if (m.size() == 0) + if (m.size() == 0) { return -std::numeric_limits::infinity(); + } return m.maxCoeff(); } diff --git a/stan/math/prim/mat/fun/mdivide_right_tri.hpp 
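
The prim/mat log_sum_exp hunk above uses the standard max-subtraction trick, log(sum_i exp(x_i)) = max + log(sum_i exp(x_i - max)), with an early return when the max is not finite. The same idea on a plain std::vector, as a sketch (illustrative name; assumes a non-empty input):

#include <algorithm>
#include <cmath>
#include <vector>

// Overflow-safe log(sum(exp(x))); subtracting the max keeps every exp() <= 1.
inline double log_sum_exp_sketch(const std::vector<double>& x) {
  const double max = *std::max_element(x.begin(), x.end());
  if (!std::isfinite(max)) {
    return max;  // e.g. every element is -inf, or an infinite element dominates
  }
  double sum = 0.0;
  for (double xi : x) {
    sum += std::exp(xi - max);
  }
  return max + std::log(sum);
}
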
b/stan/math/prim/mat/fun/mdivide_right_tri.hpp index 5ca12e19c6d..749395bbeac 100644 --- a/stan/math/prim/mat/fun/mdivide_right_tri.hpp +++ b/stan/math/prim/mat/fun/mdivide_right_tri.hpp @@ -28,10 +28,11 @@ inline Eigen::Matrix, R1, C2> mdivide_right_tri( const Eigen::Matrix &b, const Eigen::Matrix &A) { check_square("mdivide_right_tri", "A", A); check_multiplicable("mdivide_right_tri", "b", b, "A", A); - if (TriView != Eigen::Lower && TriView != Eigen::Upper) + if (TriView != Eigen::Lower && TriView != Eigen::Upper) { domain_error("mdivide_left_tri", "triangular view must be Eigen::Lower or Eigen::Upper", "", ""); + } return promote_common, Eigen::Matrix >( A) .template triangularView() diff --git a/stan/math/prim/mat/fun/min.hpp b/stan/math/prim/mat/fun/min.hpp index 1fd86cce4b6..8ef449c7241 100644 --- a/stan/math/prim/mat/fun/min.hpp +++ b/stan/math/prim/mat/fun/min.hpp @@ -32,8 +32,9 @@ inline int min(const std::vector& x) { */ template inline T min(const std::vector& x) { - if (x.size() == 0) + if (x.size() == 0) { return std::numeric_limits::infinity(); + } Eigen::Map> m(&x[0], x.size()); return m.minCoeff(); } @@ -46,8 +47,9 @@ inline T min(const std::vector& x) { */ template inline T min(const Eigen::Matrix& m) { - if (m.size() == 0) + if (m.size() == 0) { return std::numeric_limits::infinity(); + } return m.minCoeff(); } diff --git a/stan/math/prim/mat/fun/multiply_lower_tri_self_transpose.hpp b/stan/math/prim/mat/fun/multiply_lower_tri_self_transpose.hpp index f174b795939..c1f0c9d6705 100644 --- a/stan/math/prim/mat/fun/multiply_lower_tri_self_transpose.hpp +++ b/stan/math/prim/mat/fun/multiply_lower_tri_self_transpose.hpp @@ -17,8 +17,9 @@ namespace math { */ inline matrix_d multiply_lower_tri_self_transpose(const matrix_d& L) { int K = L.rows(); - if (K == 0) + if (K == 0) { return L; + } if (K == 1) { matrix_d result(1, 1); result(0) = square(L(0)); // first elt, so don't need double idx @@ -30,8 +31,9 @@ inline matrix_d multiply_lower_tri_self_transpose(const matrix_d& L) { for (int m = 0; m < K; ++m) { int k = (J < m + 1) ? 
J : m + 1; LLt(m, m) = Lt.col(m).head(k).squaredNorm(); - for (int n = (m + 1); n < K; ++n) + for (int n = (m + 1); n < K; ++n) { LLt(n, m) = LLt(m, n) = Lt.col(m).head(k).dot(Lt.col(n).head(k)); + } } return LLt; } diff --git a/stan/math/prim/mat/fun/num_elements.hpp b/stan/math/prim/mat/fun/num_elements.hpp index fe19fec7070..6667567c253 100644 --- a/stan/math/prim/mat/fun/num_elements.hpp +++ b/stan/math/prim/mat/fun/num_elements.hpp @@ -39,8 +39,9 @@ inline int num_elements(const Eigen::Matrix& m) { */ template inline int num_elements(const std::vector& v) { - if (v.size() == 0) + if (v.size() == 0) { return 0; + } return v.size() * num_elements(v[0]); } diff --git a/stan/math/prim/mat/fun/ordered_constrain.hpp b/stan/math/prim/mat/fun/ordered_constrain.hpp index 0e3d4799f47..a2981eba2c7 100644 --- a/stan/math/prim/mat/fun/ordered_constrain.hpp +++ b/stan/math/prim/mat/fun/ordered_constrain.hpp @@ -24,15 +24,17 @@ Eigen::Matrix ordered_constrain( using Eigen::Matrix; using std::exp; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; size_type k = x.size(); Matrix y(k); - if (k == 0) + if (k == 0) { return y; + } y[0] = x[0]; - for (size_type i = 1; i < k; ++i) + for (size_type i = 1; i < k; ++i) { y[i] = y[i - 1] + exp(x[i]); + } return y; } @@ -54,10 +56,11 @@ inline Eigen::Matrix ordered_constrain( using Eigen::Dynamic; using Eigen::Matrix; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; - for (size_type i = 1; i < x.size(); ++i) + for (size_type i = 1; i < x.size(); ++i) { lp += x(i); + } return ordered_constrain(x); } diff --git a/stan/math/prim/mat/fun/ordered_free.hpp b/stan/math/prim/mat/fun/ordered_free.hpp index 55148ee60c7..b9932477075 100644 --- a/stan/math/prim/mat/fun/ordered_free.hpp +++ b/stan/math/prim/mat/fun/ordered_free.hpp @@ -28,15 +28,17 @@ Eigen::Matrix ordered_free( using Eigen::Dynamic; using Eigen::Matrix; using std::log; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; size_type k = y.size(); Matrix x(k); - if (k == 0) + if (k == 0) { return x; + } x[0] = y[0]; - for (size_type i = 1; i < k; ++i) + for (size_type i = 1; i < k; ++i) { x[i] = log(y[i] - y[i - 1]); + } return x; } } // namespace math diff --git a/stan/math/prim/mat/fun/positive_ordered_constrain.hpp b/stan/math/prim/mat/fun/positive_ordered_constrain.hpp index a39d4d0c246..672a604a0e2 100644 --- a/stan/math/prim/mat/fun/positive_ordered_constrain.hpp +++ b/stan/math/prim/mat/fun/positive_ordered_constrain.hpp @@ -23,15 +23,17 @@ Eigen::Matrix positive_ordered_constrain( using Eigen::Dynamic; using Eigen::Matrix; using std::exp; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; size_type k = x.size(); Matrix y(k); - if (k == 0) + if (k == 0) { return y; + } y[0] = exp(x[0]); - for (size_type i = 1; i < k; ++i) + for (size_type i = 1; i < k; ++i) { y[i] = y[i - 1] + exp(x[i]); + } return y; } @@ -52,10 +54,11 @@ inline Eigen::Matrix positive_ordered_constrain( const Eigen::Matrix& x, T& lp) { using Eigen::Dynamic; using Eigen::Matrix; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; - for (size_type i = 0; i < x.size(); ++i) + for (size_type i = 0; i < x.size(); ++i) { lp += x(i); + } return positive_ordered_constrain(x); } diff --git a/stan/math/prim/mat/fun/positive_ordered_free.hpp b/stan/math/prim/mat/fun/positive_ordered_free.hpp index 
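
ordered_constrain, patched above, maps an unconstrained vector to a strictly increasing one by accumulating positive increments, and the lp-accumulating overload adds the log Jacobian sum(x[1:]). A scalar-type sketch of that transform (illustrative name, std::vector in place of Eigen, no checks):

#include <cmath>
#include <cstddef>
#include <vector>

// y[0] = x[0], y[i] = y[i-1] + exp(x[i]);  log |det J| = x[1] + ... + x[k-1].
inline std::vector<double> ordered_constrain_sketch(
    const std::vector<double>& x, double& lp) {
  std::vector<double> y(x.size());
  if (x.empty()) {
    return y;
  }
  y[0] = x[0];
  for (std::size_t i = 1; i < x.size(); ++i) {
    y[i] = y[i - 1] + std::exp(x[i]);
    lp += x[i];  // each increment contributes d y[i] / d x[i] = exp(x[i])
  }
  return y;
}
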
3c777682ffb..65ab12e2deb 100644 --- a/stan/math/prim/mat/fun/positive_ordered_free.hpp +++ b/stan/math/prim/mat/fun/positive_ordered_free.hpp @@ -27,17 +27,19 @@ Eigen::Matrix positive_ordered_free( using Eigen::Dynamic; using Eigen::Matrix; using std::log; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; check_positive_ordered("stan::math::positive_ordered_free", "Positive ordered variable", y); size_type k = y.size(); Matrix x(k); - if (k == 0) + if (k == 0) { return x; + } x[0] = log(y[0]); - for (size_type i = 1; i < k; ++i) + for (size_type i = 1; i < k; ++i) { x[i] = log(y[i] - y[i - 1]); + } return x; } } // namespace math diff --git a/stan/math/prim/mat/fun/prod.hpp b/stan/math/prim/mat/fun/prod.hpp index f6963689211..49bba5a4015 100644 --- a/stan/math/prim/mat/fun/prod.hpp +++ b/stan/math/prim/mat/fun/prod.hpp @@ -15,8 +15,9 @@ namespace math { */ template inline T prod(const std::vector& v) { - if (v.size() == 0) + if (v.size() == 0) { return 1; + } Eigen::Map> m(&v[0], v.size()); return m.prod(); } @@ -29,8 +30,9 @@ inline T prod(const std::vector& v) { */ template inline T prod(const Eigen::Matrix& v) { - if (v.size() == 0) + if (v.size() == 0) { return 1.0; + } return v.prod(); } diff --git a/stan/math/prim/mat/fun/promote_elements.hpp b/stan/math/prim/mat/fun/promote_elements.hpp index 0efc831b41a..c598bf5440e 100644 --- a/stan/math/prim/mat/fun/promote_elements.hpp +++ b/stan/math/prim/mat/fun/promote_elements.hpp @@ -27,8 +27,9 @@ struct promote_elements, Eigen::Matrix > { inline static Eigen::Matrix promote( const Eigen::Matrix& u) { Eigen::Matrix t(u.rows(), u.cols()); - for (int i = 0; i < u.size(); ++i) + for (int i = 0; i < u.size(); ++i) { t(i) = promote_elements::promote(u(i)); + } return t; } }; diff --git a/stan/math/prim/mat/fun/promote_scalar.hpp b/stan/math/prim/mat/fun/promote_scalar.hpp index fc37d40b11a..d38173f0e3d 100644 --- a/stan/math/prim/mat/fun/promote_scalar.hpp +++ b/stan/math/prim/mat/fun/promote_scalar.hpp @@ -30,8 +30,9 @@ struct promote_scalar_struct > { const Eigen::Matrix& x) { Eigen::Matrix::type, -1, -1> y(x.rows(), x.cols()); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { y(i) = promote_scalar_struct::apply(x(i)); + } return y; } }; @@ -58,8 +59,9 @@ struct promote_scalar_struct > { const Eigen::Matrix& x) { Eigen::Matrix::type, 1, -1> y(x.rows(), x.cols()); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { y(i) = promote_scalar_struct::apply(x(i)); + } return y; } }; @@ -86,8 +88,9 @@ struct promote_scalar_struct > { const Eigen::Matrix& x) { Eigen::Matrix::type, -1, 1> y(x.rows(), x.cols()); - for (int i = 0; i < x.size(); ++i) + for (int i = 0; i < x.size(); ++i) { y(i) = promote_scalar_struct::apply(x(i)); + } return y; } }; diff --git a/stan/math/prim/mat/fun/qr_Q.hpp b/stan/math/prim/mat/fun/qr_Q.hpp index 6530d4b8930..dc80f9203cd 100644 --- a/stan/math/prim/mat/fun/qr_Q.hpp +++ b/stan/math/prim/mat/fun/qr_Q.hpp @@ -18,15 +18,17 @@ namespace math { template Eigen::Matrix qr_Q( const Eigen::Matrix& m) { - typedef Eigen::Matrix matrix_t; + using matrix_t = Eigen::Matrix; check_nonzero_size("qr_Q", "m", m); Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); matrix_t Q = qr.householderQ(); const int min_size = std::min(m.rows(), m.cols()); - for (int i = 0; i < min_size; i++) - if (qr.matrixQR().coeff(i, i) < 0) + for (int i = 0; i < min_size; i++) { + if (qr.matrixQR().coeff(i, i) < 0) { Q.col(i) *= -1.0; + } + } return Q; } 
diff --git a/stan/math/prim/mat/fun/qr_R.hpp b/stan/math/prim/mat/fun/qr_R.hpp index 184c4739ab8..1ab2cf3e0f4 100644 --- a/stan/math/prim/mat/fun/qr_R.hpp +++ b/stan/math/prim/mat/fun/qr_R.hpp @@ -18,19 +18,22 @@ namespace math { template Eigen::Matrix qr_R( const Eigen::Matrix& m) { - typedef Eigen::Matrix matrix_t; + using matrix_t = Eigen::Matrix; check_nonzero_size("qr_R", "m", m); Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); matrix_t R = qr.matrixQR(); - if (m.rows() > m.cols()) + if (m.rows() > m.cols()) { R.bottomRows(m.rows() - m.cols()).setZero(); + } const int min_size = std::min(m.rows(), m.cols()); for (int i = 0; i < min_size; i++) { - for (int j = 0; j < i; j++) + for (int j = 0; j < i; j++) { R.coeffRef(i, j) = 0.0; - if (R(i, i) < 0) + } + if (R(i, i) < 0) { R.row(i) *= -1.0; + } } return R; } diff --git a/stan/math/prim/mat/fun/qr_thin_Q.hpp b/stan/math/prim/mat/fun/qr_thin_Q.hpp index 29ebda9f2f2..ffc1c55b8d1 100644 --- a/stan/math/prim/mat/fun/qr_thin_Q.hpp +++ b/stan/math/prim/mat/fun/qr_thin_Q.hpp @@ -18,15 +18,17 @@ namespace math { template Eigen::Matrix qr_thin_Q( const Eigen::Matrix& m) { - typedef Eigen::Matrix matrix_t; + using matrix_t = Eigen::Matrix; check_nonzero_size("qr_thin_Q", "m", m); Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); const int min_size = std::min(m.rows(), m.cols()); matrix_t Q = qr.householderQ() * matrix_t::Identity(m.rows(), min_size); - for (int i = 0; i < min_size; i++) - if (qr.matrixQR().coeff(i, i) < 0) + for (int i = 0; i < min_size; i++) { + if (qr.matrixQR().coeff(i, i) < 0) { Q.col(i) *= -1.0; + } + } return Q; } diff --git a/stan/math/prim/mat/fun/qr_thin_R.hpp b/stan/math/prim/mat/fun/qr_thin_R.hpp index 9fabc1bae40..79575f2cac4 100644 --- a/stan/math/prim/mat/fun/qr_thin_R.hpp +++ b/stan/math/prim/mat/fun/qr_thin_R.hpp @@ -18,17 +18,19 @@ namespace math { template Eigen::Matrix qr_thin_R( const Eigen::Matrix& m) { - typedef Eigen::Matrix matrix_t; + using matrix_t = Eigen::Matrix; check_nonzero_size("qr_thin_R", "m", m); Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); const int min_size = std::min(m.rows(), m.cols()); matrix_t R = qr.matrixQR().topLeftCorner(min_size, m.cols()); for (int i = 0; i < min_size; i++) { - for (int j = 0; j < i; j++) + for (int j = 0; j < i; j++) { R.coeffRef(i, j) = 0.0; - if (R(i, i) < 0) + } + if (R(i, i) < 0) { R.row(i) *= -1.0; + } } return R; } diff --git a/stan/math/prim/mat/fun/rank.hpp b/stan/math/prim/mat/fun/rank.hpp index fd830959942..7db5d289aee 100644 --- a/stan/math/prim/mat/fun/rank.hpp +++ b/stan/math/prim/mat/fun/rank.hpp @@ -21,9 +21,11 @@ inline int rank(const C& v, int s) { check_range("rank", "v", v.size(), s); --s; // adjust for indexing by one int count = 0; - for (typename index_type::type i = 0; i < v.size(); ++i) - if (v[i] < v[s]) + for (typename index_type::type i = 0; i < v.size(); ++i) { + if (v[i] < v[s]) { ++count; + } + } return count; } diff --git a/stan/math/prim/mat/fun/read_corr_L.hpp b/stan/math/prim/mat/fun/read_corr_L.hpp index ac5f0bc64f4..7223aef952f 100644 --- a/stan/math/prim/mat/fun/read_corr_L.hpp +++ b/stan/math/prim/mat/fun/read_corr_L.hpp @@ -92,11 +92,12 @@ Eigen::Matrix read_corr_L( // no need to abs() because this Jacobian determinant // is strictly positive (and triangular) // see inverse of Jacobian in equation 11 of LKJ paper - for (size_t k = 1; k <= (K - 2); k++) + for (size_t k = 1; k <= (K - 2); k++) { for (size_t i = k + 1; i <= K; i++) { values(pos) = (K - k - 1) * log1m(square(CPCs(pos))); pos++; 
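
The qr_Q / qr_R / qr_thin_* changes above all guard the same sign convention: a Householder QR is unique only up to the signs of Q's columns and the matching rows of R, so each function negates the pieces whose corresponding diagonal entry of the QR factor is negative. Applied consistently, the product Q * R still reconstructs the input. A combined sketch of that fix on an already-computed pair (illustrative name):

#include <algorithm>
#include <Eigen/Dense>

// Flip matched column/row signs so R ends up with a non-negative diagonal;
// because each flip touches both factors, Q * R is unchanged.
inline void fix_qr_signs_sketch(Eigen::MatrixXd& Q, Eigen::MatrixXd& R) {
  const Eigen::Index min_size = std::min(Q.cols(), R.rows());
  for (Eigen::Index i = 0; i < min_size; ++i) {
    if (R(i, i) < 0) {
      Q.col(i) *= -1.0;
      R.row(i) *= -1.0;
    }
  }
}
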
} + } log_prob += 0.5 * sum(values); return read_corr_L(CPCs, K); diff --git a/stan/math/prim/mat/fun/resize.hpp b/stan/math/prim/mat/fun/resize.hpp index 7ca03765760..94321108ef0 100644 --- a/stan/math/prim/mat/fun/resize.hpp +++ b/stan/math/prim/mat/fun/resize.hpp @@ -21,10 +21,12 @@ template void resize(std::vector& x, const std::vector& dims, int pos) { x.resize(dims[pos]); ++pos; - if (pos >= static_cast(dims.size())) + if (pos >= static_cast(dims.size())) { return; // skips lowest loop to scalar - for (size_t i = 0; i < x.size(); ++i) + } + for (size_t i = 0; i < x.size(); ++i) { resize(x[i], dims, pos); + } } } // namespace internal diff --git a/stan/math/prim/mat/fun/sd.hpp b/stan/math/prim/mat/fun/sd.hpp index a69147ad684..2208e715b21 100644 --- a/stan/math/prim/mat/fun/sd.hpp +++ b/stan/math/prim/mat/fun/sd.hpp @@ -19,8 +19,9 @@ namespace math { template inline return_type_t sd(const std::vector& v) { check_nonzero_size("sd", "v", v); - if (v.size() == 1) + if (v.size() == 1) { return 0.0; + } return sqrt(variance(v)); } @@ -33,8 +34,9 @@ inline return_type_t sd(const std::vector& v) { template inline return_type_t sd(const Eigen::Matrix& m) { check_nonzero_size("sd", "m", m); - if (m.size() == 1) + if (m.size() == 1) { return 0.0; + } return sqrt(variance(m)); } diff --git a/stan/math/prim/mat/fun/segment.hpp b/stan/math/prim/mat/fun/segment.hpp index 97ad55d522f..8e870eec0b2 100644 --- a/stan/math/prim/mat/fun/segment.hpp +++ b/stan/math/prim/mat/fun/segment.hpp @@ -50,8 +50,9 @@ std::vector segment(const std::vector& sv, size_t i, size_t n) { static_cast(sv.size())); } std::vector s; - for (size_t j = 0; j < n; ++j) + for (size_t j = 0; j < n; ++j) { s.push_back(sv[i + j - 1]); + } return s; } diff --git a/stan/math/prim/mat/fun/simplex_constrain.hpp b/stan/math/prim/mat/fun/simplex_constrain.hpp index e5ab094933e..55c1c7d3da9 100644 --- a/stan/math/prim/mat/fun/simplex_constrain.hpp +++ b/stan/math/prim/mat/fun/simplex_constrain.hpp @@ -31,7 +31,7 @@ Eigen::Matrix simplex_constrain( using Eigen::Dynamic; using Eigen::Matrix; using std::log; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; int Km1 = y.size(); Matrix x(Km1 + 1); @@ -65,7 +65,7 @@ Eigen::Matrix simplex_constrain( using Eigen::Matrix; using std::log; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; int Km1 = y.size(); // K = Km1 + 1 Matrix x(Km1 + 1); diff --git a/stan/math/prim/mat/fun/simplex_free.hpp b/stan/math/prim/mat/fun/simplex_free.hpp index 768436bd3d6..b55311bedf9 100644 --- a/stan/math/prim/mat/fun/simplex_free.hpp +++ b/stan/math/prim/mat/fun/simplex_free.hpp @@ -31,7 +31,7 @@ Eigen::Matrix simplex_free( using Eigen::Matrix; using std::log; - typedef typename index_type >::type size_type; + using size_type = typename index_type>::type; check_simplex("stan::math::simplex_free", "Simplex variable", x); int Km1 = x.size() - 1; diff --git a/stan/math/prim/mat/fun/sort_indices.hpp b/stan/math/prim/mat/fun/sort_indices.hpp index a9772078120..5d970d7d8a8 100644 --- a/stan/math/prim/mat/fun/sort_indices.hpp +++ b/stan/math/prim/mat/fun/sort_indices.hpp @@ -39,10 +39,11 @@ class index_comparator { * @param j Index of second value for comparison */ bool operator()(int i, int j) const { - if (ascending) + if (ascending) { return xs_[i - 1] < xs_[j - 1]; - else + } else { return xs_[i - 1] > xs_[j - 1]; + } } }; @@ -60,12 +61,13 @@ class index_comparator { */ template std::vector sort_indices(const C& xs) { - typedef 
typename index_type::type idx_t; + using idx_t = typename index_type::type; idx_t size = xs.size(); std::vector idxs; idxs.resize(size); - for (idx_t i = 0; i < size; ++i) + for (idx_t i = 0; i < size; ++i) { idxs[i] = i + 1; + } internal::index_comparator comparator(xs); std::sort(idxs.begin(), idxs.end(), comparator); return idxs; diff --git a/stan/math/prim/mat/fun/stan_print.hpp b/stan/math/prim/mat/fun/stan_print.hpp index 667d53bb2a3..eaedec00076 100644 --- a/stan/math/prim/mat/fun/stan_print.hpp +++ b/stan/math/prim/mat/fun/stan_print.hpp @@ -17,8 +17,9 @@ template void stan_print(std::ostream* o, const std::vector& x) { *o << '['; for (size_t i = 0; i < x.size(); ++i) { - if (i > 0) + if (i > 0) { *o << ','; + } stan_print(o, x[i]); } *o << ']'; @@ -28,8 +29,9 @@ template void stan_print(std::ostream* o, const Eigen::Matrix& x) { *o << '['; for (int i = 0; i < x.size(); ++i) { - if (i > 0) + if (i > 0) { *o << ','; + } stan_print(o, x(i)); } *o << ']'; @@ -39,8 +41,9 @@ template void stan_print(std::ostream* o, const Eigen::Matrix& x) { *o << '['; for (int i = 0; i < x.size(); ++i) { - if (i > 0) + if (i > 0) { *o << ','; + } stan_print(o, x(i)); } *o << ']'; @@ -51,12 +54,14 @@ void stan_print(std::ostream* o, const Eigen::Matrix& x) { *o << '['; for (int i = 0; i < x.rows(); ++i) { - if (i > 0) + if (i > 0) { *o << ','; + } *o << '['; for (int j = 0; j < x.row(i).size(); ++j) { - if (j > 0) + if (j > 0) { *o << ','; + } stan_print(o, x.row(i)(j)); } *o << ']'; diff --git a/stan/math/prim/mat/fun/sub_col.hpp b/stan/math/prim/mat/fun/sub_col.hpp index 37d56c47d8f..88d950fe152 100644 --- a/stan/math/prim/mat/fun/sub_col.hpp +++ b/stan/math/prim/mat/fun/sub_col.hpp @@ -22,8 +22,9 @@ inline Eigen::Matrix sub_col( const Eigen::Matrix& m, size_t i, size_t j, size_t nrows) { check_row_index("sub_col", "i", m, i); - if (nrows > 0) + if (nrows > 0) { check_row_index("sub_col", "i+nrows-1", m, i + nrows - 1); + } check_column_index("sub_col", "j", m, j); return m.block(i - 1, j - 1, nrows, 1); } diff --git a/stan/math/prim/mat/fun/sub_row.hpp b/stan/math/prim/mat/fun/sub_row.hpp index 60b510d8334..72dc22433c0 100644 --- a/stan/math/prim/mat/fun/sub_row.hpp +++ b/stan/math/prim/mat/fun/sub_row.hpp @@ -23,8 +23,9 @@ inline Eigen::Matrix sub_row( size_t j, size_t ncols) { check_row_index("sub_row", "i", m, i); check_column_index("sub_row", "j", m, j); - if (ncols > 0) + if (ncols > 0) { check_column_index("sub_col", "j+ncols-1", m, j + ncols - 1); + } return m.block(i - 1, j - 1, 1, ncols); } diff --git a/stan/math/prim/mat/fun/tail.hpp b/stan/math/prim/mat/fun/tail.hpp index 9bf4306ee79..69561779313 100644 --- a/stan/math/prim/mat/fun/tail.hpp +++ b/stan/math/prim/mat/fun/tail.hpp @@ -24,8 +24,9 @@ namespace math { template inline Eigen::Matrix tail( const Eigen::Matrix& v, size_t n) { - if (n != 0) + if (n != 0) { check_row_index("tail", "n", v, n); + } return v.tail(n); } @@ -42,8 +43,9 @@ inline Eigen::Matrix tail( template inline Eigen::Matrix tail( const Eigen::Matrix& rv, size_t n) { - if (n != 0) + if (n != 0) { check_column_index("tail", "n", rv, n); + } return rv.tail(n); } @@ -59,12 +61,14 @@ inline Eigen::Matrix tail( */ template std::vector tail(const std::vector& sv, size_t n) { - typedef typename index_type >::type idx_t; - if (n != 0) + using idx_t = typename index_type >::type; + if (n != 0) { check_std_vector_index("tail", "n", sv, n); + } std::vector s; - for (idx_t i = sv.size() - n; i < sv.size(); ++i) + for (idx_t i = sv.size() - n; i < sv.size(); ++i) { 
s.push_back(sv[i]); + } return s; } diff --git a/stan/math/prim/mat/fun/tcrossprod.hpp b/stan/math/prim/mat/fun/tcrossprod.hpp index 4fcf10b2221..63e6c9ca5f7 100644 --- a/stan/math/prim/mat/fun/tcrossprod.hpp +++ b/stan/math/prim/mat/fun/tcrossprod.hpp @@ -14,10 +14,12 @@ namespace math { * @return M times its transpose. */ inline matrix_d tcrossprod(const matrix_d& M) { - if (M.rows() == 0) + if (M.rows() == 0) { return matrix_d(0, 0); - if (M.rows() == 1) + } + if (M.rows() == 1) { return M * M.transpose(); + } matrix_d result(M.rows(), M.rows()); return result.setZero().selfadjointView().rankUpdate(M); } diff --git a/stan/math/prim/mat/fun/to_array_1d.hpp b/stan/math/prim/mat/fun/to_array_1d.hpp index a3c95d7e198..40344639653 100644 --- a/stan/math/prim/mat/fun/to_array_1d.hpp +++ b/stan/math/prim/mat/fun/to_array_1d.hpp @@ -16,8 +16,9 @@ inline std::vector to_array_1d(const Eigen::Matrix& matrix) { const T* datap = matrix.data(); int size = matrix.size(); std::vector result(size); - for (int i = 0; i < size; i++) + for (int i = 0; i < size; i++) { result[i] = datap[i]; + } return result; } @@ -33,12 +34,15 @@ inline std::vector::type> to_array_1d( const std::vector >& x) { size_t size1 = x.size(); size_t size2 = 0; - if (size1 != 0) + if (size1 != 0) { size2 = x[0].size(); + } std::vector y(size1 * size2); - for (size_t i = 0, ij = 0; i < size1; i++) - for (size_t j = 0; j < size2; j++, ij++) + for (size_t i = 0, ij = 0; i < size1; i++) { + for (size_t j = 0; j < size2; j++, ij++) { y[ij] = x[i][j]; + } + } return to_array_1d(y); } diff --git a/stan/math/prim/mat/fun/to_array_2d.hpp b/stan/math/prim/mat/fun/to_array_2d.hpp index 7a7433e796c..7751111ae5a 100644 --- a/stan/math/prim/mat/fun/to_array_2d.hpp +++ b/stan/math/prim/mat/fun/to_array_2d.hpp @@ -16,9 +16,11 @@ inline std::vector > to_array_2d( int C = matrix.cols(); int R = matrix.rows(); vector > result(R, vector(C)); - for (int i = 0, ij = 0; i < C; i++) - for (int j = 0; j < R; j++, ij++) + for (int i = 0, ij = 0; i < C; i++) { + for (int j = 0; j < R; j++, ij++) { result[j][i] = datap[ij]; + } + } return result; } diff --git a/stan/math/prim/mat/fun/to_matrix.hpp b/stan/math/prim/mat/fun/to_matrix.hpp index 6e8959dd606..35356745a71 100644 --- a/stan/math/prim/mat/fun/to_matrix.hpp +++ b/stan/math/prim/mat/fun/to_matrix.hpp @@ -39,13 +39,16 @@ template inline Eigen::Matrix to_matrix( const std::vector >& x) { int rows = x.size(); - if (rows == 0) + if (rows == 0) { return Eigen::Matrix(0, 0); + } int cols = x[0].size(); Eigen::Matrix result(rows, cols); - for (int i = 0, ij = 0; i < cols; i++) - for (int j = 0; j < rows; j++, ij++) + for (int i = 0, ij = 0; i < cols; i++) { + for (int j = 0; j < rows; j++, ij++) { result(ij) = x[j][i]; + } + } return result; } @@ -62,15 +65,18 @@ inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> to_matrix(const std::vector >& x) { using boost::math::tools::promote_args; size_t rows = x.size(); - if (rows == 0) + if (rows == 0) { return Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>(0, 0); + } size_t cols = x[0].size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> result(rows, cols); - for (size_t i = 0, ij = 0; i < cols; i++) - for (size_t j = 0; j < rows; j++, ij++) + for (size_t i = 0, ij = 0; i < cols; i++) { + for (size_t j = 0; j < rows; j++, ij++) { result(ij) = x[j][i]; + } + } return result; } @@ -134,8 +140,9 @@ inline Eigen::Matrix to_matrix( int size = x.size(); check_size_match(function, "rows * columns", m * n, "vector size", size); Eigen::Matrix result(m, n); - for (int i 
= 0; i < size; i++) + for (int i = 0; i < size; i++) { result(i) = x[i]; + } return result; } @@ -158,14 +165,17 @@ inline Eigen::Matrix to_matrix( template inline Eigen::Matrix to_matrix( const Eigen::Matrix& x, int m, int n, bool col_major) { - if (col_major) + if (col_major) { return to_matrix(x, m, n); + } check_size_match("to_matrix", "rows * columns", m * n, "matrix size", x.size()); Eigen::Matrix result(m, n); - for (int i = 0, ij = 0; i < m; i++) - for (int j = 0; j < n; j++, ij++) + for (int i = 0, ij = 0; i < m; i++) { + for (int j = 0; j < n; j++, ij++) { result(i, j) = x(ij); + } + } return result; } @@ -188,15 +198,18 @@ inline Eigen::Matrix to_matrix( template inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> to_matrix(const std::vector& x, int m, int n, bool col_major) { - if (col_major) + if (col_major) { return to_matrix(x, m, n); + } check_size_match("to_matrix", "rows * columns", m * n, "matrix size", x.size()); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> result(m, n); - for (int i = 0, ij = 0; i < m; i++) - for (int j = 0; j < n; j++, ij++) + for (int i = 0, ij = 0; i < m; i++) { + for (int j = 0; j < n; j++, ij++) { result(i, j) = x[ij]; + } + } return result; } diff --git a/stan/math/prim/mat/fun/to_row_vector.hpp b/stan/math/prim/mat/fun/to_row_vector.hpp index d873f4d468b..4064d3c027b 100644 --- a/stan/math/prim/mat/fun/to_row_vector.hpp +++ b/stan/math/prim/mat/fun/to_row_vector.hpp @@ -30,8 +30,9 @@ inline Eigen::Matrix to_row_vector( const std::vector& vec) { int C = vec.size(); Eigen::Matrix result(C); - for (int i = 0; i < C; i++) + for (int i = 0; i < C; i++) { result(i) = vec[i]; + } return result; } diff --git a/stan/math/prim/mat/fun/to_vector.hpp b/stan/math/prim/mat/fun/to_vector.hpp index b952ae2a0fa..dce573893f7 100644 --- a/stan/math/prim/mat/fun/to_vector.hpp +++ b/stan/math/prim/mat/fun/to_vector.hpp @@ -30,8 +30,9 @@ inline Eigen::Matrix to_vector( const std::vector& vec) { int R = vec.size(); Eigen::Matrix result(R); - for (int i = 0; i < R; i++) + for (int i = 0; i < R; i++) { result(i) = vec[i]; + } return result; } diff --git a/stan/math/prim/mat/fun/typedefs.hpp b/stan/math/prim/mat/fun/typedefs.hpp index 8ace5f07efd..1ff4802995f 100644 --- a/stan/math/prim/mat/fun/typedefs.hpp +++ b/stan/math/prim/mat/fun/typedefs.hpp @@ -16,17 +16,17 @@ typedef index_type >::type /** * Type for matrix of double values. */ -typedef Eigen::Matrix matrix_d; +using matrix_d = Eigen::Matrix; /** * Type for (column) vector of double values. */ -typedef Eigen::Matrix vector_d; +using vector_d = Eigen::Matrix; /** * Type for (row) vector of double values. 
*/ -typedef Eigen::Matrix row_vector_d; +using row_vector_d = Eigen::Matrix; } // namespace math } // namespace stan diff --git a/stan/math/prim/mat/fun/value_of.hpp b/stan/math/prim/mat/fun/value_of.hpp index ff3c3f49166..42f510e8816 100644 --- a/stan/math/prim/mat/fun/value_of.hpp +++ b/stan/math/prim/mat/fun/value_of.hpp @@ -24,9 +24,11 @@ template inline Eigen::Matrix::type, R, C> value_of( const Eigen::Matrix& M) { Eigen::Matrix::type, R, C> Md(M.rows(), M.cols()); - for (int j = 0; j < M.cols(); j++) - for (int i = 0; i < M.rows(); i++) + for (int j = 0; j < M.cols(); j++) { + for (int i = 0; i < M.rows(); i++) { Md(i, j) = value_of(M(i, j)); + } + } return Md; } diff --git a/stan/math/prim/mat/fun/value_of_rec.hpp b/stan/math/prim/mat/fun/value_of_rec.hpp index e2e0c3e2c74..049aff1e7f4 100644 --- a/stan/math/prim/mat/fun/value_of_rec.hpp +++ b/stan/math/prim/mat/fun/value_of_rec.hpp @@ -23,9 +23,11 @@ template inline Eigen::Matrix value_of_rec( const Eigen::Matrix& M) { Eigen::Matrix Md(M.rows(), M.cols()); - for (int j = 0; j < M.cols(); j++) - for (int i = 0; i < M.rows(); i++) + for (int j = 0; j < M.cols(); j++) { + for (int i = 0; i < M.rows(); i++) { Md(i, j) = value_of_rec(M(i, j)); + } + } return Md; } diff --git a/stan/math/prim/mat/fun/variance.hpp b/stan/math/prim/mat/fun/variance.hpp index a6c82cf2996..28ca0bf9723 100644 --- a/stan/math/prim/mat/fun/variance.hpp +++ b/stan/math/prim/mat/fun/variance.hpp @@ -21,8 +21,9 @@ namespace math { template inline return_type_t variance(const std::vector& v) { check_nonzero_size("variance", "v", v); - if (v.size() == 1) + if (v.size() == 1) { return 0.0; + } T v_mean(mean(v)); T sum_sq_diff(0); for (size_t i = 0; i < v.size(); ++i) { @@ -42,8 +43,9 @@ template inline return_type_t variance(const Eigen::Matrix& m) { check_nonzero_size("variance", "m", m); - if (m.size() == 1) + if (m.size() == 1) { return 0.0; + } return_type_t mn(mean(m)); return_type_t sum_sq_diff(0); for (int i = 0; i < m.size(); ++i) { diff --git a/stan/math/prim/mat/fun/welford_covar_estimator.hpp b/stan/math/prim/mat/fun/welford_covar_estimator.hpp index ef7891caea7..b679dfde898 100644 --- a/stan/math/prim/mat/fun/welford_covar_estimator.hpp +++ b/stan/math/prim/mat/fun/welford_covar_estimator.hpp @@ -33,8 +33,9 @@ class welford_covar_estimator { void sample_mean(Eigen::VectorXd& mean) { mean = m_; } void sample_covariance(Eigen::MatrixXd& covar) { - if (num_samples_ > 1) + if (num_samples_ > 1) { covar = m2_ / (num_samples_ - 1.0); + } } protected: diff --git a/stan/math/prim/mat/fun/welford_var_estimator.hpp b/stan/math/prim/mat/fun/welford_var_estimator.hpp index 4b668fe609c..c3cd7784176 100644 --- a/stan/math/prim/mat/fun/welford_var_estimator.hpp +++ b/stan/math/prim/mat/fun/welford_var_estimator.hpp @@ -33,8 +33,9 @@ class welford_var_estimator { void sample_mean(Eigen::VectorXd& mean) { mean = m_; } void sample_variance(Eigen::VectorXd& var) { - if (num_samples_ > 1) + if (num_samples_ > 1) { var = m2_ / (num_samples_ - 1.0); + } } protected: diff --git a/stan/math/prim/mat/functor/map_rect.hpp b/stan/math/prim/mat/functor/map_rect.hpp index bd5102ea000..34dbbb765ce 100644 --- a/stan/math/prim/mat/functor/map_rect.hpp +++ b/stan/math/prim/mat/functor/map_rect.hpp @@ -164,8 +164,9 @@ map_rect(const Eigen::Matrix& shared_params, size_x_i); } - if (job_params_dims[0] == 0) + if (job_params_dims[0] == 0) { return return_t(); + } #ifdef STAN_MPI return internal::map_rect_mpi( diff --git a/stan/math/prim/mat/functor/map_rect_combine.hpp 
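
welford_var_estimator and welford_covar_estimator above report m2_ / (num_samples_ - 1.0), the unbiased estimate from Welford's online recursion. The accumulation step is not visible in these hunks, so the sketch below is the textbook scalar update rather than a copy of the library code; the class name is illustrative.

#include <cstddef>

// Minimal scalar Welford estimator: m_ is the running mean, m2_ the running
// sum of squared deviations, so the sample variance is m2_ / (n - 1).
class welford_var_sketch {
 public:
  void add_sample(double x) {
    ++n_;
    const double delta = x - m_;
    m_ += delta / n_;
    m2_ += delta * (x - m_);  // note: uses the *updated* mean
  }
  double sample_mean() const { return m_; }
  double sample_variance() const { return n_ > 1 ? m2_ / (n_ - 1.0) : 0.0; }

 private:
  std::size_t n_ = 0;
  double m_ = 0;
  double m2_ = 0;
};
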
b/stan/math/prim/mat/functor/map_rect_combine.hpp index 8ecd3b04968..2db158d6547 100644 --- a/stan/math/prim/mat/functor/map_rect_combine.hpp +++ b/stan/math/prim/mat/functor/map_rect_combine.hpp @@ -59,8 +59,9 @@ class map_rect_combine { num_shared_operands_(shared_params.rows()), num_job_operands_(dims(job_params)[1]) { ops_partials_.reserve(job_params.size()); - for (const auto& job_param : job_params) + for (const auto& job_param : job_params) { ops_partials_.emplace_back(shared_params, job_param); + } } result_t operator()(const matrix_d& world_result, @@ -74,13 +75,15 @@ class map_rect_combine { for (std::size_t i = 0, ij = 0; i != num_jobs; ++i) { for (int j = 0; j != world_f_out[i]; ++j, ++ij) { - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials_[i].edge1_.partials_ = world_result.block(1, ij, num_shared_operands_, 1); + } - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials_[i].edge2_.partials_ = world_result.block(offset_job_params, ij, num_job_operands_, 1); + } out(ij) = ops_partials_[i].build(world_result(0, ij)); } diff --git a/stan/math/prim/mat/functor/map_rect_mpi.hpp b/stan/math/prim/mat/functor/map_rect_mpi.hpp index b141f4b5bfd..71cb8ad65a3 100644 --- a/stan/math/prim/mat/functor/map_rect_mpi.hpp +++ b/stan/math/prim/mat/functor/map_rect_mpi.hpp @@ -25,8 +25,8 @@ map_rect_mpi( job_params, const std::vector>& x_r, const std::vector>& x_i, std::ostream* msgs = nullptr) { - typedef internal::map_rect_reduce ReduceF; - typedef internal::map_rect_combine CombineF; + using ReduceF = internal::map_rect_reduce; + using CombineF = internal::map_rect_combine; // whenever the cluster is already busy with some command we fall // back to serial execution (possible if map_rect calls are nested diff --git a/stan/math/prim/mat/meta/append_return_type.hpp b/stan/math/prim/mat/meta/append_return_type.hpp index d173eb5c824..02bb1591a3e 100644 --- a/stan/math/prim/mat/meta/append_return_type.hpp +++ b/stan/math/prim/mat/meta/append_return_type.hpp @@ -20,7 +20,7 @@ namespace math { */ template struct append_return_type { - typedef return_type_t type; + using type = return_type_t; }; /** @@ -34,7 +34,7 @@ struct append_return_type { */ template <> struct append_return_type { - typedef int type; + using type = int; }; /** @@ -53,7 +53,7 @@ struct append_return_type { */ template struct append_return_type, Eigen::Matrix > { - typedef typename Eigen::Matrix, R, C> type; + using type = typename Eigen::Matrix, R, C>; }; /** @@ -69,7 +69,7 @@ struct append_return_type, Eigen::Matrix > { */ template struct append_return_type, std::vector > { - typedef typename std::vector::type> type; + using type = typename std::vector::type>; }; } // namespace math } // namespace stan diff --git a/stan/math/prim/mat/meta/is_vector.hpp b/stan/math/prim/mat/meta/is_vector.hpp index cd848789547..b2668e5d235 100644 --- a/stan/math/prim/mat/meta/is_vector.hpp +++ b/stan/math/prim/mat/meta/is_vector.hpp @@ -12,17 +12,17 @@ namespace stan { template struct is_vector > { enum { value = 1 }; - typedef T type; + using type = T; }; template struct is_vector > { enum { value = 1 }; - typedef T type; + using type = T; }; template struct is_vector > { enum { value = 1 }; - typedef T type; + using type = T; }; } // namespace stan #endif diff --git a/stan/math/prim/mat/meta/operands_and_partials.hpp b/stan/math/prim/mat/meta/operands_and_partials.hpp index 6aaca072513..c8f8e2d963c 100644 --- a/stan/math/prim/mat/meta/operands_and_partials.hpp +++ 
b/stan/math/prim/mat/meta/operands_and_partials.hpp @@ -18,7 +18,7 @@ namespace internal { template class ops_partials_edge> { public: - typedef empty_broadcast_array> partials_t; + using partials_t = empty_broadcast_array>; partials_t partials_; empty_broadcast_array> partials_vec_; ops_partials_edge() {} @@ -37,7 +37,7 @@ class ops_partials_edge> { template class ops_partials_edge>> { public: - typedef empty_broadcast_array> partials_t; + using partials_t = empty_broadcast_array>; empty_broadcast_array> partials_vec_; ops_partials_edge() {} explicit ops_partials_edge( diff --git a/stan/math/prim/mat/meta/scalar_type.hpp b/stan/math/prim/mat/meta/scalar_type.hpp index 3af731a7344..effcd1a9ea3 100644 --- a/stan/math/prim/mat/meta/scalar_type.hpp +++ b/stan/math/prim/mat/meta/scalar_type.hpp @@ -15,7 +15,7 @@ namespace stan { */ template struct scalar_type > { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; /** @@ -28,7 +28,7 @@ struct scalar_type > { */ template struct scalar_type > { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; /** @@ -41,7 +41,7 @@ struct scalar_type > { */ template struct scalar_type&> { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; /** @@ -54,7 +54,7 @@ struct scalar_type&> { */ template struct scalar_type&> { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; /** @@ -65,7 +65,7 @@ struct scalar_type&> { */ template struct scalar_type > { - typedef typename scalar_type::type type; + using type = typename scalar_type::type; }; } // namespace stan #endif diff --git a/stan/math/prim/mat/meta/seq_view.hpp b/stan/math/prim/mat/meta/seq_view.hpp index d208e921039..8f50a204fd1 100644 --- a/stan/math/prim/mat/meta/seq_view.hpp +++ b/stan/math/prim/mat/meta/seq_view.hpp @@ -9,28 +9,28 @@ namespace math { template struct store_type { - typedef const T& type; + using type = const T &; }; template <> struct store_type { - typedef const double type; + using type = const double; }; template <> struct store_type { - typedef const int type; + using type = const int; }; template struct pass_type { - typedef const T& type; + using type = const T &; }; template <> struct pass_type { - typedef double type; + using type = double; }; template <> struct pass_type { - typedef int type; + using type = int; }; // S assignable to T diff --git a/stan/math/prim/mat/meta/value_type.hpp b/stan/math/prim/mat/meta/value_type.hpp index 076f89e421e..8f55208adc2 100644 --- a/stan/math/prim/mat/meta/value_type.hpp +++ b/stan/math/prim/mat/meta/value_type.hpp @@ -17,7 +17,7 @@ namespace math { */ template struct value_type > { - typedef T type; + using type = T; }; } // namespace math diff --git a/stan/math/prim/mat/prob/bernoulli_logit_glm_lpmf.hpp b/stan/math/prim/mat/prob/bernoulli_logit_glm_lpmf.hpp index af456f8dc71..5d3776b2823 100644 --- a/stan/math/prim/mat/prob/bernoulli_logit_glm_lpmf.hpp +++ b/stan/math/prim/mat/prob/bernoulli_logit_glm_lpmf.hpp @@ -47,7 +47,7 @@ template bernoulli_logit_glm_lpmf( const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta) { static const char *function = "bernoulli_logit_glm_lpmf"; - typedef partials_return_type_t T_partials_return; + using T_partials_return = partials_return_type_t; typedef typename std::conditional_t< is_vector::value, Eigen::Matrix, -1, 1>, partials_return_type_t> @@ -63,15 +63,18 @@ return_type_t bernoulli_logit_glm_lpmf( check_bounded(function, "Vector of dependent 
variables", y, 0, 1); check_consistent_size(function, "Vector of dependent variables", y, N); check_consistent_size(function, "Weight vector", beta, M); - if (is_vector::value) + if (is_vector::value) { check_consistent_sizes(function, "Vector of intercepts", alpha, "Vector of dependent variables", y); + } - if (size_zero(y, x, beta)) + if (size_zero(y, x, beta)) { return 0; + } - if (!include_summand::value) + if (!include_summand::value) { return 0; + } T_partials_return logp(0); const auto &x_val = value_of_rec(x); @@ -122,10 +125,11 @@ return_type_t bernoulli_logit_glm_lpmf( = (beta_val_vec * theta_derivative.transpose()).transpose(); } if (!is_constant_all::value) { - if (is_vector::value) + if (is_vector::value) { ops_partials.edge2_.partials_ = theta_derivative; - else + } else { ops_partials.edge2_.partials_[0] = sum(theta_derivative); + } } } return ops_partials.build(logp); diff --git a/stan/math/prim/mat/prob/bernoulli_logit_glm_rng.hpp b/stan/math/prim/mat/prob/bernoulli_logit_glm_rng.hpp index ce981ea9a44..f04255d7145 100644 --- a/stan/math/prim/mat/prob/bernoulli_logit_glm_rng.hpp +++ b/stan/math/prim/mat/prob/bernoulli_logit_glm_rng.hpp @@ -57,8 +57,9 @@ inline typename VectorBuilder::type bernoulli_logit_glm_rng( scalar_seq_view beta_vec(beta); Eigen::VectorXd beta_vector(M); - for (int i = 0; i < M; ++i) + for (int i = 0; i < M; ++i) { beta_vector[i] = beta_vec[i]; + } Eigen::VectorXd x_beta = x * beta_vector; diff --git a/stan/math/prim/mat/prob/categorical_logit_glm_lpmf.hpp b/stan/math/prim/mat/prob/categorical_logit_glm_lpmf.hpp index 6fce70d2b44..9f3421d9a6c 100644 --- a/stan/math/prim/mat/prob/categorical_logit_glm_lpmf.hpp +++ b/stan/math/prim/mat/prob/categorical_logit_glm_lpmf.hpp @@ -64,12 +64,14 @@ categorical_logit_glm_lpmf( check_bounded(function, "categorical outcome out of support", y, 1, N_classes); - if (size_zero(y, x, beta)) + if (size_zero(y, x, beta)) { return 0; + } if (!include_summand::value) + T_beta_scalar>::value) { return 0; + } const auto& x_val = value_of_rec(x); const auto& beta_val = value_of_rec(beta); @@ -80,10 +82,9 @@ categorical_logit_glm_lpmf( Array lin = (x_val * beta_val).rowwise() + alpha_val_vec; Array lin_max - = lin.rowwise() - .maxCoeff(); // This is used to prevent overflow when - // calculating softmax/log_sum_exp and similar - // expressions + = lin.rowwise().maxCoeff(); // This is used to prevent overflow when + // calculating softmax/log_sum_exp and + // similar expressions Array exp_lin = exp(lin.colwise() - lin_max); Array inv_sum_exp_lin diff --git a/stan/math/prim/mat/prob/categorical_logit_lpmf.hpp b/stan/math/prim/mat/prob/categorical_logit_lpmf.hpp index 6c44b5ce9c4..eb7c907a1db 100644 --- a/stan/math/prim/mat/prob/categorical_logit_lpmf.hpp +++ b/stan/math/prim/mat/prob/categorical_logit_lpmf.hpp @@ -24,8 +24,9 @@ return_type_t categorical_logit_lpmf( beta.size()); check_finite(function, "log odds parameter", beta); - if (!include_summand::value) + if (!include_summand::value) { return 0.0; + } // FIXME: wasteful vs. 
creating term (n-1) if not vectorized return beta(n - 1) - log_sum_exp(beta); // == log_softmax(beta)(n-1); @@ -43,23 +44,27 @@ return_type_t categorical_logit_lpmf( const Eigen::Matrix& beta) { static const char* function = "categorical_logit_lpmf"; - for (const auto& x : ns) + for (const auto& x : ns) { check_bounded(function, "categorical outcome out of support", x, 1, beta.size()); + } check_finite(function, "log odds parameter", beta); - if (!include_summand::value) + if (!include_summand::value) { return 0.0; + } - if (ns.empty()) + if (ns.empty()) { return 0.0; + } Eigen::Matrix log_softmax_beta = log_softmax(beta); // FIXME: replace with more efficient sum() Eigen::Matrix, Eigen::Dynamic, 1> results(ns.size()); - for (size_t i = 0; i < ns.size(); ++i) + for (size_t i = 0; i < ns.size(); ++i) { results[i] = log_softmax_beta(ns[i] - 1); + } return sum(results); } diff --git a/stan/math/prim/mat/prob/categorical_logit_rng.hpp b/stan/math/prim/mat/prob/categorical_logit_rng.hpp index 3d4f11f96e7..bdad79c396b 100644 --- a/stan/math/prim/mat/prob/categorical_logit_rng.hpp +++ b/stan/math/prim/mat/prob/categorical_logit_rng.hpp @@ -40,8 +40,9 @@ inline int categorical_logit_rng(const Eigen::VectorXd& beta, RNG& rng) { double c = uniform01_rng(); int b = 0; - while (c > index(b)) + while (c > index(b)) { b++; + } return b + 1; } } // namespace math diff --git a/stan/math/prim/mat/prob/categorical_lpmf.hpp b/stan/math/prim/mat/prob/categorical_lpmf.hpp index 01c157017b3..71a52b7e392 100644 --- a/stan/math/prim/mat/prob/categorical_lpmf.hpp +++ b/stan/math/prim/mat/prob/categorical_lpmf.hpp @@ -25,8 +25,9 @@ return_type_t categorical_lpmf( check_bounded(function, "Number of categories", n, lb, theta.size()); check_simplex(function, "Probabilities parameter", theta); - if (include_summand::value) + if (include_summand::value) { return log(theta(n - 1)); + } return 0.0; } @@ -48,26 +49,31 @@ return_type_t categorical_lpmf( int lb = 1; - for (size_t i = 0; i < ns.size(); ++i) + for (size_t i = 0; i < ns.size(); ++i) { check_bounded(function, "element of outcome array", ns[i], lb, theta.size()); + } check_simplex(function, "Probabilities parameter", theta); - if (!include_summand::value) + if (!include_summand::value) { return 0.0; + } - if (ns.size() == 0) + if (ns.size() == 0) { return 0.0; + } Eigen::Matrix log_theta(theta.size()); - for (int i = 0; i < theta.size(); ++i) + for (int i = 0; i < theta.size(); ++i) { log_theta(i) = log(theta(i)); + } Eigen::Matrix, Eigen::Dynamic, 1> log_theta_ns( ns.size()); - for (size_t i = 0; i < ns.size(); ++i) + for (size_t i = 0; i < ns.size(); ++i) { log_theta_ns(i) = log_theta(ns[i] - 1); + } return sum(log_theta_ns); } diff --git a/stan/math/prim/mat/prob/categorical_rng.hpp b/stan/math/prim/mat/prob/categorical_rng.hpp index 425d885bf3f..c1cc9236d66 100644 --- a/stan/math/prim/mat/prob/categorical_rng.hpp +++ b/stan/math/prim/mat/prob/categorical_rng.hpp @@ -29,8 +29,9 @@ inline int categorical_rng( double c = uniform01_rng(); int b = 0; - while (c > index(b, 0)) + while (c > index(b, 0)) { b++; + } return b + 1; } } // namespace math diff --git a/stan/math/prim/mat/prob/dirichlet_lpmf.hpp b/stan/math/prim/mat/prob/dirichlet_lpmf.hpp index 1eb627bcaaf..d3cced02416 100644 --- a/stan/math/prim/mat/prob/dirichlet_lpmf.hpp +++ b/stan/math/prim/mat/prob/dirichlet_lpmf.hpp @@ -50,8 +50,8 @@ return_type_t dirichlet_lpmf(const T_prob& theta, const T_prior_size& alpha) { static const char* function = "dirichlet_lpmf"; - typedef partials_return_type_t 
T_partials_return; - typedef typename Eigen::Matrix T_partials_vec; + using T_partials_return = partials_return_type_t; + using T_partials_vec = typename Eigen::Matrix; check_consistent_sizes(function, "probabilities", theta, "prior sample sizes", alpha); @@ -66,11 +66,13 @@ return_type_t dirichlet_lpmf(const T_prob& theta, T_partials_return lp(0.0); - if (include_summand::value) + if (include_summand::value) { lp += lgamma(alpha_dbl.sum()) - lgamma(alpha_dbl).sum(); + } - if (include_summand::value) + if (include_summand::value) { lp += (theta_dbl.array().log() * (alpha_dbl.array() - 1.0)).sum(); + } T_partials_vec theta_deriv = (alpha_dbl.array() - 1.0) / theta_dbl.array(); @@ -79,11 +81,13 @@ return_type_t dirichlet_lpmf(const T_prob& theta, + theta_dbl.array().log(); operands_and_partials ops_partials(theta, alpha); - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials.edge1_.partials_ = theta_deriv; + } - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials.edge2_.partials_ = alpha_deriv; + } return ops_partials.build(lp); } diff --git a/stan/math/prim/mat/prob/dirichlet_rng.hpp b/stan/math/prim/mat/prob/dirichlet_rng.hpp index c228bda6f07..86c12892824 100644 --- a/stan/math/prim/mat/prob/dirichlet_rng.hpp +++ b/stan/math/prim/mat/prob/dirichlet_rng.hpp @@ -36,10 +36,10 @@ namespace math { template inline Eigen::VectorXd dirichlet_rng( const Eigen::Matrix& alpha, RNG& rng) { - using Eigen::VectorXd; using boost::gamma_distribution; - using boost::random::uniform_real_distribution; using boost::variate_generator; + using boost::random::uniform_real_distribution; + using Eigen::VectorXd; using std::exp; using std::log; @@ -56,8 +56,9 @@ inline Eigen::VectorXd dirichlet_rng( } double log_sum_y = log_sum_exp(log_y); VectorXd theta(alpha.size()); - for (int i = 0; i < alpha.size(); ++i) + for (int i = 0; i < alpha.size(); ++i) { theta(i) = exp(log_y(i) - log_sum_y); + } return theta; } diff --git a/stan/math/prim/mat/prob/gaussian_dlm_obs_lpdf.hpp b/stan/math/prim/mat/prob/gaussian_dlm_obs_lpdf.hpp index 724ed9c3792..0295e30d29a 100644 --- a/stan/math/prim/mat/prob/gaussian_dlm_obs_lpdf.hpp +++ b/stan/math/prim/mat/prob/gaussian_dlm_obs_lpdf.hpp @@ -102,8 +102,9 @@ gaussian_dlm_obs_lpdf( check_pos_definite(function, "C0", C0); check_finite(function, "C0", C0); - if (size_zero(y)) + if (size_zero(y)) { return 0; + } T_lp lp(0); if (include_summand::value) { @@ -257,8 +258,9 @@ gaussian_dlm_obs_lpdf( check_finite(function, "C0", C0); check_not_nan(function, "C0", C0); - if (y.cols() == 0 || y.rows() == 0) + if (y.cols() == 0 || y.rows() == 0) { return 0; + } T_lp lp(0); if (include_summand::value) { diff --git a/stan/math/prim/mat/prob/gaussian_dlm_obs_rng.hpp b/stan/math/prim/mat/prob/gaussian_dlm_obs_rng.hpp index a7723032633..ef507f55348 100644 --- a/stan/math/prim/mat/prob/gaussian_dlm_obs_rng.hpp +++ b/stan/math/prim/mat/prob/gaussian_dlm_obs_rng.hpp @@ -44,8 +44,9 @@ inline Eigen::VectorXd multi_normal_semidefinite_rng( Eigen::VectorXd stddev = S_ldlt.vectorD().array().sqrt().matrix(); size_t M = S_ldlt.vectorD().size(); Eigen::VectorXd z(M); - for (int i = 0; i < M; i++) + for (int i = 0; i < M; i++) { z(i) = stddev(i) * std_normal_rng(); + } Eigen::VectorXd Y = mu + (S_ldlt.transpositionsP().transpose() * (S_ldlt.matrixL() * z)); @@ -81,8 +82,9 @@ inline Eigen::VectorXd multi_normal_definite_rng( size_t M = S_llt.matrixL().rows(); Eigen::VectorXd z(M); - for (int i = 0; i < M; i++) + for (int i = 0; i < M; i++) { z(i) = std_normal_rng(); + 
} Eigen::VectorXd Y = mu + S_llt.matrixL() * z; diff --git a/stan/math/prim/mat/prob/inv_wishart_lpdf.hpp b/stan/math/prim/mat/prob/inv_wishart_lpdf.hpp index ef95fc72d7e..b2164dc6595 100644 --- a/stan/math/prim/mat/prob/inv_wishart_lpdf.hpp +++ b/stan/math/prim/mat/prob/inv_wishart_lpdf.hpp @@ -50,9 +50,9 @@ return_type_t inv_wishart_lpdf( const Eigen::Matrix& S) { static const char* function = "inv_wishart_lpdf"; + using boost::math::tools::promote_args; using Eigen::Dynamic; using Eigen::Matrix; - using boost::math::tools::promote_args; typename index_type >::type k = S.rows(); return_type_t lp(0.0); @@ -68,8 +68,9 @@ return_type_t inv_wishart_lpdf( LDLT_factor ldlt_S(S); check_ldlt_factor(function, "LDLT_Factor of scale parameter", ldlt_S); - if (include_summand::value) + if (include_summand::value) { lp -= lmgamma(k, 0.5 * nu); + } if (include_summand::value) { lp += 0.5 * nu * log_determinant_ldlt(ldlt_S); } @@ -93,8 +94,9 @@ return_type_t inv_wishart_lpdf( S.template selfadjointView()))); lp -= 0.5 * trace(Winv_S); } - if (include_summand::value) + if (include_summand::value) { lp += nu * k * NEG_LOG_TWO_OVER_TWO; + } return lp; } diff --git a/stan/math/prim/mat/prob/lkj_corr_cholesky_lpdf.hpp b/stan/math/prim/mat/prob/lkj_corr_cholesky_lpdf.hpp index 129c3ec315b..ff15b7ba0e0 100644 --- a/stan/math/prim/mat/prob/lkj_corr_cholesky_lpdf.hpp +++ b/stan/math/prim/mat/prob/lkj_corr_cholesky_lpdf.hpp @@ -20,24 +20,27 @@ return_type_t lkj_corr_cholesky_lpdf( using boost::math::tools::promote_args; - typedef return_type_t lp_ret; + using lp_ret = return_type_t; lp_ret lp(0.0); check_positive(function, "Shape parameter", eta); check_lower_triangular(function, "Random variable", L); const unsigned int K = L.rows(); - if (K == 0) + if (K == 0) { return 0.0; + } - if (include_summand::value) + if (include_summand::value) { lp += do_lkj_constant(eta, K); + } if (include_summand::value) { const int Km1 = K - 1; Eigen::Matrix log_diagonals = L.diagonal().tail(Km1).array().log(); Eigen::Matrix values(Km1); - for (int k = 0; k < Km1; k++) + for (int k = 0; k < Km1; k++) { values(k) = (Km1 - k - 1) * log_diagonals(k); + } if ((eta == 1.0) && stan::is_constant_all >::value) { lp += sum(values); diff --git a/stan/math/prim/mat/prob/lkj_corr_lpdf.hpp b/stan/math/prim/mat/prob/lkj_corr_lpdf.hpp index 236a23ffe2b..63e1055f103 100644 --- a/stan/math/prim/mat/prob/lkj_corr_lpdf.hpp +++ b/stan/math/prim/mat/prob/lkj_corr_lpdf.hpp @@ -20,20 +20,23 @@ return_type_t do_lkj_constant(const T_shape& eta, if (eta == 1.0) { // C++ integer division is appropriate in this block Eigen::VectorXd denominator(Km1 / 2); - for (int k = 1; k <= denominator.rows(); k++) + for (int k = 1; k <= denominator.rows(); k++) { denominator(k - 1) = lgamma(2.0 * k); + } constant = -denominator.sum(); - if ((K % 2) == 1) + if ((K % 2) == 1) { constant -= 0.25 * (K * K - 1) * LOG_PI - 0.25 * (Km1 * Km1) * LOG_2 - Km1 * lgamma(0.5 * (K + 1)); - else + } else { constant -= 0.25 * K * (K - 2) * LOG_PI + 0.25 * (3 * K * K - 4 * K) * LOG_2 + K * lgamma(0.5 * K) - Km1 * lgamma(static_cast(K)); + } } else { constant = Km1 * lgamma(eta + 0.5 * Km1); - for (int k = 1; k <= Km1; k++) + for (int k = 1; k <= Km1; k++) { constant -= 0.5 * k * LOG_PI + lgamma(eta + 0.5 * (Km1 - k)); + } } return constant; } @@ -53,18 +56,22 @@ return_type_t lkj_corr_lpdf( check_corr_matrix(function, "Correlation matrix", y); const unsigned int K = y.rows(); - if (K == 0) + if (K == 0) { return 0.0; + } - if (include_summand::value) + if (include_summand::value) { lp 
+= do_lkj_constant(eta, K); + } if ((eta == 1.0) - && stan::is_constant_all >::value) + && stan::is_constant_all >::value) { return lp; + } - if (!include_summand::value) + if (!include_summand::value) { return lp; + } Eigen::Matrix values = y.ldlt().vectorD().array().log().matrix(); diff --git a/stan/math/prim/mat/prob/lkj_cov_lpdf.hpp b/stan/math/prim/mat/prob/lkj_cov_lpdf.hpp index 579a886f931..f6f33c58a4a 100644 --- a/stan/math/prim/mat/prob/lkj_cov_lpdf.hpp +++ b/stan/math/prim/mat/prob/lkj_cov_lpdf.hpp @@ -34,9 +34,11 @@ return_type_t lkj_cov_lpdf( check_positive(function, "Shape parameter", eta); check_finite(function, "Location parameter", mu); check_finite(function, "Scale parameter", sigma); - for (int m = 0; m < y.rows(); ++m) - for (int n = 0; n < y.cols(); ++n) + for (int m = 0; m < y.rows(); ++m) { + for (int n = 0; n < y.cols(); ++n) { check_finite(function, "Covariance matrix", y(m, n)); + } + } const unsigned int K = y.rows(); const Eigen::Array sds = y.diagonal().array().sqrt(); diff --git a/stan/math/prim/mat/prob/matrix_normal_prec_lpdf.hpp b/stan/math/prim/mat/prob/matrix_normal_prec_lpdf.hpp index 55c4c16cdb8..58fb2c9760e 100644 --- a/stan/math/prim/mat/prob/matrix_normal_prec_lpdf.hpp +++ b/stan/math/prim/mat/prob/matrix_normal_prec_lpdf.hpp @@ -65,8 +65,9 @@ return_type_t matrix_normal_prec_lpdf( check_finite(function, "Location parameter", Mu); check_finite(function, "Random variable", y); - if (include_summand::value) + if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * y.cols() * y.rows(); + } if (include_summand::value) { lp += log_determinant_ldlt(ldlt_Sigma) * (0.5 * y.rows()); diff --git a/stan/math/prim/mat/prob/multi_gp_cholesky_lpdf.hpp b/stan/math/prim/mat/prob/multi_gp_cholesky_lpdf.hpp index 78b95bcca7c..c201364b9ff 100644 --- a/stan/math/prim/mat/prob/multi_gp_cholesky_lpdf.hpp +++ b/stan/math/prim/mat/prob/multi_gp_cholesky_lpdf.hpp @@ -40,7 +40,7 @@ return_type_t multi_gp_cholesky_lpdf( const Eigen::Matrix& L, const Eigen::Matrix& w) { static const char* function = "multi_gp_cholesky_lpdf"; - typedef return_type_t T_lp; + using T_lp = return_type_t; check_size_match(function, "Size of random variable (rows y)", y.rows(), "Size of kernel scales (w)", w.size()); @@ -50,8 +50,9 @@ return_type_t multi_gp_cholesky_lpdf( check_positive(function, "Kernel scales", w); check_finite(function, "Random variable", y); - if (y.rows() == 0) + if (y.rows() == 0) { return 0; + } T_lp lp(0); if (include_summand::value) { diff --git a/stan/math/prim/mat/prob/multi_gp_lpdf.hpp b/stan/math/prim/mat/prob/multi_gp_lpdf.hpp index cf66444e7b6..0bbdfacb0c4 100644 --- a/stan/math/prim/mat/prob/multi_gp_lpdf.hpp +++ b/stan/math/prim/mat/prob/multi_gp_lpdf.hpp @@ -40,7 +40,7 @@ return_type_t multi_gp_lpdf( const Eigen::Matrix& Sigma, const Eigen::Matrix& w) { static const char* function = "multi_gp_lpdf"; - typedef return_type_t T_lp; + using T_lp = return_type_t; T_lp lp(0.0); check_positive(function, "Kernel rows", Sigma.rows()); @@ -57,8 +57,9 @@ return_type_t multi_gp_lpdf( check_positive_finite(function, "Kernel scales", w); check_finite(function, "Random variable", y); - if (y.rows() == 0) + if (y.rows() == 0) { return lp; + } if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * y.rows() * y.cols(); diff --git a/stan/math/prim/mat/prob/multi_normal_cholesky_lpdf.hpp b/stan/math/prim/mat/prob/multi_normal_cholesky_lpdf.hpp index adaf52f0042..29dcb510bbc 100644 --- a/stan/math/prim/mat/prob/multi_normal_cholesky_lpdf.hpp +++ 
b/stan/math/prim/mat/prob/multi_normal_cholesky_lpdf.hpp @@ -41,20 +41,21 @@ template return_type_t multi_normal_cholesky_lpdf( const T_y& y, const T_loc& mu, const T_covar& L) { static const char* function = "multi_normal_cholesky_lpdf"; - typedef typename scalar_type::type T_covar_elem; - typedef return_type_t T_return; - typedef partials_return_type_t T_partials_return; + using T_covar_elem = typename scalar_type::type; + using T_return = return_type_t; + using T_partials_return = partials_return_type_t; typedef Eigen::Matrix matrix_partials_t; - typedef Eigen::Matrix vector_partials_t; + using vector_partials_t = Eigen::Matrix; typedef Eigen::Matrix row_vector_partials_t; check_consistent_sizes_mvt(function, "y", y, "mu", mu); size_t number_of_y = length_mvt(y); size_t number_of_mu = length_mvt(mu); - if (number_of_y == 0 || number_of_mu == 0) + if (number_of_y == 0 || number_of_mu == 0) { return 0; + } vector_seq_view y_vec(y); vector_seq_view mu_vec(mu); const size_t size_vec = max_size_mvt(y, mu); @@ -102,14 +103,16 @@ return_type_t multi_normal_cholesky_lpdf( check_not_nan(function, "Random variable", y_vec[i]); } - if (unlikely(size_y == 0)) + if (unlikely(size_y == 0)) { return T_return(0); + } T_partials_return logp(0); operands_and_partials ops_partials(y, mu, L); - if (include_summand::value) + if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * size_y * size_vec; + } const matrix_partials_t inv_L_dbl = mdivide_left_tri(value_of(L)); @@ -117,8 +120,9 @@ return_type_t multi_normal_cholesky_lpdf( if (include_summand::value) { for (size_t i = 0; i < size_vec; i++) { vector_partials_t y_minus_mu_dbl(size_y); - for (int j = 0; j < size_y; j++) + for (int j = 0; j < size_y; j++) { y_minus_mu_dbl(j) = value_of(y_vec[i](j)) - value_of(mu_vec[i](j)); + } const row_vector_partials_t half = (inv_L_dbl.template triangularView() * y_minus_mu_dbl) @@ -130,12 +134,14 @@ return_type_t multi_normal_cholesky_lpdf( logp -= 0.5 * dot_self(half); if (!is_constant_all::value) { - for (int j = 0; j < size_y; j++) + for (int j = 0; j < size_y; j++) { ops_partials.edge1_.partials_vec_[i](j) -= scaled_diff(j); + } } if (!is_constant_all::value) { - for (int j = 0; j < size_y; j++) + for (int j = 0; j < size_y; j++) { ops_partials.edge2_.partials_vec_[i](j) += scaled_diff(j); + } } if (!is_constant_all::value) { ops_partials.edge3_.partials_ += scaled_diff * half; diff --git a/stan/math/prim/mat/prob/multi_normal_cholesky_rng.hpp b/stan/math/prim/mat/prob/multi_normal_cholesky_rng.hpp index 0cbf56f7702..25253cfff07 100644 --- a/stan/math/prim/mat/prob/multi_normal_cholesky_rng.hpp +++ b/stan/math/prim/mat/prob/multi_normal_cholesky_rng.hpp @@ -63,8 +63,9 @@ multi_normal_cholesky_rng( for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(L.cols()); - for (int i = 0; i < L.cols(); i++) + for (int i = 0; i < L.cols(); i++) { z(i) = std_normal_rng(); + } output[n] = Eigen::VectorXd(mu_vec[n]) + L * z; } diff --git a/stan/math/prim/mat/prob/multi_normal_lpdf.hpp b/stan/math/prim/mat/prob/multi_normal_lpdf.hpp index c04a5ed417c..fc78bf4c3d1 100644 --- a/stan/math/prim/mat/prob/multi_normal_lpdf.hpp +++ b/stan/math/prim/mat/prob/multi_normal_lpdf.hpp @@ -21,8 +21,8 @@ return_type_t multi_normal_lpdf(const T_y& y, const T_loc& mu, const T_covar& Sigma) { static const char* function = "multi_normal_lpdf"; - typedef typename scalar_type::type T_covar_elem; - typedef return_type_t lp_type; + using T_covar_elem = typename scalar_type::type; + using lp_type = return_type_t; using Eigen::Dynamic; @@ -35,8 +35,9 
@@ return_type_t multi_normal_lpdf(const T_y& y, size_t number_of_y = length_mvt(y); size_t number_of_mu = length_mvt(mu); - if (number_of_y == 0 || number_of_mu == 0) + if (number_of_y == 0 || number_of_mu == 0) { return 0.0; + } check_consistent_sizes_mvt(function, "y", y, "mu", mu); lp_type lp(0.0); @@ -91,21 +92,25 @@ return_type_t multi_normal_lpdf(const T_y& y, check_not_nan(function, "Random variable", y_vec[i]); } - if (size_y == 0) + if (size_y == 0) { return lp; + } - if (include_summand::value) + if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * size_y * size_vec; + } - if (include_summand::value) + if (include_summand::value) { lp -= 0.5 * log_determinant_ldlt(ldlt_Sigma) * size_vec; + } if (include_summand::value) { lp_type sum_lp_vec(0.0); for (size_t i = 0; i < size_vec; i++) { Eigen::Matrix, Dynamic, 1> y_minus_mu(size_y); - for (int j = 0; j < size_y; j++) + for (int j = 0; j < size_y; j++) { y_minus_mu(j) = y_vec[i](j) - mu_vec[i](j); + } sum_lp_vec += trace_inv_quad_form_ldlt(ldlt_Sigma, y_minus_mu); } lp -= 0.5 * sum_lp_vec; diff --git a/stan/math/prim/mat/prob/multi_normal_prec_lpdf.hpp b/stan/math/prim/mat/prob/multi_normal_prec_lpdf.hpp index 7b83c88134a..e58df44a094 100644 --- a/stan/math/prim/mat/prob/multi_normal_prec_lpdf.hpp +++ b/stan/math/prim/mat/prob/multi_normal_prec_lpdf.hpp @@ -21,8 +21,8 @@ template return_type_t multi_normal_prec_lpdf( const T_y& y, const T_loc& mu, const T_covar& Sigma) { static const char* function = "multi_normal_prec_lpdf"; - typedef typename scalar_type::type T_covar_elem; - typedef return_type_t lp_type; + using T_covar_elem = typename scalar_type::type; + using lp_type = return_type_t; check_positive(function, "Precision matrix rows", Sigma.rows()); check_symmetric(function, "Precision matrix", Sigma); @@ -35,8 +35,9 @@ return_type_t multi_normal_prec_lpdf( size_t number_of_y = length_mvt(y); size_t number_of_mu = length_mvt(mu); - if (number_of_y == 0 || number_of_mu == 0) + if (number_of_y == 0 || number_of_mu == 0) { return 0; + } check_consistent_sizes_mvt(function, "y", y, "mu", mu); lp_type lp(0); @@ -91,22 +92,26 @@ return_type_t multi_normal_prec_lpdf( check_not_nan(function, "Random variable", y_vec[i]); } - if (size_y == 0) + if (size_y == 0) { return lp; + } - if (include_summand::value) + if (include_summand::value) { lp += 0.5 * log_determinant_ldlt(ldlt_Sigma) * size_vec; + } - if (include_summand::value) + if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * size_y * size_vec; + } if (include_summand::value) { lp_type sum_lp_vec(0.0); for (size_t i = 0; i < size_vec; i++) { Eigen::Matrix, Eigen::Dynamic, 1> y_minus_mu( size_y); - for (int j = 0; j < size_y; j++) + for (int j = 0; j < size_y; j++) { y_minus_mu(j) = y_vec[i](j) - mu_vec[i](j); + } sum_lp_vec += trace_quad_form(Sigma, y_minus_mu); } lp -= 0.5 * sum_lp_vec; diff --git a/stan/math/prim/mat/prob/multi_normal_prec_rng.hpp b/stan/math/prim/mat/prob/multi_normal_prec_rng.hpp index 8d04a4843e1..235eb3a6c15 100644 --- a/stan/math/prim/mat/prob/multi_normal_prec_rng.hpp +++ b/stan/math/prim/mat/prob/multi_normal_prec_rng.hpp @@ -76,8 +76,9 @@ multi_normal_prec_rng(const T_loc &mu, const Eigen::MatrixXd &S, RNG &rng) { for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(S.cols()); - for (int i = 0; i < S.cols(); i++) + for (int i = 0; i < S.cols(); i++) { z(i) = std_normal_rng(); + } output[n] = Eigen::VectorXd(mu_vec[n]) + llt_of_S.matrixU().solve(z); } diff --git a/stan/math/prim/mat/prob/multi_normal_rng.hpp 
b/stan/math/prim/mat/prob/multi_normal_rng.hpp index a07dccc34b7..f3ea118a924 100644 --- a/stan/math/prim/mat/prob/multi_normal_rng.hpp +++ b/stan/math/prim/mat/prob/multi_normal_rng.hpp @@ -72,8 +72,9 @@ multi_normal_rng(const T_loc& mu, for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(S.cols()); - for (int i = 0; i < S.cols(); i++) + for (int i = 0; i < S.cols(); i++) { z(i) = std_normal_rng(); + } output[n] = Eigen::VectorXd(mu_vec[n]) + llt_of_S.matrixL() * z; } diff --git a/stan/math/prim/mat/prob/multi_student_t_lpdf.hpp b/stan/math/prim/mat/prob/multi_student_t_lpdf.hpp index cb378315e9d..d504bb41563 100644 --- a/stan/math/prim/mat/prob/multi_student_t_lpdf.hpp +++ b/stan/math/prim/mat/prob/multi_student_t_lpdf.hpp @@ -32,22 +32,24 @@ return_type_t multi_student_t_lpdf( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& Sigma) { static const char* function = "multi_student_t"; using std::log; - typedef typename scalar_type::type T_scale_elem; - typedef return_type_t lp_type; + using T_scale_elem = typename scalar_type::type; + using lp_type = return_type_t; check_not_nan(function, "Degrees of freedom parameter", nu); check_positive(function, "Degrees of freedom parameter", nu); - if (is_inf(nu)) + if (is_inf(nu)) { return multi_normal_log(y, mu, Sigma); + } using Eigen::Matrix; using std::vector; size_t number_of_y = length_mvt(y); size_t number_of_mu = length_mvt(mu); - if (number_of_y == 0 || number_of_mu == 0) + if (number_of_y == 0 || number_of_mu == 0) { return 0; + } check_consistent_sizes_mvt(function, "y", y, "mu", mu); vector_seq_view y_vec(y); @@ -102,8 +104,9 @@ return_type_t multi_student_t_lpdf( LDLT_factor ldlt_Sigma(Sigma); check_ldlt_factor(function, "LDLT_Factor of scale parameter", ldlt_Sigma); - if (size_y == 0) + if (size_y == 0) { return 0; + } lp_type lp(0); @@ -113,8 +116,9 @@ return_type_t multi_student_t_lpdf( lp -= (0.5 * size_y) * log(nu) * size_vec; } - if (include_summand::value) + if (include_summand::value) { lp -= (0.5 * size_y) * LOG_PI * size_vec; + } using Eigen::Array; @@ -127,8 +131,9 @@ return_type_t multi_student_t_lpdf( for (size_t i = 0; i < size_vec; i++) { Eigen::Matrix, Eigen::Dynamic, 1> y_minus_mu( size_y); - for (int j = 0; j < size_y; j++) + for (int j = 0; j < size_y; j++) { y_minus_mu(j) = y_vec[i](j) - mu_vec[i](j); + } sum_lp_vec += log1p(trace_inv_quad_form_ldlt(ldlt_Sigma, y_minus_mu) / nu); } diff --git a/stan/math/prim/mat/prob/multi_student_t_rng.hpp b/stan/math/prim/mat/prob/multi_student_t_rng.hpp index b2c00d60a0b..a01f4bd7640 100644 --- a/stan/math/prim/mat/prob/multi_student_t_rng.hpp +++ b/stan/math/prim/mat/prob/multi_student_t_rng.hpp @@ -38,8 +38,8 @@ multi_student_t_rng( double nu, const T_loc& mu, const Eigen::Matrix& S, RNG& rng) { using boost::normal_distribution; - using boost::random::gamma_distribution; using boost::variate_generator; + using boost::random::gamma_distribution; static const char* function = "multi_student_t_rng"; @@ -81,8 +81,9 @@ multi_student_t_rng( double w = 1.0 / gamma_rng(); for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(S.cols()); - for (int i = 0; i < S.cols(); i++) + for (int i = 0; i < S.cols(); i++) { z(i) = std::sqrt(w) * std_normal_rng(); + } output[n] = Eigen::VectorXd(mu_vec[n]) + llt_of_S.matrixL() * z; } diff --git a/stan/math/prim/mat/prob/multinomial_lpmf.hpp b/stan/math/prim/mat/prob/multinomial_lpmf.hpp index ed0a353cbb0..5345c68d573 100644 --- a/stan/math/prim/mat/prob/multinomial_lpmf.hpp +++ b/stan/math/prim/mat/prob/multinomial_lpmf.hpp @@ -29,15 +29,18 
@@ return_type_t multinomial_lpmf( if (include_summand::value) { double sum = 1.0; - for (int n : ns) + for (int n : ns) { sum += n; + } lp += lgamma(sum); - for (int n : ns) + for (int n : ns) { lp -= lgamma(n + 1.0); + } } if (include_summand::value) { - for (unsigned int i = 0; i < ns.size(); ++i) + for (unsigned int i = 0; i < ns.size(); ++i) { lp += multiply_log(ns[i], theta[i]); + } } return lp; } diff --git a/stan/math/prim/mat/prob/multinomial_rng.hpp b/stan/math/prim/mat/prob/multinomial_rng.hpp index ea59f0184da..ba293e575ed 100644 --- a/stan/math/prim/mat/prob/multinomial_rng.hpp +++ b/stan/math/prim/mat/prob/multinomial_rng.hpp @@ -23,8 +23,9 @@ inline std::vector multinomial_rng( int n_left = N; for (int k = 0; n_left > 0 && k < theta.size(); ++k) { double p = theta[k] / mass_left; - if (p > 1.0) + if (p > 1.0) { p = 1.0; + } result[k] = binomial_rng(n_left, p, rng); n_left -= result[k]; mass_left -= theta[k]; diff --git a/stan/math/prim/mat/prob/neg_binomial_2_log_glm_lpmf.hpp b/stan/math/prim/mat/prob/neg_binomial_2_log_glm_lpmf.hpp index 4850e74f774..4f04cafa7f8 100644 --- a/stan/math/prim/mat/prob/neg_binomial_2_log_glm_lpmf.hpp +++ b/stan/math/prim/mat/prob/neg_binomial_2_log_glm_lpmf.hpp @@ -72,9 +72,9 @@ return_type_t neg_binomial_2_log_glm_lpmf( using Eigen::Array; using Eigen::Dynamic; - using Eigen::Matrix; using Eigen::exp; using Eigen::log1p; + using Eigen::Matrix; const size_t N = x.rows(); const size_t M = x.cols(); @@ -85,18 +85,22 @@ return_type_t neg_binomial_2_log_glm_lpmf( check_positive_finite(function, "Precision parameter", phi); check_consistent_size(function, "Vector of dependent variables", y, N); check_consistent_size(function, "Weight vector", beta, M); - if (is_vector::value) + if (is_vector::value) { check_consistent_sizes(function, "Vector of precision parameters", phi, "Vector of dependent variables", y); - if (is_vector::value) + } + if (is_vector::value) { check_consistent_sizes(function, "Vector of intercepts", alpha, "Vector of dependent variables", y); + } - if (size_zero(y, x, beta, phi)) + if (size_zero(y, x, beta, phi)) { return 0; + } - if (!include_summand::value) + if (!include_summand::value) { return 0; + } T_partials_return logp(0); const auto& x_val = value_of_rec(x); @@ -131,18 +135,21 @@ return_type_t neg_binomial_2_log_glm_lpmf( if (include_summand::value) { if (is_vector::value) { scalar_seq_view phi_vec(phi_val); - for (size_t n = 0; n < N; ++n) + for (size_t n = 0; n < N; ++n) { logp += multiply_log(phi_vec[n], phi_vec[n]) - lgamma(phi_vec[n]); + } } else { logp += N * (multiply_log(as_scalar(phi_val), as_scalar(phi_val)) - lgamma(as_scalar(phi_val))); } } - if (include_summand::value) + if (include_summand::value) { logp -= sum(y_plus_phi * logsumexp_theta_logphi); - if (include_summand::value) + } + if (include_summand::value) { logp += sum(y_arr * theta); + } if (include_summand::value) { logp += sum(lgamma(y_plus_phi)); } @@ -163,10 +170,11 @@ return_type_t neg_binomial_2_log_glm_lpmf( = (beta_val_vec * theta_derivative.transpose()).transpose(); } if (!is_constant_all::value) { - if (is_vector::value) + if (is_vector::value) { ops_partials.edge2_.partials_ = theta_derivative; - else + } else { ops_partials.edge2_.partials_[0] = sum(theta_derivative); + } } } if (!is_constant_all::value) { diff --git a/stan/math/prim/mat/prob/normal_id_glm_lpdf.hpp b/stan/math/prim/mat/prob/normal_id_glm_lpdf.hpp index 98673565e9a..b6f38b17f55 100644 --- a/stan/math/prim/mat/prob/normal_id_glm_lpdf.hpp +++ 
b/stan/math/prim/mat/prob/normal_id_glm_lpdf.hpp @@ -68,17 +68,21 @@ return_type_t normal_id_glm_lpdf( check_positive_finite(function, "Scale vector", sigma); check_consistent_size(function, "Vector of dependent variables", y, N); check_consistent_size(function, "Weight vector", beta, M); - if (is_vector::value) + if (is_vector::value) { check_consistent_sizes(function, "Vector of scale parameters", sigma, "Vector of dependent variables", y); - if (is_vector::value) + } + if (is_vector::value) { check_consistent_sizes(function, "Vector of intercepts", alpha, "Vector of dependent variables", y); - if (size_zero(y, x, beta, sigma)) + } + if (size_zero(y, x, beta, sigma)) { return 0; + } - if (!include_summand::value) + if (!include_summand::value) { return 0; + } const auto &x_val = value_of_rec(x); const auto &beta_val = value_of_rec(beta); @@ -119,10 +123,11 @@ return_type_t normal_id_glm_lpdf( ops_partials.edge4_.partials_ = mu_derivative.transpose() * x_val; } if (!is_constant_all::value) { - if (is_vector::value) + if (is_vector::value) { ops_partials.edge3_.partials_ = mu_derivative; - else + } else { ops_partials.edge3_.partials_[0] = sum(mu_derivative); + } } if (!is_constant_all::value) { if (is_vector::value) { @@ -155,16 +160,19 @@ return_type_t normal_id_glm_lpdf( // Compute log probability. T_partials_return logp(0.0); - if (include_summand::value) + if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * N; + } if (include_summand::value) { - if (is_vector::value) + if (is_vector::value) { logp -= sum(log(sigma_val_vec)); - else + } else { logp -= N * log(as_scalar(sigma_val)); + } } - if (include_summand::value) + if (include_summand::value) { logp -= 0.5 * y_minus_mu_over_sigma_squared_sum; + } return ops_partials.build(logp); } diff --git a/stan/math/prim/mat/prob/ordered_logistic_lpmf.hpp b/stan/math/prim/mat/prob/ordered_logistic_lpmf.hpp index e47ba39bbb7..62abc917614 100644 --- a/stan/math/prim/mat/prob/ordered_logistic_lpmf.hpp +++ b/stan/math/prim/mat/prob/ordered_logistic_lpmf.hpp @@ -74,8 +74,8 @@ return_type_t ordered_logistic_lpmf(const T_y& y, const T_cut& c) { static const char* function = "ordered_logistic"; - typedef partials_return_type_t T_partials_return; - typedef typename Eigen::Matrix T_partials_vec; + using T_partials_return = partials_return_type_t; + using T_partials_vec = typename Eigen::Matrix; scalar_seq_view lam_vec(lambda); scalar_seq_view y_vec(y); @@ -86,9 +86,10 @@ return_type_t ordered_logistic_lpmf(const T_y& y, int C_l = length_mvt(c); check_consistent_sizes(function, "Integers", y, "Locations", lambda); - if (C_l > 1) + if (C_l > 1) { check_size_match(function, "Length of location variables ", N, "Number of cutpoint vectors ", C_l); + } int size_c_old = c_vec[0].size(); for (int i = 1; i < C_l; i++) { @@ -117,29 +118,34 @@ return_type_t ordered_logistic_lpmf(const T_y& y, T_partials_vec c_dbl = value_of(c_vec[0]).template cast(); for (int n = 0; n < N; ++n) { - if (C_l > 1) + if (C_l > 1) { c_dbl = value_of(c_vec[n]).template cast(); + } T_partials_return lam_dbl = value_of(lam_vec[n]); if (y_vec[n] == 1) { logp -= log1p_exp(lam_dbl - c_dbl[0]); T_partials_return d = inv_logit(lam_dbl - c_dbl[0]); - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials.edge1_.partials_[n] -= d; + } - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials.edge2_.partials_vec_[n](0) += d; + } } else if (y_vec[n] == K) { logp -= log1p_exp(c_dbl[K - 2] - lam_dbl); T_partials_return d = inv_logit(c_dbl[K - 2] - 
lam_dbl); - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials.edge1_.partials_[n] = d; + } - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials.edge2_.partials_vec_[n](K - 2) -= d; + } } else { T_partials_return d1 @@ -151,8 +157,9 @@ return_type_t ordered_logistic_lpmf(const T_y& y, logp += log_inv_logit_diff(lam_dbl - c_dbl[y_vec[n] - 2], lam_dbl - c_dbl[y_vec[n] - 1]); - if (!is_constant_all::value) + if (!is_constant_all::value) { ops_partials.edge1_.partials_[n] -= d1 + d2; + } if (!is_constant_all::value) { ops_partials.edge2_.partials_vec_[n](y_vec[n] - 2) += d1; diff --git a/stan/math/prim/mat/prob/ordered_logistic_rng.hpp b/stan/math/prim/mat/prob/ordered_logistic_rng.hpp index da843f92ea2..5889f071c94 100644 --- a/stan/math/prim/mat/prob/ordered_logistic_rng.hpp +++ b/stan/math/prim/mat/prob/ordered_logistic_rng.hpp @@ -28,8 +28,9 @@ inline int ordered_logistic_rng( Eigen::VectorXd cut(c.rows() + 1); cut(0) = 1 - inv_logit(eta - c(0)); - for (int j = 1; j < c.rows(); j++) + for (int j = 1; j < c.rows(); j++) { cut(j) = inv_logit(eta - c(j - 1)) - inv_logit(eta - c(j)); + } cut(c.rows()) = inv_logit(eta - c(c.rows() - 1)); return categorical_rng(cut, rng); diff --git a/stan/math/prim/mat/prob/ordered_probit_rng.hpp b/stan/math/prim/mat/prob/ordered_probit_rng.hpp index 3d2a5ec9811..72d3d7fdce4 100644 --- a/stan/math/prim/mat/prob/ordered_probit_rng.hpp +++ b/stan/math/prim/mat/prob/ordered_probit_rng.hpp @@ -22,8 +22,9 @@ inline int ordered_probit_rng(double eta, const Eigen::VectorXd& c, RNG& rng) { Eigen::VectorXd cut(c.rows() + 1); cut(0) = 1 - Phi(eta - c(0)); - for (int j = 1; j < c.rows(); j++) + for (int j = 1; j < c.rows(); j++) { cut(j) = Phi(eta - c(j - 1)) - Phi(eta - c(j)); + } cut(c.rows()) = Phi(eta - c(c.rows() - 1)); return categorical_rng(cut, rng); diff --git a/stan/math/prim/mat/prob/poisson_log_glm_lpmf.hpp b/stan/math/prim/mat/prob/poisson_log_glm_lpmf.hpp index 6822e44ee6f..7360a140b1f 100644 --- a/stan/math/prim/mat/prob/poisson_log_glm_lpmf.hpp +++ b/stan/math/prim/mat/prob/poisson_log_glm_lpmf.hpp @@ -48,7 +48,7 @@ return_type_t poisson_log_glm_lpmf(const T_y& y, const T_alpha& alpha, const T_beta& beta) { static const char* function = "poisson_log_glm_lpmf"; - typedef partials_return_type_t T_partials_return; + using T_partials_return = partials_return_type_t; typedef typename std::conditional_t< is_vector::value, Eigen::Array, -1, 1>, @@ -65,14 +65,17 @@ return_type_t poisson_log_glm_lpmf(const T_y& y, check_nonnegative(function, "Vector of dependent variables", y); check_consistent_size(function, "Vector of dependent variables", y, N); check_consistent_size(function, "Weight vector", beta, M); - if (is_vector::value) + if (is_vector::value) { check_consistent_sizes(function, "Vector of intercepts", alpha, "Vector of dependent variables", y); - if (size_zero(y, x, beta)) + } + if (size_zero(y, x, beta)) { return 0; + } - if (!include_summand::value) + if (!include_summand::value) { return 0; + } T_partials_return logp(0); @@ -118,10 +121,11 @@ return_type_t poisson_log_glm_lpmf(const T_y& y, = (beta_val_vec * theta_derivative.transpose()).transpose(); } if (!is_constant_all::value) { - if (is_vector::value) + if (is_vector::value) { ops_partials.edge2_.partials_ = theta_derivative; - else + } else { ops_partials.edge2_.partials_[0] = theta_derivative_sum; + } } return ops_partials.build(logp); } diff --git a/stan/math/prim/mat/prob/wishart_lpdf.hpp b/stan/math/prim/mat/prob/wishart_lpdf.hpp index 
88f7db0d628..3f9d957a21b 100644 --- a/stan/math/prim/mat/prob/wishart_lpdf.hpp +++ b/stan/math/prim/mat/prob/wishart_lpdf.hpp @@ -52,10 +52,10 @@ return_type_t wishart_lpdf( const Eigen::Matrix& S) { static const char* function = "wishart_lpdf"; + using boost::math::tools::promote_args; using Eigen::Dynamic; using Eigen::Lower; using Eigen::Matrix; - using boost::math::tools::promote_args; typename index_type >::type k = W.rows(); return_type_t lp(0.0); @@ -71,14 +71,17 @@ return_type_t wishart_lpdf( LDLT_factor ldlt_S(S); check_ldlt_factor(function, "LDLT_Factor of scale parameter", ldlt_S); - if (include_summand::value) + if (include_summand::value) { lp += nu * k * NEG_LOG_TWO_OVER_TWO; + } - if (include_summand::value) + if (include_summand::value) { lp -= lmgamma(k, 0.5 * nu); + } - if (include_summand::value) + if (include_summand::value) { lp -= 0.5 * nu * log_determinant_ldlt(ldlt_S); + } if (include_summand::value) { Matrix, Dynamic, Dynamic> Sinv_W( @@ -87,8 +90,9 @@ return_type_t wishart_lpdf( lp -= 0.5 * trace(Sinv_W); } - if (include_summand::value && nu != (k + 1)) + if (include_summand::value && nu != (k + 1)) { lp += 0.5 * (nu - k - 1.0) * log_determinant_ldlt(ldlt_W); + } return lp; } diff --git a/stan/math/prim/mat/prob/wishart_rng.hpp b/stan/math/prim/mat/prob/wishart_rng.hpp index fa800c1052a..b1a57de3dd5 100644 --- a/stan/math/prim/mat/prob/wishart_rng.hpp +++ b/stan/math/prim/mat/prob/wishart_rng.hpp @@ -24,8 +24,9 @@ inline Eigen::MatrixXd wishart_rng(double nu, const Eigen::MatrixXd& S, MatrixXd B = MatrixXd::Zero(k, k); for (int j = 0; j < k; ++j) { - for (int i = 0; i < j; ++i) + for (int i = 0; i < j; ++i) { B(i, j) = normal_rng(0, 1, rng); + } B(j, j) = std::sqrt(chi_square_rng(nu - j, rng)); } return crossprod(B * S.llt().matrixU()); diff --git a/stan/math/prim/mat/vectorize/apply_scalar_unary.hpp b/stan/math/prim/mat/vectorize/apply_scalar_unary.hpp index 09296073d92..e36fef6a8b1 100644 --- a/stan/math/prim/mat/vectorize/apply_scalar_unary.hpp +++ b/stan/math/prim/mat/vectorize/apply_scalar_unary.hpp @@ -36,7 +36,7 @@ struct apply_scalar_unary { /** * Type of underlying scalar for the matrix type T. */ - typedef typename Eigen::internal::traits::Scalar scalar_t; + using scalar_t = typename Eigen::internal::traits::Scalar; /** * Return type for applying the function elementwise to a matrix @@ -70,7 +70,7 @@ struct apply_scalar_unary { /** * The return type, double. */ - typedef double return_t; + using return_t = double; /** * Apply the function specified by F to the specified argument. @@ -97,7 +97,7 @@ struct apply_scalar_unary { /** * The return type, double. */ - typedef double return_t; + using return_t = double; /** * Apply the function specified by F to the specified argument. 
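Note: the hunks above (and throughout this patch) replace typedef declarations with C++11 alias declarations. The snippet below is a minimal illustration, using hypothetical alias names rather than anything from the Stan sources, of why the two spellings are interchangeable for plain aliases while only the using form extends to alias templates.

#include <type_traits>
#include <vector>

// Hypothetical aliases for illustration only -- not part of stan::math.
typedef std::vector<double> vec_d_old;   // pre-C++11 spelling
using vec_d_new = std::vector<double>;   // equivalent alias declaration

static_assert(std::is_same<vec_d_old, vec_d_new>::value,
              "both spellings name std::vector<double>");

// Alias templates have no typedef counterpart, a practical advantage of
// standardizing on the using form.
template <typename T>
using nested_vec = std::vector<std::vector<T>>;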
@@ -140,8 +140,9 @@ struct apply_scalar_unary > { */ static inline return_t apply(const std::vector& x) { return_t fx(x.size()); - for (size_t i = 0; i < x.size(); ++i) + for (size_t i = 0; i < x.size(); ++i) { fx[i] = apply_scalar_unary::apply(x[i]); + } return fx; } }; diff --git a/stan/math/prim/scal/err/check_2F1_converges.hpp b/stan/math/prim/scal/err/check_2F1_converges.hpp index f42bc04ab33..3db50a2d0f6 100644 --- a/stan/math/prim/scal/err/check_2F1_converges.hpp +++ b/stan/math/prim/scal/err/check_2F1_converges.hpp @@ -56,8 +56,9 @@ inline void check_2F1_converges(const char* function, const T_a1& a1, bool is_undefined = is_nonpositive_integer(b1) && fabs(b1) <= num_terms; if (!is_undefined - && (is_polynomial || fabs(z) < 1 || (fabs(z) == 1 && b1 > a1 + a2))) + && (is_polynomial || fabs(z) < 1 || (fabs(z) == 1 && b1 > a1 + a2))) { return; + } std::stringstream msg; msg << "called from function '" << function << "', " diff --git a/stan/math/prim/scal/err/check_3F2_converges.hpp b/stan/math/prim/scal/err/check_3F2_converges.hpp index a876ab66e0d..135739508ea 100644 --- a/stan/math/prim/scal/err/check_3F2_converges.hpp +++ b/stan/math/prim/scal/err/check_3F2_converges.hpp @@ -68,12 +68,15 @@ inline void check_3F2_converges(const char* function, const T_a1& a1, bool is_undefined = (is_nonpositive_integer(b1) && fabs(b1) <= num_terms) || (is_nonpositive_integer(b2) && fabs(b2) <= num_terms); - if (is_polynomial && !is_undefined) + if (is_polynomial && !is_undefined) { return; - if (fabs(z) < 1.0 && !is_undefined) + } + if (fabs(z) < 1.0 && !is_undefined) { return; - if (fabs(z) == 1.0 && !is_undefined && b1 + b2 > a1 + a2 + a3) + } + if (fabs(z) == 1.0 && !is_undefined && b1 + b2 > a1 + a2 + a3) { return; + } std::stringstream msg; msg << "called from function '" << function << "', " diff --git a/stan/math/prim/scal/err/check_consistent_size.hpp b/stan/math/prim/scal/err/check_consistent_size.hpp index a7e65329c82..7318a7ef7c3 100644 --- a/stan/math/prim/scal/err/check_consistent_size.hpp +++ b/stan/math/prim/scal/err/check_consistent_size.hpp @@ -23,8 +23,9 @@ template inline void check_consistent_size(const char* function, const char* name, const T& x, size_t expected_size) { if (!is_vector::value - || (is_vector::value && expected_size == stan::size_of(x))) + || (is_vector::value && expected_size == stan::size_of(x))) { return; + } std::stringstream msg; msg << ", expecting dimension = " << expected_size diff --git a/stan/math/prim/scal/err/check_finite.hpp b/stan/math/prim/scal/err/check_finite.hpp index af3c59302f1..4dbbbbeaf8d 100644 --- a/stan/math/prim/scal/err/check_finite.hpp +++ b/stan/math/prim/scal/err/check_finite.hpp @@ -14,8 +14,9 @@ namespace internal { template struct finite { static void check(const char* function, const char* name, const T_y& y) { - if (!(boost::math::isfinite(value_of_rec(y)))) + if (!(boost::math::isfinite(value_of_rec(y)))) { domain_error(function, name, y, "is ", ", but must be finite!"); + } } }; @@ -23,8 +24,9 @@ template struct finite { static void check(const char* function, const char* name, const T_y& y) { for (size_t n = 0; n < stan::length(y); n++) { - if (!(boost::math::isfinite(value_of_rec(stan::get(y, n))))) + if (!(boost::math::isfinite(value_of_rec(stan::get(y, n))))) { domain_error_vec(function, name, y, n, "is ", ", but must be finite!"); + } } } }; diff --git a/stan/math/prim/scal/err/check_nonnegative.hpp b/stan/math/prim/scal/err/check_nonnegative.hpp index 8e41702cf3e..5ea18713ca5 100644 --- 
a/stan/math/prim/scal/err/check_nonnegative.hpp +++ b/stan/math/prim/scal/err/check_nonnegative.hpp @@ -15,8 +15,9 @@ struct nonnegative { static void check(const char* function, const char* name, const T_y& y) { // have to use not is_unsigned. is_signed will be false // floating point types that have no unsigned versions. - if (!std::is_unsigned::value && !(y >= 0)) + if (!std::is_unsigned::value && !(y >= 0)) { domain_error(function, name, y, "is ", ", but must be >= 0!"); + } } }; @@ -25,8 +26,9 @@ struct nonnegative { static void check(const char* function, const char* name, const T_y& y) { for (size_t n = 0; n < stan::length(y); n++) { if (!std::is_unsigned::type>::value - && !(stan::get(y, n) >= 0)) + && !(stan::get(y, n) >= 0)) { domain_error_vec(function, name, y, n, "is ", ", but must be >= 0!"); + } } } }; diff --git a/stan/math/prim/scal/err/check_not_nan.hpp b/stan/math/prim/scal/err/check_not_nan.hpp index 9534147b8c6..9d8dbd285fe 100644 --- a/stan/math/prim/scal/err/check_not_nan.hpp +++ b/stan/math/prim/scal/err/check_not_nan.hpp @@ -14,8 +14,9 @@ namespace internal { template struct not_nan { static void check(const char* function, const char* name, const T_y& y) { - if (is_nan(value_of_rec(y))) + if (is_nan(value_of_rec(y))) { domain_error(function, name, y, "is ", ", but must not be nan!"); + } } }; @@ -23,8 +24,9 @@ template struct not_nan { static void check(const char* function, const char* name, const T_y& y) { for (size_t n = 0; n < stan::length(y); n++) { - if (is_nan(value_of_rec(stan::get(y, n)))) + if (is_nan(value_of_rec(stan::get(y, n)))) { domain_error_vec(function, name, y, n, "is ", ", but must not be nan!"); + } } } }; diff --git a/stan/math/prim/scal/err/check_positive.hpp b/stan/math/prim/scal/err/check_positive.hpp index b2aedebfb1a..cac27a2c456 100644 --- a/stan/math/prim/scal/err/check_positive.hpp +++ b/stan/math/prim/scal/err/check_positive.hpp @@ -18,8 +18,9 @@ struct positive { static void check(const char* function, const char* name, const T_y& y) { // have to use not is_unsigned. is_signed will be false // floating point types that have no unsigned versions. 
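The comment above motivates testing !std::is_unsigned before comparing against zero. A minimal sketch of that guard, with a hypothetical helper name and a plain exception standing in for Stan's domain_error, is:

#include <stdexcept>
#include <string>
#include <type_traits>

// Hypothetical helper, for illustration only: for unsigned T the comparison
// y >= 0 is always true (and typically draws a tautological-compare warning),
// so the test is skipped via !std::is_unsigned<T>::value, mirroring the guard
// in check_nonnegative / check_positive above.
template <typename T>
void require_nonnegative_sketch(const char* name, const T& y) {
  if (!std::is_unsigned<T>::value && !(y >= 0)) {
    throw std::domain_error(std::string(name) + " must be >= 0");
  }
}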
- if (!std::is_unsigned::value && !(y > 0)) + if (!std::is_unsigned::value && !(y > 0)) { domain_error(function, name, y, "is ", ", but must be > 0!"); + } } }; @@ -29,8 +30,9 @@ struct positive { using stan::length; for (size_t n = 0; n < length(y); n++) { if (!std::is_unsigned::type>::value - && !(stan::get(y, n) > 0)) + && !(stan::get(y, n) > 0)) { domain_error_vec(function, name, y, n, "is ", ", but must be > 0!"); + } } } }; diff --git a/stan/math/prim/scal/err/check_size_match.hpp b/stan/math/prim/scal/err/check_size_match.hpp index d48a8042019..5586c068eea 100644 --- a/stan/math/prim/scal/err/check_size_match.hpp +++ b/stan/math/prim/scal/err/check_size_match.hpp @@ -23,8 +23,9 @@ namespace math { template inline void check_size_match(const char* function, const char* name_i, T_size1 i, const char* name_j, T_size2 j) { - if (i == static_cast(j)) + if (i == static_cast(j)) { return; + } std::ostringstream msg; msg << ") and " << name_j << " (" << j << ") must match in size"; @@ -49,8 +50,9 @@ template inline void check_size_match(const char* function, const char* expr_i, const char* name_i, T_size1 i, const char* expr_j, const char* name_j, T_size2 j) { - if (i == static_cast(j)) + if (i == static_cast(j)) { return; + } std::ostringstream updated_name; updated_name << expr_i << name_i; std::string updated_name_str(updated_name.str()); diff --git a/stan/math/prim/scal/err/is_less_or_equal.hpp b/stan/math/prim/scal/err/is_less_or_equal.hpp index 59146667e14..76da280739c 100644 --- a/stan/math/prim/scal/err/is_less_or_equal.hpp +++ b/stan/math/prim/scal/err/is_less_or_equal.hpp @@ -22,8 +22,9 @@ template inline bool is_less_or_equal(const T_y& y, const T_high& high) { scalar_seq_view high_vec(high); for (size_t n = 0; n < stan::length(high); n++) { - if (!(stan::get(y, n) <= high_vec[n])) + if (!(stan::get(y, n) <= high_vec[n])) { return false; + } } return true; } diff --git a/stan/math/prim/scal/err/is_not_nan.hpp b/stan/math/prim/scal/err/is_not_nan.hpp index 3dd8484615f..38999b04dc1 100644 --- a/stan/math/prim/scal/err/is_not_nan.hpp +++ b/stan/math/prim/scal/err/is_not_nan.hpp @@ -20,8 +20,9 @@ namespace math { template inline bool is_not_nan(const T_y& y) { for (size_t n = 0; n < stan::length(y); ++n) { - if (is_nan(value_of_rec(stan::get(y, n)))) + if (is_nan(value_of_rec(stan::get(y, n)))) { return false; + } } return true; } diff --git a/stan/math/prim/scal/err/is_positive.hpp b/stan/math/prim/scal/err/is_positive.hpp index d765cbaeb5a..73e930c7418 100644 --- a/stan/math/prim/scal/err/is_positive.hpp +++ b/stan/math/prim/scal/err/is_positive.hpp @@ -17,8 +17,9 @@ namespace math { template inline bool is_positive(const T_y& y) { for (size_t n = 0; n < stan::length(y); ++n) { - if (!(stan::get(y, n) > 0)) + if (!(stan::get(y, n) > 0)) { return false; + } } return true; } diff --git a/stan/math/prim/scal/err/is_scal_finite.hpp b/stan/math/prim/scal/err/is_scal_finite.hpp index fdf1ad5f385..167e1db3f10 100644 --- a/stan/math/prim/scal/err/is_scal_finite.hpp +++ b/stan/math/prim/scal/err/is_scal_finite.hpp @@ -19,8 +19,9 @@ namespace math { template inline bool is_scal_finite(const T_y& y) { for (size_t n = 0; n < stan::length(y); ++n) { - if (!(boost::math::isfinite(value_of_rec(stan::get(y, n))))) + if (!(boost::math::isfinite(value_of_rec(stan::get(y, n))))) { return false; + } } return true; } diff --git a/stan/math/prim/scal/fun/F32.hpp b/stan/math/prim/scal/fun/F32.hpp index fd303431200..097581b7269 100644 --- a/stan/math/prim/scal/fun/F32.hpp +++ 
b/stan/math/prim/scal/fun/F32.hpp @@ -63,16 +63,18 @@ T F32(const T& a1, const T& a2, const T& a3, const T& b1, const T& b2, for (int k = 0; k <= max_steps; ++k) { T p = (a1 + k) * (a2 + k) * (a3 + k) / ((b1 + k) * (b2 + k) * (k + 1)); - if (p == 0.0) + if (p == 0.0) { return t_acc; + } log_t += log(fabs(p)) + log_z; t_sign = p >= 0.0 ? t_sign : -t_sign; T t_new = t_sign > 0.0 ? exp(log_t) : -exp(log_t); t_acc += t_new; - if (fabs(t_new) <= precision) + if (fabs(t_new) <= precision) { return t_acc; + } if (is_inf(t_acc)) { domain_error("F32", "sum (output)", t_acc, "overflow ", diff --git a/stan/math/prim/scal/fun/Phi.hpp b/stan/math/prim/scal/fun/Phi.hpp index 8e037fa3a19..dbbbaa3f2ee 100644 --- a/stan/math/prim/scal/fun/Phi.hpp +++ b/stan/math/prim/scal/fun/Phi.hpp @@ -29,14 +29,15 @@ namespace math { */ inline double Phi(double x) { check_not_nan("Phi", "x", x); - if (x < -37.5) + if (x < -37.5) { return 0; - else if (x < -5.0) + } else if (x < -5.0) { return 0.5 * erfc(-INV_SQRT_2 * x); - else if (x > 8.25) + } else if (x > 8.25) { return 1; - else + } else { return 0.5 * (1.0 + erf(INV_SQRT_2 * x)); + } } } // namespace math diff --git a/stan/math/prim/scal/fun/choose.hpp b/stan/math/prim/scal/fun/choose.hpp index 3f89d2f0e6e..54cc02bcdb2 100644 --- a/stan/math/prim/scal/fun/choose.hpp +++ b/stan/math/prim/scal/fun/choose.hpp @@ -29,8 +29,9 @@ namespace math { inline int choose(int n, int k) { check_nonnegative("choose", "n", n); check_nonnegative("choose", "k", k); - if (k > n) + if (k > n) { return 0; + } const double choices = boost::math::binomial_coefficient(n, k); check_less_or_equal("choose", "n choose k", choices, std::numeric_limits::max()); diff --git a/stan/math/prim/scal/fun/common_type.hpp b/stan/math/prim/scal/fun/common_type.hpp index 41e9a67af68..32388944681 100644 --- a/stan/math/prim/scal/fun/common_type.hpp +++ b/stan/math/prim/scal/fun/common_type.hpp @@ -23,7 +23,7 @@ namespace math { */ template struct common_type { - typedef return_type_t type; + using type = return_type_t; }; } // namespace math diff --git a/stan/math/prim/scal/fun/divide.hpp b/stan/math/prim/scal/fun/divide.hpp index c4a6d69dd7f..cbdf8145f68 100644 --- a/stan/math/prim/scal/fun/divide.hpp +++ b/stan/math/prim/scal/fun/divide.hpp @@ -22,8 +22,9 @@ inline return_type_t divide(const T1& x, const T2& y) { } inline int divide(int x, int y) { - if (unlikely(y == 0)) + if (unlikely(y == 0)) { domain_error("divide", "denominator is", y, ""); + } return x / y; } diff --git a/stan/math/prim/scal/fun/fdim.hpp b/stan/math/prim/scal/fun/fdim.hpp index 86dc1200edb..1c5e6db7ada 100644 --- a/stan/math/prim/scal/fun/fdim.hpp +++ b/stan/math/prim/scal/fun/fdim.hpp @@ -23,10 +23,11 @@ namespace math { */ template inline return_type_t fdim(T1 x, T2 y) { - typedef return_type_t return_t; + using return_t = return_type_t; using std::numeric_limits; - if (is_any_nan(x, y)) + if (is_any_nan(x, y)) { return NOT_A_NUMBER; + } return (x <= y) ? 0 : x - y; } diff --git a/stan/math/prim/scal/fun/fmax.hpp b/stan/math/prim/scal/fun/fmax.hpp index 48504fdcca0..bf68cbf76b5 100644 --- a/stan/math/prim/scal/fun/fmax.hpp +++ b/stan/math/prim/scal/fun/fmax.hpp @@ -18,10 +18,12 @@ namespace math { */ template inline return_type_t fmax(const T1& x, const T2& y) { - if (is_nan(x)) + if (is_nan(x)) { return y; - if (is_nan(y)) + } + if (is_nan(y)) { return x; + } return x > y ? 
x : y; } diff --git a/stan/math/prim/scal/fun/fmin.hpp b/stan/math/prim/scal/fun/fmin.hpp index e1ba1945e3e..e6cf1cd9fcc 100644 --- a/stan/math/prim/scal/fun/fmin.hpp +++ b/stan/math/prim/scal/fun/fmin.hpp @@ -18,10 +18,12 @@ namespace math { */ template inline return_type_t fmin(const T1& x, const T2& y) { - if (is_nan(x)) + if (is_nan(x)) { return y; - if (is_nan(y)) + } + if (is_nan(y)) { return x; + } return y > x ? x : y; } diff --git a/stan/math/prim/scal/fun/gamma_p.hpp b/stan/math/prim/scal/fun/gamma_p.hpp index 510f5dbb7e8..0a8eb48e2d3 100644 --- a/stan/math/prim/scal/fun/gamma_p.hpp +++ b/stan/math/prim/scal/fun/gamma_p.hpp @@ -65,10 +65,12 @@ namespace math { * if z is at a pole of the function */ inline double gamma_p(double z, double a) { - if (is_nan(z)) + if (is_nan(z)) { return not_a_number(); - if (is_nan(a)) + } + if (is_nan(a)) { return not_a_number(); + } check_positive("gamma_p", "first argument (z)", z); check_nonnegative("gamma_p", "second argument (a)", a); return boost::math::gamma_p(z, a, boost_policy_t()); diff --git a/stan/math/prim/scal/fun/grad_2F1.hpp b/stan/math/prim/scal/fun/grad_2F1.hpp index e3719fb5665..e2976c11829 100644 --- a/stan/math/prim/scal/fun/grad_2F1.hpp +++ b/stan/math/prim/scal/fun/grad_2F1.hpp @@ -45,8 +45,9 @@ void grad_2F1(T& g_a1, T& g_b1, const T& a1, const T& a2, const T& b1, g_b1 = 0.0; T log_g_old[2]; - for (auto& i : log_g_old) + for (auto& i : log_g_old) { i = NEGATIVE_INFTY; + } T log_t_old = 0.0; T log_t_new = 0.0; @@ -56,13 +57,15 @@ void grad_2F1(T& g_a1, T& g_b1, const T& a1, const T& a2, const T& b1, double log_t_new_sign = 1.0; double log_t_old_sign = 1.0; double log_g_old_sign[2]; - for (double& x : log_g_old_sign) + for (double& x : log_g_old_sign) { x = 1.0; + } for (int k = 0; k <= max_steps; ++k) { T p = (a1 + k) * (a2 + k) / ((b1 + k) * (1 + k)); - if (p == 0) + if (p == 0) { return; + } log_t_new += log(fabs(p)) + log_z; log_t_new_sign = p >= 0.0 ? log_t_new_sign : -log_t_new_sign; @@ -80,8 +83,9 @@ void grad_2F1(T& g_a1, T& g_b1, const T& a1, const T& a2, const T& b1, g_a1 += log_g_old_sign[0] > 0 ? exp(log_g_old[0]) : -exp(log_g_old[0]); g_b1 += log_g_old_sign[1] > 0 ? exp(log_g_old[1]) : -exp(log_g_old[1]); - if (log_t_new <= log(precision)) + if (log_t_new <= log(precision)) { return; // implicit abs + } log_t_old = log_t_new; log_t_old_sign = log_t_new_sign; diff --git a/stan/math/prim/scal/fun/grad_F32.hpp b/stan/math/prim/scal/fun/grad_F32.hpp index b73a03a786f..3b777e85dfc 100644 --- a/stan/math/prim/scal/fun/grad_F32.hpp +++ b/stan/math/prim/scal/fun/grad_F32.hpp @@ -44,12 +44,14 @@ void grad_F32(T* g, const T& a1, const T& a2, const T& a3, const T& b1, using std::fabs; using std::log; - for (int i = 0; i < 6; ++i) + for (int i = 0; i < 6; ++i) { g[i] = 0.0; + } T log_g_old[6]; - for (auto& x : log_g_old) + for (auto& x : log_g_old) { x = NEGATIVE_INFTY; + } T log_t_old = 0.0; T log_t_new = 0.0; @@ -59,13 +61,15 @@ void grad_F32(T* g, const T& a1, const T& a2, const T& a3, const T& b1, double log_t_new_sign = 1.0; double log_t_old_sign = 1.0; double log_g_old_sign[6]; - for (int i = 0; i < 6; ++i) + for (int i = 0; i < 6; ++i) { log_g_old_sign[i] = 1.0; + } for (int k = 0; k <= max_steps; ++k) { T p = (a1 + k) * (a2 + k) * (a3 + k) / ((b1 + k) * (b2 + k) * (1 + k)); - if (p == 0) + if (p == 0) { return; + } log_t_new += log(fabs(p)) + log_z; log_t_new_sign = p >= 0.0 ? 
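grad_2F1 above (and grad_F32 just below it) keep each possibly-negative series term t as a sign plus log|t| so that small magnitudes never underflow, and only exponentiate when adding into an accumulator. A self-contained sketch of that bookkeeping, using made-up factors p_k:

    #include <cmath>
    #include <cstdio>

    int main() {
      double log_t = 0.0;   // log|t|, with t starting at 1
      double t_sign = 1.0;  // sign of t
      double acc = 0.0;

      // Multiply t by a few (possibly negative) factors and accumulate
      // sign * exp(log|t|), mirroring the log_t_new / log_t_new_sign updates
      // in the hunks above.
      const double p[] = {0.5, -0.25, 0.125};
      for (double pk : p) {
        log_t += std::log(std::fabs(pk));
        t_sign = pk >= 0.0 ? t_sign : -t_sign;
        acc += t_sign * std::exp(log_t);
      }
      std::printf("%g\n", acc);  // 0.5 - 0.125 - 0.015625 = 0.359375
    }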
log_t_new_sign : -log_t_new_sign; @@ -110,8 +114,9 @@ void grad_F32(T* g, const T& a1, const T& a2, const T& a3, const T& b1, g[i] += log_g_old_sign[i] * exp(log_g_old[i]); } - if (log_t_new <= log(precision)) + if (log_t_new <= log(precision)) { return; // implicit abs + } log_t_old = log_t_new; log_t_old_sign = log_t_new_sign; diff --git a/stan/math/prim/scal/fun/grad_inc_beta.hpp b/stan/math/prim/scal/fun/grad_inc_beta.hpp index 366549895e0..f02901eeb44 100644 --- a/stan/math/prim/scal/fun/grad_inc_beta.hpp +++ b/stan/math/prim/scal/fun/grad_inc_beta.hpp @@ -27,8 +27,9 @@ inline void grad_inc_beta(double& g1, double& g2, double a, double b, double C = exp(a * c1 + b * c2) / a; double dF1 = 0; double dF2 = 0; - if (C) + if (C) { grad_2F1(dF1, dF2, a + b, 1.0, a + 1, z); + } g1 = fma((c1 - inv(a)), c3, C * (dF1 + dF2)); g2 = fma(c2, c3, C * dF1); } diff --git a/stan/math/prim/scal/fun/grad_reg_inc_gamma.hpp b/stan/math/prim/scal/fun/grad_reg_inc_gamma.hpp index 31339e447dd..4dd837077d5 100644 --- a/stan/math/prim/scal/fun/grad_reg_inc_gamma.hpp +++ b/stan/math/prim/scal/fun/grad_reg_inc_gamma.hpp @@ -52,10 +52,11 @@ return_type_t grad_reg_inc_gamma(T1 a, T2 z, T1 g, T1 dig, using std::exp; using std::fabs; using std::log; - typedef return_type_t TP; + using TP = return_type_t; - if (is_any_nan(a, z, g, dig)) + if (is_any_nan(a, z, g, dig)) { return std::numeric_limits::quiet_NaN(); + } T2 l = log(z); if (z >= a && z >= 8) { @@ -77,8 +78,9 @@ return_type_t grad_reg_inc_gamma(T1 a, T2 z, T1 g, T1 dig, fac *= a_minus_one_minus_k; delta = dfac / zpow; - if (is_inf(delta)) + if (is_inf(delta)) { domain_error("grad_reg_inc_gamma", "is not converging", "", ""); + } } return gamma_q(a, z) * (l - dig) + exp(-z + (a - 1) * l) * S / g; @@ -94,10 +96,12 @@ return_type_t grad_reg_inc_gamma(T1 a, T2 z, T1 g, T1 dig, log_s += log_z - log(k); s_sign = -s_sign; log_delta = log_s - multiply_log(2, k + a); - if (is_inf(log_delta)) + if (is_inf(log_delta)) { domain_error("grad_reg_inc_gamma", "is not converging", "", ""); - if (log_delta <= log(precision)) + } + if (log_delta <= log(precision)) { return gamma_p(a, z) * (dig - l) + exp(a * l) * S / g; + } } domain_error("grad_reg_inc_gamma", "k (internal counter)", max_steps, "exceeded ", diff --git a/stan/math/prim/scal/fun/grad_reg_lower_inc_gamma.hpp b/stan/math/prim/scal/fun/grad_reg_lower_inc_gamma.hpp index 38796b4267f..2cc194e2f2a 100644 --- a/stan/math/prim/scal/fun/grad_reg_lower_inc_gamma.hpp +++ b/stan/math/prim/scal/fun/grad_reg_lower_inc_gamma.hpp @@ -109,15 +109,17 @@ return_type_t grad_reg_lower_inc_gamma(const T1& a, const T2& z, using std::exp; using std::log; using std::pow; - typedef return_type_t TP; + using TP = return_type_t; - if (is_any_nan(a, z)) + if (is_any_nan(a, z)) { return std::numeric_limits::quiet_NaN(); + } check_positive_finite("grad_reg_lower_inc_gamma", "a", a); - if (z == 0.0) + if (z == 0.0) { return 0.0; + } check_positive_finite("grad_reg_lower_inc_gamma", "z", z); if ((a < 0.8 && z > 15.0) || (a < 12.0 && z > 30.0) @@ -140,13 +142,15 @@ return_type_t grad_reg_lower_inc_gamma(const T1& a, const T2& z, while (true) { term = exp(a_plus_n * log_z - lgamma_a_plus_n_plus_1); sum_a += term; - if (term <= precision) + if (term <= precision) { break; - if (n >= max_steps) + } + if (n >= max_steps) { domain_error("grad_reg_lower_inc_gamma", "n (internal counter)", max_steps, "exceeded ", " iterations, gamma_p(a,z) gradient (a) " "did not converge."); + } ++n; lgamma_a_plus_n_plus_1 += log1p(a_plus_n); ++a_plus_n; @@ -160,13 
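Both F32 and grad_F32 appear in this diff, so one cheap way to sanity-check them together is a finite-difference comparison. The sketch below assumes grad_F32 orders its six outputs as (a1, a2, a3, b1, b2, z) and that the precision / max_steps parameters carry the defaults declared in the headers; both are assumptions read off the parameter lists, not something these hunks state.

    #include <stan/math.hpp>
    #include <cstdio>

    int main() {
      using stan::math::F32;
      using stan::math::grad_F32;
      double a1 = 1.1, a2 = 1.2, a3 = 1.3, b1 = 2.1, b2 = 2.2, z = 0.4;
      double h = 1e-6;

      double g[6];  // assumed ordering: d/da1, d/da2, d/da3, d/db1, d/db2, d/dz
      grad_F32(g, a1, a2, a3, b1, b2, z);

      double fd_a1
          = (F32(a1 + h, a2, a3, b1, b2, z) - F32(a1 - h, a2, a3, b1, b2, z)) / (2 * h);
      std::printf("d/da1: analytic %g, finite difference %g\n", g[0], fd_a1);
    }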
+164,15 @@ return_type_t grad_reg_lower_inc_gamma(const T1& a, const T2& z, term = exp(a_plus_n * log_z - lgamma_a_plus_n_plus_1) * digamma(a_plus_n + 1); sum_b += term; - if (term <= precision) + if (term <= precision) { return emz * (log_z * sum_a - sum_b); - if (n >= max_steps) + } + if (n >= max_steps) { domain_error("grad_reg_lower_inc_gamma", "n (internal counter)", max_steps, "exceeded ", " iterations, gamma_p(a,z) gradient (a) " "did not converge."); + } ++n; lgamma_a_plus_n_plus_1 += log1p(a_plus_n); ++a_plus_n; diff --git a/stan/math/prim/scal/fun/inc_beta_dda.hpp b/stan/math/prim/scal/fun/inc_beta_dda.hpp index a2b084fd4a6..332b8d450c2 100644 --- a/stan/math/prim/scal/fun/inc_beta_dda.hpp +++ b/stan/math/prim/scal/fun/inc_beta_dda.hpp @@ -37,19 +37,25 @@ T inc_beta_dda(T a, T b, T z, T digamma_a, T digamma_ab) { using std::log; using std::pow; - if (b > a) + if (b > a) { if ((0.1 < z && z <= 0.75 && b > 500) || (0.01 < z && z <= 0.1 && b > 2500) - || (0.001 < z && z <= 0.01 && b > 1e5)) + || (0.001 < z && z <= 0.01 && b > 1e5)) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); + } + } - if (z > 0.75 && a < 500) + if (z > 0.75 && a < 500) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); - if (z > 0.9 && a < 2500) + } + if (z > 0.9 && a < 2500) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); - if (z > 0.99 && a < 1e5) + } + if (z > 0.99 && a < 1e5) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); - if (z > 0.999) + } + if (z > 0.999) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); + } double threshold = 1e-10; @@ -80,9 +86,10 @@ T inc_beta_dda(T a, T b, T z, T digamma_a, T digamma_ab) { ++k; summand *= z / k; - if (k > 1e5) + if (k > 1e5) { domain_error("inc_beta_dda", "did not converge within 10000 iterations", "", ""); + } } return inc_beta(a, b, z) * (log(z) + sum_numer / sum_denom); } diff --git a/stan/math/prim/scal/fun/inc_beta_ddb.hpp b/stan/math/prim/scal/fun/inc_beta_ddb.hpp index 0000a9954b1..ed375878111 100644 --- a/stan/math/prim/scal/fun/inc_beta_ddb.hpp +++ b/stan/math/prim/scal/fun/inc_beta_ddb.hpp @@ -40,14 +40,17 @@ T inc_beta_ddb(T a, T b, T z, T digamma_b, T digamma_ab) { using std::log; using std::pow; - if (b > a) + if (b > a) { if ((0.1 < z && z <= 0.75 && b > 500) || (0.01 < z && z <= 0.1 && b > 2500) - || (0.001 < z && z <= 0.01 && b > 1e5)) + || (0.001 < z && z <= 0.01 && b > 1e5)) { return -inc_beta_dda(b, a, 1 - z, digamma_b, digamma_ab); + } + } if ((z > 0.75 && a < 500) || (z > 0.9 && a < 2500) || (z > 0.99 && a < 1e5) - || (z > 0.999)) + || (z > 0.999)) { return -inc_beta_dda(b, a, 1 - z, digamma_b, digamma_ab); + } double threshold = 1e-10; @@ -74,9 +77,10 @@ T inc_beta_ddb(T a, T b, T z, T digamma_b, T digamma_ab) { ++k; summand *= z / k; - if (k > 1e5) + if (k > 1e5) { domain_error("inc_beta_ddb", "did not converge within 100000 iterations", "", ""); + } } return inc_beta(a, b, z) * (log(1 - z) - digamma_b + sum_numer / sum_denom); diff --git a/stan/math/prim/scal/fun/inv_Phi.hpp b/stan/math/prim/scal/fun/inv_Phi.hpp index ae6b35989ad..77715da818e 100644 --- a/stan/math/prim/scal/fun/inv_Phi.hpp +++ b/stan/math/prim/scal/fun/inv_Phi.hpp @@ -28,10 +28,12 @@ namespace math { inline double inv_Phi(double p) { check_bounded("inv_Phi", "Probability variable", p, 0, 1); - if (p < 8e-311) + if (p < 8e-311) { return NEGATIVE_INFTY; - if (p == 1) + } + if (p == 1) { return INFTY; + } static const double a[6] = {-3.969683028665376e+01, 2.209460984245205e+02, -2.759285104469687e+02, diff --git 
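The cross-calls in the inc_beta_dda / inc_beta_ddb hunks above, where each function hands its slow-convergence regions to the other as -inc_beta_dd*(b, a, 1 - z, ...), are the derivative form of the reflection identity for the regularized incomplete beta:

    I_z(a, b) = 1 - I_{1-z}(b, a)
    \quad\Longrightarrow\quad
    \frac{\partial}{\partial a} I_z(a, b) = -\,\frac{\partial}{\partial a} I_{1-z}(b, a)

The right-hand side is a derivative in the second shape parameter of I_{1-z}(b, a), which is exactly what inc_beta_ddb(b, a, 1 - z, ...) evaluates, and the ddb case works symmetrically.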
a/stan/math/prim/scal/fun/inv_logit.hpp b/stan/math/prim/scal/fun/inv_logit.hpp index cd33b71feba..6e4589bfcf1 100644 --- a/stan/math/prim/scal/fun/inv_logit.hpp +++ b/stan/math/prim/scal/fun/inv_logit.hpp @@ -50,8 +50,9 @@ inline double inv_logit(double a) { using std::exp; if (a < 0) { double exp_a = exp(a); - if (a < LOG_EPSILON) + if (a < LOG_EPSILON) { return exp_a; + } return exp_a / (1 + exp_a); } return inv(1 + exp(-a)); diff --git a/stan/math/prim/scal/fun/lb_constrain.hpp b/stan/math/prim/scal/fun/lb_constrain.hpp index 550b41ec150..aaea971283b 100644 --- a/stan/math/prim/scal/fun/lb_constrain.hpp +++ b/stan/math/prim/scal/fun/lb_constrain.hpp @@ -32,8 +32,9 @@ namespace math { template inline return_type_t lb_constrain(const T& x, const L& lb) { using std::exp; - if (lb == NEGATIVE_INFTY) + if (lb == NEGATIVE_INFTY) { return identity_constrain(x); + } return exp(x) + lb; } @@ -56,8 +57,9 @@ inline return_type_t lb_constrain(const T& x, const L& lb) { template inline return_type_t lb_constrain(const T& x, const L& lb, T& lp) { using std::exp; - if (lb == NEGATIVE_INFTY) + if (lb == NEGATIVE_INFTY) { return identity_constrain(x, lp); + } lp += x; return exp(x) + lb; } diff --git a/stan/math/prim/scal/fun/lb_free.hpp b/stan/math/prim/scal/fun/lb_free.hpp index 51b0f753343..ac498ac1e49 100644 --- a/stan/math/prim/scal/fun/lb_free.hpp +++ b/stan/math/prim/scal/fun/lb_free.hpp @@ -29,8 +29,9 @@ namespace math { template inline return_type_t lb_free(const T& y, const L& lb) { using std::log; - if (lb == NEGATIVE_INFTY) + if (lb == NEGATIVE_INFTY) { return identity_free(y); + } check_greater_or_equal("lb_free", "Lower bounded variable", y, lb); return log(y - lb); } diff --git a/stan/math/prim/scal/fun/lmgamma.hpp b/stan/math/prim/scal/fun/lmgamma.hpp index 8644f8ff3df..af492f60727 100644 --- a/stan/math/prim/scal/fun/lmgamma.hpp +++ b/stan/math/prim/scal/fun/lmgamma.hpp @@ -55,8 +55,9 @@ template inline return_type_t lmgamma(int k, T x) { return_type_t result = k * (k - 1) * LOG_PI_OVER_FOUR; - for (int j = 1; j <= k; ++j) + for (int j = 1; j <= k; ++j) { result += lgamma(x + (1.0 - j) / 2.0); + } return result; } diff --git a/stan/math/prim/scal/fun/locscale_constrain.hpp b/stan/math/prim/scal/fun/locscale_constrain.hpp index 622e2c2ec12..2edf7070de8 100644 --- a/stan/math/prim/scal/fun/locscale_constrain.hpp +++ b/stan/math/prim/scal/fun/locscale_constrain.hpp @@ -40,8 +40,9 @@ inline return_type_t locscale_constrain(const T& x, const M& mu, const S& sigma) { check_finite("locscale_constrain", "location", mu); if (sigma == 1) { - if (mu == 0) + if (mu == 0) { return identity_constrain(x); + } return mu + x; } check_positive_finite("locscale_constrain", "scale", sigma); @@ -80,8 +81,9 @@ inline return_type_t locscale_constrain(const T& x, const M& mu, using std::log; check_finite("locscale_constrain", "location", mu); if (sigma == 1) { - if (mu == 0) + if (mu == 0) { return identity_constrain(x); + } return mu + x; } check_positive_finite("locscale_constrain", "scale", sigma); diff --git a/stan/math/prim/scal/fun/locscale_free.hpp b/stan/math/prim/scal/fun/locscale_free.hpp index 79cd80bdd9b..685c132cee7 100644 --- a/stan/math/prim/scal/fun/locscale_free.hpp +++ b/stan/math/prim/scal/fun/locscale_free.hpp @@ -43,8 +43,9 @@ inline return_type_t locscale_free(const T& y, const L& mu, const S& sigma) { check_finite("locscale_free", "location", mu); if (sigma == 1) { - if (mu == 0) + if (mu == 0) { return identity_free(y); + } return y - mu; } check_positive_finite("locscale_free", 
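As a quick reference for the lb_constrain / lb_free pair above: constrain maps an unconstrained x to y > lb via y = exp(x) + lb, free inverts it with x = log(y - lb), and the lp += x line is the log absolute Jacobian, since log|dy/dx| = log(exp(x)) = x. A minimal round-trip check, with all names local to the sketch:

    #include <cassert>
    #include <cmath>

    int main() {
      double lb = 2.5;
      double x = -0.75;                  // unconstrained
      double y = std::exp(x) + lb;       // constrain: y > lb
      double x_back = std::log(y - lb);  // free: recover x
      assert(std::fabs(x - x_back) < 1e-12);
      double log_jacobian = x;           // log|dy/dx| = log(exp(x)) = x
      (void)log_jacobian;
    }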
"scale", sigma); diff --git a/stan/math/prim/scal/fun/log1m.hpp b/stan/math/prim/scal/fun/log1m.hpp index 8bed9b08718..c909100fb9b 100644 --- a/stan/math/prim/scal/fun/log1m.hpp +++ b/stan/math/prim/scal/fun/log1m.hpp @@ -40,8 +40,9 @@ namespace math { * @throw std::overflow_error If the computation overflows. */ inline double log1m(double x) { - if (!is_nan(x)) + if (!is_nan(x)) { check_less_or_equal("log1m", "x", x, 1); + } return stan::math::log1p(-x); } diff --git a/stan/math/prim/scal/fun/log1m_exp.hpp b/stan/math/prim/scal/fun/log1m_exp.hpp index 181441a91b9..c82bcb13a14 100644 --- a/stan/math/prim/scal/fun/log1m_exp.hpp +++ b/stan/math/prim/scal/fun/log1m_exp.hpp @@ -46,12 +46,13 @@ namespace math { inline double log1m_exp(double a) { using std::exp; using std::log; - if (a >= 0) + if (a >= 0) { return NOT_A_NUMBER; - else if (a > -0.693147) + } else if (a > -0.693147) { return log(-expm1(a)); // 0.693147 ~= log(2) - else + } else { return log1m(exp(a)); + } } } // namespace math diff --git a/stan/math/prim/scal/fun/log1m_inv_logit.hpp b/stan/math/prim/scal/fun/log1m_inv_logit.hpp index fa0dd7b8947..214080264e1 100644 --- a/stan/math/prim/scal/fun/log1m_inv_logit.hpp +++ b/stan/math/prim/scal/fun/log1m_inv_logit.hpp @@ -34,8 +34,9 @@ namespace math { */ inline double log1m_inv_logit(double u) { using std::exp; - if (u > 0.0) + if (u > 0.0) { return -u - log1p(exp(-u)); // prevent underflow + } return -log1p(exp(u)); } diff --git a/stan/math/prim/scal/fun/log1p_exp.hpp b/stan/math/prim/scal/fun/log1p_exp.hpp index 64b435287c6..eff51abc5d2 100644 --- a/stan/math/prim/scal/fun/log1p_exp.hpp +++ b/stan/math/prim/scal/fun/log1p_exp.hpp @@ -43,8 +43,9 @@ namespace math { inline double log1p_exp(double a) { using std::exp; // like log_sum_exp below with b=0.0; prevents underflow - if (a > 0.0) + if (a > 0.0) { return a + log1p(exp(-a)); + } return log1p(exp(a)); } diff --git a/stan/math/prim/scal/fun/log_diff_exp.hpp b/stan/math/prim/scal/fun/log_diff_exp.hpp index 3e7b35dc824..9467b58837d 100644 --- a/stan/math/prim/scal/fun/log_diff_exp.hpp +++ b/stan/math/prim/scal/fun/log_diff_exp.hpp @@ -46,8 +46,9 @@ namespace math { */ template inline return_type_t log_diff_exp(const T1 x, const T2 y) { - if (x <= y) + if (x <= y) { return (x < INFTY && x == y) ? 
NEGATIVE_INFTY : NOT_A_NUMBER; + } return x + log1m_exp(y - x); } diff --git a/stan/math/prim/scal/fun/log_falling_factorial.hpp b/stan/math/prim/scal/fun/log_falling_factorial.hpp index 8c42e8b297a..19c23a5368d 100644 --- a/stan/math/prim/scal/fun/log_falling_factorial.hpp +++ b/stan/math/prim/scal/fun/log_falling_factorial.hpp @@ -53,8 +53,9 @@ namespace math { */ template inline return_type_t log_falling_factorial(const T1 x, const T2 n) { - if (is_any_nan(x, n)) + if (is_any_nan(x, n)) { return NOT_A_NUMBER; + } static const char* function = "log_falling_factorial"; check_positive(function, "first argument", x); return lgamma(x + 1) - lgamma(x - n + 1); diff --git a/stan/math/prim/scal/fun/log_inv_logit.hpp b/stan/math/prim/scal/fun/log_inv_logit.hpp index 8fc2104eb26..237a30f87d7 100644 --- a/stan/math/prim/scal/fun/log_inv_logit.hpp +++ b/stan/math/prim/scal/fun/log_inv_logit.hpp @@ -31,8 +31,9 @@ namespace math { */ inline double log_inv_logit(double u) { using std::exp; - if (u < 0.0) + if (u < 0.0) { return u - log1p(exp(u)); // prevent underflow + } return -log1p(exp(-u)); } diff --git a/stan/math/prim/scal/fun/log_modified_bessel_first_kind.hpp b/stan/math/prim/scal/fun/log_modified_bessel_first_kind.hpp index ae30627f978..cc5788f6e92 100644 --- a/stan/math/prim/scal/fun/log_modified_bessel_first_kind.hpp +++ b/stan/math/prim/scal/fun/log_modified_bessel_first_kind.hpp @@ -50,17 +50,20 @@ inline return_type_t log_modified_bessel_first_kind( using std::log; using std::sqrt; - typedef return_type_t T; + using T = return_type_t; if (z == 0) { - if (v == 0) + if (v == 0) { return 0.0; - if (v > 0) + } + if (v > 0) { return NEGATIVE_INFTY; + } return INFTY; } - if (is_inf(z)) + if (is_inf(z)) { return z; + } if (v == 0) { // WARNING: will not autodiff for v = 0 correctly // modified from Boost's bessel_i0_imp in the double precision case, diff --git a/stan/math/prim/scal/fun/log_rising_factorial.hpp b/stan/math/prim/scal/fun/log_rising_factorial.hpp index f3cc2ef0435..56735749b08 100644 --- a/stan/math/prim/scal/fun/log_rising_factorial.hpp +++ b/stan/math/prim/scal/fun/log_rising_factorial.hpp @@ -51,8 +51,9 @@ namespace math { */ template inline return_type_t log_rising_factorial(const T1& x, const T2& n) { - if (is_any_nan(x, n)) + if (is_any_nan(x, n)) { return std::numeric_limits::quiet_NaN(); + } static const char* function = "log_rising_factorial"; check_positive(function, "first argument", x); return lgamma(x + n) - lgamma(x); diff --git a/stan/math/prim/scal/fun/log_sum_exp.hpp b/stan/math/prim/scal/fun/log_sum_exp.hpp index 29b8335f8df..63e9af29979 100644 --- a/stan/math/prim/scal/fun/log_sum_exp.hpp +++ b/stan/math/prim/scal/fun/log_sum_exp.hpp @@ -46,12 +46,15 @@ namespace math { */ template inline return_type_t log_sum_exp(const T2& a, const T1& b) { - if (a == NEGATIVE_INFTY) + if (a == NEGATIVE_INFTY) { return b; - if (a == INFTY && b == INFTY) + } + if (a == INFTY && b == INFTY) { return INFTY; - if (a > b) + } + if (a > b) { return a + log1p_exp(b - a); + } return b + log1p_exp(a - b); } diff --git a/stan/math/prim/scal/fun/lub_constrain.hpp b/stan/math/prim/scal/fun/lub_constrain.hpp index b2ef90da846..8494662bf8b 100644 --- a/stan/math/prim/scal/fun/lub_constrain.hpp +++ b/stan/math/prim/scal/fun/lub_constrain.hpp @@ -45,22 +45,26 @@ inline return_type_t lub_constrain(const T& x, const L& lb, const U& ub) { using std::exp; check_less("lub_constrain", "lb", lb, ub); - if (lb == NEGATIVE_INFTY) + if (lb == NEGATIVE_INFTY) { return ub_constrain(x, ub); - if (ub == 
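The two-argument log_sum_exp hunk above computes log(exp(a) + exp(b)) by factoring out the larger argument, so no exponential ever overflows. A standalone sketch; log_sum_exp_sketch is an illustrative name, and the real function uses the NEGATIVE_INFTY / INFTY constants and log1p_exp instead of the raw calls shown here.

    #include <cmath>
    #include <cstdio>

    inline double log_sum_exp_sketch(double a, double b) {
      if (a == -INFINITY) {
        return b;  // exp(a) contributes nothing
      }
      if (a == INFINITY && b == INFINITY) {
        return INFINITY;
      }
      if (a > b) {
        return a + std::log1p(std::exp(b - a));  // b - a <= 0, so exp cannot overflow
      }
      return b + std::log1p(std::exp(a - b));
    }

    int main() {
      // The naive log(exp(1000) + exp(999)) overflows; the factored form does not.
      std::printf("%.6f\n", log_sum_exp_sketch(1000.0, 999.0));  // 1000.313262
    }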
INFTY) + } + if (ub == INFTY) { return lb_constrain(x, lb); + } T inv_logit_x; if (x > 0) { inv_logit_x = inv_logit(x); // Prevent x from reaching one unless it really really should. - if ((x < INFTY) && (inv_logit_x == 1)) + if ((x < INFTY) && (inv_logit_x == 1)) { inv_logit_x = 1 - 1e-15; + } } else { inv_logit_x = inv_logit(x); // Prevent x from reaching zero unless it really really should. - if ((x > NEGATIVE_INFTY) && (inv_logit_x == 0)) + if ((x > NEGATIVE_INFTY) && (inv_logit_x == 0)) { inv_logit_x = 1e-15; + } } return fma((ub - lb), inv_logit_x, lb); } @@ -112,25 +116,29 @@ inline return_type_t lub_constrain(const T& x, const L& lb, using std::exp; using std::log; check_less("lub_constrain", "lb", lb, ub); - if (lb == NEGATIVE_INFTY) + if (lb == NEGATIVE_INFTY) { return ub_constrain(x, ub, lp); - if (ub == INFTY) + } + if (ub == INFTY) { return lb_constrain(x, lb, lp); + } T inv_logit_x; if (x > 0) { T exp_minus_x = exp(-x); inv_logit_x = inv_logit(x); lp += log(ub - lb) - x - 2 * log1p(exp_minus_x); // Prevent x from reaching one unless it really really should. - if ((x < INFTY) && (inv_logit_x == 1)) + if ((x < INFTY) && (inv_logit_x == 1)) { inv_logit_x = 1 - 1e-15; + } } else { T exp_x = exp(x); inv_logit_x = inv_logit(x); lp += log(ub - lb) + x - 2 * log1p(exp_x); // Prevent x from reaching zero unless it really really should. - if ((x > NEGATIVE_INFTY) && (inv_logit_x == 0)) + if ((x > NEGATIVE_INFTY) && (inv_logit_x == 0)) { inv_logit_x = 1e-15; + } } return fma((ub - lb), inv_logit_x, lb); } diff --git a/stan/math/prim/scal/fun/lub_free.hpp b/stan/math/prim/scal/fun/lub_free.hpp index f23b91460e2..b18e13b6f0b 100644 --- a/stan/math/prim/scal/fun/lub_free.hpp +++ b/stan/math/prim/scal/fun/lub_free.hpp @@ -46,10 +46,12 @@ namespace math { template inline return_type_t lub_free(const T& y, const L& lb, const U& ub) { check_bounded("lub_free", "Bounded variable", y, lb, ub); - if (lb == NEGATIVE_INFTY) + if (lb == NEGATIVE_INFTY) { return ub_free(y, ub); - if (ub == INFTY) + } + if (ub == INFTY) { return lb_free(y, lb); + } return logit((y - lb) / (ub - lb)); } diff --git a/stan/math/prim/scal/fun/modulus.hpp b/stan/math/prim/scal/fun/modulus.hpp index 16c54ac7515..04d2a9febf4 100644 --- a/stan/math/prim/scal/fun/modulus.hpp +++ b/stan/math/prim/scal/fun/modulus.hpp @@ -10,8 +10,9 @@ namespace stan { namespace math { inline int modulus(int x, int y) { - if (unlikely(y == 0)) + if (unlikely(y == 0)) { domain_error("modulus", "divisor is", 0, ""); + } return x % y; } diff --git a/stan/math/prim/scal/fun/multiply_log.hpp b/stan/math/prim/scal/fun/multiply_log.hpp index 7c8896775e3..664c2e5b2a8 100644 --- a/stan/math/prim/scal/fun/multiply_log.hpp +++ b/stan/math/prim/scal/fun/multiply_log.hpp @@ -50,8 +50,9 @@ namespace math { template inline return_type_t multiply_log(const T_a a, const T_b b) { using std::log; - if (b == 0.0 && a == 0.0) + if (b == 0.0 && a == 0.0) { return 0.0; + } return a * log(b); } diff --git a/stan/math/prim/scal/fun/offset_multiplier_constrain.hpp b/stan/math/prim/scal/fun/offset_multiplier_constrain.hpp index 4e4bac79f5a..024782d258a 100644 --- a/stan/math/prim/scal/fun/offset_multiplier_constrain.hpp +++ b/stan/math/prim/scal/fun/offset_multiplier_constrain.hpp @@ -43,8 +43,9 @@ inline return_type_t offset_multiplier_constrain(const T& x, const S& sigma) { check_finite("offset_multiplier_constrain", "offset", mu); if (sigma == 1) { - if (mu == 0) + if (mu == 0) { return identity_constrain(x); + } return mu + x; } 
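The lp increment in the lub_constrain(x, lb, ub, lp) hunk above is the log absolute Jacobian of y = lb + (ub - lb) * inv_logit(x) written in a cancellation-free form: with s = inv_logit(x), log|dy/dx| = log(ub - lb) + log(s) + log(1 - s), which for x > 0 simplifies to log(ub - lb) - x - 2 * log1p(exp(-x)). A quick numerical check of that identity, using local names only:

    #include <cmath>
    #include <cstdio>

    int main() {
      double lb = -1.0, ub = 3.0, x = 2.4;
      double s = 1.0 / (1.0 + std::exp(-x));  // inv_logit(x)
      double direct = std::log(ub - lb) + std::log(s) + std::log(1 - s);
      double stable = std::log(ub - lb) - x - 2 * std::log1p(std::exp(-x));
      std::printf("%.15f\n%.15f\n", direct, stable);  // agree to double precision
    }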
check_positive_finite("offset_multiplier_constrain", "multiplier", sigma); @@ -85,8 +86,9 @@ inline return_type_t offset_multiplier_constrain(const T& x, using std::log; check_finite("offset_multiplier_constrain", "offset", mu); if (sigma == 1) { - if (mu == 0) + if (mu == 0) { return identity_constrain(x); + } return mu + x; } check_positive_finite("offset_multiplier_constrain", "multiplier", sigma); diff --git a/stan/math/prim/scal/fun/offset_multiplier_free.hpp b/stan/math/prim/scal/fun/offset_multiplier_free.hpp index 1167ad3473b..6e92b645e63 100644 --- a/stan/math/prim/scal/fun/offset_multiplier_free.hpp +++ b/stan/math/prim/scal/fun/offset_multiplier_free.hpp @@ -43,8 +43,9 @@ inline return_type_t offset_multiplier_free(const T& y, const L& mu, const S& sigma) { check_finite("offset_multiplier_free", "offset", mu); if (sigma == 1) { - if (mu == 0) + if (mu == 0) { return identity_free(y); + } return y - mu; } check_positive_finite("offset_multiplier_free", "multiplier", sigma); diff --git a/stan/math/prim/scal/fun/promote_scalar_type.hpp b/stan/math/prim/scal/fun/promote_scalar_type.hpp index ee3edf210c2..1f8cc7841ef 100644 --- a/stan/math/prim/scal/fun/promote_scalar_type.hpp +++ b/stan/math/prim/scal/fun/promote_scalar_type.hpp @@ -17,7 +17,7 @@ struct promote_scalar_type { /** * The promoted type. */ - typedef T type; + using type = T; }; } // namespace math diff --git a/stan/math/prim/scal/fun/tgamma.hpp b/stan/math/prim/scal/fun/tgamma.hpp index 6830834432f..776e635f3b0 100644 --- a/stan/math/prim/scal/fun/tgamma.hpp +++ b/stan/math/prim/scal/fun/tgamma.hpp @@ -16,8 +16,9 @@ namespace math { * @return The gamma function applied to argument. */ inline double tgamma(double x) { - if (x == 0.0 || is_nonpositive_integer(x)) + if (x == 0.0 || is_nonpositive_integer(x)) { domain_error("tgamma", "x", x, "x == 0 or negative integer"); + } return std::tgamma(x); } diff --git a/stan/math/prim/scal/fun/trigamma.hpp b/stan/math/prim/scal/fun/trigamma.hpp index e6202262d2a..ea4adaf9cd9 100644 --- a/stan/math/prim/scal/fun/trigamma.hpp +++ b/stan/math/prim/scal/fun/trigamma.hpp @@ -61,8 +61,9 @@ inline T trigamma_impl(const T& x) { } // small value approximation if x <= small. 
- if (x <= small) + if (x <= small) { return inv_square(x); + } // use recurrence relation until x >= large // see http://mathworld.wolfram.com/PolygammaFunction.html diff --git a/stan/math/prim/scal/fun/ub_constrain.hpp b/stan/math/prim/scal/fun/ub_constrain.hpp index 1b054428d7a..badf291b8ab 100644 --- a/stan/math/prim/scal/fun/ub_constrain.hpp +++ b/stan/math/prim/scal/fun/ub_constrain.hpp @@ -32,8 +32,9 @@ namespace math { template inline return_type_t ub_constrain(const T& x, const U& ub) { using std::exp; - if (ub == INFTY) + if (ub == INFTY) { return identity_constrain(x); + } return ub - exp(x); } @@ -63,8 +64,9 @@ inline return_type_t ub_constrain(const T& x, const U& ub) { template inline return_type_t ub_constrain(const T& x, const U& ub, T& lp) { using std::exp; - if (ub == INFTY) + if (ub == INFTY) { return identity_constrain(x, lp); + } lp += x; return ub - exp(x); } diff --git a/stan/math/prim/scal/fun/ub_free.hpp b/stan/math/prim/scal/fun/ub_free.hpp index 43fb5d5eca2..09630aa4fbb 100644 --- a/stan/math/prim/scal/fun/ub_free.hpp +++ b/stan/math/prim/scal/fun/ub_free.hpp @@ -36,8 +36,9 @@ namespace math { template inline return_type_t ub_free(const T& y, const U& ub) { using std::log; - if (ub == INFTY) + if (ub == INFTY) { return identity_free(y); + } check_less_or_equal("ub_free", "Upper bounded variable", y, ub); return log(ub - y); } diff --git a/stan/math/prim/scal/meta/StdVectorBuilder.hpp b/stan/math/prim/scal/meta/StdVectorBuilder.hpp index 22f3af93095..9a3249e5523 100644 --- a/stan/math/prim/scal/meta/StdVectorBuilder.hpp +++ b/stan/math/prim/scal/meta/StdVectorBuilder.hpp @@ -39,7 +39,7 @@ class StdVectorBuilder { helper; public: - typedef typename helper::type type; + using type = typename helper::type; helper a; explicit StdVectorBuilder(size_t n) : a(n) {} diff --git a/stan/math/prim/scal/meta/VectorBuilder.hpp b/stan/math/prim/scal/meta/VectorBuilder.hpp index 8dde92059cd..ad8ca9bd950 100644 --- a/stan/math/prim/scal/meta/VectorBuilder.hpp +++ b/stan/math/prim/scal/meta/VectorBuilder.hpp @@ -32,7 +32,7 @@ class VectorBuilder { helper; public: - typedef typename helper::type type; + using type = typename helper::type; helper a; explicit VectorBuilder(size_t n) : a(n) {} diff --git a/stan/math/prim/scal/meta/VectorBuilderHelper.hpp b/stan/math/prim/scal/meta/VectorBuilderHelper.hpp index a79e0f6a0f6..b1cdb0cc68c 100644 --- a/stan/math/prim/scal/meta/VectorBuilderHelper.hpp +++ b/stan/math/prim/scal/meta/VectorBuilderHelper.hpp @@ -29,7 +29,7 @@ class VectorBuilderHelper { throw std::logic_error("used is false. this should never be called"); } - typedef T1 type; + using type = T1; inline type& data() { throw std::logic_error("used is false. this should never be called"); @@ -45,7 +45,7 @@ class VectorBuilderHelper { explicit VectorBuilderHelper(size_t /* n */) : x_(0) {} T1& operator[](size_t /* i */) { return x_; } - typedef T1 type; + using type = T1; inline type& data() { return x_; } }; diff --git a/stan/math/prim/scal/meta/child_type.hpp b/stan/math/prim/scal/meta/child_type.hpp index 9788cfb1cad..741d0c5255f 100644 --- a/stan/math/prim/scal/meta/child_type.hpp +++ b/stan/math/prim/scal/meta/child_type.hpp @@ -16,7 +16,7 @@ namespace math { template struct child_type { - typedef double type; + using type = double; }; /** @@ -31,7 +31,7 @@ struct child_type { template