From 3ec8ab62b2df1ec81490787165f27c27b367748e Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 4 Mar 2020 10:55:27 -0500 Subject: [PATCH 01/53] Port relevant laplace files and fix some unit tests. --- stan/math/laplace/laplace.hpp | 8 + stan/math/laplace/laplace_likelihood.hpp | 203 ++++ stan/math/laplace/laplace_marginal.hpp | 472 +++++++++ .../laplace/laplace_marginal_bernoulli.hpp | 100 ++ .../math/laplace/laplace_marginal_poisson.hpp | 80 ++ stan/math/laplace/laplace_pseudo_target.hpp | 100 ++ .../prob/laplace_approx_poisson_rng.hpp | 68 ++ stan/math/laplace/prob/laplace_approx_rng.hpp | 73 ++ .../laplace/aki_disease_data/spatial1.txt | 911 ++++++++++++++++++ .../unit/math/laplace/aki_disease_data/x1.csv | 1 + .../unit/math/laplace/aki_disease_data/x2.csv | 1 + test/unit/math/laplace/aki_disease_data/y.csv | 1 + .../unit/math/laplace/aki_disease_data/ye.csv | 1 + .../unit/math/laplace/aki_synth_data/synth.tr | 251 +++++ .../math/laplace/aki_synth_data/testdata.csv | 501 ++++++++++ test/unit/math/laplace/aki_synth_data/x1.csv | 1 + test/unit/math/laplace/aki_synth_data/x2.csv | 1 + test/unit/math/laplace/aki_synth_data/y.csv | 1 + test/unit/math/laplace/data_cpp/ReadMe.rtf | 13 + test/unit/math/laplace/data_cpp/index_10.csv | 1 + test/unit/math/laplace/data_cpp/index_100.csv | 1 + test/unit/math/laplace/data_cpp/index_20.csv | 1 + test/unit/math/laplace/data_cpp/index_200.csv | 1 + test/unit/math/laplace/data_cpp/index_30.csv | 1 + test/unit/math/laplace/data_cpp/index_40.csv | 1 + test/unit/math/laplace/data_cpp/index_50.csv | 1 + test/unit/math/laplace/data_cpp/index_500.csv | 1 + test/unit/math/laplace/data_cpp/m_10.csv | 1 + test/unit/math/laplace/data_cpp/m_100.csv | 1 + test/unit/math/laplace/data_cpp/m_20.csv | 1 + test/unit/math/laplace/data_cpp/m_200.csv | 1 + test/unit/math/laplace/data_cpp/m_30.csv | 1 + test/unit/math/laplace/data_cpp/m_40.csv | 1 + test/unit/math/laplace/data_cpp/m_50.csv | 1 + test/unit/math/laplace/data_cpp/m_500.csv | 1 + test/unit/math/laplace/data_cpp/sums_10.csv | 1 + test/unit/math/laplace/data_cpp/sums_100.csv | 1 + test/unit/math/laplace/data_cpp/sums_20.csv | 1 + test/unit/math/laplace/data_cpp/sums_200.csv | 1 + test/unit/math/laplace/data_cpp/sums_30.csv | 1 + test/unit/math/laplace/data_cpp/sums_40.csv | 1 + test/unit/math/laplace/data_cpp/sums_50.csv | 1 + test/unit/math/laplace/data_cpp/sums_500.csv | 1 + test/unit/math/laplace/data_cpp/y_10.csv | 1 + test/unit/math/laplace/data_cpp/y_100.csv | 1 + test/unit/math/laplace/data_cpp/y_20.csv | 1 + test/unit/math/laplace/data_cpp/y_200.csv | 1 + test/unit/math/laplace/data_cpp/y_30.csv | 1 + test/unit/math/laplace/data_cpp/y_40.csv | 1 + test/unit/math/laplace/data_cpp/y_50.csv | 1 + test/unit/math/laplace/data_cpp/y_500.csv | 1 + test/unit/math/laplace/disease_map_test.cpp | 122 +++ .../laplace_approx_poisson_rng_test.cpp | 137 +++ .../laplace_marginal_bernoulli_test.cpp | 186 ++++ .../laplace/laplace_marginal_poisson_test.cpp | 140 +++ test/unit/math/laplace/laplace_skim_test.cpp | 237 +++++ test/unit/math/laplace/laplace_utility.hpp | 283 ++++++ test/unit/math/laplace/skim_data/X.csv | 1 + test/unit/math/laplace/skim_data/lambda.csv | 1 + test/unit/math/laplace/skim_data/y.csv | 1 + 60 files changed, 3927 insertions(+) create mode 100644 stan/math/laplace/laplace.hpp create mode 100644 stan/math/laplace/laplace_likelihood.hpp create mode 100644 stan/math/laplace/laplace_marginal.hpp create mode 100644 stan/math/laplace/laplace_marginal_bernoulli.hpp create mode 100644 
stan/math/laplace/laplace_marginal_poisson.hpp create mode 100644 stan/math/laplace/laplace_pseudo_target.hpp create mode 100644 stan/math/laplace/prob/laplace_approx_poisson_rng.hpp create mode 100644 stan/math/laplace/prob/laplace_approx_rng.hpp create mode 100644 test/unit/math/laplace/aki_disease_data/spatial1.txt create mode 100644 test/unit/math/laplace/aki_disease_data/x1.csv create mode 100644 test/unit/math/laplace/aki_disease_data/x2.csv create mode 100644 test/unit/math/laplace/aki_disease_data/y.csv create mode 100644 test/unit/math/laplace/aki_disease_data/ye.csv create mode 100644 test/unit/math/laplace/aki_synth_data/synth.tr create mode 100644 test/unit/math/laplace/aki_synth_data/testdata.csv create mode 100644 test/unit/math/laplace/aki_synth_data/x1.csv create mode 100644 test/unit/math/laplace/aki_synth_data/x2.csv create mode 100644 test/unit/math/laplace/aki_synth_data/y.csv create mode 100644 test/unit/math/laplace/data_cpp/ReadMe.rtf create mode 100644 test/unit/math/laplace/data_cpp/index_10.csv create mode 100644 test/unit/math/laplace/data_cpp/index_100.csv create mode 100644 test/unit/math/laplace/data_cpp/index_20.csv create mode 100644 test/unit/math/laplace/data_cpp/index_200.csv create mode 100644 test/unit/math/laplace/data_cpp/index_30.csv create mode 100644 test/unit/math/laplace/data_cpp/index_40.csv create mode 100644 test/unit/math/laplace/data_cpp/index_50.csv create mode 100644 test/unit/math/laplace/data_cpp/index_500.csv create mode 100644 test/unit/math/laplace/data_cpp/m_10.csv create mode 100644 test/unit/math/laplace/data_cpp/m_100.csv create mode 100644 test/unit/math/laplace/data_cpp/m_20.csv create mode 100644 test/unit/math/laplace/data_cpp/m_200.csv create mode 100644 test/unit/math/laplace/data_cpp/m_30.csv create mode 100644 test/unit/math/laplace/data_cpp/m_40.csv create mode 100644 test/unit/math/laplace/data_cpp/m_50.csv create mode 100644 test/unit/math/laplace/data_cpp/m_500.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_10.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_100.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_20.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_200.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_30.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_40.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_50.csv create mode 100644 test/unit/math/laplace/data_cpp/sums_500.csv create mode 100644 test/unit/math/laplace/data_cpp/y_10.csv create mode 100644 test/unit/math/laplace/data_cpp/y_100.csv create mode 100644 test/unit/math/laplace/data_cpp/y_20.csv create mode 100644 test/unit/math/laplace/data_cpp/y_200.csv create mode 100644 test/unit/math/laplace/data_cpp/y_30.csv create mode 100644 test/unit/math/laplace/data_cpp/y_40.csv create mode 100644 test/unit/math/laplace/data_cpp/y_50.csv create mode 100644 test/unit/math/laplace/data_cpp/y_500.csv create mode 100755 test/unit/math/laplace/disease_map_test.cpp create mode 100644 test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp create mode 100755 test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp create mode 100644 test/unit/math/laplace/laplace_marginal_poisson_test.cpp create mode 100755 test/unit/math/laplace/laplace_skim_test.cpp create mode 100644 test/unit/math/laplace/laplace_utility.hpp create mode 100644 test/unit/math/laplace/skim_data/X.csv create mode 100644 test/unit/math/laplace/skim_data/lambda.csv create mode 100644 
test/unit/math/laplace/skim_data/y.csv diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp new file mode 100644 index 00000000000..3aa0a88d9e0 --- /dev/null +++ b/stan/math/laplace/laplace.hpp @@ -0,0 +1,8 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_HPP +#define STAN_MATH_LAPLACE_LAPLACE_HPP + +#include +#include +#include + +#endif diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp new file mode 100644 index 00000000000..89d48e61e6b --- /dev/null +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -0,0 +1,203 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP +#define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP + +#include + +namespace stan { +namespace math { + +// TO DO: create a parent structure, with each likelihood +// function acting as a child structure. + +/** + * Create an Eigen vector whose elements are all ones. + */ +// Eigen::VectorXd init_one(int n) { +// Eigen::VectorXd ones(n); +// for (int i = 0; i < n; i++) ones(i) = 1; +// return ones; +// } + +/** + * A structure to compute the log density, first, second, + * and third-order derivatives for a log poisson likelihood + * whith multiple groups. + * This structure can be passed to the the laplace_marginal function. + * Uses sufficient statistics for the data. + */ +struct diff_poisson_log { + /* The number of samples in each group. */ + Eigen::VectorXd n_samples_; + /* The sum of counts in each group. */ + Eigen::VectorXd sums_; + /* exposure, i.e. off-set term for the latent variable. */ + Eigen::VectorXd log_exposure_; + + diff_poisson_log(const Eigen::VectorXd& n_samples, + const Eigen::VectorXd& sums) + : n_samples_(n_samples), sums_(sums) { + log_exposure_ = Eigen::VectorXd::Zero(sums.size()); + } + + diff_poisson_log(const Eigen::VectorXd& n_samples, + const Eigen::VectorXd& sums, + const Eigen::VectorXd& log_exposure) + : n_samples_(n_samples), sums_(sums), log_exposure_(log_exposure) { } + + /** + * Return the log density. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. + * @return the log density. + */ + template + T log_likelihood (const Eigen::Matrix& theta) + const { + double factorial_term = 0; + for (int i = 0; i < sums_.size(); i++) + factorial_term += lgamma(sums_(i) + 1); + Eigen::Matrix shifted_mean = theta + log_exposure_; + + return - factorial_term + + (shifted_mean).dot(sums_) - n_samples_.dot(exp(shifted_mean)); + } + + /** + * Returns the gradient of the log density, and the hessian. + * Since the latter is diagonal, it is stored inside a vector. + * The two objects are computed together, because we always use + * both when solving the Newton iteration of the Laplace + * approximation, and to avoid redundant computation. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. + * @param[in, out] gradient + * @param[in, out] hessian diagonal, so stored in a vector. + */ + template + void diff (const Eigen::Matrix& theta, + Eigen::Matrix& gradient, + Eigen::Matrix& hessian) const { + Eigen::Matrix + common_term = n_samples_.cwiseProduct(exp(theta + log_exposure_)); + + gradient = sums_ - common_term; + hessian = - common_term; + } + + /** + * Returns the third derivative tensor. Because it is ("cubic") diagonal, + * the object is stored in a vector. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. 
+ * @return A vector containing the non-zero elements of the third
+ * derivative tensor.
+ */
+  template <typename T>
+  Eigen::Matrix<T, Eigen::Dynamic, 1>
+  third_diff(const Eigen::Matrix<T, Eigen::Dynamic, 1>& theta) const {
+    return -n_samples_.cwiseProduct(exp(theta + log_exposure_));
+  }
+};
+
+/**
+ * A structure to compute the log density, first, second,
+ * and third-order derivatives for a Bernoulli logistic likelihood
+ * with multiple groups.
+ * This structure can be passed to the laplace_marginal function.
+ * Uses sufficient statistics for the data.
+ */
+struct diff_logistic_log {
+  /* The number of samples in each group. */
+  Eigen::VectorXd n_samples_;
+  /* The sum of counts in each group. */
+  Eigen::VectorXd sums_;
+
+  diff_logistic_log(const Eigen::VectorXd& n_samples,
+                    const Eigen::VectorXd& sums)
+    : n_samples_(n_samples), sums_(sums) { }
+
+  /**
+   * Return the log density.
+   * @tparam T type of the Bernoulli logistic parameter.
+   * @param[in] theta Bernoulli logistic parameters for each group.
+   * @return the log density.
+   */
+  template <typename T>
+  T log_likelihood (const Eigen::Matrix<T, Eigen::Dynamic, 1>& theta)
+    const {
+    Eigen::VectorXd one = rep_vector(1, theta.size());
+    return sum(theta.cwiseProduct(sums_)
+                 - n_samples_.cwiseProduct(log(one + exp(theta))));
+  }
+
+  /**
+   * Returns the gradient of the log density, and the Hessian.
+   * Since the latter is diagonal, it is stored inside a vector.
+   * The two objects are computed together, because we always use
+   * both when solving the Newton iteration of the Laplace
+   * approximation, and to avoid redundant computation.
+   * @tparam T type of the Bernoulli logistic parameter.
+   * @param[in] theta Bernoulli logistic parameters for each group.
+   * @param[in, out] gradient
+   * @param[in, out] hessian diagonal, so stored in a vector.
+   */
+  template <typename T>
+  void diff (const Eigen::Matrix<T, Eigen::Dynamic, 1>& theta,
+             Eigen::Matrix<T, Eigen::Dynamic, 1>& gradient,
+             Eigen::Matrix<T, Eigen::Dynamic, 1>& hessian) const {
+    Eigen::Matrix<T, Eigen::Dynamic, 1> exp_theta = exp(theta);
+    Eigen::VectorXd one = rep_vector(1, theta.size());
+
+    gradient = sums_ - n_samples_.cwiseProduct(inv_logit(theta));
+
+    hessian = - n_samples_.cwiseProduct(elt_divide(exp_theta,
+                                          square(one + exp_theta)));
+  }
+
+  /**
+   * Returns the third derivative tensor. Because it is (cubic) diagonal,
+   * the object is stored in a vector.
+   * @tparam T type of the Bernoulli logistic parameter.
+   * @param[in] theta Bernoulli logistic parameters for each group.
+   * @return A vector containing the non-zero elements of the third
+   * derivative tensor.
+   */
+  template <typename T>
+  Eigen::Matrix<T, Eigen::Dynamic, 1>
+  third_diff(const Eigen::Matrix<T, Eigen::Dynamic, 1>& theta) const {
+    Eigen::VectorXd exp_theta = exp(theta);
+    Eigen::VectorXd one = rep_vector(1, theta.size());
+    Eigen::VectorXd numerator = exp_theta.cwiseProduct(exp_theta - one);
+    Eigen::VectorXd denominator = square(one + exp_theta)
+                                    .cwiseProduct(one + exp_theta);
+
+    return n_samples_.cwiseProduct(elt_divide(numerator, denominator));
+  }
+};
+
+// TO DO: delete this structure.
+// To experiment with the prototype, provide a built-in covariance
+// function. In the final version, the user will pass the covariance
+// function.
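As a sanity check, the arithmetic inside diff_poisson_log can be reproduced with plain Eigen. The sketch below is illustrative only (made-up counts, no dependence on the Stan headers added in this patch); it evaluates the log density, gradient, and diagonal Hessian the same way log_likelihood and diff do for the Poisson case. The same expressions also run under Stan's autodiff scalar types, which is why the struct above templates on T. The prototype's built-in covariance functor follows next.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
  // Sufficient statistics for three groups: number of observations and
  // sum of counts per group, plus a log-exposure offset (here zero).
  Eigen::VectorXd n_samples(3), sums(3), log_exposure(3);
  n_samples << 5, 5, 5;
  sums << 7, 12, 3;
  log_exposure.setZero();

  Eigen::VectorXd theta(3);  // log Poisson rate for each group
  theta << 0.3, 0.9, -0.2;

  Eigen::VectorXd shifted_mean = theta + log_exposure;
  double factorial_term = 0;
  for (int i = 0; i < sums.size(); ++i)
    factorial_term += std::lgamma(sums(i) + 1);

  // Same quantities as diff_poisson_log::log_likelihood and ::diff.
  double log_lik = -factorial_term + shifted_mean.dot(sums)
                   - n_samples.dot(shifted_mean.array().exp().matrix());
  Eigen::VectorXd common_term
      = (n_samples.array() * shifted_mean.array().exp()).matrix();
  Eigen::VectorXd gradient = sums - common_term;
  Eigen::VectorXd hessian_diag = -common_term;  // Hessian is diagonal

  std::cout << "log density: " << log_lik << "\n"
            << "gradient: " << gradient.transpose() << "\n"
            << "Hessian diagonal: " << hessian_diag.transpose() << std::endl;
  return 0;
}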
+struct sqr_exp_kernel_functor { + template + Eigen::Matrix + operator() (const Eigen::Matrix& phi, + const T2& x, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* msgs = nullptr) const { + double jitter = 1e-8; + Eigen::Matrix + kernel = stan::math::gp_exp_quad_cov(x, phi(0), phi(1)); + for (int i = 0; i < kernel.cols(); i++) + kernel(i, i) += jitter; + + return kernel; + } +}; + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp new file mode 100644 index 00000000000..4eb48a31419 --- /dev/null +++ b/stan/math/laplace/laplace_marginal.hpp @@ -0,0 +1,472 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include // CHECK -- do we need this? +#include // CHECK -- do we need this? + +// Reference for calculations of marginal and its gradients: +// Rasmussen and Williams, +// "Gaussian Processes for Machine Learning", +// Algorithms 3.1 and 5.1. +// The MIT Press, 2006. +// & +// Margossian, +// "The Search for simulation algorithms in pathological spaces" +// Algorithms 3 and 5 +// Thesis proposal, 2020 +// Note 1: where I didn't conflict with my own notation, I used their notation, +// which significantly helps when debuging the code. + +namespace stan { +namespace math { + /** + * For a latent Gaussian model with global parameters phi, latent + * variables theta, and observations y, this function computes + * an approximation of the log marginal density, p(y | phi). + * This is done by marginalizing out theta, using a Laplace + * approxmation. The latter is obtained by finding the mode, + * via Newton's method, and computing the Hessian of the likelihood. + * + * The convergence criterion for the Newton is a small change in + * log marginal density. The user controls the tolerance (i.e. + * threshold under which change is deemed small enough) and + * maximum number of steps. + * TO DO: add more robust convergence criterion. + * + * This algorithm is adapted from Rasmussen and Williams, + * "Gaussian Processes for Machine Learning", second edition, + * MIT Press 2006, algorithm 3.1. + * + * Variables needed for the gradient or generating quantities + * are stored by reference. + * + * @tparam D structure type for the likelihood object. + * @tparam K structure type for the covariance object. + * @tparam Tx type of x, which can in Stan be passed as a matrix or + * an array of vectors. + * @param[in] D structure to compute and differentiate the log likelihood. + * @param[in] K structure to compute the covariance function. + * @param[in] phi the global parameter (input for the covariance function). + * @param[in] x fixed spatial data (input for the covariance function). + * @param[in] delta additional fixed real data (input for covariance + * function). + * @param[in] delta_int additional fixed integer data (input for covariance + * function). + * @param[in, out] covariance the evaluated covariance function for the + * latent gaussian variable. + * @param[in, out] theta a vector to store the mode. + * @param[in, out] W_root a vector to store the square root of the + * diagonal negative Hessian. + * @param[in, out] L cholesky decomposition of stabilized inverse covariance. + * @param[in, out] a element in the Newton step + * @param[in, out] l_grad the log density of the likelihood. 
+ * @param[in] theta_0 the initial guess for the mode. + * @param[in] tolerance the convergence criterion for the Newton solver. + * @param[in] max_num_steps maximum number of steps for the Newton solver. + * @return the log marginal density, p(y | phi). + */ + template + double + laplace_marginal_density (const D& diff_likelihood, + const K& covariance_function, + const Eigen::VectorXd& phi, + // const std::vector& x, + const Tx& x, + const std::vector& delta, + const std::vector& delta_int, + Eigen::MatrixXd& covariance, + Eigen::VectorXd& theta, + Eigen::VectorXd& W_root, + Eigen::MatrixXd& L, + Eigen::VectorXd& a, + Eigen::VectorXd& l_grad, + const Eigen::VectorXd& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + using Eigen::MatrixXd; + using Eigen::VectorXd; + + int group_size = theta_0.size(); // CHECK -- do we ever need this? + covariance = covariance_function(phi, x, delta, delta_int, msgs); + // CHECK -- should we compute the derivatives here too? + theta = theta_0; + double objective_old = - 1e+10; // CHECK -- what value to use? + double objective_new; + + for (int i = 0; i <= max_num_steps; i++) { + if (i == max_num_steps) { + std::ostringstream message; + message << "laplace_marginal_density: max number of iterations:" + << max_num_steps << " exceeded."; + throw boost::math::evaluation_error(message.str()); + } + + // Compute variable a. + VectorXd hessian; + diff_likelihood.diff(theta, l_grad, hessian); + VectorXd W = - hessian; + W_root = sqrt(W); + { + MatrixXd B = MatrixXd::Identity(group_size, group_size) + + quad_form_diag(covariance, W_root); + L = cholesky_decompose(B); + } + VectorXd b = W.cwiseProduct(theta) + l_grad; + a = b - W_root.asDiagonal() * mdivide_left_tri(transpose(L), + mdivide_left_tri(L, + diag_pre_multiply(W_root, multiply(covariance, b)))); + + // Simple Newton step + theta = covariance * a; + + // Check for convergence. + if (i != 0) objective_old = objective_new; + objective_new = -0.5 * a.dot(theta) + + diff_likelihood.log_likelihood(theta); + double objective_diff = abs(objective_new - objective_old); + if (objective_diff < tolerance) break; + } + + return objective_new - sum(L.diagonal().array().log()); + } + + /** + * For a latent Gaussian model with global parameters phi, latent + * variables theta, and observations y, this function computes + * an approximation of the log marginal density, p(y | phi). + * This is done by marginalizing out theta, using a Laplace + * approxmation. The latter is obtained by finding the mode, + * using a custom Newton method, and the Hessian of the likelihood. + * + * The convergence criterion for the Newton is a small change in + * log marginal density. The user controls the tolerance (i.e. + * threshold under which change is deemed small enough) and + * maximum number of steps. + * + * Wrapper for when the global parameter is passed as a double. + * + * @tparam T type of the initial guess. + * @tparam D structure type for the likelihood object. + * @tparam K structure type for the covariance object. + * @tparam Tx type of spatial data for covariance: in Stan, this can + * either be a matrix or an array of vectors. + * @param[in] D structure to compute and differentiate the log likelihood. + * The object stores the sufficient stats for the observations. + * @param[in] K structure to compute the covariance function. + * @param[in] phi the global parameter (input for the covariance function). + * @param[in] x data for the covariance function. 
+ * @param[in] delta additional fixed real data (input for covariance + * function). + * @param[in] delta_int additional fixed integer data (input for covariance + * function). + * @param[in] theta_0 the initial guess for the mode. + * @param[in] tolerance the convergence criterion for the Newton solver. + * @param[in] max_num_steps maximum number of steps for the Newton solver. + * @return the log maginal density, p(y | phi). + */ + template + double + laplace_marginal_density (const D& diff_likelihood, + const K& covariance_function, + const Eigen::VectorXd& phi, + const Tx& x, + // const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + Eigen::VectorXd theta, W_root, a, l_grad; + Eigen::MatrixXd L, covariance; + return laplace_marginal_density(diff_likelihood, covariance_function, + phi, x, delta, delta_int, + covariance, + theta, W_root, L, a, l_grad, + value_of(theta_0), msgs, + tolerance, max_num_steps); + } + + // TO DO -- remove this code from final implementation. + /** + * A structure to compute sensitivities of the covariance + * function using forward mode autodiff. The functor is formatted + * so that it can be passed to Jacobian(). This requires one input + * vector and one output vector. + * + * TO DO: make this structure no templated. See comment by @SteveBronder. + * TO DO: remove this structure for final code: new differentiation + * algorithm does not require it. + */ + // template + // struct covariance_sensitivities { + // /* input data for the covariance function. */ + // std::vector x_; + // /* additional fixed real variable */ + // std::vector delta_; + // /* additional fixed integer variable */ + // std::vector delta_int_; + // /* structure to compute the covariance function. */ + // K covariance_function_; + // /* ostream for printing statements inside covariance function */ + // std::ostream* msgs_; + // + // covariance_sensitivities (const std::vector& x, + // const std::vector& delta, + // const std::vector& delta_int, + // const K& covariance_function, + // std::ostream* msgs) : + // // TO DO -- make covariance function the first argument + // x_(x), delta_(delta), delta_int_(delta_int), + // covariance_function_(covariance_function), msgs_(msgs) { } + // + // template + // Eigen::Matrix + // operator() (const Eigen::Matrix& phi) const { + // return to_vector(covariance_function_(phi, x_, delta_, + // delta_int_, msgs_)); + // } + // }; + + /** + * The vari class for the laplace marginal density. + * The method is adapted from algorithm 5.1 in Rasmussen & Williams, + * "Gaussian Processes for Machine Learning" + * with modifications described in my (Charles Margossian) + * thesis proposal. + * + * To make computation efficient, variables produced during the + * Newton step are stored and reused. To avoid storing these variables + * for too long, the sensitivies are computed in the constructor, and + * stored for the chain method. Hence, we store a single small vector, + * instead of multiple large matrices. + */ + struct laplace_marginal_density_vari : public vari { + /* dimension of the global parameters. */ + int phi_size_; + /* global parameters. */ + vari** phi_; + /* the marginal density of the observation, conditional on the + * globl parameters. */ + vari** marginal_density_; + /* An object to store the sensitivities of phi. 
*/ + Eigen::VectorXd phi_adj_; + + template + laplace_marginal_density_vari + (const D& diff_likelihood, + const K& covariance_function, + const Eigen::Matrix& phi, + const Tx& x, + // const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + double marginal_density, + const Eigen::MatrixXd& covariance, + const Eigen::VectorXd& theta, + const Eigen::VectorXd& W_root, + const Eigen::MatrixXd& L, + const Eigen::VectorXd& a, + const Eigen::VectorXd& l_grad, + std::ostream* msgs = nullptr) + : vari(marginal_density), + phi_size_(phi.size()), + phi_(ChainableStack::instance_->memalloc_.alloc_array( + phi.size())), + marginal_density_( + ChainableStack::instance_->memalloc_.alloc_array(1)) { + using Eigen::Matrix; + using Eigen::Dynamic; + + int theta_size = theta.size(); + for (int i = 0; i < phi_size_; i++) phi_[i] = phi(i).vi_; + + // CHECK -- is there a cleaner way of doing this? + marginal_density_[0] = this; + marginal_density_[0] = new vari(marginal_density, false); + + // compute derivatives of covariance matrix with respect to phi. + // EXPERIMENT: reverse-mode variation + + // auto start = std::chrono::system_clock::now(); + + Eigen::MatrixXd R; + { + Eigen::MatrixXd W_root_diag = W_root.asDiagonal(); + R = W_root_diag * + L.transpose().triangularView() + .solve(L.triangularView() + .solve(W_root_diag)); + } + + Eigen::MatrixXd + C = mdivide_left_tri(L, + diag_pre_multiply(W_root, covariance)); + + // CHECK -- should there be a minus sign here? + Eigen::VectorXd s2 = 0.5 * (covariance.diagonal() + - (C.transpose() * C).diagonal()) + .cwiseProduct(diff_likelihood.third_diff(theta)); + + phi_adj_ = Eigen::VectorXd(phi_size_); + start_nested(); + try { + // = std::chrono::system_clock::now(); + Matrix phi_v = value_of(phi); + Matrix + K_var = covariance_function(phi_v, x, delta, delta_int, msgs); + var Z = laplace_pseudo_target(K_var, a, R, l_grad, s2); + + set_zero_all_adjoints_nested(); + grad(Z.vi_); + + for (int j = 0; j < phi_size_; j++) + phi_adj_[j] = phi_v(j).adj(); + } catch (const std::exception& e) { + recover_memory_nested(); + throw; + } + recover_memory_nested(); + + // auto end = std::chrono::system_clock::now(); + // std::chrono::duration time = end - ; + // std::cout << "diffentiation time: " << time.count() << std::endl; + + // Implementation with fwd mode computation of C, + // and then following R&W's scheme. 
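      // Reference note (a sketch, in the notation of Rasmussen & Williams,
      // Algorithm 5.1, and of the code above): writing C_j = d K / d phi_j,
      // the gradient being assembled here is
      //   d log p(y | phi) / d phi_j
      //     = 0.5 * a' * C_j * a - 0.5 * trace(R * C_j)
      //       + s2' * (I - K * R) * C_j * l_grad.
      // The custom adjoint K_adj_ defined in laplace_pseudo_target_vari is
      // chosen so that the single reverse-mode sweep through the covariance
      // functor, which accumulates sum_{i,k} K_adj(i,k) * d K(i,k) / d phi_j,
      // yields exactly this expression for every j at once, instead of the
      // one-parameter-at-a-time forward-mode loop kept (commented out) below.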
+    /*
+     = std::chrono::system_clock::now();
+    covariance_sensitivities f(x, delta, delta_int,
+                               covariance_function, msgs);
+    Eigen::MatrixXd diff_cov;
+    {
+      Eigen::VectorXd covariance_vector;
+      jacobian_fwd(f, value_of(phi), covariance_vector, diff_cov);
+      // covariance = to_matrix(covariance_vector, theta_size, theta_size);
+    }
+
+    phi_adj_ = Eigen::VectorXd(phi_size_);
+
+    for (int j = 0; j < phi_size_; j++) {
+      Eigen::VectorXd j_col = diff_cov.col(j);
+      C = to_matrix(j_col, theta_size, theta_size);
+      double s1 = 0.5 * quad_form(C, a) - 0.5 * sum((R * C).diagonal());
+      Eigen::VectorXd b = C * l_grad;
+      Eigen::VectorXd s3 = b - covariance * (R * b);
+      // std::cout << "old Z: " << s1 + s2.dot(s3) << std::endl;
+      phi_adj_[j] = s1 + s2.dot(s3);
+    }
+    end = std::chrono::system_clock::now();
+    time = end - ;
+    std::cout << "Former diff: " << time.count() << std::endl;
+    */
+    }
+
+    void chain() {
+      for (int j = 0; j < phi_size_; j++)
+        phi_[j]->adj_ += marginal_density_[0]->adj_ * phi_adj_[j];
+    }
+  };
+
+  /**
+   * For a latent Gaussian model with global parameters phi, latent
+   * variables theta, and observations y, this function computes
+   * an approximation of the log marginal density, p(y | phi).
+   * This is done by marginalizing out theta, using a Laplace
+   * approximation. The latter is obtained by finding the mode,
+   * using a custom Newton method, and the Hessian of the likelihood.
+   *
+   * The convergence criterion for the Newton solver is a small change in
+   * the log marginal density. The user controls the tolerance (i.e.
+   * threshold under which change is deemed small enough) and
+   * maximum number of steps.
+   *
+   * Wrapper for when the global parameter is passed as an autodiff
+   * (var) type.
+   *
+   * @tparam T0 type of the initial guess.
+   * @tparam T1 type of the global parameters.
+   * @tparam D structure type for the likelihood object.
+   * @tparam K structure type for the covariance object.
+   * @tparam Tx type for the spatial data passed to the covariance.
+   * @param[in] D structure to compute and differentiate the log likelihood.
+   *   The object stores the sufficient stats for the observations.
+   * @param[in] K structure to compute the covariance function.
+   * @param[in] phi the global parameter (input for the covariance function).
+   * @param[in] x data for the covariance function.
+   * @param[in] delta additional real data for covariance function.
+   * @param[in] delta_int additional integer data for covariance function.
+   * @param[in] theta_0 the initial guess for the mode.
+   * @param[in] tolerance the convergence criterion for the Newton solver.
+   * @param[in] max_num_steps maximum number of steps for the Newton solver.
+   * @return the log marginal density, p(y | phi).
+ */ + template + T1 laplace_marginal_density + (const D& diff_likelihood, + const K& covariance_function, + const Eigen::Matrix& phi, + const Tx& x, + // const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + Eigen::VectorXd theta, W_root, a, l_grad; + Eigen::MatrixXd L; + double marginal_density_dbl; + Eigen::MatrixXd covariance; + + // TEST + // auto start = std::chrono::system_clock::now(); + + marginal_density_dbl + = laplace_marginal_density(diff_likelihood, + covariance_function, + value_of(phi), x, delta, delta_int, + covariance, + theta, W_root, L, a, l_grad, + value_of(theta_0), + msgs, + tolerance, max_num_steps); + + // TEST + // auto end = std::chrono::system_clock::now(); + // std::chrono::duration elapsed_time = end - start; + // std::cout << "Evaluation time: " << elapsed_time.count() << std::endl; + + // TEST + // start = std::chrono::system_clock::now(); + + // construct vari + laplace_marginal_density_vari* vi0 + = new laplace_marginal_density_vari(diff_likelihood, + covariance_function, + phi, x, delta, delta_int, + marginal_density_dbl, + covariance, + theta, W_root, L, a, l_grad, + msgs); + + var marginal_density = var(vi0->marginal_density_[0]); + + return marginal_density; + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_marginal_bernoulli.hpp b/stan/math/laplace/laplace_marginal_bernoulli.hpp new file mode 100644 index 00000000000..1c606be68c0 --- /dev/null +++ b/stan/math/laplace/laplace_marginal_bernoulli.hpp @@ -0,0 +1,100 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP + +#include +#include + +namespace stan { +namespace math { + // EXPERIMENT + // Use the squared exponential kernel, for the time defined + // in the laplace_likelihood folder. + // In the final version, the user will provide the covariance + // function. + + /** + * Wrapper function around the laplace_marginal function for + * a logistic Bernoulli likelihood. Returns the marginal density + * p(y | phi) by marginalizing out the latent gaussian variable, + * with a Laplace approximation. See the laplace_marginal function + * for more details. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @param[in] theta_0 the initial guess for the Laplace approximation. + * @param[in] phi model parameters for the covariance function. + * @param[in] x data for the covariance function. + * @param[in] n_samples number of samples per group. First sufficient + * statistics. + * @param[in] y total counts per group. Second sufficient statistics. + * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. 
+ */ + template + T1 laplace_marginal_bernoulli + (const std::vector& y, + const std::vector& n_samples, + // const K& covariance function, + const Eigen::Matrix& phi, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return laplace_marginal_density( + diff_logistic_log(to_vector(n_samples), to_vector(y)), + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, msgs, tolerance, max_num_steps); + } + + // Add signature that takes in a Kernel functor specified by the user. + template + T1 laplace_marginal_bernoulli + (const std::vector& y, + const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return laplace_marginal_density( + diff_logistic_log(to_vector(n_samples), to_vector(y)), + covariance_function, + phi, x, delta, delta_int, + theta_0, msgs, tolerance, max_num_steps); + } + + // Add signature that takes x as a matrix instead of a vector. + template + T1 laplace_marginal_bernoulli + (const std::vector& y, + const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, + const Eigen::MatrixXd& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return laplace_marginal_density( + diff_logistic_log(to_vector(n_samples), to_vector(y)), + covariance_function, + phi, x, delta, delta_int, + theta_0, msgs, tolerance, max_num_steps); + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_marginal_poisson.hpp b/stan/math/laplace/laplace_marginal_poisson.hpp new file mode 100644 index 00000000000..081d9efcc44 --- /dev/null +++ b/stan/math/laplace/laplace_marginal_poisson.hpp @@ -0,0 +1,80 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_HPP + +#include +#include + +namespace stan { +namespace math { + // EXPERIMENTAL + // Use the squared exponential kernel, for the time defined + // in the laplace_likelihood folder. + // In the final version, the user will provide the covariance + // function. + + /** + * Wrapper function around the laplace_marginal function for + * a log poisson likelihood. Returns the marginal density + * p(y | phi) by marginalizing out the latent gaussian variable, + * with a Laplace approximation. See the laplace_marginal function + * for more details. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @param[in] y total counts per group. Second sufficient statistics. + * @param[in] n_samples number of samples per group. First sufficient + * statistics. + * NOTE: here we would have the covariance functor + * @param[in] phi model parameters for the covariance functor. + * @param[in] x data for the covariance functor. + * @param[in] delta additional real data for the covariance functor. + * @param[in] delta_int additional integer data for covariance functor. + * @param[in] theta_0 the initial guess for the Laplace approximation. 
+ * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. + */ + template + T1 laplace_marginal_poisson + (const std::vector& y, + const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return laplace_marginal_density( + diff_poisson_log(to_vector(n_samples), to_vector(y)), + covariance_function, phi, x, delta, delta_int, + theta_0, msgs, tolerance, max_num_steps); + } + + template + T1 laplace_marginal_poisson + (const std::vector& y, + const std::vector& n_samples, + const Eigen::VectorXd& ye, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return laplace_marginal_density( + diff_poisson_log(to_vector(n_samples), to_vector(y), log(ye)), + covariance_function, phi, x, delta, delta_int, + theta_0, msgs, tolerance, max_num_steps); + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_pseudo_target.hpp b/stan/math/laplace/laplace_pseudo_target.hpp new file mode 100644 index 00000000000..03ceed3bc26 --- /dev/null +++ b/stan/math/laplace/laplace_pseudo_target.hpp @@ -0,0 +1,100 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_PSEUDO_TARGET_HPP +#define STAN_MATH_LAPLACE_LAPLACE_PSEUDO_TARGET_HPP + +#include +#include +#include + +namespace stan { +namespace math { + /** + * Function to compute the pseudo target, $\tilde Z$, + * with a custom derivative method. + */ + inline double laplace_pseudo_target ( + const Eigen::MatrixXd& K, + const Eigen::VectorXd& a, + const Eigen::MatrixXd& R, + const Eigen::VectorXd& l_grad, + const Eigen::VectorXd& s2) { + double s1 = 0.5 * quad_form(K, a) - 0.5 * sum((R * K).diagonal()); + Eigen::VectorXd b = K * l_grad; + Eigen::VectorXd s3 = b - K * (R * b); + return s1 + s2.dot(s3); + } + + /** + * Vari class for the function. + */ + struct laplace_pseudo_target_vari : public vari { + /* number of elements in covariance matrix. */ + int K_size_; + /* covariance matrix. */ + vari** K_; + /* pseudo target. */ + vari** pseudo_target_; + /* An object to store the sensitivities of K. 
*/ + Eigen::MatrixXd K_adj_; + + template + laplace_pseudo_target_vari ( + const Eigen::VectorXd& a, + const Eigen::MatrixXd& R, + const Eigen::Matrix& K, + const Eigen::VectorXd& s2, + const Eigen::VectorXd& l, + double pseudo_target) + : vari(pseudo_target), + K_size_(K.size()), + K_(ChainableStack::instance_->memalloc_.alloc_array( + K.size())), + pseudo_target_( + ChainableStack::instance_->memalloc_.alloc_array(1)) { + int dim_theta = K.rows(); + for (int j = 0; j < dim_theta; j++) + for (int i = 0; i < dim_theta; i++) + K_[j * dim_theta + i] = K(i, j).vi_; + + pseudo_target_[0] = this; + pseudo_target_[0] = new vari(pseudo_target, false); + + K_adj_ = 0.5 * a * a.transpose() - 0.5 * R + + s2 * l.transpose() + - (R * (value_of(K) * s2)) * l.transpose(); + } + + void chain() { + int dim_theta = K_adj_.rows(); + for (int j = 0; j < dim_theta; j++) + for (int i = 0; i < dim_theta; i++) + K_[j * dim_theta + i]->adj_ += + pseudo_target_[0]->adj_ * K_adj_(i, j); + } + }; + + /** + * Overload function for case where K is passed as a matrix of var. + */ + template + inline T laplace_pseudo_target ( + const Eigen::Matrix& K, + const Eigen::VectorXd& a, + const Eigen::MatrixXd& R, + const Eigen::VectorXd& l_grad, + const Eigen::VectorXd& s2) { + double pseudo_target_dbl + = laplace_pseudo_target(value_of(K), a, R, l_grad, s2); + + // construct vari + laplace_pseudo_target_vari* vi0 + = new laplace_pseudo_target_vari(a, R, K, s2, l_grad, + pseudo_target_dbl); + + var pseudo_target = var(vi0->pseudo_target_[0]); + return pseudo_target; + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp b/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp new file mode 100644 index 00000000000..1dcf6126fcb --- /dev/null +++ b/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp @@ -0,0 +1,68 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_RNG_HPP +#define STAN_MATH_LAPLACE_LAPLACE_APPROX_RNG_HPP + +#include + +namespace stan { +namespace math { + +/** + * In a latent gaussian model, + * + * theta ~ Normal(theta | 0, Sigma(phi)) + * y ~ pi(y | theta) + * + * return a multivariate normal random variate sampled + * from the gaussian approximation of p(theta | y, phi). + */ +template +inline Eigen::VectorXd // CHECK -- right return type + laplace_approx_poisson_rng + (const std::vector& y, + const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + RNG& rng, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return + laplace_approx_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), + covariance_function, phi, x, delta, delta_int, theta_0, + rng, msgs, tolerance, max_num_steps); + } + +/** + * Overload for case where user passes exposure. 
+ */ +template +inline Eigen::VectorXd // CHECK -- right return type + laplace_approx_poisson_rng + (const std::vector& y, + const std::vector& n_samples, + const Eigen::VectorXd& exposure, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + RNG& rng, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return + laplace_approx_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), + log(exposure)), + covariance_function, phi, x, delta, delta_int, theta_0, + rng, msgs, tolerance, max_num_steps); + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/prob/laplace_approx_rng.hpp b/stan/math/laplace/prob/laplace_approx_rng.hpp new file mode 100644 index 00000000000..4f05015685e --- /dev/null +++ b/stan/math/laplace/prob/laplace_approx_rng.hpp @@ -0,0 +1,73 @@ +#ifndef STAN_MATH_LAPLACE_PROB_LAPLACE_APPROX_RNG_HPP +#define STAN_MATH_LAPLACE_PROB_LAPLACE_APPROX_RNG_HPP + +#include +#include +#include + +namespace stan { +namespace math { + +/** + * In a latent gaussian model, + * + * theta ~ Normal(theta | 0, Sigma(phi)) + * y ~ pi(y | theta) + * + * return a multivariate normal random variate sampled + * from the gaussian approximation of p(theta | y, phi). + */ +template +inline Eigen::VectorXd // CHECK -- right return type +laplace_approx_rng + (const D& diff_likelihood, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + RNG& rng, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + Eigen::VectorXd theta; + Eigen::VectorXd W_root; + Eigen::MatrixXd L; + { + Eigen::MatrixXd covariance; + Eigen::VectorXd a; + Eigen::VectorXd l_grad; + double marginal_density + = laplace_marginal_density(diff_likelihood, covariance_function, + value_of(phi), x, delta, delta_int, + covariance, theta, W_root, L, a, l_grad, + value_of(theta_0), msgs, + tolerance, max_num_steps); + } + + // Modified R&W method + Eigen::VectorXd W_root_inv = inv(W_root); + Eigen::MatrixXd V_dec = mdivide_left_tri(L, + diag_matrix(W_root_inv)); + + return multi_normal_rng( + theta, + diag_matrix(square(W_root_inv)) - V_dec.transpose() * V_dec, + rng); + + // CHECK -- which method to use? Both seem equivalent. 
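  // A quick check of the equivalence flagged above (sketch, with K the prior
  // covariance and W the diagonal negative Hessian at the mode): writing
  //   B = I + W^(1/2) * K * W^(1/2) = L * L',
  // the covariance of the Laplace approximation is (K^(-1) + W)^(-1), and
  //   (K^(-1) + W)^(-1) = K - K * W^(1/2) * B^(-1) * W^(1/2) * K   (R&W form, below)
  //                     = W^(-1) - W^(-1/2) * B^(-1) * W^(-1/2)    (form used above),
  // since (A^(-1) + I)^(-1) = I - (A + I)^(-1) for A = W^(1/2) * K * W^(1/2).
  // The two draws therefore have the same distribution; the form used above
  // only needs W_root and L, so the covariance matrix need not be kept around.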
+ // R&W method + // Eigen::MatrixXd V; + // V = mdivide_left_tri(L, + // diag_pre_multiply(W_root, covariance)); + // return multi_normal_rng( + // theta, + // covariance - V.transpose() * V, + // rng); +} + +} // namespace math +} // namespace stan + +#endif diff --git a/test/unit/math/laplace/aki_disease_data/spatial1.txt b/test/unit/math/laplace/aki_disease_data/spatial1.txt new file mode 100644 index 00000000000..dd6c7aefd1a --- /dev/null +++ b/test/unit/math/laplace/aki_disease_data/spatial1.txt @@ -0,0 +1,911 @@ + 1.0000000e+00 4.0000000e+00 2.8079055e+00 4.0000000e+00 + 1.0000000e+00 5.0000000e+00 7.0898599e+00 3.0000000e+00 + 2.0000000e+00 3.0000000e+00 1.8430127e+00 0.0000000e+00 + 2.0000000e+00 4.0000000e+00 1.7496143e+02 1.2200000e+02 + 2.0000000e+00 5.0000000e+00 4.2061934e+01 2.9000000e+01 + 2.0000000e+00 6.0000000e+00 1.2833066e-01 0.0000000e+00 + 3.0000000e+00 3.0000000e+00 4.3401990e+00 6.0000000e+00 + 3.0000000e+00 4.0000000e+00 3.4082447e+01 2.5000000e+01 + 3.0000000e+00 5.0000000e+00 5.1485710e+01 3.2000000e+01 + 3.0000000e+00 6.0000000e+00 1.3278291e-01 0.0000000e+00 + 4.0000000e+00 3.0000000e+00 1.0589369e+01 1.6000000e+01 + 4.0000000e+00 4.0000000e+00 7.3046351e+00 0.0000000e+00 + 4.0000000e+00 5.0000000e+00 4.8329585e+00 4.0000000e+00 + 5.0000000e+00 3.0000000e+00 7.8532574e+00 4.0000000e+00 + 5.0000000e+00 4.0000000e+00 5.8476511e+00 0.0000000e+00 + 5.0000000e+00 5.0000000e+00 5.7880622e+00 6.0000000e+00 + 5.0000000e+00 6.0000000e+00 7.2856462e-01 0.0000000e+00 + 6.0000000e+00 2.0000000e+00 2.7997280e-01 0.0000000e+00 + 6.0000000e+00 3.0000000e+00 1.2444456e-01 0.0000000e+00 + 6.0000000e+00 4.0000000e+00 1.1557298e+01 1.0000000e+01 + 6.0000000e+00 5.0000000e+00 1.0396495e+01 4.0000000e+00 + 6.0000000e+00 6.0000000e+00 7.7440359e+00 7.0000000e+00 + 6.0000000e+00 7.0000000e+00 8.4993784e-01 0.0000000e+00 + 6.0000000e+00 8.0000000e+00 3.9912302e+00 1.0000000e+00 + 7.0000000e+00 2.0000000e+00 9.7159000e-02 0.0000000e+00 + 7.0000000e+00 3.0000000e+00 2.3783082e+00 1.0000000e+00 + 7.0000000e+00 4.0000000e+00 1.8981563e+01 2.5000000e+01 + 7.0000000e+00 5.0000000e+00 9.9705535e+00 3.0000000e+00 + 7.0000000e+00 6.0000000e+00 5.8144073e+01 4.5000000e+01 + 7.0000000e+00 7.0000000e+00 1.7951441e+02 1.7200000e+02 + 7.0000000e+00 8.0000000e+00 4.7729256e+01 5.2000000e+01 + 7.0000000e+00 9.0000000e+00 4.5818490e+02 3.2400000e+02 + 7.0000000e+00 1.0000000e+01 8.2946817e+00 4.0000000e+00 + 7.0000000e+00 1.1000000e+01 4.8010997e+00 5.0000000e+00 + 7.0000000e+00 1.2000000e+01 3.2635018e+01 3.4000000e+01 + 7.0000000e+00 1.3000000e+01 4.7074167e+00 4.0000000e+00 + 7.0000000e+00 1.4000000e+01 1.3387533e+01 1.2000000e+01 + 7.0000000e+00 1.5000000e+01 3.2884418e+01 2.7000000e+01 + 7.0000000e+00 1.6000000e+01 6.6614769e+01 4.6000000e+01 + 7.0000000e+00 1.7000000e+01 9.9756987e+01 5.5000000e+01 + 7.0000000e+00 1.8000000e+01 5.0290440e+01 3.1000000e+01 + 7.0000000e+00 1.9000000e+01 2.2515207e+01 1.8000000e+01 + 7.0000000e+00 2.0000000e+01 1.7914949e-01 0.0000000e+00 + 7.0000000e+00 2.1000000e+01 4.3733269e+00 6.0000000e+00 + 8.0000000e+00 2.0000000e+00 1.3572700e-02 0.0000000e+00 + 8.0000000e+00 3.0000000e+00 7.2286756e-01 2.0000000e+00 + 8.0000000e+00 4.0000000e+00 1.7405913e+01 1.5000000e+01 + 8.0000000e+00 5.0000000e+00 1.6586485e+02 1.6200000e+02 + 8.0000000e+00 6.0000000e+00 1.0328350e+02 6.7000000e+01 + 8.0000000e+00 7.0000000e+00 9.1931015e+01 8.2000000e+01 + 8.0000000e+00 8.0000000e+00 1.1852939e+02 1.2400000e+02 + 8.0000000e+00 9.0000000e+00 1.0983366e+02 8.1000000e+01 + 
8.0000000e+00 1.0000000e+01 8.6619977e+01 6.2000000e+01 + 8.0000000e+00 1.1000000e+01 1.0250153e+03 9.5300000e+02 + 8.0000000e+00 1.2000000e+01 1.0101579e+02 1.0400000e+02 + 8.0000000e+00 1.3000000e+01 7.8657199e+01 1.0700000e+02 + 8.0000000e+00 1.4000000e+01 2.3971224e+01 1.9000000e+01 + 8.0000000e+00 1.5000000e+01 8.3336609e+01 5.7000000e+01 + 8.0000000e+00 1.6000000e+01 7.8757016e+01 5.2000000e+01 + 8.0000000e+00 1.7000000e+01 1.1406701e+02 7.4000000e+01 + 8.0000000e+00 1.8000000e+01 6.2166550e+01 3.3000000e+01 + 8.0000000e+00 1.9000000e+01 1.0495877e+02 5.7000000e+01 + 8.0000000e+00 2.0000000e+01 5.3455587e+02 4.2100000e+02 + 8.0000000e+00 2.1000000e+01 4.7620912e+01 2.6000000e+01 + 8.0000000e+00 2.2000000e+01 2.2954138e+00 3.0000000e+00 + 9.0000000e+00 2.0000000e+00 7.7934308e+00 9.0000000e+00 + 9.0000000e+00 3.0000000e+00 4.0158531e+01 3.1000000e+01 + 9.0000000e+00 4.0000000e+00 1.6321877e+02 1.6000000e+02 + 9.0000000e+00 5.0000000e+00 2.5060659e+03 2.2880000e+03 + 9.0000000e+00 6.0000000e+00 1.9297843e+02 1.3300000e+02 + 9.0000000e+00 7.0000000e+00 2.1835028e+01 1.5000000e+01 + 9.0000000e+00 8.0000000e+00 4.2302459e+01 4.6000000e+01 + 9.0000000e+00 9.0000000e+00 1.8488548e+02 1.5500000e+02 + 9.0000000e+00 1.0000000e+01 2.4429468e+02 2.5100000e+02 + 9.0000000e+00 1.1000000e+01 5.2362386e+01 4.0000000e+01 + 9.0000000e+00 1.2000000e+01 5.2176951e+01 4.8000000e+01 + 9.0000000e+00 1.3000000e+01 2.5664934e+01 3.0000000e+01 + 9.0000000e+00 1.4000000e+01 2.9689173e+01 2.7000000e+01 + 9.0000000e+00 1.5000000e+01 2.9218903e+01 2.9000000e+01 + 9.0000000e+00 1.6000000e+01 6.0022954e+01 4.2000000e+01 + 9.0000000e+00 1.7000000e+01 5.7198501e+01 2.9000000e+01 + 9.0000000e+00 1.8000000e+01 6.8015308e+01 4.4000000e+01 + 9.0000000e+00 1.9000000e+01 1.1522675e+02 7.9000000e+01 + 9.0000000e+00 2.0000000e+01 2.8680855e+02 1.6900000e+02 + 9.0000000e+00 2.1000000e+01 3.2900904e+01 1.7000000e+01 + 1.0000000e+01 2.0000000e+00 1.5194390e+00 4.0000000e+00 + 1.0000000e+01 3.0000000e+00 6.3684781e+01 6.0000000e+01 + 1.0000000e+01 4.0000000e+00 6.2185461e+01 6.2000000e+01 + 1.0000000e+01 5.0000000e+00 2.2858175e+02 2.2300000e+02 + 1.0000000e+01 6.0000000e+00 1.3588382e+02 1.2600000e+02 + 1.0000000e+01 7.0000000e+00 8.7431600e+01 9.4000000e+01 + 1.0000000e+01 8.0000000e+00 8.0619066e+01 8.3000000e+01 + 1.0000000e+01 9.0000000e+00 1.3583706e+02 1.2000000e+02 + 1.0000000e+01 1.0000000e+01 1.4165653e+02 1.3500000e+02 + 1.0000000e+01 1.1000000e+01 5.1037915e+01 4.7000000e+01 + 1.0000000e+01 1.2000000e+01 4.8618129e+01 4.2000000e+01 + 1.0000000e+01 1.3000000e+01 1.4122714e+02 1.4300000e+02 + 1.0000000e+01 1.4000000e+01 5.3295604e+01 5.9000000e+01 + 1.0000000e+01 1.5000000e+01 4.1964699e+01 3.4000000e+01 + 1.0000000e+01 1.6000000e+01 1.2358541e+02 9.7000000e+01 + 1.0000000e+01 1.7000000e+01 1.5429759e+02 1.2800000e+02 + 1.0000000e+01 1.8000000e+01 5.7650262e+01 2.8000000e+01 + 1.0000000e+01 1.9000000e+01 1.0747161e+02 7.3000000e+01 + 1.0000000e+01 2.0000000e+01 9.8554211e+01 8.0000000e+01 + 1.0000000e+01 2.1000000e+01 5.6876424e+01 5.3000000e+01 + 1.0000000e+01 2.2000000e+01 2.1474296e+01 1.1000000e+01 + 1.0000000e+01 5.2000000e+01 4.0422619e-02 0.0000000e+00 + 1.0000000e+01 5.3000000e+01 3.4821039e-01 0.0000000e+00 + 1.1000000e+01 1.0000000e+00 1.0000000e-03 0.0000000e+00 + 1.1000000e+01 2.0000000e+00 1.4323232e+02 1.1600000e+02 + 1.1000000e+01 3.0000000e+00 3.7982352e+01 2.6000000e+01 + 1.1000000e+01 4.0000000e+00 1.0045313e+02 9.0000000e+01 + 1.1000000e+01 5.0000000e+00 3.9651826e+02 3.4200000e+02 + 1.1000000e+01 
6.0000000e+00 9.2769604e+01 8.9000000e+01 + 1.1000000e+01 7.0000000e+00 2.2384957e+02 2.0500000e+02 + 1.1000000e+01 8.0000000e+00 7.9091398e+01 9.6000000e+01 + 1.1000000e+01 9.0000000e+00 6.4660394e+01 6.4000000e+01 + 1.1000000e+01 1.0000000e+01 2.3374731e+02 2.1300000e+02 + 1.1000000e+01 1.1000000e+01 5.9249283e+01 5.9000000e+01 + 1.1000000e+01 1.2000000e+01 2.9543396e+01 3.5000000e+01 + 1.1000000e+01 1.3000000e+01 5.5312306e+01 6.8000000e+01 + 1.1000000e+01 1.4000000e+01 2.0178246e+01 1.8000000e+01 + 1.1000000e+01 1.5000000e+01 3.0197972e+01 2.6000000e+01 + 1.1000000e+01 1.6000000e+01 8.3957626e+01 9.5000000e+01 + 1.1000000e+01 1.7000000e+01 6.2695163e+01 5.4000000e+01 + 1.1000000e+01 1.8000000e+01 4.1231784e+02 3.2700000e+02 + 1.1000000e+01 1.9000000e+01 8.5738651e+01 1.0400000e+02 + 1.1000000e+01 2.0000000e+01 8.6717987e+01 6.3000000e+01 + 1.1000000e+01 2.1000000e+01 4.6352508e+01 3.5000000e+01 + 1.1000000e+01 2.2000000e+01 1.0376509e+02 7.1000000e+01 + 1.1000000e+01 2.3000000e+01 3.0676142e+02 2.5000000e+02 + 1.1000000e+01 2.4000000e+01 6.2909621e+00 6.0000000e+00 + 1.2000000e+01 2.0000000e+00 1.7082238e+02 1.3900000e+02 + 1.2000000e+01 3.0000000e+00 7.8163638e+01 7.3000000e+01 + 1.2000000e+01 4.0000000e+00 6.5361058e+01 5.5000000e+01 + 1.2000000e+01 5.0000000e+00 9.0182609e+01 9.6000000e+01 + 1.2000000e+01 6.0000000e+00 1.3245141e+02 1.2100000e+02 + 1.2000000e+01 7.0000000e+00 1.1509998e+02 8.9000000e+01 + 1.2000000e+01 8.0000000e+00 7.4322633e+01 7.9000000e+01 + 1.2000000e+01 9.0000000e+00 7.3654898e+01 5.5000000e+01 + 1.2000000e+01 1.0000000e+01 3.9684313e+01 4.3000000e+01 + 1.2000000e+01 1.1000000e+01 7.5021054e+01 8.0000000e+01 + 1.2000000e+01 1.2000000e+01 1.9074998e+02 1.5700000e+02 + 1.2000000e+01 1.3000000e+01 2.5387099e+01 2.6000000e+01 + 1.2000000e+01 1.4000000e+01 8.4748298e+01 8.6000000e+01 + 1.2000000e+01 1.5000000e+01 3.8512918e+01 3.8000000e+01 + 1.2000000e+01 1.6000000e+01 2.7881239e+01 3.2000000e+01 + 1.2000000e+01 1.7000000e+01 6.5435182e+01 6.8000000e+01 + 1.2000000e+01 1.8000000e+01 7.6083240e+01 6.7000000e+01 + 1.2000000e+01 1.9000000e+01 1.6990848e+02 1.6800000e+02 + 1.2000000e+01 2.0000000e+01 1.3348166e+02 1.3300000e+02 + 1.2000000e+01 2.1000000e+01 7.7146067e+01 1.0500000e+02 + 1.2000000e+01 2.2000000e+01 3.2069336e+01 2.1000000e+01 + 1.2000000e+01 2.3000000e+01 1.3266612e+02 1.0800000e+02 + 1.2000000e+01 2.4000000e+01 2.1308964e+02 2.3200000e+02 + 1.2000000e+01 5.0000000e+01 1.8067821e+00 0.0000000e+00 + 1.3000000e+01 2.0000000e+00 1.1997476e+01 1.5000000e+01 + 1.3000000e+01 3.0000000e+00 1.6721566e+02 1.3600000e+02 + 1.3000000e+01 4.0000000e+00 5.3346788e+01 6.5000000e+01 + 1.3000000e+01 5.0000000e+00 6.3462813e+01 6.4000000e+01 + 1.3000000e+01 6.0000000e+00 5.0036795e+01 5.4000000e+01 + 1.3000000e+01 7.0000000e+00 2.9092892e+02 3.0400000e+02 + 1.3000000e+01 8.0000000e+00 8.7058757e+01 8.3000000e+01 + 1.3000000e+01 9.0000000e+00 1.1532806e+02 1.1800000e+02 + 1.3000000e+01 1.0000000e+01 1.9798640e+02 1.9600000e+02 + 1.3000000e+01 1.1000000e+01 1.8619949e+03 1.8550000e+03 + 1.3000000e+01 1.2000000e+01 3.1789102e+01 2.4000000e+01 + 1.3000000e+01 1.3000000e+01 2.9640872e+01 4.8000000e+01 + 1.3000000e+01 1.4000000e+01 1.2361098e+01 9.0000000e+00 + 1.3000000e+01 1.5000000e+01 1.9489799e+01 1.2000000e+01 + 1.3000000e+01 1.6000000e+01 2.8202109e+01 2.4000000e+01 + 1.3000000e+01 1.7000000e+01 1.2593309e+02 1.1100000e+02 + 1.3000000e+01 1.8000000e+01 7.4330987e+01 7.9000000e+01 + 1.3000000e+01 1.9000000e+01 3.9673470e+01 4.4000000e+01 + 1.3000000e+01 2.0000000e+01 
3.5313346e+01 4.2000000e+01 + 1.3000000e+01 2.1000000e+01 4.1783712e+01 4.1000000e+01 + 1.3000000e+01 2.2000000e+01 5.3589689e+01 3.7000000e+01 + 1.3000000e+01 2.3000000e+01 5.6439326e+01 4.2000000e+01 + 1.3000000e+01 2.4000000e+01 2.3771279e+02 2.2100000e+02 + 1.3000000e+01 2.5000000e+01 1.6848286e+01 1.0000000e+01 + 1.3000000e+01 4.9000000e+01 2.1993524e-01 0.0000000e+00 + 1.3000000e+01 5.0000000e+01 3.1928984e+00 4.0000000e+00 + 1.3000000e+01 5.1000000e+01 3.7911704e-02 0.0000000e+00 + 1.4000000e+01 2.0000000e+00 6.9110819e-01 0.0000000e+00 + 1.4000000e+01 3.0000000e+00 1.2720582e+02 1.1500000e+02 + 1.4000000e+01 4.0000000e+00 3.0293702e+02 2.7900000e+02 + 1.4000000e+01 5.0000000e+00 1.7752261e+02 1.8300000e+02 + 1.4000000e+01 6.0000000e+00 2.5085587e+01 2.0000000e+01 + 1.4000000e+01 7.0000000e+00 3.7601925e+01 3.0000000e+01 + 1.4000000e+01 8.0000000e+00 5.4156493e+01 5.2000000e+01 + 1.4000000e+01 9.0000000e+00 3.1159822e+02 3.0700000e+02 + 1.4000000e+01 1.0000000e+01 1.9823110e+02 1.7700000e+02 + 1.4000000e+01 1.1000000e+01 7.6771236e+02 6.7900000e+02 + 1.4000000e+01 1.2000000e+01 3.0528553e+01 3.2000000e+01 + 1.4000000e+01 1.3000000e+01 6.8529954e+01 5.0000000e+01 + 1.4000000e+01 1.4000000e+01 2.7118555e+01 2.1000000e+01 + 1.4000000e+01 1.5000000e+01 9.4311358e+01 8.4000000e+01 + 1.4000000e+01 1.6000000e+01 4.1451931e+01 2.6000000e+01 + 1.4000000e+01 1.7000000e+01 8.9847890e+01 8.7000000e+01 + 1.4000000e+01 1.8000000e+01 5.5667646e+01 4.3000000e+01 + 1.4000000e+01 1.9000000e+01 9.5134950e+01 9.0000000e+01 + 1.4000000e+01 2.0000000e+01 9.4043529e+01 1.0700000e+02 + 1.4000000e+01 2.1000000e+01 3.8134755e+01 2.7000000e+01 + 1.4000000e+01 2.2000000e+01 7.8031443e+01 8.3000000e+01 + 1.4000000e+01 2.3000000e+01 1.4399746e+01 3.0000000e+01 + 1.4000000e+01 2.4000000e+01 6.0123560e+01 6.7000000e+01 + 1.4000000e+01 2.5000000e+01 5.4886318e+01 5.6000000e+01 + 1.4000000e+01 2.6000000e+01 8.5315266e+00 7.0000000e+00 + 1.4000000e+01 4.7000000e+01 3.8080321e-02 0.0000000e+00 + 1.4000000e+01 4.8000000e+01 1.5793305e+00 1.0000000e+00 + 1.4000000e+01 4.9000000e+01 2.7579936e+00 1.0000000e+00 + 1.4000000e+01 5.0000000e+01 1.0496870e+00 0.0000000e+00 + 1.5000000e+01 2.0000000e+00 1.0982942e+00 1.0000000e+00 + 1.5000000e+01 3.0000000e+00 2.5862518e+02 2.2300000e+02 + 1.5000000e+01 4.0000000e+00 1.7522942e+02 1.7900000e+02 + 1.5000000e+01 5.0000000e+00 9.3322562e+01 9.5000000e+01 + 1.5000000e+01 6.0000000e+00 7.5218137e+01 7.1000000e+01 + 1.5000000e+01 7.0000000e+00 6.0056009e+01 6.3000000e+01 + 1.5000000e+01 8.0000000e+00 6.7176550e+02 5.3600000e+02 + 1.5000000e+01 9.0000000e+00 7.0222474e+01 6.2000000e+01 + 1.5000000e+01 1.0000000e+01 7.7078647e+01 8.3000000e+01 + 1.5000000e+01 1.1000000e+01 4.5786527e+01 4.8000000e+01 + 1.5000000e+01 1.2000000e+01 1.2141169e+02 1.1600000e+02 + 1.5000000e+01 1.3000000e+01 2.7840659e+01 2.4000000e+01 + 1.5000000e+01 1.4000000e+01 7.2341196e+01 7.4000000e+01 + 1.5000000e+01 1.5000000e+01 5.0032733e+01 5.2000000e+01 + 1.5000000e+01 1.6000000e+01 2.5070173e+01 2.1000000e+01 + 1.5000000e+01 1.7000000e+01 3.1929108e+01 2.3000000e+01 + 1.5000000e+01 1.8000000e+01 3.9344009e+01 3.6000000e+01 + 1.5000000e+01 1.9000000e+01 2.1690175e+01 2.1000000e+01 + 1.5000000e+01 2.0000000e+01 1.5116468e+01 1.6000000e+01 + 1.5000000e+01 2.1000000e+01 1.0575750e+01 1.0000000e+01 + 1.5000000e+01 2.2000000e+01 2.1302586e+01 1.8000000e+01 + 1.5000000e+01 2.3000000e+01 4.7678595e+01 4.4000000e+01 + 1.5000000e+01 2.4000000e+01 3.5711475e+01 2.7000000e+01 + 1.5000000e+01 2.5000000e+01 2.7435279e+01 
2.2000000e+01 + 1.5000000e+01 2.6000000e+01 1.0957075e+02 1.3200000e+02 + 1.5000000e+01 2.7000000e+01 4.0166880e+01 3.3000000e+01 + 1.5000000e+01 3.5000000e+01 6.7930981e+01 5.7000000e+01 + 1.5000000e+01 3.6000000e+01 2.1375488e+01 2.4000000e+01 + 1.5000000e+01 3.7000000e+01 2.0002431e+01 1.2000000e+01 + 1.5000000e+01 3.8000000e+01 5.2465632e+01 5.6000000e+01 + 1.5000000e+01 3.9000000e+01 1.5375098e+01 1.7000000e+01 + 1.5000000e+01 4.0000000e+01 3.4018903e+01 3.7000000e+01 + 1.5000000e+01 4.1000000e+01 3.1449289e+00 2.0000000e+00 + 1.5000000e+01 4.2000000e+01 9.6746873e+00 1.1000000e+01 + 1.5000000e+01 4.3000000e+01 1.0729735e+01 1.2000000e+01 + 1.5000000e+01 4.4000000e+01 5.0687518e+00 9.0000000e+00 + 1.5000000e+01 4.5000000e+01 8.5546808e-01 1.0000000e+00 + 1.5000000e+01 4.6000000e+01 2.3337694e+00 2.0000000e+00 + 1.5000000e+01 4.7000000e+01 1.8805409e+01 2.5000000e+01 + 1.5000000e+01 4.8000000e+01 2.8000834e-01 0.0000000e+00 + 1.5000000e+01 4.9000000e+01 6.5446359e+00 1.6000000e+01 + 1.5000000e+01 5.0000000e+01 2.5544882e+00 5.0000000e+00 + 1.5000000e+01 5.1000000e+01 1.7414397e-03 0.0000000e+00 + 1.6000000e+01 3.0000000e+00 3.2601201e+03 3.1950000e+03 + 1.6000000e+01 4.0000000e+00 2.4570023e+03 2.2900000e+03 + 1.6000000e+01 5.0000000e+00 2.1681670e+02 2.0900000e+02 + 1.6000000e+01 6.0000000e+00 7.3741461e+02 7.2600000e+02 + 1.6000000e+01 7.0000000e+00 2.2052428e+02 1.9800000e+02 + 1.6000000e+01 8.0000000e+00 5.0633002e+01 4.5000000e+01 + 1.6000000e+01 9.0000000e+00 5.6160596e+01 4.5000000e+01 + 1.6000000e+01 1.0000000e+01 4.1716996e+01 3.9000000e+01 + 1.6000000e+01 1.1000000e+01 3.8146600e+01 3.8000000e+01 + 1.6000000e+01 1.2000000e+01 3.1966245e+01 3.3000000e+01 + 1.6000000e+01 1.3000000e+01 4.7295488e+01 5.3000000e+01 + 1.6000000e+01 1.4000000e+01 1.4125750e+02 1.1300000e+02 + 1.6000000e+01 1.5000000e+01 1.2298356e+02 1.4500000e+02 + 1.6000000e+01 1.6000000e+01 3.4109307e+01 2.7000000e+01 + 1.6000000e+01 1.7000000e+01 1.5887184e+01 1.8000000e+01 + 1.6000000e+01 1.8000000e+01 5.6383073e+01 6.3000000e+01 + 1.6000000e+01 1.9000000e+01 3.2921152e+01 2.6000000e+01 + 1.6000000e+01 2.0000000e+01 2.7342766e+01 2.8000000e+01 + 1.6000000e+01 2.1000000e+01 3.4082205e+00 5.0000000e+00 + 1.6000000e+01 2.2000000e+01 2.2667169e+01 2.9000000e+01 + 1.6000000e+01 2.3000000e+01 1.0824355e+01 1.8000000e+01 + 1.6000000e+01 2.4000000e+01 4.2948460e+01 4.8000000e+01 + 1.6000000e+01 2.5000000e+01 1.2795805e+02 1.5000000e+02 + 1.6000000e+01 2.6000000e+01 2.7497560e+01 1.9000000e+01 + 1.6000000e+01 2.7000000e+01 1.9170851e+01 1.7000000e+01 + 1.6000000e+01 2.8000000e+01 1.8289490e+02 1.8200000e+02 + 1.6000000e+01 2.9000000e+01 8.1964545e+00 9.0000000e+00 + 1.6000000e+01 3.0000000e+01 9.1188566e-01 0.0000000e+00 + 1.6000000e+01 3.4000000e+01 1.9126598e+02 2.2100000e+02 + 1.6000000e+01 3.5000000e+01 1.9167284e+02 1.6100000e+02 + 1.6000000e+01 3.6000000e+01 2.8450453e+01 2.0000000e+01 + 1.6000000e+01 3.7000000e+01 2.2579429e+00 1.0000000e+00 + 1.6000000e+01 3.8000000e+01 1.0019578e+01 1.0000000e+01 + 1.6000000e+01 3.9000000e+01 4.0464199e+00 7.0000000e+00 + 1.6000000e+01 4.0000000e+01 1.0708548e+01 1.7000000e+01 + 1.6000000e+01 4.1000000e+01 2.8114207e+00 7.0000000e+00 + 1.6000000e+01 4.2000000e+01 4.7689440e+00 7.0000000e+00 + 1.6000000e+01 4.3000000e+01 4.1341440e+00 6.0000000e+00 + 1.6000000e+01 4.4000000e+01 5.8534102e+00 6.0000000e+00 + 1.6000000e+01 4.5000000e+01 2.1623490e+00 4.0000000e+00 + 1.6000000e+01 4.6000000e+01 3.9541432e+00 4.0000000e+00 + 1.6000000e+01 4.7000000e+01 1.3426530e+00 1.0000000e+00 + 
1.6000000e+01 4.8000000e+01 9.1529879e-01 1.0000000e+00 + 1.6000000e+01 4.9000000e+01 3.9002465e+00 1.0000000e+00 + 1.7000000e+01 3.0000000e+00 7.4300630e+02 6.9000000e+02 + 1.7000000e+01 4.0000000e+00 1.5980077e+03 1.7130000e+03 + 1.7000000e+01 5.0000000e+00 5.3634986e+02 5.4000000e+02 + 1.7000000e+01 6.0000000e+00 1.2393432e+02 1.6300000e+02 + 1.7000000e+01 7.0000000e+00 1.1150481e+02 1.1900000e+02 + 1.7000000e+01 8.0000000e+00 1.1017095e+02 1.0000000e+02 + 1.7000000e+01 9.0000000e+00 1.9456837e+01 2.1000000e+01 + 1.7000000e+01 1.0000000e+01 5.5574570e+01 5.8000000e+01 + 1.7000000e+01 1.1000000e+01 5.0254706e+01 4.8000000e+01 + 1.7000000e+01 1.2000000e+01 3.8115542e+01 2.8000000e+01 + 1.7000000e+01 1.3000000e+01 2.0206064e+02 2.3300000e+02 + 1.7000000e+01 1.4000000e+01 2.2735728e+01 2.6000000e+01 + 1.7000000e+01 1.5000000e+01 4.7437094e+01 4.4000000e+01 + 1.7000000e+01 1.6000000e+01 1.6675338e+01 7.0000000e+00 + 1.7000000e+01 1.7000000e+01 3.8569630e+01 3.1000000e+01 + 1.7000000e+01 1.8000000e+01 3.2691523e+01 3.3000000e+01 + 1.7000000e+01 1.9000000e+01 1.4118096e+01 2.0000000e+01 + 1.7000000e+01 2.0000000e+01 1.7311103e+01 3.1000000e+01 + 1.7000000e+01 2.1000000e+01 2.6749477e+01 3.5000000e+01 + 1.7000000e+01 2.2000000e+01 3.4056677e+01 4.4000000e+01 + 1.7000000e+01 2.3000000e+01 1.7736093e+01 1.9000000e+01 + 1.7000000e+01 2.4000000e+01 1.1728666e+02 1.3200000e+02 + 1.7000000e+01 2.5000000e+01 2.2756700e+01 2.2000000e+01 + 1.7000000e+01 2.6000000e+01 1.0775481e+02 1.0700000e+02 + 1.7000000e+01 2.7000000e+01 4.4131476e+01 3.7000000e+01 + 1.7000000e+01 2.8000000e+01 3.0494340e+01 3.7000000e+01 + 1.7000000e+01 2.9000000e+01 2.1645093e+01 3.1000000e+01 + 1.7000000e+01 3.0000000e+01 1.9348974e+01 2.3000000e+01 + 1.7000000e+01 3.1000000e+01 7.9406058e-02 0.0000000e+00 + 1.7000000e+01 3.4000000e+01 1.0495783e+02 1.1900000e+02 + 1.7000000e+01 3.5000000e+01 3.3090951e+01 3.6000000e+01 + 1.7000000e+01 3.6000000e+01 3.8296270e+01 3.5000000e+01 + 1.7000000e+01 3.7000000e+01 6.6571875e+00 8.0000000e+00 + 1.7000000e+01 3.8000000e+01 2.6229133e-01 0.0000000e+00 + 1.7000000e+01 3.9000000e+01 1.0488489e+01 1.0000000e+01 + 1.7000000e+01 4.0000000e+01 3.9361475e+00 3.0000000e+00 + 1.7000000e+01 4.1000000e+01 1.5829231e+00 3.0000000e+00 + 1.7000000e+01 4.2000000e+01 3.1799059e+00 3.0000000e+00 + 1.7000000e+01 4.3000000e+01 3.8968506e+00 3.0000000e+00 + 1.7000000e+01 4.4000000e+01 3.8265142e+00 2.0000000e+00 + 1.7000000e+01 4.5000000e+01 3.9171756e-01 2.0000000e+00 + 1.7000000e+01 4.6000000e+01 5.5257424e+00 5.0000000e+00 + 1.7000000e+01 4.7000000e+01 1.3473818e+00 1.0000000e+00 + 1.7000000e+01 4.8000000e+01 2.3453171e-02 0.0000000e+00 + 1.7000000e+01 4.9000000e+01 2.2121550e+00 1.0000000e+00 + 1.7000000e+01 5.0000000e+01 4.0656913e-03 0.0000000e+00 + 1.8000000e+01 3.0000000e+00 6.0265964e-01 0.0000000e+00 + 1.8000000e+01 4.0000000e+00 3.7455744e+02 3.8600000e+02 + 1.8000000e+01 5.0000000e+00 1.1037954e+02 1.2200000e+02 + 1.8000000e+01 6.0000000e+00 6.0202321e+01 5.4000000e+01 + 1.8000000e+01 7.0000000e+00 8.0970051e+01 6.7000000e+01 + 1.8000000e+01 8.0000000e+00 1.1614218e+03 1.1800000e+03 + 1.8000000e+01 9.0000000e+00 1.1064017e+02 1.0300000e+02 + 1.8000000e+01 1.0000000e+01 1.1777246e+01 1.1000000e+01 + 1.8000000e+01 1.1000000e+01 3.6505843e+01 4.5000000e+01 + 1.8000000e+01 1.2000000e+01 1.5452632e+01 1.0000000e+01 + 1.8000000e+01 1.3000000e+01 3.6911494e+01 3.8000000e+01 + 1.8000000e+01 1.4000000e+01 5.6918281e+01 5.5000000e+01 + 1.8000000e+01 1.5000000e+01 4.4833513e+01 4.5000000e+01 + 1.8000000e+01 
1.6000000e+01 4.9549443e+01 3.6000000e+01 + 1.8000000e+01 1.7000000e+01 9.5043703e+01 8.3000000e+01 + 1.8000000e+01 1.8000000e+01 1.6121778e+01 1.5000000e+01 + 1.8000000e+01 1.9000000e+01 3.4872602e+01 3.1000000e+01 + 1.8000000e+01 2.0000000e+01 1.5616248e+01 2.0000000e+01 + 1.8000000e+01 2.1000000e+01 4.6090323e+01 5.8000000e+01 + 1.8000000e+01 2.2000000e+01 1.6137712e+01 2.4000000e+01 + 1.8000000e+01 2.3000000e+01 7.6921971e+01 8.4000000e+01 + 1.8000000e+01 2.4000000e+01 3.8102508e+01 3.6000000e+01 + 1.8000000e+01 2.5000000e+01 7.3290657e+01 9.7000000e+01 + 1.8000000e+01 2.6000000e+01 1.0725773e+01 8.0000000e+00 + 1.8000000e+01 2.7000000e+01 1.6935231e+01 1.0000000e+01 + 1.8000000e+01 2.8000000e+01 3.3517890e+01 3.7000000e+01 + 1.8000000e+01 2.9000000e+01 5.9073008e+01 6.5000000e+01 + 1.8000000e+01 3.0000000e+01 7.3771698e+02 7.6700000e+02 + 1.8000000e+01 3.1000000e+01 1.7517118e+02 1.9800000e+02 + 1.8000000e+01 3.2000000e+01 5.8225519e+01 8.3000000e+01 + 1.8000000e+01 3.3000000e+01 1.6669716e+01 2.8000000e+01 + 1.8000000e+01 3.4000000e+01 2.3898271e+01 2.7000000e+01 + 1.8000000e+01 3.5000000e+01 4.3778595e+00 2.0000000e+00 + 1.8000000e+01 3.6000000e+01 3.0043434e+00 6.0000000e+00 + 1.8000000e+01 3.7000000e+01 2.1875790e+01 2.0000000e+01 + 1.8000000e+01 3.8000000e+01 1.5348173e+01 1.3000000e+01 + 1.8000000e+01 3.9000000e+01 5.2770556e+00 5.0000000e+00 + 1.8000000e+01 4.0000000e+01 2.2717941e+00 2.0000000e+00 + 1.8000000e+01 4.1000000e+01 7.5654304e+00 9.0000000e+00 + 1.8000000e+01 4.2000000e+01 6.4390779e+00 6.0000000e+00 + 1.8000000e+01 4.3000000e+01 3.1644486e+00 1.0000000e+00 + 1.8000000e+01 4.4000000e+01 7.2063012e+00 4.0000000e+00 + 1.8000000e+01 4.5000000e+01 2.4896623e+01 4.8000000e+01 + 1.8000000e+01 4.6000000e+01 5.5661099e+00 3.0000000e+00 + 1.8000000e+01 4.7000000e+01 1.7579170e-01 0.0000000e+00 + 1.8000000e+01 4.8000000e+01 4.6016137e-01 1.0000000e+00 + 1.8000000e+01 4.9000000e+01 2.8401166e-02 0.0000000e+00 + 1.9000000e+01 3.0000000e+00 2.2769759e+00 4.0000000e+00 + 1.9000000e+01 4.0000000e+00 3.5836204e+01 3.8000000e+01 + 1.9000000e+01 5.0000000e+00 6.3768107e+01 7.5000000e+01 + 1.9000000e+01 6.0000000e+00 6.5961175e+01 7.0000000e+01 + 1.9000000e+01 7.0000000e+00 2.1319376e+02 2.2600000e+02 + 1.9000000e+01 8.0000000e+00 1.8410700e+02 1.5700000e+02 + 1.9000000e+01 9.0000000e+00 2.3120680e+02 2.6900000e+02 + 1.9000000e+01 1.0000000e+01 2.5779306e+01 3.0000000e+01 + 1.9000000e+01 1.1000000e+01 8.2553184e+01 1.2200000e+02 + 1.9000000e+01 1.2000000e+01 2.5921050e+01 2.8000000e+01 + 1.9000000e+01 1.3000000e+01 1.6050637e+01 1.4000000e+01 + 1.9000000e+01 1.4000000e+01 1.0940016e+02 1.2300000e+02 + 1.9000000e+01 1.5000000e+01 9.6737663e+02 9.8300000e+02 + 1.9000000e+01 1.6000000e+01 1.1358660e+02 1.3300000e+02 + 1.9000000e+01 1.7000000e+01 1.8080319e+02 1.9100000e+02 + 1.9000000e+01 1.8000000e+01 1.8844468e+01 3.5000000e+01 + 1.9000000e+01 1.9000000e+01 7.2801862e+01 9.4000000e+01 + 1.9000000e+01 2.0000000e+01 2.0602838e+01 1.4000000e+01 + 1.9000000e+01 2.1000000e+01 1.5446136e+01 1.2000000e+01 + 1.9000000e+01 2.2000000e+01 1.4714812e+01 1.7000000e+01 + 1.9000000e+01 2.3000000e+01 6.7195246e+01 7.4000000e+01 + 1.9000000e+01 2.4000000e+01 3.8206297e+01 4.6000000e+01 + 1.9000000e+01 2.5000000e+01 9.6948426e+00 9.0000000e+00 + 1.9000000e+01 2.6000000e+01 3.8804963e+01 3.3000000e+01 + 1.9000000e+01 2.7000000e+01 2.5273864e+01 2.4000000e+01 + 1.9000000e+01 2.8000000e+01 1.1340326e+01 9.0000000e+00 + 1.9000000e+01 2.9000000e+01 6.7054077e+01 7.3000000e+01 + 1.9000000e+01 3.0000000e+01 
2.7515858e+02 2.4600000e+02 + 1.9000000e+01 3.1000000e+01 5.7967788e+01 6.2000000e+01 + 1.9000000e+01 3.2000000e+01 2.1957164e+01 3.3000000e+01 + 1.9000000e+01 3.3000000e+01 6.6336674e+00 1.0000000e+01 + 1.9000000e+01 3.4000000e+01 6.8855312e+00 9.0000000e+00 + 1.9000000e+01 3.5000000e+01 6.3093907e+00 6.0000000e+00 + 1.9000000e+01 3.6000000e+01 1.0269473e-01 0.0000000e+00 + 1.9000000e+01 3.7000000e+01 2.8306819e+00 3.0000000e+00 + 1.9000000e+01 3.8000000e+01 2.4627315e+02 2.3700000e+02 + 1.9000000e+01 3.9000000e+01 1.5743093e+02 1.7900000e+02 + 1.9000000e+01 4.0000000e+01 8.5258479e+00 5.0000000e+00 + 1.9000000e+01 4.1000000e+01 3.3886577e+00 1.0000000e+00 + 1.9000000e+01 4.2000000e+01 3.6383763e+00 2.0000000e+00 + 1.9000000e+01 4.3000000e+01 2.9913716e+00 8.0000000e+00 + 1.9000000e+01 4.4000000e+01 4.8594352e+00 2.0000000e+00 + 1.9000000e+01 4.5000000e+01 2.1390241e+00 1.0000000e+00 + 1.9000000e+01 4.6000000e+01 1.9017458e+00 2.0000000e+00 + 1.9000000e+01 4.8000000e+01 4.1315486e-01 0.0000000e+00 + 1.9000000e+01 4.9000000e+01 4.2741621e-01 0.0000000e+00 + 1.9000000e+01 5.1000000e+01 1.0000000e-03 0.0000000e+00 + 1.9000000e+01 5.2000000e+01 5.3867980e-01 0.0000000e+00 + 1.9000000e+01 5.3000000e+01 1.5294440e-01 0.0000000e+00 + 1.9000000e+01 5.4000000e+01 9.4871205e-02 0.0000000e+00 + 2.0000000e+01 4.0000000e+00 1.5197767e+01 1.5000000e+01 + 2.0000000e+01 5.0000000e+00 1.4348816e+02 1.6200000e+02 + 2.0000000e+01 6.0000000e+00 6.9229037e+01 8.1000000e+01 + 2.0000000e+01 7.0000000e+00 9.8517541e+01 9.8000000e+01 + 2.0000000e+01 8.0000000e+00 2.5512032e+01 3.4000000e+01 + 2.0000000e+01 9.0000000e+00 2.4252967e+01 2.2000000e+01 + 2.0000000e+01 1.0000000e+01 2.7706300e+01 3.5000000e+01 + 2.0000000e+01 1.1000000e+01 1.9196998e+01 2.2000000e+01 + 2.0000000e+01 1.2000000e+01 6.6564102e+01 7.8000000e+01 + 2.0000000e+01 1.3000000e+01 2.5374879e+01 2.3000000e+01 + 2.0000000e+01 1.4000000e+01 3.1827813e+01 4.7000000e+01 + 2.0000000e+01 1.5000000e+01 6.2439111e+01 6.7000000e+01 + 2.0000000e+01 1.6000000e+01 3.4501844e+01 3.2000000e+01 + 2.0000000e+01 1.7000000e+01 4.9554048e+01 7.2000000e+01 + 2.0000000e+01 1.8000000e+01 2.5699775e+01 2.0000000e+01 + 2.0000000e+01 1.9000000e+01 2.0588072e+01 1.6000000e+01 + 2.0000000e+01 2.0000000e+01 4.3626833e+01 4.5000000e+01 + 2.0000000e+01 2.1000000e+01 1.1844879e+01 7.0000000e+00 + 2.0000000e+01 2.2000000e+01 1.4474697e+01 2.1000000e+01 + 2.0000000e+01 2.3000000e+01 2.2756740e+01 2.8000000e+01 + 2.0000000e+01 2.4000000e+01 4.2225824e+00 8.0000000e+00 + 2.0000000e+01 2.5000000e+01 2.0096198e+01 2.5000000e+01 + 2.0000000e+01 2.6000000e+01 2.6487958e+01 3.2000000e+01 + 2.0000000e+01 2.7000000e+01 2.9531129e+00 6.0000000e+00 + 2.0000000e+01 2.8000000e+01 5.2937605e+00 8.0000000e+00 + 2.0000000e+01 2.9000000e+01 6.2531464e+01 8.1000000e+01 + 2.0000000e+01 3.0000000e+01 2.4401850e+01 3.0000000e+01 + 2.0000000e+01 3.1000000e+01 8.4113858e+00 9.0000000e+00 + 2.0000000e+01 3.2000000e+01 3.6627988e+00 5.0000000e+00 + 2.0000000e+01 3.3000000e+01 8.3542272e+00 1.1000000e+01 + 2.0000000e+01 3.4000000e+01 2.4619647e+00 4.0000000e+00 + 2.0000000e+01 3.5000000e+01 3.3846724e+00 1.0000000e+00 + 2.0000000e+01 3.6000000e+01 4.8488691e+00 5.0000000e+00 + 2.0000000e+01 3.7000000e+01 7.6661352e+00 1.1000000e+01 + 2.0000000e+01 3.8000000e+01 9.1054431e+00 1.2000000e+01 + 2.0000000e+01 3.9000000e+01 5.1034876e+00 6.0000000e+00 + 2.0000000e+01 4.0000000e+01 4.8284948e+00 5.0000000e+00 + 2.0000000e+01 4.1000000e+01 1.6566176e+00 2.0000000e+00 + 2.0000000e+01 4.2000000e+01 1.5212708e+00 
0.0000000e+00 + 2.0000000e+01 4.3000000e+01 8.2614538e+00 1.8000000e+01 + 2.0000000e+01 4.4000000e+01 6.1771674e+00 8.0000000e+00 + 2.0000000e+01 4.5000000e+01 2.0841611e+00 0.0000000e+00 + 2.0000000e+01 4.9000000e+01 1.3372279e-01 0.0000000e+00 + 2.0000000e+01 5.1000000e+01 2.4060309e-01 0.0000000e+00 + 2.0000000e+01 5.2000000e+01 2.0573540e-01 0.0000000e+00 + 2.0000000e+01 5.3000000e+01 6.7665890e-01 1.0000000e+00 + 2.0000000e+01 5.4000000e+01 5.7142167e-01 0.0000000e+00 + 2.0000000e+01 5.5000000e+01 4.3523340e+00 3.0000000e+00 + 2.0000000e+01 5.6000000e+01 2.1999308e+00 0.0000000e+00 + 2.0000000e+01 5.7000000e+01 7.0988057e-01 0.0000000e+00 + 2.1000000e+01 4.0000000e+00 3.0362130e+00 3.0000000e+00 + 2.1000000e+01 5.0000000e+00 8.2739916e+01 8.0000000e+01 + 2.1000000e+01 6.0000000e+00 1.4436021e+02 1.2700000e+02 + 2.1000000e+01 7.0000000e+00 7.4933922e+02 8.5300000e+02 + 2.1000000e+01 8.0000000e+00 5.3218416e+01 5.1000000e+01 + 2.1000000e+01 9.0000000e+00 1.4611087e+01 1.2000000e+01 + 2.1000000e+01 1.0000000e+01 2.6886852e+01 2.7000000e+01 + 2.1000000e+01 1.1000000e+01 5.2053184e+01 4.9000000e+01 + 2.1000000e+01 1.2000000e+01 1.7426813e+01 1.6000000e+01 + 2.1000000e+01 1.3000000e+01 8.0121529e+01 9.1000000e+01 + 2.1000000e+01 1.4000000e+01 1.4195308e+01 1.9000000e+01 + 2.1000000e+01 1.5000000e+01 4.8278151e+01 5.1000000e+01 + 2.1000000e+01 1.6000000e+01 3.2023248e+01 2.7000000e+01 + 2.1000000e+01 1.7000000e+01 1.8632297e+01 2.3000000e+01 + 2.1000000e+01 1.8000000e+01 2.7257992e+01 2.7000000e+01 + 2.1000000e+01 1.9000000e+01 5.2727063e+01 5.3000000e+01 + 2.1000000e+01 2.0000000e+01 6.8564754e+01 6.9000000e+01 + 2.1000000e+01 2.1000000e+01 2.1357125e+01 1.8000000e+01 + 2.1000000e+01 2.2000000e+01 3.0769108e+01 3.9000000e+01 + 2.1000000e+01 2.3000000e+01 1.2153690e+02 1.3600000e+02 + 2.1000000e+01 2.4000000e+01 1.1100909e+01 6.0000000e+00 + 2.1000000e+01 2.5000000e+01 4.8862493e+00 5.0000000e+00 + 2.1000000e+01 2.6000000e+01 5.3067940e+00 6.0000000e+00 + 2.1000000e+01 2.7000000e+01 1.5384940e+01 2.2000000e+01 + 2.1000000e+01 2.8000000e+01 1.9934964e+01 2.0000000e+01 + 2.1000000e+01 2.9000000e+01 2.6441305e+01 4.7000000e+01 + 2.1000000e+01 3.0000000e+01 3.1095696e+00 4.0000000e+00 + 2.1000000e+01 3.1000000e+01 1.1390125e+01 1.3000000e+01 + 2.1000000e+01 3.2000000e+01 7.7813322e+00 3.0000000e+00 + 2.1000000e+01 3.3000000e+01 3.7115850e+00 6.0000000e+00 + 2.1000000e+01 3.4000000e+01 3.3504113e+00 7.0000000e+00 + 2.1000000e+01 3.5000000e+01 2.5727677e+01 2.8000000e+01 + 2.1000000e+01 3.6000000e+01 3.9458818e+00 3.0000000e+00 + 2.1000000e+01 3.7000000e+01 9.8641880e-01 1.0000000e+00 + 2.1000000e+01 3.8000000e+01 1.1884295e+01 1.7000000e+01 + 2.1000000e+01 3.9000000e+01 2.3753064e+00 5.0000000e+00 + 2.1000000e+01 4.1000000e+01 1.2145425e+00 5.0000000e+00 + 2.1000000e+01 4.2000000e+01 4.0695821e+00 2.0000000e+00 + 2.1000000e+01 4.3000000e+01 6.2647199e+00 8.0000000e+00 + 2.1000000e+01 4.4000000e+01 4.4949764e+01 5.1000000e+01 + 2.1000000e+01 4.5000000e+01 1.3823914e+00 1.0000000e+00 + 2.1000000e+01 4.6000000e+01 7.6236933e-02 0.0000000e+00 + 2.1000000e+01 4.9000000e+01 1.3903797e-01 0.0000000e+00 + 2.1000000e+01 5.1000000e+01 9.3641143e-01 0.0000000e+00 + 2.1000000e+01 5.2000000e+01 2.1220815e+00 1.0000000e+00 + 2.1000000e+01 5.4000000e+01 2.0417439e-01 0.0000000e+00 + 2.1000000e+01 5.7000000e+01 4.3063847e-01 0.0000000e+00 + 2.1000000e+01 5.8000000e+01 7.8581350e-01 0.0000000e+00 + 2.2000000e+01 4.0000000e+00 5.3243063e-02 0.0000000e+00 + 2.2000000e+01 5.0000000e+00 7.9243513e+02 9.0100000e+02 + 
2.2000000e+01 6.0000000e+00 1.4646798e+02 1.6400000e+02 + 2.2000000e+01 7.0000000e+00 4.9513223e+01 4.9000000e+01 + 2.2000000e+01 8.0000000e+00 3.1125114e+01 3.6000000e+01 + 2.2000000e+01 9.0000000e+00 7.7515767e+00 8.0000000e+00 + 2.2000000e+01 1.0000000e+01 8.4972886e+01 1.2000000e+02 + 2.2000000e+01 1.1000000e+01 4.1134958e+01 4.4000000e+01 + 2.2000000e+01 1.2000000e+01 2.1137359e+01 2.9000000e+01 + 2.2000000e+01 1.3000000e+01 2.4840748e+01 1.7000000e+01 + 2.2000000e+01 1.4000000e+01 1.7998721e+01 1.7000000e+01 + 2.2000000e+01 1.5000000e+01 2.3534337e+02 2.4100000e+02 + 2.2000000e+01 1.6000000e+01 1.1115606e+01 1.7000000e+01 + 2.2000000e+01 1.7000000e+01 1.5378061e+02 1.6600000e+02 + 2.2000000e+01 1.8000000e+01 3.5507648e+01 4.7000000e+01 + 2.2000000e+01 1.9000000e+01 1.9259173e+01 1.0000000e+01 + 2.2000000e+01 2.0000000e+01 2.2079289e+01 2.9000000e+01 + 2.2000000e+01 2.1000000e+01 1.8680777e+01 2.1000000e+01 + 2.2000000e+01 2.2000000e+01 1.7954699e+02 1.7000000e+02 + 2.2000000e+01 2.3000000e+01 5.6349346e+01 6.0000000e+01 + 2.2000000e+01 2.4000000e+01 1.0596560e+01 8.0000000e+00 + 2.2000000e+01 2.5000000e+01 1.5408957e+01 1.3000000e+01 + 2.2000000e+01 2.6000000e+01 1.5862917e+01 1.8000000e+01 + 2.2000000e+01 2.7000000e+01 5.7561348e+00 1.0000000e+01 + 2.2000000e+01 2.8000000e+01 3.3133203e+01 3.7000000e+01 + 2.2000000e+01 2.9000000e+01 4.5479332e-01 0.0000000e+00 + 2.2000000e+01 3.0000000e+01 5.7638218e+00 1.1000000e+01 + 2.2000000e+01 3.1000000e+01 2.7705378e+00 2.0000000e+00 + 2.2000000e+01 3.2000000e+01 5.2777928e+01 7.9000000e+01 + 2.2000000e+01 3.3000000e+01 5.8761671e+00 7.0000000e+00 + 2.2000000e+01 3.4000000e+01 4.4351054e+00 5.0000000e+00 + 2.2000000e+01 3.5000000e+01 2.7159767e+00 3.0000000e+00 + 2.2000000e+01 3.6000000e+01 1.7203242e+00 0.0000000e+00 + 2.2000000e+01 3.7000000e+01 1.2879771e+01 1.3000000e+01 + 2.2000000e+01 3.8000000e+01 6.2936517e+00 4.0000000e+00 + 2.2000000e+01 3.9000000e+01 1.8223816e+00 1.0000000e+00 + 2.2000000e+01 4.0000000e+01 8.9532800e-01 0.0000000e+00 + 2.2000000e+01 4.1000000e+01 1.3266594e+00 2.0000000e+00 + 2.2000000e+01 4.2000000e+01 5.8655748e-01 0.0000000e+00 + 2.2000000e+01 4.3000000e+01 1.6712497e+00 3.0000000e+00 + 2.2000000e+01 4.4000000e+01 5.6194667e+00 5.0000000e+00 + 2.2000000e+01 4.5000000e+01 1.7400984e+00 2.0000000e+00 + 2.2000000e+01 4.6000000e+01 2.0793027e-01 0.0000000e+00 + 2.2000000e+01 4.7000000e+01 4.6306127e+00 4.0000000e+00 + 2.2000000e+01 4.8000000e+01 1.3316812e-01 0.0000000e+00 + 2.2000000e+01 4.9000000e+01 4.8640432e-03 0.0000000e+00 + 2.2000000e+01 5.0000000e+01 6.8769836e-03 0.0000000e+00 + 2.2000000e+01 5.1000000e+01 5.5226084e-02 0.0000000e+00 + 2.2000000e+01 5.2000000e+01 8.8360272e+00 5.0000000e+00 + 2.2000000e+01 5.3000000e+01 2.5540894e+00 2.0000000e+00 + 2.2000000e+01 5.4000000e+01 2.7563540e-01 0.0000000e+00 + 2.2000000e+01 5.5000000e+01 1.2813590e-01 0.0000000e+00 + 2.2000000e+01 5.6000000e+01 4.3958640e-01 1.0000000e+00 + 2.2000000e+01 5.7000000e+01 3.3616155e+00 6.0000000e+00 + 2.2000000e+01 5.8000000e+01 8.2291967e-01 0.0000000e+00 + 2.3000000e+01 4.0000000e+00 3.9481028e-01 0.0000000e+00 + 2.3000000e+01 5.0000000e+00 1.4274963e+02 1.6800000e+02 + 2.3000000e+01 6.0000000e+00 5.5532040e+01 5.9000000e+01 + 2.3000000e+01 7.0000000e+00 2.2933974e+01 2.7000000e+01 + 2.3000000e+01 8.0000000e+00 2.8241655e+01 2.0000000e+01 + 2.3000000e+01 9.0000000e+00 1.5673616e+01 1.8000000e+01 + 2.3000000e+01 1.0000000e+01 2.5966506e+01 3.5000000e+01 + 2.3000000e+01 1.1000000e+01 9.2549959e+01 1.0500000e+02 + 2.3000000e+01 
1.2000000e+01 3.8210259e+02 4.6600000e+02 + 2.3000000e+01 1.3000000e+01 4.0362133e+01 5.7000000e+01 + 2.3000000e+01 1.4000000e+01 3.6125211e+01 3.5000000e+01 + 2.3000000e+01 1.5000000e+01 3.5778578e+01 4.8000000e+01 + 2.3000000e+01 1.6000000e+01 1.6991068e+01 9.0000000e+00 + 2.3000000e+01 1.7000000e+01 1.8131618e+01 2.2000000e+01 + 2.3000000e+01 1.8000000e+01 2.7919869e+01 2.0000000e+01 + 2.3000000e+01 1.9000000e+01 2.7890157e+01 2.9000000e+01 + 2.3000000e+01 2.0000000e+01 5.7641388e+01 6.9000000e+01 + 2.3000000e+01 2.1000000e+01 9.1698704e+01 1.0000000e+02 + 2.3000000e+01 2.2000000e+01 1.0940946e+02 1.0700000e+02 + 2.3000000e+01 2.3000000e+01 5.2729831e+01 5.9000000e+01 + 2.3000000e+01 2.4000000e+01 2.0356057e+01 1.7000000e+01 + 2.3000000e+01 2.5000000e+01 1.2833251e+01 1.1000000e+01 + 2.3000000e+01 2.6000000e+01 8.1737293e+00 8.0000000e+00 + 2.3000000e+01 2.7000000e+01 9.1068086e+00 1.5000000e+01 + 2.3000000e+01 2.8000000e+01 1.3078578e+00 3.0000000e+00 + 2.3000000e+01 2.9000000e+01 3.6704633e+00 4.0000000e+00 + 2.3000000e+01 3.0000000e+01 2.4364525e+00 2.0000000e+00 + 2.3000000e+01 3.1000000e+01 2.5573847e+00 4.0000000e+00 + 2.3000000e+01 3.2000000e+01 1.0312683e+01 7.0000000e+00 + 2.3000000e+01 3.3000000e+01 4.2610746e+00 8.0000000e+00 + 2.3000000e+01 3.4000000e+01 5.5071067e+00 9.0000000e+00 + 2.3000000e+01 3.5000000e+01 2.3660010e+00 4.0000000e+00 + 2.3000000e+01 3.6000000e+01 1.4528914e+00 2.0000000e+00 + 2.3000000e+01 3.7000000e+01 1.5167980e+00 1.0000000e+00 + 2.3000000e+01 3.8000000e+01 8.9310845e+00 1.2000000e+01 + 2.3000000e+01 3.9000000e+01 9.0072596e+00 1.6000000e+01 + 2.3000000e+01 4.0000000e+01 9.7002265e+01 1.0500000e+02 + 2.3000000e+01 4.1000000e+01 1.0861113e+01 1.2000000e+01 + 2.3000000e+01 4.2000000e+01 1.3479311e+01 8.0000000e+00 + 2.3000000e+01 4.3000000e+01 2.2650819e+00 1.0000000e+00 + 2.3000000e+01 4.4000000e+01 7.3767952e-01 1.0000000e+00 + 2.3000000e+01 4.6000000e+01 3.7231800e-02 0.0000000e+00 + 2.3000000e+01 4.7000000e+01 9.3589144e-03 0.0000000e+00 + 2.3000000e+01 4.8000000e+01 4.0105089e-03 0.0000000e+00 + 2.3000000e+01 4.9000000e+01 5.7965630e-01 1.0000000e+00 + 2.3000000e+01 5.0000000e+01 2.5093921e+01 2.4000000e+01 + 2.3000000e+01 5.1000000e+01 1.5963899e+01 1.0000000e+01 + 2.3000000e+01 5.2000000e+01 5.8904687e-01 0.0000000e+00 + 2.3000000e+01 5.3000000e+01 1.4116927e-01 1.0000000e+00 + 2.3000000e+01 5.4000000e+01 3.3265210e-02 0.0000000e+00 + 2.3000000e+01 5.7000000e+01 1.0000000e-03 0.0000000e+00 + 2.3000000e+01 5.8000000e+01 6.3403667e-01 0.0000000e+00 + 2.4000000e+01 5.0000000e+00 3.9598572e+01 5.6000000e+01 + 2.4000000e+01 6.0000000e+00 5.1413634e+01 5.4000000e+01 + 2.4000000e+01 7.0000000e+00 6.9978597e+01 1.0000000e+02 + 2.4000000e+01 8.0000000e+00 5.5809331e+01 6.6000000e+01 + 2.4000000e+01 9.0000000e+00 6.1876961e+01 7.7000000e+01 + 2.4000000e+01 1.0000000e+01 1.2595016e+01 9.0000000e+00 + 2.4000000e+01 1.1000000e+01 2.6042962e+01 3.9000000e+01 + 2.4000000e+01 1.2000000e+01 2.3855305e+01 2.8000000e+01 + 2.4000000e+01 1.3000000e+01 7.6454059e+01 1.0300000e+02 + 2.4000000e+01 1.4000000e+01 5.5654986e+01 7.9000000e+01 + 2.4000000e+01 1.5000000e+01 3.0485673e+02 3.4600000e+02 + 2.4000000e+01 1.6000000e+01 1.0460265e+02 1.2800000e+02 + 2.4000000e+01 1.7000000e+01 2.6253281e+01 3.7000000e+01 + 2.4000000e+01 1.8000000e+01 7.4098202e+02 8.5100000e+02 + 2.4000000e+01 1.9000000e+01 1.6028843e+02 1.8800000e+02 + 2.4000000e+01 2.0000000e+01 3.0758761e+01 3.3000000e+01 + 2.4000000e+01 2.1000000e+01 5.4308421e+01 6.7000000e+01 + 2.4000000e+01 2.2000000e+01 
1.8648536e+01 2.2000000e+01 + 2.4000000e+01 2.3000000e+01 7.3469814e+00 1.0000000e+01 + 2.4000000e+01 2.4000000e+01 1.7897547e+00 2.0000000e+00 + 2.4000000e+01 2.5000000e+01 8.8329055e+00 4.0000000e+00 + 2.4000000e+01 2.6000000e+01 3.4795782e+02 4.1500000e+02 + 2.4000000e+01 2.7000000e+01 4.4763751e+01 7.4000000e+01 + 2.4000000e+01 2.8000000e+01 1.2487016e+01 9.0000000e+00 + 2.4000000e+01 2.9000000e+01 3.1360225e+01 5.0000000e+01 + 2.4000000e+01 3.0000000e+01 5.9201984e+00 1.5000000e+01 + 2.4000000e+01 3.1000000e+01 6.0486533e+00 7.0000000e+00 + 2.4000000e+01 3.2000000e+01 4.6356517e+00 5.0000000e+00 + 2.4000000e+01 3.3000000e+01 4.0135790e+00 8.0000000e+00 + 2.4000000e+01 3.4000000e+01 4.5960256e+00 2.0000000e+00 + 2.4000000e+01 3.5000000e+01 3.3634080e+00 2.0000000e+00 + 2.4000000e+01 3.6000000e+01 1.7823716e+00 1.0000000e+00 + 2.4000000e+01 3.7000000e+01 8.0281325e+00 1.4000000e+01 + 2.4000000e+01 3.8000000e+01 2.3626662e+00 3.0000000e+00 + 2.4000000e+01 3.9000000e+01 8.1467089e+00 3.0000000e+00 + 2.4000000e+01 4.0000000e+01 7.4792722e+00 6.0000000e+00 + 2.4000000e+01 4.1000000e+01 3.1072764e+00 3.0000000e+00 + 2.4000000e+01 4.2000000e+01 3.8412526e-01 0.0000000e+00 + 2.4000000e+01 4.3000000e+01 2.7627489e+00 0.0000000e+00 + 2.4000000e+01 4.4000000e+01 1.0496475e+00 3.0000000e+00 + 2.4000000e+01 4.6000000e+01 1.5968670e+00 1.0000000e+00 + 2.4000000e+01 4.9000000e+01 6.5632298e-03 0.0000000e+00 + 2.4000000e+01 5.0000000e+01 4.5555971e-02 0.0000000e+00 + 2.4000000e+01 5.1000000e+01 1.5793621e+00 1.0000000e+00 + 2.4000000e+01 5.2000000e+01 9.9377085e-03 0.0000000e+00 + 2.4000000e+01 5.3000000e+01 9.5849964e-01 3.0000000e+00 + 2.4000000e+01 5.4000000e+01 1.1275722e+00 1.0000000e+00 + 2.4000000e+01 5.8000000e+01 2.2229409e+00 0.0000000e+00 + 2.5000000e+01 6.0000000e+00 8.2507844e+00 1.4000000e+01 + 2.5000000e+01 7.0000000e+00 3.5703470e+01 4.6000000e+01 + 2.5000000e+01 8.0000000e+00 6.1922109e+02 6.4400000e+02 + 2.5000000e+01 9.0000000e+00 4.2057051e+01 4.3000000e+01 + 2.5000000e+01 1.0000000e+01 6.3773366e+00 8.0000000e+00 + 2.5000000e+01 1.1000000e+01 4.1632950e+01 5.0000000e+01 + 2.5000000e+01 1.2000000e+01 2.0664321e+01 1.9000000e+01 + 2.5000000e+01 1.3000000e+01 2.1547638e+01 2.3000000e+01 + 2.5000000e+01 1.4000000e+01 6.5243731e+01 7.2000000e+01 + 2.5000000e+01 1.5000000e+01 3.3661125e+01 3.3000000e+01 + 2.5000000e+01 1.6000000e+01 1.5856443e+01 1.9000000e+01 + 2.5000000e+01 1.7000000e+01 1.3135322e+01 1.4000000e+01 + 2.5000000e+01 1.8000000e+01 4.3565092e+01 6.1000000e+01 + 2.5000000e+01 1.9000000e+01 6.0257330e+01 8.3000000e+01 + 2.5000000e+01 2.0000000e+01 8.6816681e+01 7.4000000e+01 + 2.5000000e+01 2.1000000e+01 1.5712241e+01 2.2000000e+01 + 2.5000000e+01 2.2000000e+01 2.7118575e+01 4.2000000e+01 + 2.5000000e+01 2.3000000e+01 1.0592478e+00 3.0000000e+00 + 2.5000000e+01 2.4000000e+01 3.7175065e+00 2.0000000e+00 + 2.5000000e+01 2.5000000e+01 8.7488486e+01 9.4000000e+01 + 2.5000000e+01 2.6000000e+01 2.0077608e+01 2.0000000e+01 + 2.5000000e+01 2.7000000e+01 2.3360356e+01 3.9000000e+01 + 2.5000000e+01 2.8000000e+01 1.0216764e+01 1.4000000e+01 + 2.5000000e+01 2.9000000e+01 4.2137635e+00 3.0000000e+00 + 2.5000000e+01 3.0000000e+01 4.2270488e+00 2.0000000e+00 + 2.5000000e+01 3.1000000e+01 7.3231727e+00 8.0000000e+00 + 2.5000000e+01 3.2000000e+01 4.8329137e+00 1.0000000e+01 + 2.5000000e+01 3.3000000e+01 2.4783687e+01 2.9000000e+01 + 2.5000000e+01 3.4000000e+01 2.3717800e+00 4.0000000e+00 + 2.5000000e+01 3.5000000e+01 4.0414849e+00 7.0000000e+00 + 2.5000000e+01 3.6000000e+01 2.4734675e+01 
3.1000000e+01 + 2.5000000e+01 3.7000000e+01 4.3612481e+00 6.0000000e+00 + 2.5000000e+01 3.8000000e+01 1.0947756e+01 1.1000000e+01 + 2.5000000e+01 3.9000000e+01 7.5072895e-01 1.0000000e+00 + 2.5000000e+01 4.0000000e+01 8.0458263e+00 9.0000000e+00 + 2.5000000e+01 4.1000000e+01 4.8723387e+00 1.1000000e+01 + 2.5000000e+01 4.2000000e+01 6.3292160e-01 0.0000000e+00 + 2.5000000e+01 4.3000000e+01 9.5825983e+00 1.2000000e+01 + 2.5000000e+01 4.4000000e+01 2.4072976e+00 6.0000000e+00 + 2.5000000e+01 4.5000000e+01 5.4871230e-02 0.0000000e+00 + 2.5000000e+01 4.9000000e+01 2.1761201e-03 0.0000000e+00 + 2.5000000e+01 5.1000000e+01 4.5056743e-02 0.0000000e+00 + 2.5000000e+01 5.2000000e+01 3.0180046e+00 0.0000000e+00 + 2.5000000e+01 5.3000000e+01 2.8401166e-02 0.0000000e+00 + 2.5000000e+01 5.4000000e+01 1.1067772e+00 0.0000000e+00 + 2.5000000e+01 5.5000000e+01 1.3809492e+00 1.0000000e+00 + 2.5000000e+01 5.6000000e+01 5.4914295e-03 0.0000000e+00 + 2.6000000e+01 7.0000000e+00 7.2256951e+00 1.0000000e+01 + 2.6000000e+01 8.0000000e+00 1.3675220e+02 1.7000000e+02 + 2.6000000e+01 9.0000000e+00 3.8679883e+01 4.5000000e+01 + 2.6000000e+01 1.0000000e+01 1.0983757e+01 1.0000000e+01 + 2.6000000e+01 1.1000000e+01 1.0725049e+01 8.0000000e+00 + 2.6000000e+01 1.2000000e+01 4.0504980e+01 5.1000000e+01 + 2.6000000e+01 1.3000000e+01 2.4500563e+01 2.9000000e+01 + 2.6000000e+01 1.4000000e+01 1.4056835e+01 1.5000000e+01 + 2.6000000e+01 1.5000000e+01 1.4061547e+01 1.5000000e+01 + 2.6000000e+01 1.6000000e+01 5.8961962e+01 8.0000000e+01 + 2.6000000e+01 1.7000000e+01 1.0334360e+01 2.1000000e+01 + 2.6000000e+01 1.8000000e+01 4.4505135e+01 6.1000000e+01 + 2.6000000e+01 1.9000000e+01 4.9584511e+01 7.5000000e+01 + 2.6000000e+01 2.0000000e+01 2.0503963e+01 2.3000000e+01 + 2.6000000e+01 2.1000000e+01 1.1563546e+01 7.0000000e+00 + 2.6000000e+01 2.2000000e+01 5.8747409e+00 3.0000000e+00 + 2.6000000e+01 2.3000000e+01 2.3990314e+01 3.6000000e+01 + 2.6000000e+01 2.4000000e+01 4.6233188e+00 7.0000000e+00 + 2.6000000e+01 2.5000000e+01 1.8878110e+01 1.9000000e+01 + 2.6000000e+01 2.6000000e+01 7.3301682e+00 2.0000000e+00 + 2.6000000e+01 2.7000000e+01 3.7671048e+00 1.0000000e+00 + 2.6000000e+01 2.8000000e+01 3.0866675e+01 4.0000000e+01 + 2.6000000e+01 2.9000000e+01 1.4871680e+01 2.0000000e+01 + 2.6000000e+01 3.0000000e+01 1.9460711e+01 3.5000000e+01 + 2.6000000e+01 3.1000000e+01 4.7434952e+00 1.0000000e+01 + 2.6000000e+01 3.2000000e+01 3.8362977e+00 8.0000000e+00 + 2.6000000e+01 3.3000000e+01 6.2833446e+00 6.0000000e+00 + 2.6000000e+01 3.4000000e+01 3.4414523e+00 4.0000000e+00 + 2.6000000e+01 3.5000000e+01 8.2668733e+00 1.2000000e+01 + 2.6000000e+01 3.6000000e+01 1.0024785e+01 8.0000000e+00 + 2.6000000e+01 3.7000000e+01 8.7906324e+00 1.4000000e+01 + 2.6000000e+01 3.8000000e+01 2.8087193e+00 3.0000000e+00 + 2.6000000e+01 3.9000000e+01 1.1800107e+01 5.0000000e+00 + 2.6000000e+01 4.0000000e+01 3.2609097e+01 3.9000000e+01 + 2.6000000e+01 4.1000000e+01 9.4208165e+00 1.0000000e+01 + 2.6000000e+01 4.2000000e+01 7.1831193e+00 3.0000000e+00 + 2.6000000e+01 4.4000000e+01 1.0688280e-01 0.0000000e+00 + 2.6000000e+01 4.5000000e+01 1.3592667e-01 0.0000000e+00 + 2.6000000e+01 4.7000000e+01 2.6545723e-03 0.0000000e+00 + 2.6000000e+01 5.2000000e+01 1.0000000e-03 0.0000000e+00 + 2.6000000e+01 5.4000000e+01 3.7911704e-02 0.0000000e+00 + 2.6000000e+01 5.6000000e+01 3.9889666e-01 0.0000000e+00 + 2.7000000e+01 8.0000000e+00 6.6165956e+00 9.0000000e+00 + 2.7000000e+01 9.0000000e+00 4.9218765e+02 5.9400000e+02 + 2.7000000e+01 1.0000000e+01 3.1221589e+01 3.0000000e+01 + 
2.7000000e+01 1.1000000e+01 1.4705176e+01 1.7000000e+01 + 2.7000000e+01 1.2000000e+01 9.0949455e+00 7.0000000e+00 + 2.7000000e+01 1.3000000e+01 3.5265831e+02 3.2100000e+02 + 2.7000000e+01 1.4000000e+01 3.9767338e+01 3.4000000e+01 + 2.7000000e+01 1.5000000e+01 9.2815253e+00 1.3000000e+01 + 2.7000000e+01 1.6000000e+01 1.7333625e+01 2.1000000e+01 + 2.7000000e+01 1.7000000e+01 8.4566872e+01 1.1600000e+02 + 2.7000000e+01 1.8000000e+01 3.9066769e+01 4.8000000e+01 + 2.7000000e+01 1.9000000e+01 6.0803828e+00 1.1000000e+01 + 2.7000000e+01 2.0000000e+01 9.9190387e+00 1.2000000e+01 + 2.7000000e+01 2.1000000e+01 9.3333777e+00 8.0000000e+00 + 2.7000000e+01 2.2000000e+01 1.2838794e+02 1.2700000e+02 + 2.7000000e+01 2.3000000e+01 2.8302207e+01 4.8000000e+01 + 2.7000000e+01 2.4000000e+01 8.6527991e-01 3.0000000e+00 + 2.7000000e+01 2.5000000e+01 6.8277449e+00 7.0000000e+00 + 2.7000000e+01 2.6000000e+01 8.3706889e+00 4.0000000e+00 + 2.7000000e+01 2.7000000e+01 5.0262387e+00 7.0000000e+00 + 2.7000000e+01 2.8000000e+01 1.1903669e+01 1.2000000e+01 + 2.7000000e+01 2.9000000e+01 5.1586752e+01 7.3000000e+01 + 2.7000000e+01 3.0000000e+01 2.2525381e+01 3.2000000e+01 + 2.7000000e+01 3.1000000e+01 7.3011150e+00 6.0000000e+00 + 2.7000000e+01 3.2000000e+01 7.7974257e+00 9.0000000e+00 + 2.7000000e+01 3.3000000e+01 2.5721985e+00 3.0000000e+00 + 2.7000000e+01 3.4000000e+01 7.9080151e+00 8.0000000e+00 + 2.7000000e+01 3.5000000e+01 2.0013651e+01 1.2000000e+01 + 2.7000000e+01 3.6000000e+01 9.6705323e+01 1.1500000e+02 + 2.7000000e+01 3.7000000e+01 1.3753545e+01 8.0000000e+00 + 2.7000000e+01 3.8000000e+01 3.0540841e+00 6.0000000e+00 + 2.7000000e+01 3.9000000e+01 3.9432765e+00 6.0000000e+00 + 2.7000000e+01 4.0000000e+01 2.7296655e+00 0.0000000e+00 + 2.7000000e+01 4.2000000e+01 1.3362144e+00 0.0000000e+00 + 2.7000000e+01 4.3000000e+01 2.5177499e-02 0.0000000e+00 + 2.7000000e+01 4.6000000e+01 2.8377512e-03 0.0000000e+00 + 2.8000000e+01 9.0000000e+00 7.8346715e+00 6.0000000e+00 + 2.8000000e+01 1.0000000e+01 6.7709182e+01 9.7000000e+01 + 2.8000000e+01 1.1000000e+01 2.4750486e+01 1.3000000e+01 + 2.8000000e+01 1.2000000e+01 6.3133497e+01 5.2000000e+01 + 2.8000000e+01 1.3000000e+01 7.3137140e+01 8.7000000e+01 + 2.8000000e+01 1.4000000e+01 2.3247488e+01 1.9000000e+01 + 2.8000000e+01 1.5000000e+01 1.5011142e+01 2.2000000e+01 + 2.8000000e+01 1.6000000e+01 4.6249985e+01 6.3000000e+01 + 2.8000000e+01 1.7000000e+01 6.7052228e+01 7.3000000e+01 + 2.8000000e+01 1.8000000e+01 6.3328789e+01 1.0800000e+02 + 2.8000000e+01 1.9000000e+01 1.3404621e+01 1.8000000e+01 + 2.8000000e+01 2.0000000e+01 5.9655427e+01 9.5000000e+01 + 2.8000000e+01 2.1000000e+01 9.1633943e+00 9.0000000e+00 + 2.8000000e+01 2.2000000e+01 2.0022761e+01 2.8000000e+01 + 2.8000000e+01 2.3000000e+01 5.4770284e+00 4.0000000e+00 + 2.8000000e+01 2.4000000e+01 2.5958268e+00 1.0000000e+00 + 2.8000000e+01 2.5000000e+01 9.2574856e+01 1.2500000e+02 + 2.8000000e+01 2.6000000e+01 7.1500347e+00 1.2000000e+01 + 2.8000000e+01 2.7000000e+01 4.0019164e+00 2.0000000e+00 + 2.8000000e+01 2.8000000e+01 3.2512260e+00 7.0000000e+00 + 2.8000000e+01 2.9000000e+01 5.7336570e+00 7.0000000e+00 + 2.8000000e+01 3.0000000e+01 1.3568457e+00 3.0000000e+00 + 2.8000000e+01 3.1000000e+01 7.3253572e+00 3.0000000e+00 + 2.8000000e+01 3.2000000e+01 4.2843157e+00 3.0000000e+00 + 2.8000000e+01 3.3000000e+01 1.2275175e+00 2.0000000e+00 + 2.8000000e+01 3.4000000e+01 1.9271144e+00 1.0000000e+00 + 2.8000000e+01 3.5000000e+01 6.4264267e+00 7.0000000e+00 + 2.8000000e+01 3.6000000e+01 6.7801983e+00 9.0000000e+00 + 2.8000000e+01 
3.7000000e+01 2.0850692e+00 1.0000000e+00 + 2.8000000e+01 3.8000000e+01 7.9213142e-01 1.0000000e+00 + 2.9000000e+01 1.0000000e+01 2.3543667e+00 4.0000000e+00 + 2.9000000e+01 1.1000000e+01 7.5710074e+01 8.2000000e+01 + 2.9000000e+01 1.2000000e+01 4.7040601e+01 4.8000000e+01 + 2.9000000e+01 1.3000000e+01 3.6497108e+01 4.5000000e+01 + 2.9000000e+01 1.4000000e+01 1.7639033e+01 2.4000000e+01 + 2.9000000e+01 1.5000000e+01 5.2660943e+01 6.9000000e+01 + 2.9000000e+01 1.6000000e+01 2.2679494e+01 2.3000000e+01 + 2.9000000e+01 1.7000000e+01 5.6996476e+02 6.0500000e+02 + 2.9000000e+01 1.8000000e+01 4.0502742e+01 5.3000000e+01 + 2.9000000e+01 1.9000000e+01 1.3524114e+01 2.4000000e+01 + 2.9000000e+01 2.0000000e+01 8.8073649e+00 1.6000000e+01 + 2.9000000e+01 2.1000000e+01 2.5968678e+01 4.0000000e+01 + 2.9000000e+01 2.2000000e+01 1.3612600e+01 1.5000000e+01 + 2.9000000e+01 2.3000000e+01 6.8392291e-01 0.0000000e+00 + 2.9000000e+01 2.4000000e+01 1.4700891e+00 1.0000000e+00 + 2.9000000e+01 2.5000000e+01 3.1304939e+00 5.0000000e+00 + 2.9000000e+01 2.6000000e+01 3.5925140e+00 1.0000000e+00 + 2.9000000e+01 2.7000000e+01 4.9835516e+00 9.0000000e+00 + 2.9000000e+01 2.8000000e+01 1.3162852e+00 2.0000000e+00 + 2.9000000e+01 2.9000000e+01 2.5140758e+00 3.0000000e+00 + 2.9000000e+01 3.1000000e+01 1.2674878e-01 0.0000000e+00 + 2.9000000e+01 3.4000000e+01 6.9865516e-02 0.0000000e+00 + 2.9000000e+01 3.5000000e+01 1.3280474e+00 0.0000000e+00 + 2.9000000e+01 3.6000000e+01 1.6261318e+00 0.0000000e+00 + 3.0000000e+01 1.2000000e+01 6.8285696e+00 1.4000000e+01 + 3.0000000e+01 1.3000000e+01 1.5979119e+01 1.8000000e+01 + 3.0000000e+01 1.4000000e+01 1.0322879e+02 1.4000000e+02 + 3.0000000e+01 1.5000000e+01 2.0682900e+01 2.0000000e+01 + 3.0000000e+01 1.6000000e+01 4.9314788e+01 6.6000000e+01 + 3.0000000e+01 1.7000000e+01 1.9906749e+01 2.4000000e+01 + 3.0000000e+01 1.8000000e+01 5.8002137e+01 8.6000000e+01 + 3.0000000e+01 1.9000000e+01 3.6003474e+01 4.1000000e+01 + 3.0000000e+01 2.0000000e+01 1.5245109e+01 2.0000000e+01 + 3.0000000e+01 2.1000000e+01 1.6028187e+02 2.0800000e+02 + 3.0000000e+01 2.2000000e+01 2.5661325e+00 3.0000000e+00 + 3.0000000e+01 2.3000000e+01 2.2747381e-01 0.0000000e+00 + 3.0000000e+01 2.4000000e+01 5.3246837e-02 0.0000000e+00 + 3.0000000e+01 2.5000000e+01 1.0809564e+00 1.0000000e+00 + 3.0000000e+01 2.6000000e+01 2.1454960e+00 1.0000000e+00 + 3.1000000e+01 1.3000000e+01 9.9126199e-01 1.0000000e+00 + 3.1000000e+01 1.4000000e+01 1.4193248e+01 1.6000000e+01 + 3.1000000e+01 1.5000000e+01 6.8097195e+01 1.0800000e+02 + 3.1000000e+01 1.6000000e+01 2.1604477e+01 3.2000000e+01 + 3.1000000e+01 1.7000000e+01 2.0424173e+01 2.1000000e+01 + 3.1000000e+01 1.8000000e+01 1.4523847e+01 1.1000000e+01 + 3.1000000e+01 1.9000000e+01 8.5958830e+00 9.0000000e+00 + 3.1000000e+01 2.0000000e+01 2.5293397e+00 0.0000000e+00 + 3.1000000e+01 2.1000000e+01 5.5249527e+00 7.0000000e+00 + 3.1000000e+01 2.2000000e+01 4.7465493e-01 0.0000000e+00 + 3.2000000e+01 1.5000000e+01 3.8843123e+00 4.0000000e+00 + 3.2000000e+01 1.6000000e+01 1.3802460e+01 2.4000000e+01 + 3.2000000e+01 1.7000000e+01 8.2321295e+01 8.2000000e+01 + 3.2000000e+01 1.8000000e+01 1.3871562e+01 1.4000000e+01 + 3.2000000e+01 1.9000000e+01 2.4878699e+00 6.0000000e+00 + 3.2000000e+01 2.0000000e+01 9.7101431e-01 4.0000000e+00 + 3.2000000e+01 2.1000000e+01 5.9880931e-02 0.0000000e+00 + 3.3000000e+01 1.6000000e+01 8.4816308e-01 0.0000000e+00 + 3.3000000e+01 1.7000000e+01 2.7030209e+00 5.0000000e+00 + 3.3000000e+01 1.8000000e+01 2.9096270e+00 3.0000000e+00 + 3.3000000e+01 1.9000000e+01 
3.1878836e+00 3.0000000e+00 + 3.3000000e+01 2.0000000e+01 4.1693701e-01 0.0000000e+00 diff --git a/test/unit/math/laplace/aki_disease_data/x1.csv b/test/unit/math/laplace/aki_disease_data/x1.csv new file mode 100644 index 00000000000..ed20b8f006d --- /dev/null +++ b/test/unit/math/laplace/aki_disease_data/x1.csv @@ -0,0 +1 @@ +1 1 2 2 2 2 3 3 3 3 4 4 4 5 5 5 5 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 13 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 15 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 18 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 19 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 21 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 23 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 25 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 26 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 27 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 28 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 29 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 31 31 31 31 31 31 31 31 31 31 32 32 32 32 32 32 32 33 33 33 33 33 diff --git a/test/unit/math/laplace/aki_disease_data/x2.csv b/test/unit/math/laplace/aki_disease_data/x2.csv new file mode 100644 index 00000000000..dc0bc75a665 --- /dev/null +++ b/test/unit/math/laplace/aki_disease_data/x2.csv @@ -0,0 +1 @@ +4 5 3 4 5 6 3 4 5 6 3 4 5 3 4 5 6 2 3 4 5 6 7 8 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 52 53 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 2 3 
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 50 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 49 50 51 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 47 48 49 50 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 48 49 51 52 53 54 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 49 51 52 53 54 55 56 57 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 49 51 52 54 57 58 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 47 48 49 50 51 52 53 54 57 58 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 46 49 50 51 52 53 54 58 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 49 51 52 53 54 55 56 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 44 45 47 52 54 56 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 42 43 46 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 31 34 35 36 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 13 14 15 16 17 18 19 20 21 22 15 16 17 18 19 20 21 16 17 18 19 20 diff --git a/test/unit/math/laplace/aki_disease_data/y.csv b/test/unit/math/laplace/aki_disease_data/y.csv new file mode 100644 index 00000000000..27e97f7b551 --- /dev/null +++ b/test/unit/math/laplace/aki_disease_data/y.csv @@ -0,0 +1 @@ +4 3 0 122 29 0 6 25 32 0 16 0 4 4 0 6 0 0 0 10 4 7 0 1 0 1 25 3 45 172 52 324 4 5 34 4 12 27 46 55 31 18 0 6 0 2 15 162 67 82 124 81 62 953 104 107 19 57 52 74 33 57 421 26 3 9 31 160 2288 133 15 46 155 251 40 48 30 27 29 42 29 44 79 169 17 4 60 62 223 126 94 83 120 135 47 42 143 59 34 97 128 28 73 80 53 11 0 0 0 116 26 90 342 89 205 96 64 213 59 35 68 18 26 95 54 327 104 63 35 71 250 6 139 73 55 96 121 89 79 55 43 80 157 26 86 38 32 68 67 168 133 105 21 108 232 0 15 136 65 64 54 304 83 118 196 1855 24 48 9 12 24 111 79 44 42 41 37 42 221 10 0 4 0 0 115 279 183 20 30 52 307 177 679 32 50 21 84 26 87 43 90 107 27 83 30 67 56 7 0 1 1 0 1 223 179 95 71 63 536 62 83 48 116 24 74 52 21 23 36 21 16 10 18 44 27 22 132 33 57 24 12 56 17 37 2 11 12 9 1 2 25 0 16 5 0 3195 2290 209 726 198 45 45 39 38 33 53 113 145 27 18 63 26 28 5 29 18 48 150 19 17 182 9 0 221 161 20 1 10 7 17 7 7 6 6 4 4 1 1 1 690 1713 540 163 119 100 21 58 48 28 233 26 44 7 31 33 20 31 35 44 19 132 22 107 37 37 31 23 0 119 36 35 8 0 10 3 3 3 3 2 2 5 1 0 1 0 0 386 122 54 67 1180 103 11 45 10 38 55 45 36 
83 15 31 20 58 24 84 36 97 8 10 37 65 767 198 83 28 27 2 6 20 13 5 2 9 6 1 4 48 3 0 1 0 4 38 75 70 226 157 269 30 122 28 14 123 983 133 191 35 94 14 12 17 74 46 9 33 24 9 73 246 62 33 10 9 6 0 3 237 179 5 1 2 8 2 1 2 0 0 0 0 0 0 15 162 81 98 34 22 35 22 78 23 47 67 32 72 20 16 45 7 21 28 8 25 32 6 8 81 30 9 5 11 4 1 5 11 12 6 5 2 0 18 8 0 0 0 0 1 0 3 0 0 3 80 127 853 51 12 27 49 16 91 19 51 27 23 27 53 69 18 39 136 6 5 6 22 20 47 4 13 3 6 7 28 3 1 17 5 5 2 8 51 1 0 0 0 1 0 0 0 0 901 164 49 36 8 120 44 29 17 17 241 17 166 47 10 29 21 170 60 8 13 18 10 37 0 11 2 79 7 5 3 0 13 4 1 0 2 0 3 5 2 0 4 0 0 0 0 5 2 0 0 1 6 0 0 168 59 27 20 18 35 105 466 57 35 48 9 22 20 29 69 100 107 59 17 11 8 15 3 4 2 4 7 8 9 4 2 1 12 16 105 12 8 1 1 0 0 0 1 24 10 0 1 0 0 0 56 54 100 66 77 9 39 28 103 79 346 128 37 851 188 33 67 22 10 2 4 415 74 9 50 15 7 5 8 2 2 1 14 3 3 6 3 0 0 3 1 0 0 1 0 3 1 0 14 46 644 43 8 50 19 23 72 33 19 14 61 83 74 22 42 3 2 94 20 39 14 3 2 8 10 29 4 7 31 6 11 1 9 11 0 12 6 0 0 0 0 0 0 1 0 10 170 45 10 8 51 29 15 15 80 21 61 75 23 7 3 36 7 19 2 1 40 20 35 10 8 6 4 12 8 14 3 5 39 10 3 0 0 0 0 0 0 9 594 30 17 7 321 34 13 21 116 48 11 12 8 127 48 3 7 4 7 12 73 32 6 9 3 8 12 115 8 6 6 0 0 0 0 6 97 13 52 87 19 22 63 73 108 18 95 9 28 4 1 125 12 2 7 7 3 3 3 2 1 7 9 1 1 4 82 48 45 24 69 23 605 53 24 16 40 15 0 1 5 1 9 2 3 0 0 0 0 14 18 140 20 66 24 86 41 20 208 3 0 0 1 1 1 16 108 32 21 11 9 0 7 0 4 24 82 14 6 4 0 0 5 3 3 0 diff --git a/test/unit/math/laplace/aki_disease_data/ye.csv b/test/unit/math/laplace/aki_disease_data/ye.csv new file mode 100644 index 00000000000..91791681c03 --- /dev/null +++ b/test/unit/math/laplace/aki_disease_data/ye.csv @@ -0,0 +1 @@ +2.8079055 7.0898599 1.8430127 174.96143 42.061934 0.12833066 4.340199 34.082447 51.48571 0.13278291 10.589369 7.3046351 4.8329585 7.8532574 5.8476511 5.7880622 0.72856462 0.2799728 0.12444456 11.557298 10.396495 7.7440359 0.84993784 3.9912302 0.097159 2.3783082 18.981563 9.9705535 58.144073 179.51441 47.729256 458.1849 8.2946817 4.8010997 32.635018 4.7074167 13.387533 32.884418 66.614769 99.756987 50.29044 22.515207 0.17914949 4.3733269 0.0135727 0.72286756 17.405913 165.86485 103.2835 91.931015 118.52939 109.83366 86.619977 1025.0153 101.01579 78.657199 23.971224 83.336609 78.757016 114.06701 62.16655 104.95877 534.55587 47.620912 2.2954138 7.7934308 40.158531 163.21877 2506.0659 192.97843 21.835028 42.302459 184.88548 244.29468 52.362386 52.176951 25.664934 29.689173 29.218903 60.022954 57.198501 68.015308 115.22675 286.80855 32.900904 1.519439 63.684781 62.185461 228.58175 135.88382 87.4316 80.619066 135.83706 141.65653 51.037915 48.618129 141.22714 53.295604 41.964699 123.58541 154.29759 57.650262 107.47161 98.554211 56.876424 21.474296 0.040422619 0.34821039 0.001 143.23232 37.982352 100.45313 396.51826 92.769604 223.84957 79.091398 64.660394 233.74731 59.249283 29.543396 55.312306 20.178246 30.197972 83.957626 62.695163 412.31784 85.738651 86.717987 46.352508 103.76509 306.76142 6.2909621 170.82238 78.163638 65.361058 90.182609 132.45141 115.09998 74.322633 73.654898 39.684313 75.021054 190.74998 25.387099 84.748298 38.512918 27.881239 65.435182 76.08324 169.90848 133.48166 77.146067 32.069336 132.66612 213.08964 1.8067821 11.997476 167.21566 53.346788 63.462813 50.036795 290.92892 87.058757 115.32806 197.9864 1861.9949 31.789102 29.640872 12.361098 19.489799 28.202109 125.93309 74.330987 39.67347 35.313346 41.783712 53.589689 56.439326 237.71279 16.848286 0.21993524 3.1928984 0.037911704 0.69110819 127.20582 302.93702 177.52261 
25.085587 37.601925 54.156493 311.59822 198.2311 767.71236 30.528553 68.529954 27.118555 94.311358 41.451931 89.84789 55.667646 95.13495 94.043529 38.134755 78.031443 14.399746 60.12356 54.886318 8.5315266 0.038080321 1.5793305 2.7579936 1.049687 1.0982942 258.62518 175.22942 93.322562 75.218137 60.056009 671.7655 70.222474 77.078647 45.786527 121.41169 27.840659 72.341196 50.032733 25.070173 31.929108 39.344009 21.690175 15.116468 10.57575 21.302586 47.678595 35.711475 27.435279 109.57075 40.16688 67.930981 21.375488 20.002431 52.465632 15.375098 34.018903 3.1449289 9.6746873 10.729735 5.0687518 0.85546808 2.3337694 18.805409 0.28000834 6.5446359 2.5544882 0.0017414397 3260.1201 2457.0023 216.8167 737.41461 220.52428 50.633002 56.160596 41.716996 38.1466 31.966245 47.295488 141.2575 122.98356 34.109307 15.887184 56.383073 32.921152 27.342766 3.4082205 22.667169 10.824355 42.94846 127.95805 27.49756 19.170851 182.8949 8.1964545 0.91188566 191.26598 191.67284 28.450453 2.2579429 10.019578 4.0464199 10.708548 2.8114207 4.768944 4.134144 5.8534102 2.162349 3.9541432 1.342653 0.91529879 3.9002465 743.0063 1598.0077 536.34986 123.93432 111.50481 110.17095 19.456837 55.57457 50.254706 38.115542 202.06064 22.735728 47.437094 16.675338 38.56963 32.691523 14.118096 17.311103 26.749477 34.056677 17.736093 117.28666 22.7567 107.75481 44.131476 30.49434 21.645093 19.348974 0.079406058 104.95783 33.090951 38.29627 6.6571875 0.26229133 10.488489 3.9361475 1.5829231 3.1799059 3.8968506 3.8265142 0.39171756 5.5257424 1.3473818 0.023453171 2.212155 0.0040656913 0.60265964 374.55744 110.37954 60.202321 80.970051 1161.4218 110.64017 11.777246 36.505843 15.452632 36.911494 56.918281 44.833513 49.549443 95.043703 16.121778 34.872602 15.616248 46.090323 16.137712 76.921971 38.102508 73.290657 10.725773 16.935231 33.51789 59.073008 737.71698 175.17118 58.225519 16.669716 23.898271 4.3778595 3.0043434 21.87579 15.348173 5.2770556 2.2717941 7.5654304 6.4390779 3.1644486 7.2063012 24.896623 5.5661099 0.1757917 0.46016137 0.028401166 2.2769759 35.836204 63.768107 65.961175 213.19376 184.107 231.2068 25.779306 82.553184 25.92105 16.050637 109.40016 967.37663 113.5866 180.80319 18.844468 72.801862 20.602838 15.446136 14.714812 67.195246 38.206297 9.6948426 38.804963 25.273864 11.340326 67.054077 275.15858 57.967788 21.957164 6.6336674 6.8855312 6.3093907 0.10269473 2.8306819 246.27315 157.43093 8.5258479 3.3886577 3.6383763 2.9913716 4.8594352 2.1390241 1.9017458 0.41315486 0.42741621 0.001 0.5386798 0.1529444 0.094871205 15.197767 143.48816 69.229037 98.517541 25.512032 24.252967 27.7063 19.196998 66.564102 25.374879 31.827813 62.439111 34.501844 49.554048 25.699775 20.588072 43.626833 11.844879 14.474697 22.75674 4.2225824 20.096198 26.487958 2.9531129 5.2937605 62.531464 24.40185 8.4113858 3.6627988 8.3542272 2.4619647 3.3846724 4.8488691 7.6661352 9.1054431 5.1034876 4.8284948 1.6566176 1.5212708 8.2614538 6.1771674 2.0841611 0.13372279 0.24060309 0.2057354 0.6766589 0.57142167 4.352334 2.1999308 0.70988057 3.036213 82.739916 144.36021 749.33922 53.218416 14.611087 26.886852 52.053184 17.426813 80.121529 14.195308 48.278151 32.023248 18.632297 27.257992 52.727063 68.564754 21.357125 30.769108 121.5369 11.100909 4.8862493 5.306794 15.38494 19.934964 26.441305 3.1095696 11.390125 7.7813322 3.711585 3.3504113 25.727677 3.9458818 0.9864188 11.884295 2.3753064 1.2145425 4.0695821 6.2647199 44.949764 1.3823914 0.076236933 0.13903797 0.93641143 2.1220815 0.20417439 0.43063847 0.7858135 0.053243063 792.43513 146.46798 
49.513223 31.125114 7.7515767 84.972886 41.134958 21.137359 24.840748 17.998721 235.34337 11.115606 153.78061 35.507648 19.259173 22.079289 18.680777 179.54699 56.349346 10.59656 15.408957 15.862917 5.7561348 33.133203 0.45479332 5.7638218 2.7705378 52.777928 5.8761671 4.4351054 2.7159767 1.7203242 12.879771 6.2936517 1.8223816 0.895328 1.3266594 0.58655748 1.6712497 5.6194667 1.7400984 0.20793027 4.6306127 0.13316812 0.0048640432 0.0068769836 0.055226084 8.8360272 2.5540894 0.2756354 0.1281359 0.4395864 3.3616155 0.82291967 0.39481028 142.74963 55.53204 22.933974 28.241655 15.673616 25.966506 92.549959 382.10259 40.362133 36.125211 35.778578 16.991068 18.131618 27.919869 27.890157 57.641388 91.698704 109.40946 52.729831 20.356057 12.833251 8.1737293 9.1068086 1.3078578 3.6704633 2.4364525 2.5573847 10.312683 4.2610746 5.5071067 2.366001 1.4528914 1.516798 8.9310845 9.0072596 97.002265 10.861113 13.479311 2.2650819 0.73767952 0.0372318 0.0093589144 0.0040105089 0.5796563 25.093921 15.963899 0.58904687 0.14116927 0.03326521 0.001 0.63403667 39.598572 51.413634 69.978597 55.809331 61.876961 12.595016 26.042962 23.855305 76.454059 55.654986 304.85673 104.60265 26.253281 740.98202 160.28843 30.758761 54.308421 18.648536 7.3469814 1.7897547 8.8329055 347.95782 44.763751 12.487016 31.360225 5.9201984 6.0486533 4.6356517 4.013579 4.5960256 3.363408 1.7823716 8.0281325 2.3626662 8.1467089 7.4792722 3.1072764 0.38412526 2.7627489 1.0496475 1.596867 0.0065632298 0.045555971 1.5793621 0.0099377085 0.95849964 1.1275722 2.2229409 8.2507844 35.70347 619.22109 42.057051 6.3773366 41.63295 20.664321 21.547638 65.243731 33.661125 15.856443 13.135322 43.565092 60.25733 86.816681 15.712241 27.118575 1.0592478 3.7175065 87.488486 20.077608 23.360356 10.216764 4.2137635 4.2270488 7.3231727 4.8329137 24.783687 2.37178 4.0414849 24.734675 4.3612481 10.947756 0.75072895 8.0458263 4.8723387 0.6329216 9.5825983 2.4072976 0.05487123 0.0021761201 0.045056743 3.0180046 0.028401166 1.1067772 1.3809492 0.0054914295 7.2256951 136.7522 38.679883 10.983757 10.725049 40.50498 24.500563 14.056835 14.061547 58.961962 10.33436 44.505135 49.584511 20.503963 11.563546 5.8747409 23.990314 4.6233188 18.87811 7.3301682 3.7671048 30.866675 14.87168 19.460711 4.7434952 3.8362977 6.2833446 3.4414523 8.2668733 10.024785 8.7906324 2.8087193 11.800107 32.609097 9.4208165 7.1831193 0.1068828 0.13592667 0.0026545723 0.001 0.037911704 0.39889666 6.6165956 492.18765 31.221589 14.705176 9.0949455 352.65831 39.767338 9.2815253 17.333625 84.566872 39.066769 6.0803828 9.9190387 9.3333777 128.38794 28.302207 0.86527991 6.8277449 8.3706889 5.0262387 11.903669 51.586752 22.525381 7.301115 7.7974257 2.5721985 7.9080151 20.013651 96.705323 13.753545 3.0540841 3.9432765 2.7296655 1.3362144 0.025177499 0.0028377512 7.8346715 67.709182 24.750486 63.133497 73.13714 23.247488 15.011142 46.249985 67.052228 63.328789 13.404621 59.655427 9.1633943 20.022761 5.4770284 2.5958268 92.574856 7.1500347 4.0019164 3.251226 5.733657 1.3568457 7.3253572 4.2843157 1.2275175 1.9271144 6.4264267 6.7801983 2.0850692 0.79213142 2.3543667 75.710074 47.040601 36.497108 17.639033 52.660943 22.679494 569.96476 40.502742 13.524114 8.8073649 25.968678 13.6126 0.68392291 1.4700891 3.1304939 3.592514 4.9835516 1.3162852 2.5140758 0.12674878 0.069865516 1.3280474 1.6261318 6.8285696 15.979119 103.22879 20.6829 49.314788 19.906749 58.002137 36.003474 15.245109 160.28187 2.5661325 0.22747381 0.053246837 1.0809564 2.145496 0.99126199 14.193248 68.097195 21.604477 20.424173 14.523847 
8.595883 2.5293397 5.5249527 0.47465493 3.8843123 13.80246 82.321295 13.871562 2.4878699 0.97101431 0.059880931 0.84816308 2.7030209 2.909627 3.1878836 0.41693701 diff --git a/test/unit/math/laplace/aki_synth_data/synth.tr b/test/unit/math/laplace/aki_synth_data/synth.tr new file mode 100644 index 00000000000..82be5dae179 --- /dev/null +++ b/test/unit/math/laplace/aki_synth_data/synth.tr @@ -0,0 +1,251 @@ +% xs ys yc + 0.05100797 0.16086164 0 + -0.74807425 0.08904024 0 + -0.77293371 0.26317168 0 + 0.21837360 0.12706142 0 + 0.37268336 0.49656200 0 + -0.62931544 0.63202159 0 + -0.43307167 0.14479166 0 + -0.84151970 -0.19131316 0 + 0.47525648 0.22483671 0 + 0.32082976 0.32721288 0 + 0.32061253 0.33407547 0 + -0.89077472 0.41168783 0 + 0.17850119 0.44691359 0 + 0.31558002 0.38853383 0 + 0.55777224 0.47272748 0 + 0.03191877 0.01222964 0 + 0.25090585 0.30716705 0 + 0.23571547 0.22493837 0 + -0.07236203 0.33376524 0 + 0.50440241 0.08054579 0 + -0.63223351 0.44552458 0 + -0.76784656 0.23614689 0 + -0.70017557 0.21038848 0 + -0.64713491 0.15921366 0 + -0.76739248 0.09259038 0 + -0.51788734 0.03288107 0 + 0.17516644 0.34534871 0 + -0.68031190 0.47612156 0 + 0.01595199 0.32167526 0 + -0.71481078 0.51421443 0 + 0.07837946 0.32284981 0 + -0.80872251 0.47036593 0 + -0.84211234 0.09294232 0 + -0.98591577 0.48309267 0 + 0.29104081 0.34275967 0 + 0.24321541 0.51488295 0 + -0.60104419 0.05060116 0 + -1.24652451 0.45923165 0 + -0.82769016 0.36187460 0 + -0.62117301 -0.10912158 0 + -0.70584105 0.65907662 0 + 0.06718867 0.60574850 0 + 0.30505147 0.47417973 0 + 0.60788138 0.39361588 0 + -0.78937483 0.17591675 0 + -0.53123209 0.42652809 0 + 0.25202071 0.17029707 0 + -0.57880357 0.26553665 0 + -0.83176749 0.54447377 0 + -0.69859164 0.38566851 0 + -0.73642607 0.11857527 0 + -0.93496195 0.11370707 0 + 0.43959309 0.41430638 0 + -0.54690854 0.24956276 0 + -0.08405550 0.36521058 0 + 0.32211458 0.69087105 0 + 0.10764739 0.57946932 0 + -0.71864030 0.25645757 0 + -0.87877752 0.45064757 0 + -0.69846046 0.95053870 0 + 0.39757434 0.11810207 0 + -0.50451354 0.57196376 0 + 0.25023622 0.39783889 0 + 0.61709156 0.10185808 0 + 0.31832860 0.08790562 0 + -0.57453363 0.18624195 0 + 0.09761865 0.55176786 0 + 0.48449339 0.35372973 0 + 0.52400684 0.46616851 0 + -0.78138463 -0.07534713 0 + -0.49704591 0.59948077 0 + -0.96984525 0.46624927 0 + 0.43541407 0.12192386 0 + -0.67942462 0.30753942 0 + -0.62529036 0.07099046 0 + -0.02318116 0.40442601 0 + 0.23200141 0.71066846 0 + 0.09384354 0.46674396 0 + 0.14234301 0.17898711 0 + -0.61686357 0.25507763 0 + 0.23636288 0.51543839 0 + 0.38914177 0.40429568 0 + -0.95178678 -0.03772239 0 + 0.24087822 0.71948890 0 + 0.12446266 0.45178849 0 + -0.60566430 0.26906478 0 + -0.71397188 0.30871780 0 + 0.31008428 0.34675335 0 + 0.18018786 0.46204643 0 + -0.42663885 0.64723225 0 + 0.06143230 0.32491150 0 + 0.07736952 0.32183287 0 + 0.42814970 0.13445957 0 + -0.80250753 0.66878999 0 + 0.40142623 0.42516398 0 + 0.37084776 0.26407123 0 + -0.80774748 0.41485899 0 + 0.50163585 0.23934856 0 + 0.58238323 0.22842741 0 + -0.59136100 0.30230321 0 + -0.87037236 0.26941446 0 + -0.72086765 0.19676678 0 + 0.27778443 0.21792253 0 + 0.33240813 0.27349865 0 + -0.14092068 0.39247351 0 + -0.59759518 0.14790267 0 + -0.85581534 0.14513961 0 + -0.88912232 0.26896001 0 + 0.21345680 0.43611756 0 + -0.53467949 0.57901229 0 + 0.31686848 0.39705856 0 + -0.68121733 0.04209840 0 + -0.97586127 0.45964811 0 + 0.41457183 0.27141230 0 + 0.32751292 0.36780137 0 + -0.93209192 0.09362034 0 + 0.58395341 0.47147282 0 + -0.44437309 0.23010142 0 
+ 0.29109441 0.19365556 0 + -0.51080722 0.41496003 0 + -0.96597511 0.17931052 0 + 0.18741315 0.29747132 0 + 0.17965417 0.45175449 0 + -0.72689602 0.35728387 0 + -0.54339877 0.41012013 0 + -0.59823393 0.98701425 1 + -0.20194736 0.62101680 1 + 0.47146103 0.48221146 1 + -0.09821987 0.58755577 1 + -0.35657658 0.63709705 1 + 0.63881392 0.42112135 1 + 0.62980614 0.28146085 1 + -0.46223286 0.61661031 1 + -0.07331555 0.55821736 1 + -0.55405533 0.51253129 1 + -0.43761773 0.87811781 1 + -0.22237814 0.88850773 1 + 0.09346162 0.67310494 1 + 0.53174745 0.54372650 1 + 0.40207539 0.51638462 1 + 0.47555171 0.65056336 1 + -0.23383266 0.63642580 1 + -0.31579316 0.75031340 1 + -0.47351720 0.63854125 1 + 0.59239464 0.89256953 1 + -0.22605324 0.79789454 1 + -0.43995011 0.52099256 1 + -0.54645044 0.74577198 1 + 0.46404306 0.51065152 1 + -0.15194296 0.81218439 1 + 0.48536395 0.82018093 1 + 0.34725649 0.70813773 1 + 0.43897015 0.62817158 1 + -0.21415914 0.64363951 1 + 0.57380231 0.63713466 1 + 0.38717361 0.58578395 1 + 0.32038322 0.53529127 1 + -0.20781491 0.65132467 1 + -0.18651283 0.81754816 1 + 0.24752692 0.39081936 1 + 0.66049881 0.89919213 1 + -0.28658801 0.73375946 1 + -0.32588080 0.39865509 1 + -0.25204565 0.67358326 1 + 0.37259022 0.49785904 1 + -0.29096564 1.04372060 1 + -0.30469807 0.86858292 1 + -0.21389978 1.09317811 1 + -0.36830015 0.75639546 1 + -0.46928218 0.88775091 1 + 0.39350146 0.77975197 1 + -0.45639966 0.80523454 1 + 0.51128242 0.76606136 1 + 0.22550468 0.46451215 1 + 0.01462984 0.40190926 1 + -0.19172785 0.80943313 1 + 0.38323479 0.75601744 1 + 0.49791612 0.61334375 1 + 0.35335230 0.77324337 1 + -0.34722575 0.70177856 1 + 0.58380468 0.76357539 1 + -0.13727764 0.71246351 1 + 0.38827268 0.44977123 1 + -0.53172709 0.61934293 1 + -0.11684624 0.87851210 1 + 0.54335864 0.41174865 1 + -0.45399302 0.66512988 1 + -0.21913200 0.83484947 1 + 0.30485742 0.98028760 1 + 0.65676798 0.75766017 1 + 0.61420447 0.75039019 1 + -0.45809964 0.77968606 1 + -0.21617465 0.88626305 1 + -0.26016108 0.81008591 1 + 0.31884531 0.84517725 1 + -0.23727415 0.80178784 1 + 0.58310323 0.77709806 1 + 0.02841337 0.75792620 1 + -0.41840136 0.68041440 1 + 0.67412880 0.60245461 1 + -0.25278281 0.70526103 1 + 0.51609843 0.62092390 1 + 0.20392294 0.91641482 1 + -0.17207124 1.00884096 1 + 0.27274507 0.29346977 1 + 0.07634798 0.56222204 1 + -0.36653499 0.64831007 1 + 0.44290673 0.80087721 1 + -0.19976385 0.54295162 1 + -0.54075738 0.65293033 1 + -0.07060266 1.00296912 1 + 0.50715054 0.35045758 1 + -0.06048611 0.62982713 1 + 0.21532928 0.60260249 1 + 0.46809108 0.87182416 1 + -0.29888511 0.73669866 1 + 0.86129620 0.47289330 1 + 0.70120877 0.74572893 1 + -0.11342797 0.60067099 1 + 0.31234354 0.90756345 1 + -0.12172541 0.84112851 1 + 0.36867857 0.37052586 1 + 0.57311489 0.40949740 1 + -0.25841225 0.67192335 1 + 0.30937186 0.50823318 1 + 0.43319338 0.77016967 1 + -0.30448035 0.57820106 1 + 0.44276338 0.58023403 1 + -0.19442057 0.89876808 1 + -0.06105237 0.74184946 1 + 0.07619347 0.35386246 1 + 0.85826993 0.95819523 1 + 0.37039200 0.72342401 1 + 0.51481515 0.76203996 1 + 0.43127521 0.54259166 1 + 0.42286091 0.65242185 1 + 0.29815001 0.93453682 1 + 0.37128253 0.70089181 1 + -0.51528729 0.76473490 1 + 0.38525783 0.65528189 1 + -0.34825368 0.50529981 1 + 0.68510504 0.78067440 1 + -0.36528923 0.45703265 1 + -0.40903577 0.74230433 1 + 0.43574387 0.44689789 1 + 0.26887846 0.44559230 1 + -0.49254862 1.01443372 1 + 0.07615960 0.63795180 1 + 0.49226224 0.46876241 1 + -0.40249641 0.71301084 1 diff --git 
a/test/unit/math/laplace/aki_synth_data/testdata.csv b/test/unit/math/laplace/aki_synth_data/testdata.csv new file mode 100644 index 00000000000..40a4de42687 --- /dev/null +++ b/test/unit/math/laplace/aki_synth_data/testdata.csv @@ -0,0 +1,501 @@ +x1,x2,y +0.051008,0.16086,-1 +-0.74807,0.08904,-1 +-0.77293,0.26317,-1 +0.21837,0.12706,-1 +0.37268,0.49656,-1 +-0.62932,0.63202,-1 +-0.43307,0.14479,-1 +-0.84152,-0.19131,-1 +0.47526,0.22484,-1 +0.32083,0.32721,-1 +0.32061,0.33408,-1 +-0.89077,0.41169,-1 +0.1785,0.44691,-1 +0.31558,0.38853,-1 +0.55777,0.47273,-1 +0.031919,0.01223,-1 +0.25091,0.30717,-1 +0.23572,0.22494,-1 +-0.072362,0.33377,-1 +0.5044,0.080546,-1 +-0.63223,0.44552,-1 +-0.76785,0.23615,-1 +-0.70018,0.21039,-1 +-0.64713,0.15921,-1 +-0.76739,0.09259,-1 +-0.51789,0.032881,-1 +0.17517,0.34535,-1 +-0.68031,0.47612,-1 +0.015952,0.32168,-1 +-0.71481,0.51421,-1 +0.078379,0.32285,-1 +-0.80872,0.47037,-1 +-0.84211,0.092942,-1 +-0.98592,0.48309,-1 +0.29104,0.34276,-1 +0.24322,0.51488,-1 +-0.60104,0.050601,-1 +-1.2465,0.45923,-1 +-0.82769,0.36187,-1 +-0.62117,-0.10912,-1 +-0.70584,0.65908,-1 +0.067189,0.60575,-1 +0.30505,0.47418,-1 +0.60788,0.39362,-1 +-0.78937,0.17592,-1 +-0.53123,0.42653,-1 +0.25202,0.1703,-1 +-0.5788,0.26554,-1 +-0.83177,0.54447,-1 +-0.69859,0.38567,-1 +-0.73643,0.11858,-1 +-0.93496,0.11371,-1 +0.43959,0.41431,-1 +-0.54691,0.24956,-1 +-0.084056,0.36521,-1 +0.32211,0.69087,-1 +0.10765,0.57947,-1 +-0.71864,0.25646,-1 +-0.87878,0.45065,-1 +-0.69846,0.95054,-1 +0.39757,0.1181,-1 +-0.50451,0.57196,-1 +0.25024,0.39784,-1 +0.61709,0.10186,-1 +0.31833,0.087906,-1 +-0.57453,0.18624,-1 +0.097619,0.55177,-1 +0.48449,0.35373,-1 +0.52401,0.46617,-1 +-0.78138,-0.075347,-1 +-0.49705,0.59948,-1 +-0.96985,0.46625,-1 +0.43541,0.12192,-1 +-0.67942,0.30754,-1 +-0.62529,0.07099,-1 +-0.023181,0.40443,-1 +0.232,0.71067,-1 +0.093844,0.46674,-1 +0.14234,0.17899,-1 +-0.61686,0.25508,-1 +0.23636,0.51544,-1 +0.38914,0.4043,-1 +-0.95179,-0.037722,-1 +0.24088,0.71949,-1 +0.12446,0.45179,-1 +-0.60566,0.26906,-1 +-0.71397,0.30872,-1 +0.31008,0.34675,-1 +0.18019,0.46205,-1 +-0.42664,0.64723,-1 +0.061432,0.32491,-1 +0.07737,0.32183,-1 +0.42815,0.13446,-1 +-0.80251,0.66879,-1 +0.40143,0.42516,-1 +0.37085,0.26407,-1 +-0.80775,0.41486,-1 +0.50164,0.23935,-1 +0.58238,0.22843,-1 +-0.59136,0.3023,-1 +-0.87037,0.26941,-1 +-0.72087,0.19677,-1 +0.27778,0.21792,-1 +0.33241,0.2735,-1 +-0.14092,0.39247,-1 +-0.5976,0.1479,-1 +-0.85582,0.14514,-1 +-0.88912,0.26896,-1 +0.21346,0.43612,-1 +-0.53468,0.57901,-1 +0.31687,0.39706,-1 +-0.68122,0.042098,-1 +-0.97586,0.45965,-1 +0.41457,0.27141,-1 +0.32751,0.3678,-1 +-0.93209,0.09362,-1 +0.58395,0.47147,-1 +-0.44437,0.2301,-1 +0.29109,0.19366,-1 +-0.51081,0.41496,-1 +-0.96598,0.17931,-1 +0.18741,0.29747,-1 +0.17965,0.45175,-1 +-0.7269,0.35728,-1 +-0.5434,0.41012,-1 +-0.59823,0.98701,1 +-0.20195,0.62102,1 +0.47146,0.48221,1 +-0.09822,0.58756,1 +-0.35658,0.6371,1 +0.63881,0.42112,1 +0.62981,0.28146,1 +-0.46223,0.61661,1 +-0.073316,0.55822,1 +-0.55406,0.51253,1 +-0.43762,0.87812,1 +-0.22238,0.88851,1 +0.093462,0.6731,1 +0.53175,0.54373,1 +0.40208,0.51638,1 +0.47555,0.65056,1 +-0.23383,0.63643,1 +-0.31579,0.75031,1 +-0.47352,0.63854,1 +0.59239,0.89257,1 +-0.22605,0.79789,1 +-0.43995,0.52099,1 +-0.54645,0.74577,1 +0.46404,0.51065,1 +-0.15194,0.81218,1 +0.48536,0.82018,1 +0.34726,0.70814,1 +0.43897,0.62817,1 +-0.21416,0.64364,1 +0.5738,0.63713,1 +0.38717,0.58578,1 +0.32038,0.53529,1 +-0.20781,0.65132,1 +-0.18651,0.81755,1 +0.24753,0.39082,1 +0.6605,0.89919,1 +-0.28659,0.73376,1 
+-0.32588,0.39866,1 +-0.25205,0.67358,1 +0.37259,0.49786,1 +-0.29097,1.0437,1 +-0.3047,0.86858,1 +-0.2139,1.0932,1 +-0.3683,0.7564,1 +-0.46928,0.88775,1 +0.3935,0.77975,1 +-0.4564,0.80523,1 +0.51128,0.76606,1 +0.2255,0.46451,1 +0.01463,0.40191,1 +-0.19173,0.80943,1 +0.38323,0.75602,1 +0.49792,0.61334,1 +0.35335,0.77324,1 +-0.34723,0.70178,1 +0.5838,0.76358,1 +-0.13728,0.71246,1 +0.38827,0.44977,1 +-0.53173,0.61934,1 +-0.11685,0.87851,1 +0.54336,0.41175,1 +-0.45399,0.66513,1 +-0.21913,0.83485,1 +0.30486,0.98029,1 +0.65677,0.75766,1 +0.6142,0.75039,1 +-0.4581,0.77969,1 +-0.21617,0.88626,1 +-0.26016,0.81009,1 +0.31885,0.84518,1 +-0.23727,0.80179,1 +0.5831,0.7771,1 +0.028413,0.75793,1 +-0.4184,0.68041,1 +0.67413,0.60245,1 +-0.25278,0.70526,1 +0.5161,0.62092,1 +0.20392,0.91641,1 +-0.17207,1.0088,1 +0.27275,0.29347,1 +0.076348,0.56222,1 +-0.36653,0.64831,1 +0.44291,0.80088,1 +-0.19976,0.54295,1 +-0.54076,0.65293,1 +-0.070603,1.003,1 +0.50715,0.35046,1 +-0.060486,0.62983,1 +0.21533,0.6026,1 +0.46809,0.87182,1 +-0.29889,0.7367,1 +0.8613,0.47289,1 +0.70121,0.74573,1 +-0.11343,0.60067,1 +0.31234,0.90756,1 +-0.12173,0.84113,1 +0.36868,0.37053,1 +0.57311,0.4095,1 +-0.25841,0.67192,1 +0.30937,0.50823,1 +0.43319,0.77017,1 +-0.30448,0.5782,1 +0.44276,0.58023,1 +-0.19442,0.89877,1 +-0.061052,0.74185,1 +0.076193,0.35386,1 +0.85827,0.9582,1 +0.37039,0.72342,1 +0.51482,0.76204,1 +0.43128,0.54259,1 +0.42286,0.65242,1 +0.29815,0.93454,1 +0.37128,0.70089,1 +-0.51529,0.76473,1 +0.38526,0.65528,1 +-0.34825,0.5053,1 +0.68511,0.78067,1 +-0.36529,0.45703,1 +-0.40904,0.7423,1 +0.43574,0.4469,1 +0.26888,0.44559,1 +-0.49255,1.0144,1 +0.07616,0.63795,1 +0.49226,0.46876,1 +-0.4025,0.71301,1 +0.10477,0.087445,-1 +-0.56469,0.085959,-1 +-0.99882,0.28641,-1 +0.30459,0.1697,-1 +0.40456,0.45928,-1 +-0.76008,0.60838,-1 +-0.47643,0.34716,-1 +-0.80726,-0.41715,-1 +0.8331,0.44778,-1 +0.59777,0.36097,-1 +0.18562,0.43408,-1 +-0.58728,0.24527,-1 +0.25104,0.38791,-1 +0.30927,0.36073,-1 +0.62925,0.515,-1 +0.011422,-0.15479,-1 +0.23849,0.35433,-1 +0.38469,0.10365,-1 +0.068541,0.34038,-1 +0.64612,0.14578,-1 +-0.56508,0.47823,-1 +-0.8886,0.34441,-1 +-0.62845,0.311,-1 +-0.48411,0.094123,-1 +-0.7185,0.1183,-1 +-0.41442,-0.061557,-1 +0.24785,0.21317,-1 +-0.71066,0.5686,-1 +0.045339,0.32168,-1 +-0.79354,0.50872,-1 +0.16722,0.41396,-1 +-0.92343,0.52982,-1 +-0.949,0.12796,-1 +-1.0669,0.60812,-1 +-0.0033876,0.43574,-1 +0.38705,0.53886,-1 +-0.56853,-0.018435,-1 +-1.322,0.39408,-1 +-0.69066,0.48108,-1 +-0.79232,-0.2703,-1 +-0.71607,0.65663,-1 +0.043044,0.41086,-1 +0.33697,0.57623,-1 +0.63917,0.47979,-1 +-0.87586,0.17603,-1 +-0.53424,0.41944,-1 +0.23553,-0.078331,-1 +-0.51603,0.32365,-1 +-0.72244,0.32523,-1 +-0.58766,0.15374,-1 +-0.82279,0.12657,-1 +-0.92723,0.018859,-1 +0.31818,0.45546,-1 +-0.65826,0.31726,-1 +-0.08474,0.45098,-1 +0.47538,0.62176,-1 +0.030681,0.62441,-1 +-0.6815,0.26652,-1 +-0.90134,0.53325,-1 +-0.58672,1.0042,-1 +0.28867,0.20789,-1 +-0.50126,0.55877,-1 +0.30549,0.38312,-1 +0.72715,0.20264,-1 +0.47275,-0.12446,-1 +-0.56594,0.13578,-1 +-0.05154,0.42471,-1 +0.41026,0.31547,-1 +0.41785,0.53104,-1 +-0.54634,0.0072256,-1 +-0.55861,0.49799,-1 +-0.89504,0.41914,-1 +0.41617,0.13563,-1 +-0.59056,0.27835,-1 +-0.70178,0.10117,-1 +-0.16341,0.44442,-1 +0.089764,0.61767,-1 +0.14266,0.44906,-1 +0.12461,-0.034222,-1 +-0.63647,0.36961,-1 +0.37829,0.45253,-1 +0.4183,0.28391,-1 +-0.93201,-0.063117,-1 +0.39965,0.57662,-1 +0.044016,0.4497,-1 +-0.536,0.213,-1 +-0.63046,0.5265,-1 +0.28571,0.4606,-1 +0.20175,0.21236,-1 +-0.54322,0.69136,-1 
+-0.053363,0.1851,-1 +0.087857,0.29633,-1 +0.50038,0.1509,-1 +-0.54396,0.74356,-1 +0.33474,0.39786,-1 +0.38958,0.4217,-1 +-0.816,0.36677,-1 +0.30833,0.2721,-1 +0.53849,0.2949,-1 +-0.77083,0.31082,-1 +-0.78633,0.35751,-1 +-0.80967,0.22909,-1 +0.28779,0.13951,-1 +0.27796,0.092961,-1 +-0.11057,0.57833,-1 +-0.65763,0.08745,-1 +-0.80682,0.15548,-1 +-0.81519,0.32528,-1 +0.38465,0.44748,-1 +-0.55409,0.48854,-1 +0.10303,0.35029,-1 +-0.76518,0.029609,-1 +-0.8404,0.60754,-1 +0.30736,0.18533,-1 +0.42361,0.44627,-1 +-0.91969,0.12448,-1 +0.72762,0.44809,-1 +-0.64046,0.1244,-1 +0.27132,0.16524,-1 +-0.63159,0.40629,-1 +-0.67517,0.032371,-1 +0.26994,0.31669,-1 +0.31755,0.36953,-1 +-0.83271,0.34786,-1 +-0.59026,0.44374,-1 +-0.62548,0.89655,1 +-0.092105,0.59219,1 +0.44367,0.51722,1 +-0.028066,0.40397,1 +-0.56176,0.74069,1 +0.60343,0.66357,1 +0.54745,0.3774,1 +-0.61994,0.58503,1 +-0.022518,0.60108,1 +-0.52586,0.40893,1 +-0.43427,1.0659,1 +-0.35575,0.98258,1 +0.20621,0.75184,1 +0.56677,0.45614,1 +0.37217,0.54838,1 +0.47784,0.59473,1 +-0.26003,0.60528,1 +-0.49081,0.69331,1 +-0.50208,0.53597,1 +0.50926,0.80169,1 +-0.32397,0.7769,1 +-0.55559,0.35111,1 +-0.59981,0.80653,1 +0.26378,0.49887,1 +-0.05552,0.8821,1 +0.53737,0.84715,1 +0.34525,0.75757,1 +0.43549,0.47986,1 +-0.29398,0.54161,1 +0.67567,0.59244,1 +0.37385,0.59675,1 +0.24893,0.64816,1 +-0.072676,0.62233,1 +-0.20899,0.9437,1 +0.18862,0.43836,1 +0.63112,1.0166,1 +-0.37138,0.74645,1 +-0.43789,0.33297,1 +0.00055432,0.52544,1 +0.53814,0.51341,1 +-0.26021,1.1256,1 +-0.43041,0.83932,1 +-0.30045,1.0391,1 +-0.38595,0.72553,1 +-0.39014,0.77809,1 +0.2603,0.73045,1 +-0.68939,0.78716,1 +0.36637,0.77065,1 +0.25886,0.45813,1 +0.053765,0.46304,1 +-0.14656,0.82036,1 +0.37021,0.93742,1 +0.51629,0.64455,1 +0.30574,0.95369,1 +-0.26102,0.62947,1 +0.44764,0.81623,1 +-0.091775,0.68644,1 +0.3034,0.50979,1 +-0.56522,0.67874,1 +-0.061568,0.65991,1 +0.64727,0.27904,1 +-0.56576,0.52103,1 +-0.093066,0.87503,1 +0.37087,1.1273,1 +0.64998,0.72498,1 +0.59468,0.83162,1 +-0.47986,0.83424,1 +-0.24649,0.7811,1 +-0.25786,0.84983,1 +0.32397,0.76999,1 +-0.15467,0.95341,1 +0.7358,0.77384,1 +0.075105,0.92153,1 +-0.43937,0.63791,1 +0.73665,0.6614,1 +-0.23446,0.69898,1 +0.41312,0.41873,1 +0.29885,0.8182,1 +-0.14137,1.0701,1 +0.28626,0.28798,1 +0.12787,0.45035,1 +-0.34039,0.58567,1 +0.34876,0.82583,1 +-0.216,0.44365,1 +-0.55536,0.75043,1 +-0.1238,0.9389,1 +0.67536,0.53134,1 +-0.14806,0.52184,1 +0.16695,0.62252,1 +0.39689,0.71972,1 +-0.41631,0.66434,1 +0.84207,0.41357,1 +0.6738,0.78586,1 +0.039579,0.69488,1 +0.28744,0.93761,1 +-0.22815,0.80382,1 +0.52902,0.45207,1 +0.69658,0.48939,1 +-0.28137,0.68394,1 +0.15876,0.56536,1 +0.38873,0.81145,1 +-0.32007,0.4795,1 +0.47037,0.65619,1 +-0.22054,0.83305,1 +-0.01671,0.68146,1 +0.11538,0.37156,1 +0.7332,0.92744,1 +0.2756,0.71024,1 +0.4407,0.82158,1 +0.38049,0.64727,1 +0.3908,0.63263,1 +0.2994,0.9673,1 +0.068365,0.67706,1 +-0.56099,0.78769,1 +0.5095,0.69928,1 +-0.45492,0.44361,1 +0.77848,0.80816,1 +-0.33026,0.51714,1 +-0.41194,0.75154,1 +0.45399,0.61988,1 +0.11237,0.38474,1 +-0.501,0.94073,1 +0.23655,0.46296,1 +0.5021,0.55981,1 +-0.39836,0.79972,1 diff --git a/test/unit/math/laplace/aki_synth_data/x1.csv b/test/unit/math/laplace/aki_synth_data/x1.csv new file mode 100644 index 00000000000..d4a53c3b7e1 --- /dev/null +++ b/test/unit/math/laplace/aki_synth_data/x1.csv @@ -0,0 +1 @@ +0.051008 -0.74807 -0.77293 0.21837 0.37268 -0.62932 -0.43307 -0.84152 0.47526 0.32083 0.32061 -0.89077 0.1785 0.31558 0.55777 0.031919 0.25091 0.23572 -0.072362 0.5044 -0.63223 -0.76785 
-0.70018 -0.64713 -0.76739 -0.51789 0.17517 -0.68031 0.015952 -0.71481 0.078379 -0.80872 -0.84211 -0.98592 0.29104 0.24322 -0.60104 -1.2465 -0.82769 -0.62117 -0.70584 0.067189 0.30505 0.60788 -0.78937 -0.53123 0.25202 -0.5788 -0.83177 -0.69859 -0.73643 -0.93496 0.43959 -0.54691 -0.084056 0.32211 0.10765 -0.71864 -0.87878 -0.69846 0.39757 -0.50451 0.25024 0.61709 0.31833 -0.57453 0.097619 0.48449 0.52401 -0.78138 -0.49705 -0.96985 0.43541 -0.67942 -0.62529 -0.023181 0.232 0.093844 0.14234 -0.61686 0.23636 0.38914 -0.95179 0.24088 0.12446 -0.60566 -0.71397 0.31008 0.18019 -0.42664 0.061432 0.07737 0.42815 -0.80251 0.40143 0.37085 -0.80775 0.50164 0.58238 -0.59136 -0.87037 -0.72087 0.27778 0.33241 -0.14092 -0.5976 -0.85582 -0.88912 0.21346 -0.53468 0.31687 -0.68122 -0.97586 0.41457 0.32751 -0.93209 0.58395 -0.44437 0.29109 -0.51081 -0.96598 0.18741 0.17965 -0.7269 -0.5434 -0.59823 -0.20195 0.47146 -0.09822 -0.35658 0.63881 0.62981 -0.46223 -0.073316 -0.55406 -0.43762 -0.22238 0.093462 0.53175 0.40208 0.47555 -0.23383 -0.31579 -0.47352 0.59239 -0.22605 -0.43995 -0.54645 0.46404 -0.15194 0.48536 0.34726 0.43897 -0.21416 0.5738 0.38717 0.32038 -0.20781 -0.18651 0.24753 0.6605 -0.28659 -0.32588 -0.25205 0.37259 -0.29097 -0.3047 -0.2139 -0.3683 -0.46928 0.3935 -0.4564 0.51128 0.2255 0.01463 -0.19173 0.38323 0.49792 0.35335 -0.34723 0.5838 -0.13728 0.38827 -0.53173 -0.11685 0.54336 -0.45399 -0.21913 0.30486 0.65677 0.6142 -0.4581 -0.21617 -0.26016 0.31885 -0.23727 0.5831 0.028413 -0.4184 0.67413 -0.25278 0.5161 0.20392 -0.17207 0.27275 0.076348 -0.36653 0.44291 -0.19976 -0.54076 -0.070603 0.50715 -0.060486 0.21533 0.46809 -0.29889 0.8613 0.70121 -0.11343 0.31234 -0.12173 0.36868 0.57311 -0.25841 0.30937 0.43319 -0.30448 0.44276 -0.19442 -0.061052 0.076193 0.85827 0.37039 0.51482 0.43128 0.42286 0.29815 0.37128 -0.51529 0.38526 -0.34825 0.68511 -0.36529 -0.40904 0.43574 0.26888 -0.49255 0.07616 0.49226 -0.4025 0.10477 -0.56469 -0.99882 0.30459 0.40456 -0.76008 -0.47643 -0.80726 0.8331 0.59777 0.18562 -0.58728 0.25104 0.30927 0.62925 0.011422 0.23849 0.38469 0.068541 0.64612 -0.56508 -0.8886 -0.62845 -0.48411 -0.7185 -0.41442 0.24785 -0.71066 0.045339 -0.79354 0.16722 -0.92343 -0.949 -1.0669 -0.0033876 0.38705 -0.56853 -1.322 -0.69066 -0.79232 -0.71607 0.043044 0.33697 0.63917 -0.87586 -0.53424 0.23553 -0.51603 -0.72244 -0.58766 -0.82279 -0.92723 0.31818 -0.65826 -0.08474 0.47538 0.030681 -0.6815 -0.90134 -0.58672 0.28867 -0.50126 0.30549 0.72715 0.47275 -0.56594 -0.05154 0.41026 0.41785 -0.54634 -0.55861 -0.89504 0.41617 -0.59056 -0.70178 -0.16341 0.089764 0.14266 0.12461 -0.63647 0.37829 0.4183 -0.93201 0.39965 0.044016 -0.536 -0.63046 0.28571 0.20175 -0.54322 -0.053363 0.087857 0.50038 -0.54396 0.33474 0.38958 -0.816 0.30833 0.53849 -0.77083 -0.78633 -0.80967 0.28779 0.27796 -0.11057 -0.65763 -0.80682 -0.81519 0.38465 -0.55409 0.10303 -0.76518 -0.8404 0.30736 0.42361 -0.91969 0.72762 -0.64046 0.27132 -0.63159 -0.67517 0.26994 0.31755 -0.83271 -0.59026 -0.62548 -0.092105 0.44367 -0.028066 -0.56176 0.60343 0.54745 -0.61994 -0.022518 -0.52586 -0.43427 -0.35575 0.20621 0.56677 0.37217 0.47784 -0.26003 -0.49081 -0.50208 0.50926 -0.32397 -0.55559 -0.59981 0.26378 -0.05552 0.53737 0.34525 0.43549 -0.29398 0.67567 0.37385 0.24893 -0.072676 -0.20899 0.18862 0.63112 -0.37138 -0.43789 0.00055432 0.53814 -0.26021 -0.43041 -0.30045 -0.38595 -0.39014 0.2603 -0.68939 0.36637 0.25886 0.053765 -0.14656 0.37021 0.51629 0.30574 -0.26102 0.44764 -0.091775 0.3034 -0.56522 -0.061568 0.64727 -0.56576 -0.093066 0.37087 
0.64998 0.59468 -0.47986 -0.24649 -0.25786 0.32397 -0.15467 0.7358 0.075105 -0.43937 0.73665 -0.23446 0.41312 0.29885 -0.14137 0.28626 0.12787 -0.34039 0.34876 -0.216 -0.55536 -0.1238 0.67536 -0.14806 0.16695 0.39689 -0.41631 0.84207 0.6738 0.039579 0.28744 -0.22815 0.52902 0.69658 -0.28137 0.15876 0.38873 -0.32007 0.47037 -0.22054 -0.01671 0.11538 0.7332 0.2756 0.4407 0.38049 0.3908 0.2994 0.068365 -0.56099 0.5095 -0.45492 0.77848 -0.33026 -0.41194 0.45399 0.11237 -0.501 0.23655 0.5021 -0.39836 diff --git a/test/unit/math/laplace/aki_synth_data/x2.csv b/test/unit/math/laplace/aki_synth_data/x2.csv new file mode 100644 index 00000000000..34827306a26 --- /dev/null +++ b/test/unit/math/laplace/aki_synth_data/x2.csv @@ -0,0 +1 @@ +0.16086 0.08904 0.26317 0.12706 0.49656 0.63202 0.14479 -0.19131 0.22484 0.32721 0.33408 0.41169 0.44691 0.38853 0.47273 0.01223 0.30717 0.22494 0.33377 0.080546 0.44552 0.23615 0.21039 0.15921 0.09259 0.032881 0.34535 0.47612 0.32168 0.51421 0.32285 0.47037 0.092942 0.48309 0.34276 0.51488 0.050601 0.45923 0.36187 -0.10912 0.65908 0.60575 0.47418 0.39362 0.17592 0.42653 0.1703 0.26554 0.54447 0.38567 0.11858 0.11371 0.41431 0.24956 0.36521 0.69087 0.57947 0.25646 0.45065 0.95054 0.1181 0.57196 0.39784 0.10186 0.087906 0.18624 0.55177 0.35373 0.46617 -0.075347 0.59948 0.46625 0.12192 0.30754 0.07099 0.40443 0.71067 0.46674 0.17899 0.25508 0.51544 0.4043 -0.037722 0.71949 0.45179 0.26906 0.30872 0.34675 0.46205 0.64723 0.32491 0.32183 0.13446 0.66879 0.42516 0.26407 0.41486 0.23935 0.22843 0.3023 0.26941 0.19677 0.21792 0.2735 0.39247 0.1479 0.14514 0.26896 0.43612 0.57901 0.39706 0.042098 0.45965 0.27141 0.3678 0.09362 0.47147 0.2301 0.19366 0.41496 0.17931 0.29747 0.45175 0.35728 0.41012 0.98701 0.62102 0.48221 0.58756 0.6371 0.42112 0.28146 0.61661 0.55822 0.51253 0.87812 0.88851 0.6731 0.54373 0.51638 0.65056 0.63643 0.75031 0.63854 0.89257 0.79789 0.52099 0.74577 0.51065 0.81218 0.82018 0.70814 0.62817 0.64364 0.63713 0.58578 0.53529 0.65132 0.81755 0.39082 0.89919 0.73376 0.39866 0.67358 0.49786 1.0437 0.86858 1.0932 0.7564 0.88775 0.77975 0.80523 0.76606 0.46451 0.40191 0.80943 0.75602 0.61334 0.77324 0.70178 0.76358 0.71246 0.44977 0.61934 0.87851 0.41175 0.66513 0.83485 0.98029 0.75766 0.75039 0.77969 0.88626 0.81009 0.84518 0.80179 0.7771 0.75793 0.68041 0.60245 0.70526 0.62092 0.91641 1.0088 0.29347 0.56222 0.64831 0.80088 0.54295 0.65293 1.003 0.35046 0.62983 0.6026 0.87182 0.7367 0.47289 0.74573 0.60067 0.90756 0.84113 0.37053 0.4095 0.67192 0.50823 0.77017 0.5782 0.58023 0.89877 0.74185 0.35386 0.9582 0.72342 0.76204 0.54259 0.65242 0.93454 0.70089 0.76473 0.65528 0.5053 0.78067 0.45703 0.7423 0.4469 0.44559 1.0144 0.63795 0.46876 0.71301 0.087445 0.085959 0.28641 0.1697 0.45928 0.60838 0.34716 -0.41715 0.44778 0.36097 0.43408 0.24527 0.38791 0.36073 0.515 -0.15479 0.35433 0.10365 0.34038 0.14578 0.47823 0.34441 0.311 0.094123 0.1183 -0.061557 0.21317 0.5686 0.32168 0.50872 0.41396 0.52982 0.12796 0.60812 0.43574 0.53886 -0.018435 0.39408 0.48108 -0.2703 0.65663 0.41086 0.57623 0.47979 0.17603 0.41944 -0.078331 0.32365 0.32523 0.15374 0.12657 0.018859 0.45546 0.31726 0.45098 0.62176 0.62441 0.26652 0.53325 1.0042 0.20789 0.55877 0.38312 0.20264 -0.12446 0.13578 0.42471 0.31547 0.53104 0.0072256 0.49799 0.41914 0.13563 0.27835 0.10117 0.44442 0.61767 0.44906 -0.034222 0.36961 0.45253 0.28391 -0.063117 0.57662 0.4497 0.213 0.5265 0.4606 0.21236 0.69136 0.1851 0.29633 0.1509 0.74356 0.39786 0.4217 0.36677 0.2721 0.2949 0.31082 0.35751 0.22909 0.13951 
0.092961 0.57833 0.08745 0.15548 0.32528 0.44748 0.48854 0.35029 0.029609 0.60754 0.18533 0.44627 0.12448 0.44809 0.1244 0.16524 0.40629 0.032371 0.31669 0.36953 0.34786 0.44374 0.89655 0.59219 0.51722 0.40397 0.74069 0.66357 0.3774 0.58503 0.60108 0.40893 1.0659 0.98258 0.75184 0.45614 0.54838 0.59473 0.60528 0.69331 0.53597 0.80169 0.7769 0.35111 0.80653 0.49887 0.8821 0.84715 0.75757 0.47986 0.54161 0.59244 0.59675 0.64816 0.62233 0.9437 0.43836 1.0166 0.74645 0.33297 0.52544 0.51341 1.1256 0.83932 1.0391 0.72553 0.77809 0.73045 0.78716 0.77065 0.45813 0.46304 0.82036 0.93742 0.64455 0.95369 0.62947 0.81623 0.68644 0.50979 0.67874 0.65991 0.27904 0.52103 0.87503 1.1273 0.72498 0.83162 0.83424 0.7811 0.84983 0.76999 0.95341 0.77384 0.92153 0.63791 0.6614 0.69898 0.41873 0.8182 1.0701 0.28798 0.45035 0.58567 0.82583 0.44365 0.75043 0.9389 0.53134 0.52184 0.62252 0.71972 0.66434 0.41357 0.78586 0.69488 0.93761 0.80382 0.45207 0.48939 0.68394 0.56536 0.81145 0.4795 0.65619 0.83305 0.68146 0.37156 0.92744 0.71024 0.82158 0.64727 0.63263 0.9673 0.67706 0.78769 0.69928 0.44361 0.80816 0.51714 0.75154 0.61988 0.38474 0.94073 0.46296 0.55981 0.79972 diff --git a/test/unit/math/laplace/aki_synth_data/y.csv b/test/unit/math/laplace/aki_synth_data/y.csv new file mode 100644 index 00000000000..b34da88e004 --- /dev/null +++ b/test/unit/math/laplace/aki_synth_data/y.csv @@ -0,0 +1 @@ +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 diff --git a/test/unit/math/laplace/data_cpp/ReadMe.rtf b/test/unit/math/laplace/data_cpp/ReadMe.rtf new file mode 100644 index 00000000000..156b6811c2b --- /dev/null +++ b/test/unit/math/laplace/data_cpp/ReadMe.rtf @@ -0,0 +1,13 @@ +{\rtf1\ansi\ansicpg1252\cocoartf1671 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\margl1440\margr1440\vieww11840\viewh9500\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 READ ME\ +#########\ +\ +data_cpp\ +########\ +This data is generated with make_data_hd.r. 
The correlation is 0.9, and the average number of samples per group is 3.} \ No newline at end of file diff --git a/test/unit/math/laplace/data_cpp/index_10.csv b/test/unit/math/laplace/data_cpp/index_10.csv new file mode 100644 index 00000000000..e2397c9a59a --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_10.csv @@ -0,0 +1 @@ +2 7 9 10 9 1 8 10 6 9 8 6 3 3 10 7 10 4 8 10 5 10 7 7 3 3 1 10 1 3 diff --git a/test/unit/math/laplace/data_cpp/index_100.csv b/test/unit/math/laplace/data_cpp/index_100.csv new file mode 100644 index 00000000000..a4c74f83357 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_100.csv @@ -0,0 +1 @@ +97 69 23 1 23 79 46 93 71 14 76 31 24 89 82 79 84 46 62 55 58 6 48 34 39 2 67 37 1 60 7 84 75 32 87 7 88 17 68 53 95 16 73 34 33 39 33 50 70 21 90 45 18 40 38 13 7 53 56 47 77 49 3 31 52 2 48 32 69 41 86 82 15 15 63 71 39 30 59 62 32 2 97 60 85 82 88 81 93 52 27 24 30 9 22 30 35 53 30 95 50 78 77 53 33 51 63 39 7 53 84 23 16 3 4 43 35 61 75 88 82 93 67 14 83 51 38 29 45 91 23 62 60 26 82 20 86 22 23 29 33 9 1 33 62 79 30 61 83 6 28 91 10 95 91 17 39 8 91 8 69 78 36 12 56 42 68 72 45 62 88 5 14 29 40 48 83 66 68 92 11 37 4 66 56 89 74 61 99 33 26 85 66 58 52 85 46 97 65 38 45 97 87 24 73 68 90 73 53 94 36 1 91 57 89 65 73 7 68 1 100 82 28 31 72 36 74 10 37 19 91 77 76 67 100 20 41 22 52 13 18 4 15 13 84 90 62 89 14 92 3 81 64 5 21 91 58 95 38 55 87 15 8 44 51 29 74 83 37 75 41 17 67 49 65 33 64 66 34 83 88 20 18 5 85 91 44 76 68 34 100 54 29 4 96 40 72 6 34 78 diff --git a/test/unit/math/laplace/data_cpp/index_20.csv b/test/unit/math/laplace/data_cpp/index_20.csv new file mode 100644 index 00000000000..0bfd020d086 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_20.csv @@ -0,0 +1 @@ +16 13 16 19 14 6 20 11 20 13 11 4 8 13 11 19 8 9 15 8 9 2 18 9 6 2 1 5 20 20 12 14 14 2 19 3 15 13 1 18 13 2 1 15 16 13 15 5 16 15 13 20 3 8 20 14 4 10 9 15 diff --git a/test/unit/math/laplace/data_cpp/index_200.csv b/test/unit/math/laplace/data_cpp/index_200.csv new file mode 100644 index 00000000000..50337e7d771 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_200.csv @@ -0,0 +1 @@ +186 179 114 47 47 84 78 82 63 136 12 123 76 168 4 7 167 5 8 162 74 63 119 52 102 36 168 168 132 186 40 112 69 5 146 12 77 166 154 38 33 180 108 120 19 123 40 103 164 146 134 10 160 74 117 152 168 104 132 181 23 152 57 132 39 67 12 157 70 39 123 18 3 194 145 18 101 41 100 19 170 183 20 57 175 197 48 149 91 139 107 27 107 7 81 35 97 9 69 11 14 10 58 188 195 123 131 25 174 21 18 82 172 77 39 112 136 190 132 87 67 185 80 162 75 185 178 87 118 20 154 100 157 126 31 57 62 98 97 165 57 99 193 194 106 139 190 153 75 179 55 80 2 7 83 8 19 69 103 149 98 101 196 173 162 168 85 145 52 181 136 51 52 137 87 157 105 113 70 136 67 190 79 87 140 119 8 15 33 39 190 162 143 54 20 137 15 137 146 10 8 174 171 87 187 32 28 114 115 1 39 187 81 16 35 180 84 34 145 98 130 23 31 121 176 75 96 113 143 162 28 106 28 82 56 79 152 6 168 137 154 15 32 10 77 134 40 60 156 111 88 55 184 186 19 193 141 112 102 195 98 157 101 185 94 65 89 194 20 89 20 30 72 116 100 126 105 83 182 144 160 132 104 170 98 43 168 178 58 47 62 176 87 73 52 8 149 21 121 67 184 197 147 5 19 178 154 125 64 3 89 39 149 94 154 117 38 192 156 12 175 3 41 142 75 17 13 44 179 79 137 74 46 145 70 42 155 168 14 150 136 107 74 23 48 70 120 20 161 3 65 123 105 160 85 167 57 154 199 136 94 128 42 98 96 140 98 28 111 106 2 32 140 61 108 59 68 148 40 194 193 43 190 43 9 137 105 65 115 23 102 169 113 59 3 65 109 31 120 190 192 130 148 63 54 121 64 
151 128 65 126 19 79 13 184 122 49 164 190 99 113 58 48 136 69 18 60 196 18 28 107 180 42 191 14 132 111 95 152 59 126 78 35 56 197 147 171 24 73 138 30 148 164 14 145 53 118 36 36 170 111 199 11 195 147 163 126 45 56 156 167 6 156 65 7 96 41 39 106 4 66 101 125 184 7 113 89 117 177 139 108 12 162 188 89 109 147 148 87 111 35 196 140 182 135 156 165 1 62 81 27 152 178 54 18 181 93 178 127 192 75 141 71 42 196 128 42 58 59 123 124 127 82 120 153 3 93 52 148 118 156 193 53 191 153 60 30 173 71 122 15 21 107 144 2 51 24 38 12 79 70 175 108 199 160 46 23 117 86 22 133 159 105 152 144 87 43 28 167 46 146 38 67 18 53 175 57 175 157 149 174 80 100 85 159 141 199 162 91 152 diff --git a/test/unit/math/laplace/data_cpp/index_30.csv b/test/unit/math/laplace/data_cpp/index_30.csv new file mode 100644 index 00000000000..9f57a743130 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_30.csv @@ -0,0 +1 @@ +18 7 13 28 13 13 1 25 6 8 23 6 27 30 8 11 23 30 21 6 21 26 20 8 6 20 17 8 21 20 11 12 3 19 18 6 17 24 3 28 30 11 13 7 16 24 20 7 17 10 24 6 17 25 13 18 5 2 17 21 28 30 23 8 24 23 18 27 28 26 20 28 8 22 26 4 28 12 3 11 27 6 20 21 13 27 29 13 20 19 diff --git a/test/unit/math/laplace/data_cpp/index_40.csv b/test/unit/math/laplace/data_cpp/index_40.csv new file mode 100644 index 00000000000..c25fd4cd1d1 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_40.csv @@ -0,0 +1 @@ +5 24 19 36 24 23 30 5 32 21 20 40 19 5 20 33 17 31 7 4 40 22 31 12 32 20 2 20 34 32 39 40 31 5 28 2 22 12 13 19 11 8 6 25 10 34 32 28 12 20 17 8 11 19 20 4 24 13 2 21 28 17 27 9 34 13 12 37 3 20 19 23 26 23 16 23 32 20 22 30 6 13 28 35 37 5 9 17 39 26 26 37 18 32 14 8 12 18 29 15 21 21 14 30 8 15 31 12 20 34 6 3 40 9 7 4 2 17 31 34 diff --git a/test/unit/math/laplace/data_cpp/index_50.csv b/test/unit/math/laplace/data_cpp/index_50.csv new file mode 100644 index 00000000000..77293854300 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_50.csv @@ -0,0 +1 @@ +46 25 25 43 50 8 13 45 50 26 12 41 3 14 2 8 47 39 50 21 3 21 50 47 49 25 46 17 35 2 26 28 4 35 29 4 45 4 23 40 44 44 35 38 37 12 39 30 49 3 24 12 6 18 46 38 44 32 22 38 2 29 47 41 14 16 26 44 3 14 47 8 39 24 13 23 5 1 42 23 31 10 22 36 13 32 32 4 5 46 1 33 32 24 12 43 9 38 31 22 7 42 4 24 50 15 20 42 37 31 37 2 26 36 6 23 1 6 17 48 44 48 9 32 26 13 29 32 48 16 9 44 47 31 27 38 25 8 34 48 38 10 12 28 29 21 16 15 29 10 diff --git a/test/unit/math/laplace/data_cpp/index_500.csv b/test/unit/math/laplace/data_cpp/index_500.csv new file mode 100644 index 00000000000..b8db82843f7 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/index_500.csv @@ -0,0 +1 @@ +140 219 375 424 238 440 353 28 496 213 336 105 213 138 301 338 204 108 20 237 499 3 151 187 237 390 74 185 491 23 11 34 479 114 465 260 149 187 181 317 109 137 397 274 282 433 490 317 17 283 443 11 393 262 454 146 465 334 455 395 86 94 481 143 389 426 286 1 208 211 303 387 103 342 427 268 484 147 178 42 496 385 482 376 58 301 459 122 469 471 280 453 346 429 290 402 265 40 291 324 416 276 3 99 37 342 476 473 490 158 418 136 361 61 8 359 88 486 10 340 436 78 36 150 185 262 52 190 86 392 427 72 195 181 350 432 209 368 293 132 500 175 371 357 198 66 36 203 256 407 376 97 235 446 41 325 367 103 295 25 347 334 135 247 438 33 296 229 206 327 92 30 88 218 386 81 146 226 295 154 374 253 325 402 472 185 65 432 326 166 25 395 32 496 161 229 60 317 87 496 9 185 461 474 138 100 465 254 255 4 169 132 298 87 16 495 474 52 249 39 16 172 193 475 235 360 355 5 292 448 414 427 208 150 299 143 154 273 290 35 378 442 231 106 115 402 
478 401 435 127 93 195 91 149 249 478 72 36 416 84 380 312 197 262 231 80 261 350 401 56 340 417 500 191 242 272 342 372 347 60 162 109 185 92 201 363 353 43 385 492 114 22 256 264 308 81 93 296 162 170 146 6 276 47 99 189 465 142 215 44 329 438 460 303 488 447 372 73 233 241 495 209 165 147 330 226 346 254 377 120 410 444 142 71 189 170 201 295 71 198 22 13 173 234 55 377 187 326 194 70 364 67 232 463 218 325 283 466 356 133 455 186 465 255 21 302 16 84 273 53 483 191 12 411 395 299 174 150 117 366 247 105 253 493 476 260 93 341 39 93 136 69 185 168 265 412 248 61 443 49 3 241 220 76 232 197 484 37 250 143 264 109 380 316 401 466 52 282 212 147 131 500 294 261 411 342 335 355 492 336 462 331 319 194 50 288 57 469 348 78 445 346 34 44 110 447 144 54 269 258 307 44 328 274 405 347 403 222 376 103 73 462 163 403 371 306 40 73 130 498 70 275 171 158 285 67 357 28 163 137 327 10 164 107 385 185 253 8 163 420 412 318 115 189 375 96 9 215 473 45 99 414 224 454 102 479 187 291 43 235 420 279 186 85 155 173 294 267 490 236 318 229 175 160 293 173 168 147 421 280 65 308 392 204 299 19 150 123 278 73 400 402 110 107 225 47 216 401 107 336 135 464 464 204 92 204 105 407 442 466 51 402 195 339 38 426 27 134 172 497 149 381 440 287 83 312 41 235 139 26 31 478 323 427 436 170 322 288 453 172 444 200 103 398 259 414 305 475 283 216 275 414 97 41 31 406 100 28 254 128 458 106 312 32 120 253 459 278 137 145 124 55 238 210 130 239 420 390 8 178 243 136 159 181 200 345 280 253 293 19 228 41 488 277 284 222 426 2 26 381 315 372 156 331 165 319 186 47 334 191 320 383 110 141 382 467 200 171 140 231 306 87 16 352 321 111 18 231 187 467 5 109 187 325 41 328 52 445 448 85 15 277 64 400 380 191 181 416 82 334 29 226 484 425 22 192 301 406 136 23 462 54 408 198 399 482 125 8 307 382 199 322 181 357 176 457 270 277 285 196 262 472 259 135 252 341 169 198 394 230 320 228 155 233 285 67 408 153 499 137 178 426 243 340 469 479 338 107 307 275 350 436 340 119 196 98 90 436 292 376 38 315 264 116 494 286 84 97 168 276 28 464 318 126 345 171 16 35 102 386 366 246 139 162 35 218 138 348 433 132 68 138 269 205 351 314 212 140 42 361 266 129 424 387 340 197 493 205 86 434 332 208 8 272 235 216 112 456 369 309 41 19 476 109 11 5 421 343 28 255 133 489 327 166 337 217 284 424 104 426 444 486 151 497 418 437 464 404 220 349 429 238 481 42 121 198 399 179 86 427 420 415 270 37 32 86 157 415 391 233 192 397 311 184 251 112 35 31 319 126 17 448 371 463 428 146 32 393 334 10 313 187 155 358 423 465 139 205 242 219 253 3 225 88 320 322 298 470 448 366 485 120 306 83 265 458 395 276 228 5 4 440 42 150 412 227 100 106 193 266 356 418 12 24 205 142 468 28 243 441 468 379 464 267 367 379 480 298 178 336 287 345 129 164 339 18 255 43 190 236 452 486 417 50 201 262 269 77 21 16 230 302 478 410 260 104 17 484 446 133 492 214 467 167 497 269 272 109 432 304 292 369 341 121 297 486 240 37 165 31 79 129 387 171 56 415 378 40 450 127 380 272 422 308 95 487 262 459 307 16 423 176 285 203 73 494 19 106 385 394 262 314 479 121 313 202 120 309 500 88 186 408 471 187 439 250 208 439 345 27 385 145 26 87 293 349 32 139 334 438 27 371 55 423 417 137 55 296 419 267 369 425 279 179 40 319 102 364 283 316 86 401 475 182 141 199 285 349 187 55 209 469 459 354 154 173 122 103 186 147 455 98 112 387 469 209 480 105 13 200 114 274 7 408 498 103 242 164 356 238 187 407 369 400 376 470 93 94 151 49 449 12 304 353 227 467 242 19 121 390 241 489 425 195 465 216 341 267 96 393 324 255 139 101 129 463 45 361 243 201 426 260 479 270 481 38 307 410 258 294 341 85 
498 260 500 390 456 63 70 497 228 336 312 296 163 456 325 217 404 285 304 210 288 47 186 283 203 367 157 78 183 423 401 211 56 208 89 381 358 337 157 194 481 426 332 273 331 497 119 8 420 270 499 363 279 300 49 142 167 263 472 96 340 97 337 11 15 314 153 118 2 459 163 12 269 213 113 137 19 41 447 471 105 450 404 287 237 76 482 419 226 74 103 229 434 382 361 393 356 174 226 140 392 94 218 436 40 195 97 427 91 259 448 288 371 492 287 479 396 358 161 54 23 123 32 149 369 231 237 486 337 117 192 221 136 342 438 116 36 439 311 206 401 85 362 302 270 135 184 454 149 121 62 50 158 470 357 370 475 307 495 167 391 121 335 112 443 308 32 163 373 344 443 453 135 187 369 487 399 493 397 194 257 133 323 208 388 99 313 90 302 18 122 484 36 102 148 11 454 158 120 363 75 282 470 310 295 62 397 307 42 344 64 401 422 38 179 478 197 114 147 320 156 256 73 372 102 220 246 233 35 241 492 126 362 259 485 20 165 425 9 59 173 277 341 47 132 339 463 162 70 468 367 103 76 382 381 48 408 193 47 97 430 366 441 26 384 32 125 230 161 264 310 317 87 217 490 88 94 21 389 288 407 350 362 167 332 473 99 481 298 245 60 356 31 49 353 49 428 93 209 274 478 345 370 74 299 194 449 258 347 76 443 249 496 120 178 265 464 46 diff --git a/test/unit/math/laplace/data_cpp/m_10.csv b/test/unit/math/laplace/data_cpp/m_10.csv new file mode 100644 index 00000000000..588e1382ed6 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/m_10.csv @@ -0,0 +1 @@ +3 1 5 1 1 2 4 3 3 7 diff --git a/test/unit/math/laplace/data_cpp/m_100.csv b/test/unit/math/laplace/data_cpp/m_100.csv new file mode 100644 index 00000000000..a485b659810 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/m_100.csv @@ -0,0 +1 @@ +5 3 3 4 3 3 5 3 2 2 1 1 3 4 4 2 3 3 1 3 2 3 5 3 0 2 1 2 5 5 3 3 7 5 2 3 4 4 5 3 3 1 1 2 4 3 1 3 2 2 3 4 6 1 2 3 1 3 1 3 3 6 2 2 3 4 4 6 3 1 2 3 4 3 3 3 3 3 3 0 2 6 5 4 4 2 3 5 4 3 8 2 3 1 4 1 4 0 1 3 diff --git a/test/unit/math/laplace/data_cpp/m_20.csv b/test/unit/math/laplace/data_cpp/m_20.csv new file mode 100644 index 00000000000..33c9627f520 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/m_20.csv @@ -0,0 +1 @@ +3 4 2 2 2 2 0 4 4 1 3 1 7 4 6 4 0 2 3 6 diff --git a/test/unit/math/laplace/data_cpp/m_200.csv b/test/unit/math/laplace/data_cpp/m_200.csv new file mode 100644 index 00000000000..e0a6080a429 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/m_200.csv @@ -0,0 +1 @@ +2 3 6 2 3 2 5 5 2 4 2 6 2 4 4 1 1 7 6 6 3 1 5 2 1 0 2 6 0 3 3 3 2 1 4 3 0 4 7 4 3 5 4 1 1 3 3 3 1 0 2 5 3 3 2 3 6 4 4 3 1 3 3 2 6 1 5 1 4 5 2 1 2 4 5 1 3 2 5 3 3 4 2 2 3 1 8 1 5 0 2 0 2 3 1 3 2 7 2 4 4 3 2 2 5 4 5 4 2 0 5 3 5 2 2 1 4 3 2 4 3 2 6 1 2 5 2 3 0 2 1 6 1 2 1 7 6 1 3 4 3 1 2 3 5 4 4 5 5 1 1 7 3 6 1 6 5 0 2 4 1 7 1 3 2 1 4 8 1 3 2 1 2 3 5 2 1 5 3 3 3 2 1 4 3 3 2 2 0 7 2 3 4 4 3 4 3 0 4 0 diff --git a/test/unit/math/laplace/data_cpp/m_30.csv b/test/unit/math/laplace/data_cpp/m_30.csv new file mode 100644 index 00000000000..4473ef0e264 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/m_30.csv @@ -0,0 +1 @@ +1 1 3 1 1 7 3 6 0 1 4 2 7 0 0 1 5 4 2 7 5 1 4 4 2 3 4 6 1 4 diff --git a/test/unit/math/laplace/data_cpp/m_40.csv b/test/unit/math/laplace/data_cpp/m_40.csv new file mode 100644 index 00000000000..f8c630e4595 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/m_40.csv @@ -0,0 +1 @@ +0 4 2 3 5 3 2 4 3 1 2 6 4 2 2 1 5 2 5 9 4 3 4 3 1 3 1 4 1 3 5 6 1 5 1 1 3 0 2 4 diff --git a/test/unit/math/laplace/data_cpp/m_50.csv b/test/unit/math/laplace/data_cpp/m_50.csv new file mode 100644 index 00000000000..49739655bda --- /dev/null +++ 
b/test/unit/math/laplace/data_cpp/m_50.csv @@ -0,0 +1 @@ +3 4 4 5 2 3 1 4 3 3 0 5 4 3 2 3 2 1 0 1 3 3 4 4 4 5 1 2 5 1 4 6 1 1 3 2 3 6 3 1 2 3 2 6 2 4 5 4 2 5 diff --git a/test/unit/math/laplace/data_cpp/m_500.csv b/test/unit/math/laplace/data_cpp/m_500.csv new file mode 100644 index 00000000000..b081fa01643 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/m_500.csv @@ -0,0 +1 @@ +1 2 4 2 4 1 1 6 3 3 5 4 2 0 2 7 3 3 6 2 3 3 3 1 2 4 3 6 1 1 5 8 1 2 5 5 4 4 2 5 7 5 3 3 2 1 6 1 5 3 1 4 1 3 5 3 1 1 1 3 2 2 1 2 2 1 3 1 1 4 2 2 6 3 1 4 1 3 1 1 2 1 2 3 4 6 5 5 1 2 2 3 6 4 1 3 6 2 5 3 1 5 8 2 5 4 4 1 6 3 1 4 1 4 2 2 2 1 2 6 6 3 2 1 2 3 2 1 4 2 1 4 4 1 5 5 6 4 5 4 2 4 3 1 2 4 6 1 5 5 3 0 2 3 3 2 3 4 1 1 3 4 6 3 4 2 4 3 2 3 4 3 5 2 2 2 0 5 3 0 5 1 1 2 7 6 11 0 3 2 4 3 3 5 5 2 4 5 2 4 4 1 3 4 4 2 0 6 5 2 2 2 3 1 2 4 3 4 2 3 1 2 0 1 2 5 2 4 4 3 5 2 4 1 5 2 4 4 1 1 4 4 4 0 1 2 2 1 3 2 1 1 6 3 5 3 1 3 4 5 2 7 1 4 4 2 4 1 5 5 0 4 3 4 3 4 4 2 3 3 0 3 5 2 6 2 4 5 0 2 2 3 4 3 4 4 1 4 4 1 3 4 2 3 1 3 7 4 2 2 2 4 3 3 2 2 4 3 4 4 1 3 2 2 5 2 3 2 1 1 3 3 0 6 2 5 4 2 3 6 6 5 1 2 5 3 4 2 3 4 1 1 4 1 2 5 4 3 1 1 4 3 3 2 0 4 4 1 6 2 5 4 1 1 2 5 2 2 2 4 4 4 1 1 5 2 4 1 2 4 2 3 4 2 4 1 4 1 3 3 8 5 2 3 1 2 4 5 0 3 2 3 0 4 3 3 3 3 2 5 2 2 4 3 4 7 6 2 2 1 0 3 2 2 1 5 1 4 3 3 2 2 5 3 2 2 3 5 2 2 0 1 3 4 3 3 1 2 5 1 1 3 4 6 7 3 4 3 5 4 3 3 3 2 4 3 0 6 6 2 5 3 1 5 2 5 2 2 2 4 1 5 3 2 3 5 5 3 3 5 diff --git a/test/unit/math/laplace/data_cpp/sums_10.csv b/test/unit/math/laplace/data_cpp/sums_10.csv new file mode 100644 index 00000000000..daf18b6161a --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_10.csv @@ -0,0 +1 @@ +2 0 1 2 1 5 3 8 6 6 diff --git a/test/unit/math/laplace/data_cpp/sums_100.csv b/test/unit/math/laplace/data_cpp/sums_100.csv new file mode 100644 index 00000000000..efeef9b6225 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_100.csv @@ -0,0 +1 @@ +8 4 6 10 7 3 7 6 1 1 0 5 7 5 9 2 4 5 0 3 0 1 0 0 0 3 1 3 5 4 4 8 3 7 1 6 6 10 17 14 24 4 4 8 18 4 3 4 3 3 10 9 19 2 3 6 0 2 1 1 1 10 2 0 2 4 0 7 1 0 1 4 5 1 0 3 1 2 1 0 0 4 2 4 2 4 4 7 6 3 11 2 3 1 1 1 4 0 1 1 diff --git a/test/unit/math/laplace/data_cpp/sums_20.csv b/test/unit/math/laplace/data_cpp/sums_20.csv new file mode 100644 index 00000000000..bb058098b8f --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_20.csv @@ -0,0 +1 @@ +6 6 5 9 3 3 0 2 4 1 5 8 14 6 24 15 0 9 7 12 diff --git a/test/unit/math/laplace/data_cpp/sums_200.csv b/test/unit/math/laplace/data_cpp/sums_200.csv new file mode 100644 index 00000000000..432074a1ba9 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_200.csv @@ -0,0 +1 @@ +3 9 9 3 8 1 6 8 3 5 5 8 1 1 4 2 0 8 8 6 0 0 9 1 3 0 6 13 0 4 9 4 4 0 2 2 0 2 2 1 0 1 1 1 0 0 0 1 0 0 1 2 2 0 0 0 4 7 4 2 0 3 3 2 3 0 3 1 0 4 0 1 0 3 3 0 0 2 2 0 1 1 3 1 2 0 2 0 4 0 0 0 0 0 1 2 0 1 0 2 2 3 3 2 15 6 8 3 2 0 15 17 39 10 11 5 20 15 7 12 22 13 25 5 19 15 4 20 0 11 3 47 1 4 3 13 6 1 3 1 5 0 0 4 7 2 1 3 1 2 0 3 5 4 1 5 6 0 3 11 3 14 1 1 7 1 8 12 2 8 1 3 2 1 3 1 1 7 3 1 5 0 1 2 2 0 1 7 0 14 2 2 6 14 8 20 15 0 9 0 diff --git a/test/unit/math/laplace/data_cpp/sums_30.csv b/test/unit/math/laplace/data_cpp/sums_30.csv new file mode 100644 index 00000000000..c2cf26b8d20 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_30.csv @@ -0,0 +1 @@ +0 1 0 1 0 2 0 3 0 0 5 10 52 0 0 5 51 41 16 46 22 6 16 13 9 28 19 19 1 8 diff --git a/test/unit/math/laplace/data_cpp/sums_40.csv b/test/unit/math/laplace/data_cpp/sums_40.csv new file mode 100644 index 00000000000..e71cd600d04 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_40.csv 
@@ -0,0 +1 @@ +0 1 1 8 8 7 1 3 2 1 0 1 2 1 2 1 3 1 1 0 1 0 0 0 0 1 1 2 1 0 1 4 2 6 1 2 1 0 1 0 diff --git a/test/unit/math/laplace/data_cpp/sums_50.csv b/test/unit/math/laplace/data_cpp/sums_50.csv new file mode 100644 index 00000000000..98821a36040 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_50.csv @@ -0,0 +1 @@ +6 7 12 6 2 5 3 3 8 4 0 6 3 6 1 1 1 0 0 3 6 7 11 15 25 16 0 1 5 0 9 14 3 2 3 8 4 7 6 3 1 1 0 17 2 8 5 4 1 3 diff --git a/test/unit/math/laplace/data_cpp/sums_500.csv b/test/unit/math/laplace/data_cpp/sums_500.csv new file mode 100644 index 00000000000..6a6fd98f4f4 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/sums_500.csv @@ -0,0 +1 @@ +0 0 2 2 4 1 1 5 1 1 1 0 1 0 2 9 0 2 0 0 0 2 0 0 1 2 0 5 1 0 1 6 1 0 1 0 3 2 0 1 5 3 2 2 3 0 1 0 4 0 0 4 1 4 6 1 1 1 2 4 1 1 2 2 0 0 0 1 1 6 7 2 15 2 4 4 1 8 3 0 4 0 2 0 3 9 4 3 2 3 0 2 4 1 5 0 3 0 0 3 2 1 8 0 4 0 3 1 3 4 1 4 0 0 8 1 1 0 1 8 7 2 0 1 0 0 0 0 0 0 0 2 1 0 3 0 1 3 0 5 1 1 2 1 1 5 3 2 1 6 1 0 0 1 1 0 0 0 0 0 0 0 4 1 0 2 10 3 4 2 3 0 1 1 0 0 0 3 2 0 3 2 0 3 1 2 5 0 2 0 3 0 2 0 3 2 4 15 5 2 0 2 3 3 4 0 0 2 2 1 0 0 0 0 0 3 0 2 0 3 0 0 0 0 1 1 4 7 2 9 2 6 4 0 4 3 3 5 3 2 3 3 10 0 0 0 1 0 0 1 0 0 2 1 1 2 0 0 0 2 0 2 0 3 5 0 9 3 16 16 0 48 44 55 28 17 12 2 3 4 0 8 2 2 3 2 3 6 0 1 2 3 2 2 6 7 1 5 3 1 2 3 0 1 2 5 2 0 2 1 1 3 0 0 1 2 8 8 15 23 3 7 1 3 5 1 4 0 1 2 5 2 0 10 2 3 0 1 0 10 5 4 1 4 10 3 5 1 5 4 0 1 3 1 1 2 2 1 0 0 7 4 6 4 0 15 8 1 5 1 13 3 0 0 0 14 3 6 2 4 26 18 0 3 5 2 3 1 2 5 1 5 3 3 3 0 4 0 9 4 19 10 14 8 4 6 8 24 0 14 13 12 0 14 16 16 6 5 0 5 2 2 5 3 7 8 9 6 7 3 0 4 6 5 2 5 1 2 4 13 10 20 28 15 28 8 17 38 11 36 0 6 18 47 47 73 17 13 37 5 2 32 14 25 23 12 5 4 9 9 3 3 10 1 2 2 0 6 10 6 11 3 4 16 7 47 18 16 8 20 4 31 4 3 9 14 11 10 4 13 diff --git a/test/unit/math/laplace/data_cpp/y_10.csv b/test/unit/math/laplace/data_cpp/y_10.csv new file mode 100644 index 00000000000..21509a3da36 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_10.csv @@ -0,0 +1 @@ +0 0 2 1 2 2 3 2 1 2 4 4 1 0 1 1 0 2 1 0 1 1 1 1 0 0 0 1 0 0 diff --git a/test/unit/math/laplace/data_cpp/y_100.csv b/test/unit/math/laplace/data_cpp/y_100.csv new file mode 100644 index 00000000000..1cd8952d689 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_100.csv @@ -0,0 +1 @@ +1 1 0 0 0 0 4 1 0 1 1 1 0 1 2 0 2 0 1 2 1 1 1 0 3 3 0 4 2 1 1 0 0 4 1 2 1 3 3 2 0 1 2 0 0 2 0 2 0 0 0 1 1 7 4 3 2 8 0 3 0 2 2 1 1 1 1 1 0 8 3 1 1 3 1 1 3 2 1 3 3 0 1 0 0 0 4 0 1 2 1 0 0 1 1 2 1 2 0 0 1 0 0 4 0 3 1 4 2 2 0 0 1 3 3 4 0 0 0 0 0 1 0 2 1 4 0 2 6 4 0 1 0 1 0 1 1 0 0 1 1 0 4 1 2 1 0 0 0 1 2 0 0 0 1 1 5 1 1 4 0 1 2 5 3 4 1 2 5 2 2 2 0 2 4 2 0 2 0 2 0 1 1 1 3 2 1 1 1 0 2 0 0 1 2 0 0 1 0 4 6 1 0 0 1 2 2 0 1 1 4 1 3 0 3 1 2 0 1 1 1 1 1 2 2 0 0 1 1 0 1 1 2 0 0 2 8 0 4 2 4 3 4 2 2 1 1 0 2 0 1 0 0 4 0 1 0 1 2 1 3 1 1 4 3 0 0 0 0 0 8 0 0 1 1 1 0 1 4 1 0 0 0 1 2 0 4 0 0 2 0 2 0 3 1 3 0 1 1 1 diff --git a/test/unit/math/laplace/data_cpp/y_20.csv b/test/unit/math/laplace/data_cpp/y_20.csv new file mode 100644 index 00000000000..7bc49775a8b --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_20.csv @@ -0,0 +1 @@ +3 2 3 3 1 1 3 2 0 3 2 4 0 1 1 1 0 0 3 1 1 1 7 2 2 1 3 1 2 4 8 2 1 2 3 2 5 0 3 2 1 2 0 7 4 2 6 2 5 1 5 1 3 1 2 2 5 1 1 2 diff --git a/test/unit/math/laplace/data_cpp/y_200.csv b/test/unit/math/laplace/data_cpp/y_200.csv new file mode 100644 index 00000000000..9405993efc1 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_200.csv @@ -0,0 +1 @@ +0 0 4 0 0 0 0 0 0 0 1 5 0 4 2 1 2 3 2 3 0 0 4 0 1 1 4 0 10 0 0 8 0 3 0 1 0 1 1 1 3 1 0 3 3 4 0 1 0 1 3 1 3 2 3 0 3 2 6 2 0 1 1 6 
0 0 1 3 0 0 5 2 1 4 2 1 0 0 1 0 3 1 1 1 0 5 0 1 0 3 2 4 2 2 0 2 0 1 0 3 0 2 0 4 0 2 3 3 0 0 0 0 3 0 0 4 2 0 8 0 0 0 0 0 0 0 0 0 5 1 0 1 1 2 0 0 1 0 0 5 2 0 1 4 2 0 1 2 0 1 0 0 4 0 3 2 0 0 2 0 1 1 7 0 2 0 2 2 0 2 0 0 0 3 1 1 4 12 1 2 1 3 0 1 1 3 0 1 1 0 2 2 0 0 1 0 3 1 0 1 2 1 1 0 0 2 4 6 5 2 2 1 1 2 0 0 1 0 1 0 7 2 4 8 1 1 0 8 0 3 1 1 0 0 0 0 0 0 0 0 0 0 1 1 0 1 1 0 2 4 0 0 0 0 3 3 2 5 0 3 0 0 0 2 0 0 0 4 0 2 2 1 1 5 0 5 4 0 0 1 4 11 0 3 0 1 0 4 3 0 2 0 0 0 2 2 0 0 5 1 1 5 1 2 0 1 3 12 0 0 1 0 0 0 0 6 1 1 0 3 1 0 0 0 2 0 1 1 2 0 0 1 0 1 2 0 1 1 1 2 2 2 0 2 1 0 2 1 3 2 0 6 3 1 0 4 0 0 3 5 0 6 1 0 0 0 0 1 5 0 2 1 0 0 3 0 1 1 0 2 1 0 1 0 2 2 3 0 6 2 2 2 7 0 2 0 2 5 3 5 0 4 1 3 0 9 2 0 11 1 4 2 0 0 0 9 0 1 2 0 4 3 0 2 0 0 1 3 4 4 2 0 0 1 0 6 3 1 0 3 3 2 0 0 5 0 0 1 0 1 1 0 0 0 1 2 6 0 1 2 2 2 2 5 0 1 1 0 0 1 2 1 1 2 1 2 0 0 3 1 0 1 7 1 2 8 1 5 1 0 0 1 2 3 0 0 0 0 0 1 0 7 0 0 3 0 2 1 0 0 2 2 0 0 1 1 0 2 2 1 0 2 0 0 3 3 0 1 1 3 5 2 1 4 3 4 0 0 1 4 1 1 0 1 0 1 2 2 0 4 0 0 0 1 3 1 0 0 1 2 1 1 0 1 3 0 3 6 0 0 1 1 1 0 2 0 0 3 0 0 1 0 1 0 0 1 0 0 1 0 0 0 0 0 2 1 3 2 0 0 diff --git a/test/unit/math/laplace/data_cpp/y_30.csv b/test/unit/math/laplace/data_cpp/y_30.csv new file mode 100644 index 00000000000..6c3f5a064b2 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_30.csv @@ -0,0 +1 @@ +12 0 9 5 8 12 0 6 0 0 4 0 7 3 0 2 5 1 2 0 4 11 9 2 0 7 14 0 3 1 1 5 0 6 9 0 4 4 0 4 3 2 6 0 5 3 3 0 8 0 3 1 12 3 7 12 0 1 13 3 3 1 4 0 3 3 8 6 3 9 8 2 1 6 8 1 2 5 0 0 5 1 7 10 6 1 1 4 11 10 diff --git a/test/unit/math/laplace/data_cpp/y_40.csv b/test/unit/math/laplace/data_cpp/y_40.csv new file mode 100644 index 00000000000..9607c6e3487 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_40.csv @@ -0,0 +1 @@ +1 0 1 2 0 0 0 2 0 1 0 0 0 2 0 2 1 0 1 3 0 0 1 0 1 0 1 0 1 1 1 0 0 0 0 0 0 0 2 0 0 0 2 0 1 0 0 1 0 0 0 2 0 0 0 3 0 0 0 0 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 2 0 0 0 3 0 1 1 0 3 0 1 0 0 1 0 0 0 0 1 0 1 1 1 0 0 1 0 0 1 0 0 0 4 2 1 0 1 0 2 0 1 0 0 diff --git a/test/unit/math/laplace/data_cpp/y_50.csv b/test/unit/math/laplace/data_cpp/y_50.csv new file mode 100644 index 00000000000..cd39fa19fc8 --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_50.csv @@ -0,0 +1 @@ +2 7 9 0 0 2 0 0 0 9 0 1 5 3 1 1 0 1 1 2 2 2 2 2 0 4 1 0 3 0 4 1 1 0 0 0 2 3 4 3 4 1 0 1 2 0 2 0 1 3 4 2 0 0 2 0 5 2 1 0 2 0 0 0 3 0 2 2 2 0 1 0 3 5 1 3 1 2 0 0 5 2 2 3 1 4 1 2 1 3 2 3 0 3 3 0 1 1 2 4 3 0 0 3 0 1 3 1 1 1 1 4 0 5 3 4 2 2 1 1 1 0 2 4 1 1 2 3 1 1 5 4 2 1 0 3 5 0 2 2 2 0 1 0 1 2 0 0 2 2 diff --git a/test/unit/math/laplace/data_cpp/y_500.csv b/test/unit/math/laplace/data_cpp/y_500.csv new file mode 100644 index 00000000000..b8fbbd8a83f --- /dev/null +++ b/test/unit/math/laplace/data_cpp/y_500.csv @@ -0,0 +1 @@ +1 0 0 1 3 6 1 2 5 0 1 1 0 0 0 0 1 1 0 1 2 0 1 2 0 0 1 0 4 0 0 0 1 0 7 0 0 0 0 1 0 0 0 10 6 3 5 5 0 0 6 0 1 0 9 2 2 2 18 1 3 0 1 1 1 1 1 0 0 0 0 0 0 1 1 3 2 1 1 0 4 1 2 3 1 0 8 0 0 1 1 2 1 3 1 2 1 0 2 3 8 8 2 0 0 1 0 3 3 0 2 0 3 0 1 0 2 11 0 2 1 1 0 1 0 0 1 0 1 3 1 1 0 1 1 2 1 1 0 1 3 0 2 1 2 0 0 2 2 3 2 0 1 2 0 2 1 0 0 1 3 2 0 1 1 1 2 0 0 2 1 0 0 1 0 1 2 0 2 0 0 1 0 1 1 0 0 2 1 0 0 0 0 1 0 0 2 1 0 4 0 0 2 0 0 1 4 0 0 1 2 0 2 1 1 2 1 2 0 0 1 0 1 1 1 0 1 1 2 6 4 2 0 2 2 1 0 13 0 0 3 9 1 0 5 4 0 3 2 0 0 1 0 0 0 2 1 0 3 0 2 1 2 1 1 0 0 1 0 0 3 3 2 0 0 12 0 0 2 0 0 1 0 1 0 1 0 1 1 8 0 1 0 1 0 3 0 1 0 0 1 1 4 0 0 0 3 1 0 2 1 0 5 0 7 6 0 4 1 0 3 1 0 0 2 0 1 0 1 1 3 3 0 1 1 0 0 1 6 3 0 1 1 0 1 2 0 0 0 0 3 0 1 3 1 2 1 4 0 1 13 0 3 0 0 2 0 0 18 1 4 1 0 8 1 0 1 0 0 2 0 0 0 2 1 0 1 1 0 0 0 1 0 1 2 5 0 1 9 2 0 1 1 0 5 0 4 1 1 0 1 0 1 1 5 2 0 2 0 
1 0 4 0 0 5 1 0 0 5 0 6 3 2 0 0 2 1 2 1 3 15 1 0 0 0 3 1 2 4 0 0 0 0 18 4 0 8 0 5 1 4 13 1 6 3 3 1 1 0 1 3 8 1 0 1 0 0 0 0 0 1 0 0 1 2 1 0 0 2 1 5 2 3 1 0 0 0 0 4 0 0 4 0 13 0 3 0 0 0 0 0 1 1 2 0 0 1 3 6 2 4 1 0 0 0 0 0 0 2 2 0 0 0 0 1 0 2 0 2 0 0 1 2 1 1 0 0 4 0 0 1 6 2 1 0 1 1 1 11 6 0 2 0 0 0 0 0 0 0 2 0 10 5 0 1 1 1 2 0 1 0 2 1 3 0 2 2 0 7 0 7 2 2 0 0 1 2 0 0 1 8 5 1 1 0 2 0 0 1 0 4 0 0 1 0 1 7 0 0 1 1 3 1 1 0 3 1 1 2 1 1 0 0 2 0 5 1 0 2 0 1 0 9 4 1 0 1 0 0 3 1 3 0 1 0 4 0 0 0 0 11 0 2 0 5 2 0 0 1 0 1 2 2 1 3 1 0 0 0 1 3 1 1 1 0 0 1 13 6 0 1 3 0 0 1 2 0 5 0 2 1 0 3 2 1 0 2 4 0 0 13 0 7 6 2 0 0 0 0 6 3 2 0 1 0 17 1 4 0 2 0 2 0 1 0 1 2 2 1 2 2 4 0 0 1 0 5 0 1 0 1 1 3 1 3 2 1 1 1 12 1 2 3 1 0 0 2 2 1 1 1 0 1 1 0 1 0 1 2 4 0 2 2 0 0 1 2 0 0 2 2 0 0 0 0 0 1 0 3 1 1 2 2 1 0 0 0 2 1 1 0 0 1 1 0 2 1 2 1 3 0 1 1 3 0 1 1 32 2 0 2 0 1 0 0 0 0 1 2 1 0 5 1 2 0 0 1 1 0 3 5 10 0 4 1 1 6 1 2 1 4 0 2 1 1 2 4 0 1 1 3 2 6 1 1 1 0 6 0 1 0 0 0 2 0 2 0 0 5 0 0 11 3 3 4 0 1 1 4 1 0 0 1 0 0 3 0 0 1 0 0 0 0 1 6 3 2 1 8 3 4 1 1 1 1 9 1 1 0 0 1 2 0 1 2 0 2 0 0 0 0 2 0 0 1 0 1 1 4 3 3 2 3 2 4 0 3 0 0 1 1 2 0 1 0 1 0 1 0 1 6 13 1 0 0 0 3 1 0 0 1 0 0 5 1 0 0 3 6 0 7 0 1 3 1 2 14 1 0 0 0 0 1 1 1 9 2 1 0 0 3 0 0 1 0 8 3 0 15 0 0 19 0 0 5 6 1 8 1 3 1 0 1 1 4 3 0 0 1 2 0 0 1 1 0 2 2 2 3 0 0 4 1 0 2 0 0 0 1 0 0 0 1 0 0 2 2 0 0 0 0 3 1 3 2 0 0 2 0 4 1 1 1 2 0 4 1 1 1 1 2 1 0 2 1 2 0 2 0 1 0 2 9 1 1 0 1 0 1 1 16 0 0 2 2 0 3 0 0 0 0 15 1 4 7 3 0 0 1 1 2 3 1 4 3 1 2 0 0 1 4 0 0 1 4 1 2 0 3 2 1 3 3 0 1 1 1 0 0 0 0 0 0 2 0 2 3 1 2 0 2 0 2 4 2 1 0 6 0 1 0 1 2 1 1 2 22 2 1 3 2 1 1 2 1 19 0 0 4 0 1 0 2 0 0 0 0 1 0 4 0 1 2 0 1 1 2 8 0 0 0 0 2 0 0 13 1 1 0 1 0 4 1 1 1 1 1 0 4 0 0 0 1 0 0 0 1 0 0 0 0 5 0 0 5 0 0 1 0 1 8 1 2 21 3 1 0 1 1 0 1 0 0 1 2 4 2 1 1 0 0 1 2 1 0 0 0 2 1 1 0 0 7 1 2 4 1 1 0 1 0 2 0 0 0 0 0 0 2 4 0 1 0 0 0 1 1 0 0 2 1 0 3 0 2 0 1 1 1 13 1 1 0 0 0 5 0 0 1 0 4 3 1 0 2 1 6 0 0 0 0 1 5 9 0 0 1 12 3 1 3 0 0 0 0 0 1 0 0 1 1 1 1 4 0 0 2 1 12 0 1 4 4 0 2 1 3 1 1 0 1 3 2 1 2 0 0 0 0 0 0 4 0 0 2 0 0 0 0 2 1 1 7 0 1 0 3 0 0 1 1 2 0 1 1 1 0 0 6 0 2 0 2 2 1 3 5 0 4 1 0 0 3 8 7 0 3 1 0 6 0 0 0 1 1 0 6 0 0 0 1 1 1 1 1 0 2 3 0 4 1 0 2 0 1 0 1 0 2 1 0 12 2 2 1 1 0 0 7 0 0 2 2 0 0 3 0 1 6 0 diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp new file mode 100755 index 00000000000..259c1f4917c --- /dev/null +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -0,0 +1,122 @@ +#include +#include +// #include +// #include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + + +TEST(laplace, disease_map_dim_911) { + // Based on (Vanhatalo, Pietilainen and Vethari, 2010). 
See + // https://research.cs.aalto.fi/pml/software/gpstuff/demo_spatial1.shtml + using stan::math::var; + using stan::math::laplace_marginal_poisson; + using stan::math::sqr_exp_kernel_functor; + + int dim_theta = 911; + int n_observations = 911; + std::string data_directory = "test/unit/math/laplace/aki_disease_data/"; + std::vector x1(dim_theta), x2(dim_theta); + std::vector y(n_observations); + Eigen::VectorXd ye(n_observations); + read_in_data(dim_theta, n_observations, data_directory, x1, x2, y, ye); + + // look at some of the data + std::cout << "x_1: " << x1[0] << " " << x2[0] << std::endl + << "x_2: " << x1[1] << " " << x2[1] << std::endl + << "y_1: " << y[0] << " y_2: " << y[1] << std::endl + << "ye_1: " << ye[0] << " ye_2: " << ye[1] << std::endl; + + int dim_x = 2; + std::vector x(dim_theta); + for (int i = 0; i < dim_theta; i++) { + Eigen::VectorXd coordinate(dim_x); + coordinate << x1[i], x2[i]; + x[i] = coordinate; + } + + // one observation per group + std::vector n_samples(dim_theta); + for (int i = 0; i < dim_theta; i++) n_samples[i] = 1; + + std::vector delta; + std::vector delta_int; + + Eigen::VectorXd theta_0 = Eigen::VectorXd::Zero(dim_theta); + int dim_phi = 2; + Eigen::Matrix phi(dim_phi); + phi << 0.3162278, 200; // variance, length scale + + auto start = std::chrono::system_clock::now(); + + var marginal_density + = laplace_marginal_poisson(y, n_samples, ye, sqr_exp_kernel_functor(), + phi, x, delta, delta_int, theta_0); + + auto end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_time = end - start; + + VEC g; + AVEC parm_vec = createAVEC(phi(0), phi(1)); + marginal_density.grad(parm_vec, g); + + std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl + << "density: " << value_of(marginal_density) << std::endl + << "autodiff grad: " << g[0] << " " << g[1] << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; + + // Expected result + // density: -2866.88 + // autodiff grad: 266.501 -0.425901 + // total time: 0.627501 + + //////////////////////////////////////////////////////////////////////// + // Let's now generate a sample theta from the estimated posterior + using stan::math::diff_poisson_log; + using stan::math::to_vector; + using stan::math::sqr_exp_kernel_functor; + + diff_poisson_log diff_likelihood(to_vector(n_samples), + to_vector(y), + stan::math::log(ye)); + boost::random::mt19937 rng; + start = std::chrono::system_clock::now(); + Eigen::VectorXd + theta_pred = laplace_approx_rng(diff_likelihood, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, rng); + + end = std::chrono::system_clock::now(); + elapsed_time = end - start; + + std::cout << "LAPLACE_APPROX_RNG" << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; + + // Expected result + // total time: 0.404114 + + start = std::chrono::system_clock::now(); + theta_pred = laplace_approx_poisson_rng(y, n_samples, ye, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, rng); + end = std::chrono::system_clock::now(); + elapsed_time = end - start; + + std::cout << "LAPLACE_APPROX_POISSON_RNG" << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; +} diff --git a/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp b/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp new file mode 100644 index 00000000000..91b556451cc --- /dev/null +++ b/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp @@ -0,0 +1,137 @@ +#include +#include + +#include 
+#include + +#include +#include +#include +#include +#include + +struct stationary_point { + template + inline Eigen::Matrix::type, + Eigen::Dynamic, 1> + operator() (const Eigen::Matrix& theta, + const Eigen::Matrix& parms, + const std::vector& dat, + const std::vector& dat_int, + std::ostream* pstream__ = 0) const { + Eigen::Matrix::type, + Eigen::Dynamic, 1> z(2); + z(0) = 1 - exp(theta(0)) - theta(0) / (parms(0) * parms(0)); + z(1) = - exp(theta(1)) - theta(1) / (parms(1) * parms(1)); + return z; + } +}; + +struct diagonal_kernel_functor { + template + Eigen::Matrix + operator() (const Eigen::Matrix& phi, + const T2& x, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* msgs = nullptr) const { + Eigen::Matrix K(2, 2); + K(0, 0) = phi(0) * phi(0); + K(1, 1) = phi(1) * phi(1); + K(0, 1) = 0; + K(1, 0) = 0; + return K; + } +}; + +TEST(laplace, basic_rng) { + using stan::math::algebra_solver; + using stan::math::diff_poisson_log; + using stan::math::to_vector; + using stan::math::diag_matrix; + using stan::math::laplace_approx_rng; + using stan::math::value_of; + using stan::math::mdivide_left_tri; + using stan::math::diag_pre_multiply; + using stan::math::inv; + using stan::math::square; + + + Eigen::VectorXd theta_0(2); + theta_0 << 1, 1; + Eigen::VectorXd sigma(2); + sigma << 3, 2; + std::vector n_samples = {1, 1}; + std::vector sums = {1, 0}; + + diff_poisson_log diff_likelihood(to_vector(n_samples), + to_vector(sums)); + std::vector d0; + std::vector di0; + + + // Method 1: brute force and straightforward + Eigen::VectorXd theta_root + = algebra_solver(stationary_point(), + theta_0, sigma, d0, di0); + + Eigen::VectorXd gradient, W; + diff_likelihood.diff(theta_root, gradient, W); + W = -W; + diagonal_kernel_functor covariance_function; + std::vector x_dummy; + Eigen::MatrixXd K = covariance_function(sigma, x_dummy, d0, di0, 0); + + std::cout << "K (brute force): " + << std::endl + << (K.inverse() + diag_matrix(W)).inverse() + << std::endl << std::endl; + + // Method 2: Vectorized R&W method + double tolerance = 1e-6; + int max_num_steps = 100; + + // First find the mode using the custom Newton step + Eigen::MatrixXd covariance; + Eigen::VectorXd theta; + Eigen::VectorXd W_root; + Eigen::MatrixXd L; + { + Eigen::VectorXd a; + Eigen::VectorXd l_grad; + double marginal_density + = laplace_marginal_density(diff_likelihood, + covariance_function, + sigma, x_dummy, d0, di0, + covariance, theta, W_root, L, a, l_grad, + value_of(theta_0), 0, + tolerance, max_num_steps); + } + + Eigen::MatrixXd V; + V = mdivide_left_tri(L, + diag_pre_multiply(W_root, covariance)); + std::cout << "K (method 1): " << std::endl + << covariance - V.transpose() * V << std::endl + << std::endl; + + // Method 3: Modified R&W method + Eigen::VectorXd W_root_inv = inv(W_root); + Eigen::MatrixXd V_dec = mdivide_left_tri(L, + diag_matrix(W_root_inv)); + std::cout << "K (method 2): " << std::endl + << - V_dec.transpose() * V_dec + diag_matrix(square(W_root_inv)) + << std::endl << std::endl; + + + // Call to rng function + boost::random::mt19937 rng; + Eigen::MatrixXd theta_pred + = laplace_approx_rng(diff_likelihood, covariance_function, + sigma, x_dummy, d0, di0, theta_0, + rng); + + // = laplace_approx_rng(theta_0, sigma, x_dummy, + // diff_likelihood, covariance_function, + // rng); +} diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp new file mode 100755 index 00000000000..952ff3c05ac --- /dev/null +++ 
b/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp @@ -0,0 +1,186 @@ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +TEST(laplace, likelihood_differentiation) { + using stan::math::diff_logistic_log; + using stan::math::var; + + double test_tolerance = 2e-4; + + Eigen::VectorXd theta(2); + theta << -2.45809, -3.6127; + Eigen::VectorXd y(2), n_samples(2); + y << 1, 0; + n_samples << 1, 1; + Eigen::Matrix theta_v = theta; + + diff_logistic_log diff_functor(n_samples, y); + double log_density = diff_functor.log_likelihood(theta); + Eigen::VectorXd gradient, hessian; + diff_functor.diff(theta, gradient, hessian); + Eigen::VectorXd third_tensor = diff_functor.third_diff(theta); + + EXPECT_NEAR(-2.566843, log_density, test_tolerance); + + // finite diff calculations for first-order derivatives + double diff = 1e-12; + Eigen::VectorXd theta_1u = theta; + Eigen::VectorXd theta_1l = theta; + Eigen::VectorXd theta_2u = theta; + Eigen::VectorXd theta_2l = theta; + theta_1u(0) = theta(0) + diff; + theta_1l(0) = theta(0) - diff; + theta_2u(1) = theta(1) + diff; + theta_2l(1) = theta(1) - diff; + double diff_1 = (diff_functor.log_likelihood(theta_1u) + - diff_functor.log_likelihood(theta_1l)) / (2 * diff); + double diff_2 = (diff_functor.log_likelihood(theta_2u) + - diff_functor.log_likelihood(theta_2l)) / (2 * diff); + + EXPECT_NEAR(diff_1, gradient(0), test_tolerance); + EXPECT_NEAR(diff_2, gradient(1), test_tolerance); + + // finite diff calculation for second-order derivatives + Eigen::VectorXd gradient_1u, gradient_1l, hessian_1u, hessian_1l, + gradient_2u, gradient_2l, hessian_2u, hessian_2l; + diff_functor.diff(theta_1u, gradient_1u, hessian_1u); + diff_functor.diff(theta_1l, gradient_1l, hessian_1l); + diff_functor.diff(theta_2u, gradient_2u, hessian_2u); + diff_functor.diff(theta_2l, gradient_2l, hessian_2l); + + double diff_grad_1 = (gradient_1u(0) - gradient_1l(0)) / (2 * diff); + double diff_grad_2 = (gradient_2u(1) - gradient_2l(1)) / (2 * diff); + + EXPECT_NEAR(diff_grad_1, hessian(0), test_tolerance); + EXPECT_NEAR(diff_grad_2, hessian(1), test_tolerance); + + // finite diff calculation for third-order derivatives + double diff_hess_1 = (hessian_1u(0) - hessian_1l(0)) / (2 * diff); + double diff_hess_2 = (hessian_2u(1) - hessian_2l(1)) / (2 * diff); + + EXPECT_NEAR(diff_hess_1, third_tensor(0), test_tolerance); + EXPECT_NEAR(diff_hess_2, third_tensor(1), test_tolerance); +} + +TEST(laplace, logistic_lgm_dim500) { + using stan::math::var; + using stan::math::to_vector; + using stan::math::diff_logistic_log; + using stan::math::sqr_exp_kernel_functor; + + int dim_theta = 500; + int n_observations = 500; + std::string data_directory = "test/unit/math/laplace/aki_synth_data/"; + std::vector x1(dim_theta), x2(dim_theta); + std::vector y(n_observations); + read_in_data(dim_theta, n_observations, data_directory, x1, x2, y); + + // Look a some of the data. 
+  // std::cout << "x_1: " << x1[0] << " " << x2[0] << std::endl
+  //           << "x_2: " << x1[1] << " " << x2[1] << std::endl
+  //           << "y_1: " << y[0] << " y_2: " << y[1] << std::endl;
+
+  int dim_x = 2;
+  std::vector<Eigen::VectorXd> x(dim_theta);
+  for (int i = 0; i < dim_theta; i++) {
+    Eigen::VectorXd coordinate(dim_x);
+    coordinate << x1[i], x2[i];
+    x[i] = coordinate;
+  }
+  std::vector<int> n_samples = stan::math::rep_array(1, dim_theta);
+
+  Eigen::VectorXd theta_0 = Eigen::VectorXd::Zero(dim_theta);
+
+  Eigen::VectorXd theta_laplace, W_root, a, l_grad;
+  Eigen::MatrixXd L, covariance;
+  std::vector<double> delta;
+  std::vector<int> delta_int;
+
+  // CASE 1: phi is passed as a double.
+  Eigen::VectorXd phi(2);
+  phi << 1.6, 1;  // standard deviation, length scale
+
+  auto start_optimization = std::chrono::system_clock::now();
+
+  double marginal_density
+    = laplace_marginal_density(
+        diff_logistic_log(to_vector(n_samples), to_vector(y)),
+        sqr_exp_kernel_functor(),
+        phi, x, delta, delta_int,
+        covariance, theta_laplace, W_root, L, a, l_grad,
+        theta_0, 0, 1e-3, 100);
+
+  auto end_optimization = std::chrono::system_clock::now();
+  std::chrono::duration<double>
+    elapsed_time_optimization = end_optimization - start_optimization;
+
+  std::cout << "LAPLACE MARGINAL FOR DOUBLE: " << std::endl
+            << "density: " << marginal_density << std::endl
+            << "time: " << elapsed_time_optimization.count()
+            << std::endl << std::endl;
+
+  // Expected output
+  // density: -195.368
+  // time: 0.059645
+
+  // CASE 2: phi is passed as a var
+  Eigen::Matrix<var, Eigen::Dynamic, 1> phi_v2 = phi;
+
+  start_optimization = std::chrono::system_clock::now();
+  var marginal_density_v
+    = laplace_marginal_density(
+        diff_logistic_log(to_vector(n_samples), to_vector(y)),
+        sqr_exp_kernel_functor(),
+        phi_v2, x, delta, delta_int,
+        theta_0, 0, 1e-3, 100);
+
+  VEC g2;
+  AVEC parm_vec2 = createAVEC(phi_v2(0), phi_v2(1));
+  marginal_density_v.grad(parm_vec2, g2);
+
+  end_optimization = std::chrono::system_clock::now();
+  elapsed_time_optimization = end_optimization - start_optimization;
+
+  std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl
+            << "density: " << value_of(marginal_density_v) << std::endl
+            << "autodiff grad: " << g2[0] << " " << g2[1]
+            << std::endl
+            << "total time: " << elapsed_time_optimization.count()
+            << std::endl << std::endl;
+
+  // EXPECTED
+  // density: -195.368
+  // autodiff grad: 21.9495 -32.5123
+  // total time: 0.147897
+
+  // TO DO -- get total time from GPStuff and do more comparisons.
+
+  // CASE 3: use wrapper function and compare result.
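+  // (The wrapper presumably forwards to laplace_marginal_density with the
+  // diff_logistic_log likelihood; the first call below passes no kernel
+  // functor, so it presumably falls back to a default squared-exponential
+  // covariance. Either way, both calls are expected to reproduce the CASE 1
+  // density, which is what the EXPECT_FLOAT_EQ checks.)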
+ using stan::math::laplace_marginal_bernoulli; + using stan::math::value_of; + + double marginal_density_v2 + = laplace_marginal_bernoulli(y, n_samples, + phi, x, delta, delta_int, + theta_0, 0, 1e-3, 100); + + EXPECT_FLOAT_EQ(marginal_density, marginal_density_v2); + + marginal_density_v2 + = laplace_marginal_bernoulli(y, n_samples, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, 0, 1e-3, 100); + + EXPECT_FLOAT_EQ(marginal_density, marginal_density_v2); +} diff --git a/test/unit/math/laplace/laplace_marginal_poisson_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_test.cpp new file mode 100644 index 00000000000..21d7c6094bf --- /dev/null +++ b/test/unit/math/laplace/laplace_marginal_poisson_test.cpp @@ -0,0 +1,140 @@ +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + + +TEST(laplace, likelihood_differentiation) { + using stan::math::diff_poisson_log; + using stan::math::to_vector; + + Eigen::VectorXd theta(2); + theta << 1, 1; + std::vector n_samples = {1, 1}; + std::vector sums = {1, 0}; + + diff_poisson_log diff_functor(to_vector(n_samples), + to_vector(sums)); + double log_density = diff_functor.log_likelihood(theta); + Eigen::VectorXd gradient, hessian; + diff_functor.diff(theta, gradient, hessian); + Eigen::VectorXd third_tensor = diff_functor.third_diff(theta); + + EXPECT_FLOAT_EQ(-4.436564, log_density); + EXPECT_FLOAT_EQ(-1.718282, gradient(0)); + EXPECT_FLOAT_EQ(-2.718282, gradient(1)); + EXPECT_FLOAT_EQ(-2.718282, hessian(0)); + EXPECT_FLOAT_EQ(-2.718282, hessian(1)); + EXPECT_FLOAT_EQ(-2.718282, third_tensor(0)); + EXPECT_FLOAT_EQ(-2.718282, third_tensor(1)); +} + +TEST(laplace, likelihood_differentiation2) { + // Test exposure argument + using stan::math::diff_poisson_log; + using stan::math::to_vector; + + Eigen::VectorXd theta(2); + theta << 1, 1; + std::vector n_samples = {1, 1}; + std::vector sums = {1, 0}; + std::vector log_exposure = {log(0.5), log(2)}; + + diff_poisson_log diff_functor(to_vector(n_samples), + to_vector(sums), + to_vector(log_exposure)); + + double log_density = diff_functor.log_likelihood(theta); + Eigen::VectorXd gradient, hessian; + diff_functor.diff(theta, gradient, hessian); + Eigen::VectorXd third_tensor = diff_functor.third_diff(theta); + + EXPECT_FLOAT_EQ(-6.488852, log_density); + EXPECT_FLOAT_EQ(-0.3591409, gradient(0)); + EXPECT_FLOAT_EQ(-5.4365637, gradient(1)); + EXPECT_FLOAT_EQ(-1.359141, hessian(0)); + EXPECT_FLOAT_EQ(-5.436564, hessian(1)); + EXPECT_FLOAT_EQ(-1.359141, third_tensor(0)); + EXPECT_FLOAT_EQ(-5.436564, third_tensor(1)); + +} + +TEST(laplace, poisson_lgm_dim2) { + using stan::math::laplace_marginal_poisson; + using stan::math::var; + using stan::math::to_vector; + using stan::math::value_of; + + int dim_phi = 2; + Eigen::Matrix phi(dim_phi); + phi << 1.6, 0.45; + + int dim_theta = 2; + Eigen::VectorXd theta_0(dim_theta); + theta_0 << 0, 0; + + int dim_x = 2; + std::vector x(dim_theta); + Eigen::VectorXd x_0(2); + x_0 << 0.05100797, 0.16086164; + Eigen::VectorXd x_1(2); + x_1 << -0.59823393, 0.98701425; + x[0] = x_0; + x[1] = x_1; + + std::vector delta; + std::vector delta_int; + + std::vector n_samples = {1, 1}; + std::vector sums = {1, 0}; + + squared_kernel_functor K; + var target = laplace_marginal_poisson(sums, n_samples, K, phi, x, delta, + delta_int, theta_0); + + // Test with exposure argument + // Eigen::VectorXd exposure(2); + // exposure << 1, 1; + // var target = laplace_marginal_poisson(theta_0, phi, x, n_samples, sums, + // exposure); + + 
// How to test this? The best way would be to generate a few + // benchmarks using gpstuff. + VEC g; + AVEC parm_vec = createAVEC(phi(0), phi(1)); + target.grad(parm_vec, g); +/* + // finite diff test + double diff = 1e-7; + Eigen::VectorXd phi_dbl = value_of(phi); + Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl, + phi_2l = phi_dbl, phi_2u = phi_dbl; + phi_1l(0) -= diff; + phi_1u(0) += diff; + phi_2l(1) -= diff; + phi_2u(1) += diff; + + double target_1u = laplace_marginal_poisson(sums, n_samples, phi_1u, x, + delta, delta_int, theta_0), + target_1l = laplace_marginal_poisson(sums, n_samples, phi_1l, x, + delta, delta_int, theta_0), + target_2u = laplace_marginal_poisson(sums, n_samples, phi_2u, x, + delta, delta_int, theta_0), + target_2l = laplace_marginal_poisson(sums, n_samples, phi_2l, x, + delta, delta_int, theta_0); + + VEC g_finite(dim_phi); + g_finite[0] = (target_1u - target_1l) / (2 * diff); + g_finite[1] = (target_2u - target_2l) / (2 * diff); + + double tol = 1.1e-4; + EXPECT_NEAR(g_finite[0], g[0], tol); + EXPECT_NEAR(g_finite[1], g[1], tol); */ +} diff --git a/test/unit/math/laplace/laplace_skim_test.cpp b/test/unit/math/laplace/laplace_skim_test.cpp new file mode 100755 index 00000000000..654ea88ba27 --- /dev/null +++ b/test/unit/math/laplace/laplace_skim_test.cpp @@ -0,0 +1,237 @@ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + + +struct K_functor { + template + Eigen::Matrix + operator()(const Eigen::Matrix& parm, + const std::vector& x_tot, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* pstream) const { + using stan::math::add; + using stan::math::multiply; + using stan::math::diag_post_multiply; + using stan::math::square; + using stan::math::transpose; + + int N = delta_int[0]; + int M = delta_int[1]; + + Eigen::Matrix lambda_tilde(M); + for (int m = 0; m < M; m++) lambda_tilde[m] = parm[m]; + + T eta = parm[M]; + T alpha = parm[M + 1]; + T phi = parm[M + 2]; + T sigma = parm[M + 3]; + double psi = delta[0]; + + // Note -- when the object is declared as a scalar matrix, + // the differentiation slows down. + // Eigen::Matrix X(N, M); + // Eigen::Matrix X2(N, M); + Eigen::MatrixXd X(N, M); + Eigen::MatrixXd X2(N, M); + + // CHECK -- does Stan really do a for loop here? + // CHECK -- does Stan construct X and X2 as matrices of scalars + // and does this have an effect? + // TO DO -- test when this is constructed using the block method. + for (int n = 0; n < N; n++) + for (int m = 0; m < M; m++) { + X(n, m) = x_tot[n](m); + X2(n, m) = x_tot[N + n](m); + } + + Eigen::Matrix + K1 = multiply(diag_post_multiply(X, lambda_tilde), transpose(X)); + Eigen::Matrix + K2 = multiply(diag_post_multiply(X2, lambda_tilde), transpose(X2)); + + Eigen::Matrix K; + K = square(eta) * square(add(K1, 1)) + + (square(alpha) - 0.5 * square(eta)) * K2 + + (square(phi) - square(eta)) * K1; + K = add(0.5 + square(psi) - 0.5 * square(eta), K); + + // Add jitter to make linear algebra more numerically stable + for (int n = 0; n < N; n++) K(n, n) += square(sigma) + 1e-7; + return K; + } +}; + +// Overload structure for case where x is passed as a matrix. 
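+// Written out, the kernel assembled above (and again in the overload below) is
+//
+//   K = eta^2 * (1 + K1).^2 + (alpha^2 - 0.5 * eta^2) * K2
+//       + (phi^2 - eta^2) * K1 + (0.5 + psi^2 - 0.5 * eta^2)
+//       + diag(sigma^2 + 1e-7),
+//
+// with K1 = X diag(lambda_tilde) X' and K2 = X.^2 diag(lambda_tilde) (X.^2)',
+// where ".^2" denotes an elementwise square -- i.e. the SKIM interaction
+// kernel. The overload differs only in how X and X2 are recovered: x_tot is a
+// single (2N x M) matrix and the two blocks are read off with x_tot.block(),
+// avoiding the element-by-element copy loop used above.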
+struct K_functor2 { + template + Eigen::Matrix + operator()(const Eigen::Matrix& parm, + const Eigen::MatrixXd& x_tot, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* pstream) const { + using stan::math::add; + using stan::math::multiply; + using stan::math::diag_post_multiply; + using stan::math::square; + using stan::math::transpose; + + int N = delta_int[0]; + int M = delta_int[1]; + + Eigen::Matrix lambda_tilde(M); + for (int m = 0; m < M; m++) lambda_tilde[m] = parm[m]; + + T eta = parm[M]; + T alpha = parm[M + 1]; + T phi = parm[M + 2]; + T sigma = parm[M + 3]; + double psi = delta[0]; + + Eigen::MatrixXd X = x_tot.block(0, 0, N, M); + Eigen::MatrixXd X2 = x_tot.block(N, 0, N, M); + + Eigen::Matrix + K1 = multiply(diag_post_multiply(X, lambda_tilde), transpose(X)); + Eigen::Matrix + K2 = multiply(diag_post_multiply(X2, lambda_tilde), transpose(X2)); + + Eigen::Matrix K; + K = square(eta) * square(add(K1, 1)) + + (square(alpha) - 0.5 * square(eta)) * K2 + + (square(phi) - square(eta)) * K1; + K = add(0.5 + square(psi) - 0.5 * square(eta), K); + + // Add jitter to make linear algebra more numerically stable + for (int n = 0; n < N; n++) K(n, n) += square(sigma) + 1e-7; + return K; + } +}; + + +TEST(laplace, skm) { + using stan::math::diff_logistic_log; + using stan::math::var; + using stan::math::square; + using stan::math::elt_divide; + using stan::math::add; + using Eigen::MatrixXd; + using Eigen::VectorXd; + + typedef Eigen::Matrix Vector_v; + typedef Eigen::Matrix Matrix_v; + + // DATA AND TRANSFORMED DATA BLOCK + int N = 100; + int M = 200; // options: 2, 50, 100, 150, 200 + + std::string data_directory = "test/unit/math/laplace/skim_data/" + + std::to_string(M) + "_" + std::to_string(N) + "/"; + MatrixXd X(N, M); + std::vector y(N); + VectorXd lambda(M); + + read_in_data(M, N, data_directory, X, y, lambda); + + // std::cout << X << std::endl; + // std::cout << lambda.transpose() << std::endl; + // for (int i = 0; i < N; i++) std::cout << y[i] << " "; + // std::cout << std::endl; + + double alpha_base = 0, psi = 1, m0 = 1, // options: m0 = 2 + slab_scale = 3, + slab_scale2 = slab_scale * slab_scale, + slab_df = 25, + half_slab_df = 0.5 * slab_df; + + VectorXd mu = VectorXd::Zero(N); + std::vector delta(1); + delta[0] = psi; + std::vector delta_int(2); + delta_int[0] = N; + delta_int[1] = M; + + std::vector n_samples(N, 1); + VectorXd theta_0 = VectorXd::Zero(N); + + MatrixXd X2 = square(X); + + std::vector x_tot(2 * N); + for (int n = 0; n < N; n++) x_tot[n] = X.block(n, 0, 1, M).transpose(); + for (int n = 0; n < N; n++) x_tot[N + n] = X2.block(n, 0, 1, M).transpose(); + + Eigen::MatrixXd x_tot_m(2 * N, M); + x_tot_m.block(0, 0, N, M) = X; + x_tot_m.block(N, 0, N, M) = X2; + + // PARAMETERS BLOCK + // lambda term is defined above + var c2_tilde = 1.112843, + tau_tilde = 7.615908, + sigma = 1.708423, + eta_base = 0.9910583; + + // TRANSFORMED PARAMETERS BLOCK + var phi = (m0 / (M - m0)) * (sigma / sqrt(N)) * tau_tilde, + c2 = slab_scale2 * c2_tilde, + eta = square(phi) / c2 * eta_base, + alpha = square(phi) / c2 * alpha_base; + + Vector_v lambda_tilde = + c2 * elt_divide(square(lambda), + add(c2, multiply(square(phi), square(lambda)))); + + Vector_v parm(M + 4); + parm.head(M) = lambda_tilde; + parm(M) = eta; + parm(M + 1) = alpha; + parm(M + 2) = phi; + parm(M + 3) = sigma; + + // K_functor K; + // for (int i = 0; i < parm.size(); i++) std::cout << parm(i) << " "; + // std::cout << std::endl; + // std::cout << "x_tot" << std::endl; + // for (size_t i 
= 0; i < x_tot.size(); i++) std::cout << x_tot[i].transpose() << std::endl; + // std::cout << std::endl << std::endl; + // for (size_t i = 0; i < delta.size(); i++) std::cout << delta[i] << std::endl; + // for (size_t i = 0; i < delta_int.size(); i++) std::cout << delta_int[i] << std::endl; + + // std::cout << K(parm, x_tot, delta, delta_int, 0) << std::endl; + + auto start = std::chrono::system_clock::now(); + + // var marginal_density = laplace_marginal_bernoulli(y, n_samples, K_functor(), + // parm, x_tot, delta, delta_int, theta_0); + + var marginal_density = laplace_marginal_bernoulli(y, n_samples, K_functor2(), + parm, x_tot_m, delta, delta_int, theta_0); + + auto end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_time = end - start; + + VEC g; + AVEC parm_vec(M); + for (int m = 0; m < M; m++) parm_vec[m] = parm(m); + marginal_density.grad(parm_vec, g); + + std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl + << "M: " << M << std::endl + << "density: " << marginal_density << std::endl + << "autodiff grad: "; + // for (size_t i = 0; i < parm.size(); i++) std::cout << g[i] << " "; + std::cout << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; +} diff --git a/test/unit/math/laplace/laplace_utility.hpp b/test/unit/math/laplace/laplace_utility.hpp new file mode 100644 index 00000000000..90c3539a5f9 --- /dev/null +++ b/test/unit/math/laplace/laplace_utility.hpp @@ -0,0 +1,283 @@ +#include +#include +#include + +/* Functions and functors used in several lgp tests. */ + +///////////////////////////////////////////////////////////////////// +// Covariance functions + +// Function to construct spatial covariance matrix. +template +Eigen::Matrix +covariance (Eigen::Matrix phi, int M, + bool space_matters = false) { + using std::pow; + T sigma = phi[0]; + T rho = phi[1]; + double exponent; + + Eigen::Matrix Sigma(M, M); + + for (int i = 0; i < M; i++) { + for (int j = 0; j < i; j++) { + if (space_matters) {exponent = i - j;} else {exponent = 1;} + Sigma(i, j) = pow(rho, exponent) * sigma; + Sigma(j, i) = Sigma(i, j); + } + Sigma(i, i) = sigma; + } + + return Sigma; +} + +struct spatial_covariance { + template + Eigen::Matrix::type, + Eigen::Dynamic, Eigen::Dynamic> + operator() (const Eigen::Matrix& phi, + const std::vector>& x, + int M = 0) const { + typedef typename stan::return_type::type scalar; + int space_matters = true; + using std::pow; + scalar sigma = phi[0]; + scalar rho = phi[1]; + double exponent; + + Eigen::Matrix Sigma(M, M); + + for (int i = 0; i < M; i++) { + for (int j = 0; j < i; j++) { + if (space_matters) {exponent = i - j;} else {exponent = 1;} + Sigma(i, j) = pow(rho, exponent) * sigma; + Sigma(j, i) = Sigma(i, j); + } + Sigma(i, i) = sigma; + } + return Sigma; + } +}; + +struct squared_kernel_functor { + template + Eigen::Matrix + operator() (const Eigen::Matrix& phi, + const T2& x, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* msgs = nullptr) const { + return stan::math::gp_exp_quad_cov(x, phi(0), phi(1)) + + 1e-9 * Eigen::MatrixXd::Identity(x.size(), x.size()); + } +}; + +// Naive implementation of the functor (a smarter implementation +// precomputes the covariance matrix). 
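+// One possible shape for the "smarter" variant alluded to above (an
+// illustrative sketch only: the name is made up and the solver-facing
+// arguments -- parm, dat, dat_int, pstream -- are omitted for brevity).
+// The covariance matrix and the sufficient statistics are built once and
+// stored, so each evaluation only pays for the solve.
+struct inla_functor_precomputed {
+  Eigen::MatrixXd Sigma;      // prior covariance, built once
+  Eigen::VectorXd n_samples;  // number of observations per group
+  Eigen::VectorXd sums;       // sum of counts per group
+
+  inla_functor_precomputed(const Eigen::MatrixXd& Sigma_,
+                           const Eigen::VectorXd& n_samples_,
+                           const Eigen::VectorXd& sums_)
+    : Sigma(Sigma_), n_samples(n_samples_), sums(sums_) { }
+
+  template <typename T>
+  Eigen::Matrix<T, Eigen::Dynamic, 1>
+  operator() (const Eigen::Matrix<T, Eigen::Dynamic, 1>& theta) const {
+    // Same gradient of the conditional log posterior as in inla_functor below;
+    // its root is the mode about which the Laplace approximation is built.
+    return sums - stan::math::elt_multiply(n_samples, stan::math::exp(theta))
+           - stan::math::mdivide_left(Sigma, theta);
+  }
+};
+
+// The naive version referred to above: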
+struct inla_functor { + template + inline Eigen::Matrix::type, + Eigen::Dynamic, 1> + operator() (const Eigen::Matrix& theta, + const Eigen::Matrix& parm, + const std::vector& dat, + const std::vector& dat_int, + std::ostream* pstream__ = 0) const { + using stan::math::to_vector; + using stan::math::head; + using stan::math::tail; + + int n_groups = theta.size(); + Eigen::VectorXd n_samples = to_vector(head(dat, n_groups)); + Eigen::VectorXd sums = to_vector(tail(dat, dat.size() - n_groups)); + Eigen::Matrix + Sigma = covariance(parm, n_groups, 1); + + return sums - stan::math::elt_multiply(n_samples, stan::math::exp(theta)) - + stan::math::mdivide_left(Sigma, theta); + } +}; + +// simple case where the covariance matrix is diagonal. +struct lgp_functor { + template + inline Eigen::Matrix::type, + Eigen::Dynamic, 1> + operator ()(const Eigen::Matrix& theta, + const Eigen::Matrix& phi, + const std::vector& dat, + const std::vector& dat_int, + std::ostream* pstream__) const { + typedef typename stan::return_type::type scalar; + Eigen::Matrix fgrad; + int dim_theta = 2; + + Eigen::VectorXd n_samples(dim_theta); + n_samples(0) = dat[0]; + n_samples(1) = dat[1]; + + Eigen::VectorXd sums(dim_theta); + sums(0) = dat[2]; + sums(1) = dat[3]; + + return sums - stan::math::elt_multiply(n_samples, + stan::math::exp(theta)) + - theta / phi(0); + } +}; + +// Function to read in data for computer experiment. +// Note y and index are only required to compute the likelihood, +// although it is more efficient to do this using sufficient +// statistics. +void read_in_data (int dim_theta, + int n_observations, + std::string data_directory, + std::vector& y, + std::vector& index, + std::vector& sums, + std::vector& n_samples, + bool get_raw_data = false) { + std::ifstream input_data; + std::string dim_theta_string = std::to_string(dim_theta); + std::string file_y = data_directory + "y_" + dim_theta_string + ".csv"; + std::string file_index = data_directory + "index_" + + dim_theta_string + ".csv"; + std::string file_m = data_directory + "m_" + dim_theta_string + ".csv"; + std::string file_sums = data_directory + "sums_" + + dim_theta_string + ".csv"; + + input_data.open(file_m); + double buffer = 0.0; + for (int n = 0; n < dim_theta; ++n) { + input_data >> buffer; + n_samples[n] = buffer; + } + input_data.close(); + + input_data.open(file_sums); + buffer = 0.0; + for (int n = 0; n < dim_theta; ++n) { + input_data >> buffer; + sums[n] = buffer; + } + input_data.close(); + + if (get_raw_data) { + input_data.open(file_y); + buffer = 0.0; + for (int n = 0; n < n_observations; ++n) { + input_data >> buffer; + y[n] = buffer; + } + input_data.close(); + + input_data.open(file_index); + buffer = 0.0; + for (int n = 0; n < n_observations; ++n) { + input_data >> buffer; + index[n] = buffer; + } + input_data.close(); + } +} + +// Overload function to read data from Aki's experiment +// using a logistic and latent Gaussian process. 
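+// Reads x1.csv, x2.csv, and y.csv (one whitespace-separated value per
+// observation) from data_directory. This is the overload used with
+// aki_synth_data/ in the Bernoulli test and, via the exposure overload
+// further down, with aki_disease_data/ in the disease map test.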
+void read_in_data (int dim_theta, + int n_observations, + std::string data_directory, + std::vector& x1, + std::vector& x2, + std::vector& y) { + std::ifstream input_data; + std::string file_x1 = data_directory + "x1.csv"; + std::string file_x2 = data_directory + "x2.csv"; + std::string file_y = data_directory + "y.csv"; + + input_data.open(file_x1); + double buffer = 0.0; + for (int n = 0; n < dim_theta; ++n) { + input_data >> buffer; + x1[n] = buffer; + } + input_data.close(); + + input_data.open(file_x2); + buffer = 0.0; + for (int n = 0; n < dim_theta; ++n) { + input_data >> buffer; + x2[n] = buffer; + } + input_data.close(); + + input_data.open(file_y); + buffer = 0.0; + for (int n = 0; n < dim_theta; ++n) { + input_data >> buffer; + y[n] = buffer; + } + input_data.close(); +} + +// Overload function to read in disease mapping data. +// Same as above, but in addition include an exposure term. +void read_in_data(int dim_theta, + int dim_observations, + std::string data_directory, + std::vector& x1, + std::vector& x2, + std::vector& y, + Eigen::VectorXd& ye) { + read_in_data(dim_theta, dim_observations, data_directory, x1, x2, y); + + std::ifstream input_data; + std::string file_ye = data_directory + "ye.csv"; + + input_data.open(file_ye); + double buffer = 0.0; + for (int n = 0; n < dim_theta; ++n) { + input_data >> buffer; + ye(n) = buffer; + } + input_data.close(); +} + +// Overload function to read in skim data. +// The covariates have a different structure. +void read_in_data(int dim_theta, + int dim_observations, + std::string data_directory, + Eigen::MatrixXd& X, + std::vector& y, + Eigen::VectorXd& lambda) { + std::ifstream input_data; + std::string file_y = data_directory + "y.csv"; + std::string file_X = data_directory + "X.csv"; + std::string file_lambda = data_directory + "lambda.csv"; + + input_data.open(file_X); + double buffer = 0.0; + for (int m = 0; m < dim_theta; ++m) + for (int n = 0; n < dim_observations; ++n) { + input_data >> buffer; + X(n, m) = buffer; + } + input_data.close(); + + input_data.open(file_y); + buffer = 0.0; + for (int n = 0; n < dim_observations; ++n) { + input_data >> buffer; + y[n] = buffer; + } + input_data.close(); + + input_data.open(file_lambda); + buffer = 0.0; + for (int m = 0; m < dim_theta; ++m) { + input_data >> buffer; + lambda[m] = buffer; + } +} diff --git a/test/unit/math/laplace/skim_data/X.csv b/test/unit/math/laplace/skim_data/X.csv new file mode 100644 index 00000000000..377018480bb --- /dev/null +++ b/test/unit/math/laplace/skim_data/X.csv @@ -0,0 +1 @@ +1.41194878822149 -1.08033039280536 -2.25251263422204 -1.4216671389552 -0.193819465892494 -0.511112596754085 0.204958750659042 0.672056196872123 0.411686786168164 2.54799046214539 -1.045368769978 -0.645679471284471 -0.524312405308333 0.255655575950434 0.640454200089282 -0.872384609301254 1.26219885248633 1.16364388854211 1.88436860713814 -1.63086268930244 0.628135679648113 0.916299285585042 0.752712429409296 -0.488896881738602 1.39888129949839 0.207126473627195 -0.420886962443598 0.899905430546271 0.347169584382158 -0.262291950722002 -0.182387474138264 1.24801817507418 -0.105767532056934 -0.562250286319652 -0.306236362488114 0.758597957676436 0.929506397644404 0.267024308778612 0.899868034427574 -0.803292556473773 -0.398702873372505 -1.57701556728249 -0.362188604323827 1.44209594981287 -0.557494659125631 -0.642468774825233 -0.842563773206379 1.07784348685749 -0.257466634327099 -0.577943055241554 -0.763349504177613 -1.10964716346523 0.413630364226616 1.56822620761674 
-0.968490425911571 1.56469507132869 -1.53656603632149 -0.496786568263708 -1.12421460856107 -0.826200254379986 -0.402437946354498 0.437632578706549 -0.86222474027146 1.48807784664412 -1.08309487934184 -0.871122080440232 1.36337538578206 -1.12562488296457 0.57859271151988 2.01745779734012 -0.716362630242162 0.835769785041615 1.1340575163298 0.974999237926261 0.112415283824621 1.4641064289067 -0.221452424928823 -2.3324547850924 -1.39522654036352 0.630577312855937 -0.182137349674148 2.06014651636951 -0.415885510436833 -0.252434662620958 2.89271233274113 0.407041718125035 1.64160043628962 -2.21318868268893 0.130419082236906 -0.75985786965686 0.167447385522016 -0.6747617827435 0.758964731205767 -1.32717204905215 0.844886871053887 -1.15961800469421 0.600549301413695 -0.175140640499122 -1.00436807121581 -0.753295799420399 -0.225221878756523 -1.64810378374817 -0.681906560986011 0.726062516494881 -1.14696722511513 -0.0609340212006697 0.925553736047245 -0.575786274811451 -0.729840209102814 -1.38804987118231 2.38483578362095 -0.399009435226855 -1.38906794392623 -0.406428583835914 -0.838817491536145 -0.814607541108295 -0.218535157624505 2.30993950700086 0.102529703619852 0.29143009221501 0.990860460417988 0.607880386595105 0.713753046059582 -1.23277485007756 0.105221246900323 0.256692709202131 -0.0284642452559318 -0.857077203224251 0.178013709150352 -0.552978676317462 0.53279953263768 0.778478840434865 -0.029072180665632 0.0876364190723692 -0.0269459105535494 -0.947232120884138 -1.03919610006509 0.351272471434569 -0.504815588482955 -1.24131430182809 -1.23664008739033 -0.1343722231948 -1.38090974137272 -1.91276487371855 0.555239207834678 -0.635051388488811 1.41368728836908 1.72274083097664 -0.268528753645472 -0.606022241561143 1.19440515551826 0.393909357106224 0.227535451449792 1.41139027354479 0.0647086437144513 -1.56216377029613 1.0356069216624 0.602492772360534 -0.103456812080217 1.14954285634933 -0.0387416617790285 -0.137508536716401 0.989682127895958 0.197599260477522 -0.0387177292035466 -0.397495457375654 -0.876537844336985 0.865441316043699 1.53481873923849 -0.899015317547886 0.666112861760526 -0.306396458101142 0.420752936192055 -1.70893734607543 -0.879913546131048 0.0386710351374846 1.30522339569541 -0.0673772960661171 1.47282661406393 -0.153656358167031 -1.24703563992836 0.844019270169475 0.86748661770827 -0.0284560853051432 0.503199752026219 0.0964488411328546 -1.07845920994329 0.435824404328647 1.37011400139322 1.72150683342066 -0.533855863487758 0.2549820342043 -2.12172798463396 0.536970231448402 -0.204864379403491 0.355961261953778 1.81331426909235 -1.69171024272071 0.154771190596492 -0.0746932587935722 -1.08648690958492 -1.51148242790453 0.177569551242952 0.222360024645056 -0.822280828759444 -0.995363459276417 0.0839104406562389 1.2442174555804 1.26413018999798 0.386933598419641 0.520300991482871 -0.15396845176486 -0.170972957026992 0.221077913080216 -0.490826787210874 -1.17872242136023 0.598261924027034 0.77204644452585 -0.0600941463554081 0.346836831110826 -0.997361666792871 1.49301685860694 2.088158080303 -0.680059328965592 -0.71490301626367 1.50459631747148 -0.426356889957869 -2.89445523210271 -1.35768706697553 0.295437825646309 1.27186398050096 -0.847418953430954 -1.41544800375137 -1.36772430714441 -2.57155506267163 -0.168676938196349 -0.274260785071846 -1.24575041283702 -0.28644660072488 0.689859694016598 0.763258814148818 1.09406119182132 -1.34632496737462 -1.66017324492625 0.397709307829072 0.824782821587022 -1.26735884049143 -1.16321014424515 -0.0437927297451257 
0.0569014792802552 -0.93916296296121 1.2450287977259 2.37970153026773 0.414248700062885 0.865295172416464 -0.557237694452486 0.837395050673269 1.45971533295247 0.117613419665191 2.82474037747092 0.86202406431571 -0.076009593306887 -1.63953634172723 -0.355135820253899 -1.36886741885596 0.742346295095362 -0.961394034362092 -0.129800119586552 1.95611382907679 -0.417495127408779 1.23476997162698 -1.38951391466883 0.154515045701999 -2.87394678162268 1.7114134680609 0.671815212934699 -1.45590871858073 -1.72145547199367 1.48239086604027 -1.93500533839503 -0.253075530180383 0.907722600166273 1.53572614977071 -0.233634151180655 0.233394010267759 0.701035963110245 -0.519194582775703 -0.782470015075162 0.657486591693795 0.788944841748326 0.00794764654123919 0.664006162694988 0.158237701406653 0.728803861316083 0.340719771581202 -0.534711673077168 -0.400989052265405 -1.08131027442444 -1.88734343359643 -0.85162638096025 -0.732717865456576 -0.173108240481884 0.4156862864991 -2.4207295480187 -0.0132088542792834 0.322353228410953 -0.305307043492691 -0.625156838101779 -0.793294225509838 0.368531602927724 -0.406767667588757 0.311068548397287 -0.639147184342917 1.89181114557259 -1.69563578783155 -1.21940796376213 1.21847827290113 0.700221261507221 -0.0892545746825791 -1.34734560051715 1.14504604714776 2.04532226130218 0.0389211185236466 -1.73935643456045 -0.129664723703355 -0.084636032696091 -0.823924669457362 0.00410868324203977 -0.589247964761264 0.22830874985472 0.318707524633085 0.103649135142288 -1.13016146684486 -0.19896543262896 0.449049291603291 0.514596898052121 0.493894377938368 -0.186650736981143 0.550035988770939 2.45482079958684 -0.687717731504621 -0.38750954443647 -0.912749964117664 -2.19405495366081 -1.73301583716978 -0.577239325442348 -0.884293007552235 0.297346947703819 -0.0705886225820771 0.598467045993641 0.602078205124608 -1.03164026514272 1.00189192812309 0.507082235011541 0.828479128274112 1.88810307338955 0.131693814541433 0.943118551984819 0.474766469325165 -0.734802716434263 -0.483172769465195 0.439169682870372 -0.579435364206286 -0.0799127012421657 0.683948377955839 -0.628438286352969 -2.15033100178258 -0.70262986565115 -2.02574516183013 0.668404904117422 0.891833466355702 -1.20921871383337 1.8779262620654 0.980962549752187 0.413244684184369 0.300160095825773 0.216709348659412 -0.570816018987897 0.493099800937513 0.886381347524489 -1.61637580854078 -2.15376432289787 -0.383047385151217 -0.63368461697689 0.236978938354215 -0.533929279320066 1.13409824230854 0.438855985196528 -1.15249299560402 -0.857884215681247 0.01390235859134 -0.39184893160782 -0.149469566660223 0.200110140920306 0.145597154951757 -1.18706399596966 -0.636619150139966 1.2521246729461 0.739018574263328 -1.512866373913 0.0233544627747918 -0.243478456783863 -0.0479814356478189 0.314476797575401 -1.68810083553953 0.619093354724172 -0.324260137892297 1.07682919542785 1.76710466442955 -0.617302762967657 0.503208264399065 -0.0490722026776724 -0.992811794266306 1.79899888625996 0.0111057805879588 -0.233238795097505 -0.351746018779787 0.785441815411618 0.974942115492521 2.21837100952143 0.177665078676275 0.152512175369826 0.360655785546968 -0.193025220393426 0.0430363346762296 1.14993717898532 -0.493116692874584 -0.916737337266617 0.201956004014933 0.0748944582430646 -0.115988620323041 -1.47909044865943 -2.17882725159873 0.9647368473266 0.134602055465737 -0.733311577160309 -0.0214331612842467 -0.625007373595004 -2.61879634286401 0.368443239154844 -1.17992458736235 -0.978022745090203 2.25942599135459 -0.701122619221457 
0.650710564089444 1.56360329107316 0.894672695057242 -1.06207262490639 -0.304602996440839 -0.16731974402876 -1.19150311211858 0.940182283488165 -0.341207341355648 0.699802412479276 0.0754329926924037 0.341600516155832 -0.599399843119631 -1.44082959505175 1.36981468253524 0.72322410744234 -0.420420179111359 0.0684862472536263 1.68874833370111 0.963011438900247 -0.607173089114215 -0.210745771060275 -0.66191483774058 -0.584326241060411 -0.109626029806979 0.933659467079565 1.83126229625283 -1.04452348098806 -0.373036221574433 -1.6990410735421 0.0674501870906235 0.705440168073321 -0.182048473615482 -0.882883102634153 -0.0324494897372639 0.764236357543844 -0.0621943305306974 -0.806754722685171 -1.08155295324332 0.78349461402408 -0.722264022606826 -1.47215545138625 0.712013907113138 1.16240359484396 0.768714331744574 1.29160740867121 0.929725209203272 -0.404759182405953 -0.304429952155632 1.91335494307481 -0.376677471781 -0.122873082654875 -1.04069600667716 0.0850019028255163 -1.93801551933187 2.08206893065936 0.824170930477937 1.82540935697186 2.10835001555413 0.178400442748284 0.970652881681448 -0.0138750378795678 -1.64329222207801 1.4233180854879 -1.01953033119752 0.990960723727022 0.249513533217576 1.25956892508781 0.172834871103054 0.395574851842429 1.33614071056106 -0.0719650379599723 0.415484196981004 1.43482293611547 -0.477527709790191 0.369772700898251 -0.796907039434716 -0.690392775786417 1.43586970826974 -1.29759140413107 -0.750032695813836 -0.0993433500640309 1.43931459077055 0.679042003484037 -0.571434029212412 1.03511475257717 -1.0788195936793 -0.532017657294125 0.450714364062752 0.727529844534862 -0.168762592325738 -0.0201398340990094 -0.227504073307525 -0.086387034867459 -1.95980002431454 1.56618654418753 -0.10348207521706 -0.360301122462907 -0.255840441286975 -0.691586336252522 1.43665122257092 1.03742583636043 1.37246617695945 -1.5111115946133 -0.556205221423802 0.942454059823078 0.157648068799073 0.239224106433593 -1.36211398344086 -1.2058583872192 0.0293184176550579 1.32709687882755 -2.1405855105535 -2.25557126767452 0.568597824060945 0.705888475830139 0.657743325975524 0.56339447370239 -1.48581779298493 -0.160545190702689 -0.68474532945636 1.17503499238007 -0.37788613803976 -0.437446597833901 -0.423006011959649 -1.04223366944446 -0.373249466412092 -0.299276874710001 0.469181746223754 -2.32255536051938 -0.263923101096581 -0.561824743724736 -0.164805432174361 0.0549696521604632 -0.35196022598895 1.75692439224641 0.229188961490592 1.4184887490811 -1.12938048473389 -0.0838680237327751 0.296432362232625 -1.18769761801328 0.318922826779443 0.124458952263488 -0.0694895987272795 0.571844330759523 -0.732635604956081 -1.74942840159737 -1.78243931337005 0.256611598984857 2.03194328720239 -1.02099089914166 0.0926743266679249 -2.00200197511132 -1.0047162494109 -1.65087555608039 2.67020112252052 -0.867431332233137 0.411994534596265 0.982698854673667 0.597622431888503 -0.578410329709381 -1.94551788946741 0.502907442172056 -0.767142802229358 1.9289319718582 0.527498744932058 0.238742602079973 1.06110281081727 -0.552592481699226 0.656042868390902 -0.289830752258826 -0.835045174895338 -0.355756204661694 0.669846237313736 -0.54246988931201 -0.607457292605511 1.3293177930598 -0.719578137350677 -0.799742121903243 0.242136326870368 1.54555536834097 -0.634265783496879 -1.25798277901602 -2.93376329814381 -0.101758965253714 -0.90907745248729 1.88657724003032 0.0769488962792265 -1.59143859859108 0.0221917630613423 1.70554123465209 0.605039408138612 0.032263232458241 1.14633994568188 1.02410267649897 
-1.64109806919864 0.629953034996178 1.91533645041683 0.068372269309171 -1.56636109338616 0.373585994747808 -1.54230421142535 1.42437084977586 -0.649074551968924 0.611532717937546 -0.68401409303167 -0.810022419220943 -0.803077703306337 -0.173321489524126 -0.34390360158365 0.549069947279544 0.476654713087211 0.307160942528386 -0.24486724790779 0.197625284798199 -0.270914526504406 -1.17962489097214 0.5821897571218 2.74015086183872 0.801456930394666 -0.975361192898294 1.34368872279245 1.97861186356221 0.544402830046245 1.54401644036757 -2.10731821293285 0.360162514609217 -1.64043525592812 0.21204144417403 -0.00918906374687383 -1.1301394387184 0.0737225976766356 -1.03706259629056 -1.08532910510335 -0.131850514278808 0.387097944067784 -1.36998432122137 -0.36315383852901 -1.17389551022725 -0.97815260320092 0.915376034144109 -1.51314273667437 -0.327849863536933 1.14190083612222 -0.948604820096592 0.398377910604667 -1.05189896559828 -1.130218343936 0.198336934944055 1.49288442845403 -1.50700085534688 0.185258745232005 -0.541370171366513 -1.32507016061647 0.402383853638466 -0.229278758277228 1.14142294180702 -1.41623747899521 -1.74982934657212 -0.108508679161232 0.506326037554906 -0.489724159493374 -0.402987207574722 0.000477545808879523 -1.28100312406666 0.186795114140073 0.017761533846098 -0.259834955287948 2.29079472162841 1.59519728052143 -1.78866339293556 1.08255340973174 -0.393676861097992 -0.552585839366876 1.12302750441709 1.38637096607094 1.25480403636547 -0.147901333635966 -3.1678683817935 0.0766550333733955 -0.554920970889894 0.384224990649493 -2.17744839421209 -2.56275524514044 -1.63825234329217 0.465928825647443 1.83367677844293 -0.179716431364388 -1.40632306942084 -0.320746202650505 -0.420536522987217 2.45762084600075 -0.991871034447633 0.207847544246621 0.356915423800963 -1.90600679692873 -0.12804705170885 0.511685185303888 1.91684714497542 0.351187758694979 0.507842675488116 -0.838133050512168 1.98533843289673 -0.00427997562136357 -1.28135391353907 -0.110884024781119 0.881751576755015 0.813998918622959 -0.0786572894182244 1.3405123015202 -0.410402369606246 0.125592298513413 0.553603552929782 1.19770757516231 -1.54910420495592 -0.696457009394054 -0.585959559601166 -0.308023583250522 -1.13184770639071 -0.236145395507289 -0.872064522831547 2.38993308397789 -0.224534677040751 0.0590036004152823 -1.04750339545594 0.390026652707724 -0.198334245195564 -0.693205643055615 -0.24217078743371 1.46070747477177 0.988662459731823 -0.165327687546682 0.672665986668607 -1.76653079291734 -0.760902320391877 1.39500327051875 -0.303654300865817 0.10728952638422 -0.176593289162247 -1.65555581732125 -0.492353931140173 0.268656740576803 -0.0468864128625129 1.0068354937744 0.551743182057019 0.52700864463873 -0.00907428674294712 0.916586277022515 -0.981808942355219 0.908243396829101 -2.20240199755766 1.64703239581001 0.734432958014948 -0.824263551807997 0.208579477210307 -1.07029760380974 -0.526639853926189 -0.899866680750677 0.570583678416227 0.129272323640536 -0.713721823491639 0.0257644070934406 1.77447903546337 1.022225941258 0.1076402527318 -1.15973845911022 -0.020475641677307 -0.095315149476885 1.59937136605936 0.320695888673897 -0.134300287855203 0.0128955056267583 1.20089747225031 -1.20074860439148 -0.335511655254388 -0.919009253873977 0.989931494873261 -1.21619922548516 -0.492440779173192 -1.1234024270576 0.223536170604839 0.636287408928142 -0.678505180512803 -0.529889488161381 0.936428383629058 -0.437817934456808 -0.484926083331504 -0.307220493570621 0.841565621606141 -0.108864914497533 
-1.38103531895671 -0.140686729314402 0.950546617173344 0.819958195082542 -0.342020330150722 0.452135881423283 -1.05496095543767 -0.802960201587699 -1.48391660424828 -0.547865790111996 1.12315940290004 -0.8464887901296 1.25679452701977 0.526061862014203 0.483004074522878 1.11041763299066 0.961065075163446 -0.978780611062075 -0.264107870079735 0.741643159186779 0.452565877895092 -0.868219034871955 -0.535847777980301 -0.92754661356343 0.957852048519793 0.924792328474075 0.14500322637911 0.49389653915198 0.996969942164965 0.407042058753594 0.930853365593946 2.65084707170731 -0.4831462663523 -0.485082215260089 0.0673172667239729 0.109570397709469 -0.719385171862921 -0.175309845823759 -0.828213213671591 0.0231952376743329 -0.0236885810876311 -1.61168974142807 -0.371217735114922 1.60891790578465 -0.426837647542924 1.21659947231776 1.52586002589549 1.48631853782209 0.18160940562263 -0.680012830867125 -0.495609427135318 -0.166657585710809 -1.26874027894131 -2.25634677132136 -0.93649969343633 0.000958927923583631 -0.778922024575094 -0.991344242850882 -0.435343888666312 -0.53057945600075 1.41567987910945 0.13295285008763 -0.770031727817746 -0.830428526001353 -1.45894557417218 -1.38876649723593 0.459190426932222 0.418610516408695 0.163593104575396 1.40680255510292 0.900112060698708 0.659132801449115 0.39255102918187 -1.09124072734672 2.2562537996993 -0.243735901040642 -0.849157996065237 -0.656779401263382 -1.03497547165926 -0.334270407266154 -0.615576474297706 0.894915696980817 0.0707029885836805 0.281265283958574 -0.638985472123897 -0.546312534635576 -1.6401592754445 -0.483383498358257 -0.0592671516611193 0.0182337948070389 2.26782493543517 1.02353315090708 0.726525316809029 1.36091632317609 -0.028044245580878 0.469466203846035 3.78134640775012 2.0871956369115 0.126127222564797 1.13561996612714 0.751736366726208 0.894145994142501 -0.0713575290720206 -0.605909753682915 1.46853242566882 -0.00503678458908424 -0.125467909554735 -0.351132872639375 0.649797371667192 -0.043102960755879 -0.444647859168457 0.225920822105771 1.17183642531223 -0.707943235945446 -1.71762313461072 1.69983381893637 -0.469682021385917 1.57426955384086 -1.65843420191021 1.40480152642035 1.48299468007532 -0.778850131546035 0.939581770493544 0.935472736164652 -1.22450465951631 0.242029313534203 -0.855686404333641 0.339424514856822 -0.40508340911824 -1.94463187028393 -1.31138937480015 -0.395445597946256 0.808801779875248 -0.366018943097246 0.575751611543568 1.08788426341666 -1.4581111275284 0.782898394595646 -0.25138910914959 0.284923247991985 0.894135777735745 1.64594233357599 0.772350447283922 -1.08910941242935 -0.55577034468545 2.20212609841135 0.518846829550949 -1.05601824017908 -1.26658030282202 1.56460336618175 1.58404875000101 -0.990095886378405 0.181878974812361 -0.383785078583801 -0.129335207401552 -0.0967785774767577 1.51224279290951 -0.376310789871577 -0.77264643881956 -0.933007721020252 1.31118939282682 1.01124984932988 -1.54876447148987 0.780956708999394 0.13836285047886 -0.601910967021622 -0.3839973054179 0.325420763986556 -0.496599220064859 0.747296060418931 -0.511325508057371 -1.08913649164091 0.179529637497072 0.160686886346782 0.150435581807193 -0.985817517483331 -1.42871618939531 -0.177182990052717 -0.740426131468009 0.349765128453436 -1.49124777778047 0.311841946497749 0.140383462584985 0.0722182052631961 0.916263821281493 -1.33008970150649 1.02837066245733 -0.386343365884111 -0.497352535582936 -1.61944668502282 -0.37718647233064 1.90840105552768 -1.26282638266047 0.585498231802479 -0.519259138309213 
0.392170709573997 -0.626594024138924 0.416644781755628 -1.08744268140218 -2.26034550592706 -0.0216275029784681 -1.27776331760638 -0.354412206130128 0.585620796266269 -0.770870504020722 0.628874748941285 -0.891243501584927 -0.534811161989357 -0.399877539988579 -0.514869875878733 1.02773752180908 -0.258967507258597 -0.584275144691519 0.331641974689463 -0.447099590826263 -0.197096427564111 -0.0795515674511557 -0.396161772267125 -2.11301192199532 0.365480204388597 0.0202501730314127 0.35169413159037 0.60110794443905 -0.389152331579306 0.897697575819037 0.328396321385518 0.509336399324446 -0.373513849734129 -1.43051289796381 -0.740489494067513 -0.271263156735781 0.558874621325703 2.27074842819653 0.736276825728621 -0.106889941153252 -0.387410259380397 -2.48756214316285 0.492653176760243 -0.586760295896494 0.482730512179546 0.806243776164746 0.505341499455825 -0.0538259320848644 -0.567202616909106 2.98407270952813 1.25174009276602 -0.608142181117194 0.701360476617699 0.770631018502198 -0.803173497143153 0.20967602340987 -0.309155986293849 1.24353565078479 0.231683965052315 -1.87953091462006 0.927861349408337 -0.656582669644272 0.0886777991300843 0.0192309977537087 -2.22444216664999 -0.465845643044587 -0.232611295175549 1.37103029409535 2.16485641359656 2.92017904295201 -0.116176644758149 -0.351054496954421 -0.40688098974924 -0.382112463651818 0.663433070620725 0.215437288417903 1.35264365734225 -0.340077293342897 0.157953227962074 -0.187988737666236 0.0750964936758167 -0.961880101945339 -1.77877201512896 -0.418274493689542 0.384852557232324 -2.01811950296194 -0.841535926000469 1.46922247457878 -1.01223711181877 0.303976284418912 0.446636497921248 -1.20736886129372 0.670139387244003 0.694519682662411 -0.807062928176184 -0.924266367373315 0.442636440741651 1.07018895911269 0.749951115416326 -0.510756172987281 1.55151250247195 0.588311156780728 0.449397935909188 0.890786420387745 -0.301597637850619 0.632048872803805 0.155684230252406 -0.323048979253342 -1.11627751893735 1.55967157996935 1.15107164351688 0.406569075728021 -0.0646763639389333 -0.0832001113583249 0.102626079454997 -1.54678797858902 -0.886703465109413 0.13485631007292 -0.209185251426709 -0.549790573712672 -0.489674730759448 1.29527325294141 0.916212429624849 1.32282001125136 -1.22628321739166 0.2279121121648 1.64053954160293 1.20959008654867 0.588742169831048 -0.297806526242965 -1.14910254874984 -0.0854518484298128 1.13983397953018 -0.50452397081366 -0.136730498344586 2.21815524876265 -0.103899773352818 0.0830456496559522 -0.12535977614529 -0.214295695445033 -1.25676415684765 -0.198816906606848 0.68794393196138 0.893166654487624 2.26361201075715 0.879593980763486 -0.389817770213117 0.77876012513516 1.19936765568385 0.56804586425248 0.282942236170145 1.25455376187573 -1.47416169176138 0.20908139653089 -2.51676634839431 1.61606765903008 0.522842351719535 -1.12587465655668 -1.31293075974217 -0.587382539924986 0.109352532767512 0.160733719776633 1.36489032675522 -0.677392132236418 -0.700419520873413 -0.806436519813017 1.16157886259004 -0.115327081574712 0.939969859330299 1.34795790473553 0.587323588613837 1.02411073871025 0.194184526425641 0.0558769918664851 -1.92200940152521 0.287266738571072 0.978202015355734 -0.401757121442314 -1.60681489629257 2.64984924144469 2.14042848355071 -1.31802859500962 1.56372212622809 -0.122337451584746 0.107663441714915 1.99172921542775 0.233748491060585 0.619624409758386 0.0694823625374778 1.07723654637899 -0.207123343300934 0.171215228801258 0.822060068419074 1.8842707970716 0.282858066303179 
-0.187034359523394 -0.100578896066017 1.22177884440931 0.277183065950938 -1.2601210777329 2.6217517255589 -2.15108579082343 0.874079132711365 0.906512821505528 2.5582574414911 -1.28633289197739 -0.564688896381302 0.500374328666209 -0.323215997303177 -0.696261651486603 0.144210024721831 -0.551516436751053 -0.58165397952722 1.16252390973559 -0.0763493431692813 -1.11325356980057 -0.685970891863518 -0.794109479491619 0.985997036100503 0.901310736897201 -0.0554125065916177 0.726434147703937 -1.58795326589978 -0.342419248530682 0.466748694981973 1.18091842617962 0.509108249414248 -0.39727052705426 -0.150415463222295 0.116672146274165 -1.2651895854021 0.00615532719651495 0.051643957411227 -2.97377105311941 -0.567598794671637 -2.49719960867167 1.27937942367635 0.722842843863101 0.130755434275699 0.862299613455653 0.135644666965898 -1.32985579468256 -0.0773678839977756 -0.865121870645659 -1.19756336423718 0.205125155754061 0.491543272168862 0.106232621190543 0.475478346040229 0.0781701665235417 -0.467781312226839 -0.57555492900757 1.03701097099994 -1.41186191059804 0.439944895226616 -0.268610379413988 1.91674823139523 0.468279184262339 -0.862491165819006 0.219656839941892 -0.381988648075255 0.619661710196382 0.51048653452122 2.19541067747086 -0.455926355860758 -0.788651502232946 2.12238635368565 0.697072822329217 0.32971790353693 0.507371974608117 -0.0692865632345185 -0.693490144507464 -0.953543189382669 -0.73537421380564 -1.63226861855085 0.51640687578814 0.0174832017766219 -0.222470920848988 0.179301368741942 1.09298186305628 1.58082765217152 -0.778502831059311 0.651640329535586 -1.41753529293806 1.33047210947955 0.248562806848934 0.483366617716588 0.181375990287539 1.1534365175727 -0.362767358506924 -0.973969882927774 -1.3205751023933 1.29653427440837 0.47596568849858 1.69601837361163 0.971809668264045 1.6029536486488 0.722702734848784 -0.556506867504499 1.10385327910155 0.716693101745507 0.781215646311661 1.31585210042289 0.130436591446778 -0.0706549778909309 0.631415688661842 -0.994849977002629 -1.84153059032658 -0.268833744479666 1.67545752310407 0.6933104581971 -0.208725377579739 -0.0554062485135939 -0.929258784703461 -0.123936392322332 -0.907391491272595 0.249034563486276 -0.971194597634143 1.18327535774846 -1.49232150501535 -0.0471191820922718 0.994598420704336 1.41172714162404 -0.184126355829936 -0.533711941117278 1.36574283333403 0.82245433740338 -1.09064522046622 0.638064282300218 1.63746633412163 -1.72726692468363 -1.80223362685641 0.284137752636135 0.552982680708777 -0.135214592757547 -1.02744873410328 -0.25205412525445 0.693741807268132 0.68599278398396 -0.804010453627399 0.00996911179328632 0.558875583034402 1.05953054702931 -0.338272984688474 1.36664061265871 0.324591623484178 1.63248332744516 0.593488725395063 0.0897090443986595 -1.08502476308007 0.0265602653462932 -1.50360747676606 -0.38014745812989 0.668041044304757 -0.473775646531764 -0.719144060303129 -0.381273345496902 -2.11388348618585 0.429729608146271 1.04098451857853 -0.166660638491932 -0.182430569964653 -0.601388100932631 -0.181841565777412 0.268375535021459 0.224306954008473 -0.522486995640813 -0.45996950054024 -0.148021326914837 1.04768432684344 -0.297635210204521 1.52284266944825 -0.400826964430624 1.74219385345354 -1.39553990196031 -0.315989113006783 2.03622684138967 0.562848784471738 0.267727320828293 0.349753630411925 -0.00930681687457179 0.555139447588413 1.29530266293142 -0.580171749122155 1.51342759432789 -0.568021230940194 -0.185773956951809 -1.04505500648161 0.686112456445543 -0.311129788691051 
-1.61665279298556 0.915958303685196 -0.0813266141495719 0.312586622438449 -1.43737918766373 -0.543474299447261 -1.47292374400336 -0.882804234508962 -0.38460623462554 0.56165325926217 0.934564362060657 1.12355748874553 -0.160531615500467 1.10469221939522 0.0533942933023407 -0.874385905351258 0.455038800378477 -1.15629765516898 -0.781293556674605 0.994549069469252 0.134523315027988 -0.110710369746739 -0.283852040564968 1.28563096204912 0.18190801994899 -1.7805948323735 -0.0812974494799467 0.373040970096535 -0.240791045359425 0.523997761739201 0.392817215036844 -1.19979576754227 0.628222711663003 0.603681758836761 0.395346190684872 -1.35521349150929 -0.562475680293294 -0.275672545818785 0.185176144345496 1.140005082133 -2.61930317629324 -0.0566538024868319 1.09366247031166 -0.469359016344872 -0.611456450674294 0.329194294823599 0.744265593476395 0.734456826059513 1.96733946275295 0.468987473533462 -0.0461390624762999 -0.165858061418134 -2.12308648341216 -0.55592127336367 -0.117340731482695 -0.855755350083285 -0.0726827703162181 -1.78509623134275 -0.449476469062663 0.0224550515428571 0.13307961394457 0.558159184823329 1.66566729469345 0.442218332078562 -0.297068226172624 0.052638322027083 1.97985254691478 -0.851433579999197 -0.139906309216491 -1.06782061261255 -0.933675484978106 -0.865573525196724 -0.815487309327892 0.452394177889964 0.950320831264917 0.886066246537141 -0.231412784888618 1.05879717362009 -0.10397837309025 -2.28294352947858 0.0651381509857071 0.20709675969125 -0.782473031999853 -0.45007768841033 -0.424384265944416 0.702834895207836 1.2465439963808 -1.03418032287046 1.76143364308868 0.0324482183906796 -0.458639489715131 0.881273243554804 1.25584506775628 -2.29215794899482 -0.670968796712529 -1.10092021029129 0.499753253054123 -0.324932820941963 0.291373645838351 0.828207554229904 0.51535788367795 0.283002394416986 0.843416777213958 -0.0048601233547934 -1.39863957289224 -0.588677814342572 -1.65725202462318 2.12491620380133 -1.90242478255906 -1.77561715535264 -0.164341761269197 0.237954539446596 0.17867186837216 0.357342957555367 -1.30313704301233 -0.417611409512262 -1.00928994112476 0.516511157592351 -1.32966147420057 1.31595947122823 0.435689965137966 0.699468182384118 -3.01830861006228 -0.590696162148241 0.365915902626156 1.97637755071801 -0.560777291189601 -0.594437661319449 -0.275588619678769 -0.880349934013766 1.48344066311379 1.13479202233859 1.65676171547499 -0.562890319226605 0.431288184393717 -0.817235190176603 -1.3636329513215 0.497110508886076 0.389112109855753 -0.427318810578396 1.2427689988223 1.95534559180578 -0.908746999060113 1.45860016522147 -0.282857079558206 1.67625232698047 1.84344120182457 -0.458913519948982 0.77203072276076 0.723185200423764 0.165648341345115 0.635312528991432 2.09272156584692 -1.19368421278761 -0.594263921320972 1.19699385809454 -1.16940602756297 -1.07126064714095 0.474296223461375 0.881733200415491 1.54034387380286 0.29308492130354 -0.16243907938048 1.07665505587936 -0.869296143174872 -0.870879189462591 -0.00598932121173168 0.72181660484518 -0.723516643933217 0.704617317033089 0.634846741433392 -1.56304824298044 1.18147868298637 -1.39595908340801 0.315607930769194 -0.809319430667712 1.16354089200149 0.166246808968649 -0.552119819845965 0.209946932227467 -1.25087569319275 -0.488587523459796 0.203960866509961 -2.07807717484442 -0.424367046686881 0.245876948422776 0.0298573910222614 -0.59933656328731 -1.42876646210088 -0.86342479060493 -0.288428678720349 2.66345160372067 1.15772322719447 -0.467941390444502 -1.22604070006244 0.607810846177313 
-0.633287879180094 -0.204143382373257 -0.556060660896124 -0.21165417659864 1.8053480033099 1.35143673933659 -0.647027418728152 -0.646338860710741 -1.26351948993952 1.2958337584758 -0.580135808369234 0.32301288164628 -0.336584546696521 1.49772065807526 -1.57799656429059 0.340736334895009 0.186044078024852 -0.898708109010863 -0.223666441106839 -1.09461116206189 0.316356620677107 -2.81959911815968 0.85264266604246 -0.899920095570249 -0.268383726482203 0.411976047581824 -0.0851065351255348 -2.07468795877183 -0.325736160735336 -1.15517340093219 0.244359887767823 -0.652307312515274 0.00801497095753781 0.202600254308262 2.12969861629339 1.19050140591097 -1.90364691851523 -1.20709098570051 0.330645309637292 -0.00439599349803591 0.697302099659879 -1.33866947154813 1.47954397452764 -1.25539022953075 -0.690305371275662 -0.105755042894163 1.51550897058269 -0.0761877372681535 -0.713648083734102 -0.0434717874135494 -1.62025178994813 -0.180046001606853 1.46673907145653 -0.33163589506951 0.531033138325597 1.1023424859246 0.359056538903846 0.898199150181674 0.70118718505694 -0.408095023564753 -0.355242503102894 -0.436983664063687 -1.04906148735478 -1.31488764698 -1.21530814589592 -1.09230417058001 -0.862974847894897 -0.299763446643716 1.88239084878878 0.380786540463039 -1.88047588305578 1.96594903784726 -0.2681330926143 0.255422315938767 -0.0894279361269736 2.24123189483724 -0.985773262002533 0.189859883361488 -0.478160522004088 0.820037519376052 2.52002508204769 0.324108494904513 -0.138914457815826 1.56643770753423 0.38280722150267 -0.674757438615606 -0.312621186162729 -1.01271632859063 -1.07591095729573 -0.602337081039929 -0.715302592128648 -0.653543804130208 -1.46003429640654 -1.22134102119475 1.84739414470267 -0.820572053494195 0.0918915603038989 -1.03923871235146 -0.434609135235187 0.0933665638205162 0.257468678712013 0.131709859479978 0.469445563123917 1.05920038557861 0.279120405066546 -0.934780403758739 2.15231863190792 0.700128558827241 -0.775776313757631 -1.89087084831096 1.00733094128377 0.287008152104242 -1.31049441292931 -0.346577042144833 2.01514846031793 -0.0516980535417138 -0.480250493702057 -1.47449790267734 0.594229658449542 -0.101897037294237 0.065185508255532 0.363999117710991 -1.64061191847692 -1.11127613747274 1.28302646563442 1.12077799572726 -2.23123064132351 1.78070485097231 -0.0754921461714795 -3.72567552817604 -0.295792673093047 -0.231054310107096 0.148842492700894 -1.4516104953139 0.584353743894846 0.666867158779206 0.988454956781022 -1.04065414749664 -1.69464562638416 0.838892457700349 -1.02668905756741 1.61077368545474 -0.221906389233237 1.46828603803523 -0.698699332882928 0.965353307544708 0.160202605390511 -0.124061147665221 1.31305714736029 0.90514310214739 2.59311562538185 -0.760316011696359 0.278287785836516 -1.26348954105471 1.69796644402564 -0.450841313315674 -1.09329681923538 -0.0584718837596415 -0.488081053920067 -0.257417392332122 1.96786404976841 -0.197958850732643 -1.27592885452715 -0.0422350930888244 0.735467172957831 1.80188541557788 -0.255430627145505 0.467663205586049 -0.103891086714277 0.656126044203511 -1.4887073729658 1.09947651968736 -1.21638394439857 0.548382228721743 -0.880396637172789 -0.0174654185304391 0.140433113319661 0.223112200587511 -0.129787128285816 -1.52567431404375 1.32308383177913 -1.18854055891948 1.22761668690189 0.632923632078091 -1.39642007872339 -2.0144423479731 0.39004059476527 0.489530953674207 -1.18632782132843 1.09661055009915 -0.456850714361392 -0.788863295981469 0.508495455347666 -0.996637364236743 -0.46932417119113 
0.35433675621236 2.81572626356338 0.428167053558248 1.82209991143172 -1.33163086657257 -0.699715406573176 0.545010408981462 1.71864721996199 -0.14219657675579 0.706814163569085 0.167993563141393 0.807960403616777 1.65170534489747 0.563474615373237 -0.362021400754603 1.15354309380777 -0.337860644081955 -0.954276373296112 1.18228535632609 -1.37532055742284 0.558205389956663 -0.412838924574377 0.956076504861916 -0.00305562505686819 1.59311512332894 1.92131708021239 0.103335702903825 0.383442522879979 0.495236429043339 1.12065699963046 -1.10218132443733 0.992948559560726 0.514850487206993 -0.342839845580005 -0.784071509786288 -3.65856129253219 1.51997259405951 1.20693222343599 1.06273440277415 -0.424715206815016 1.48393259637459 -0.743181517322687 -0.147291004631364 -1.25746297541496 0.262711798406521 0.583943863821159 0.214817966922199 -0.0716323691056908 0.182847784287687 1.21543183449498 -0.454128729308263 -0.573757661577523 2.78930167078293 0.508739100410765 0.459818273480785 0.987193625360222 0.928386951852152 0.418361507994034 -0.369277001438227 -0.732791299533402 1.04253894122875 0.0913278268869255 -1.06091631820561 -1.29536134331224 0.608468202080421 -1.89575427942718 -0.486962529002436 2.0571142607222 -0.0545104557941608 0.177691154083739 -0.789212517731673 -0.0270558112658501 1.01980471514512 -1.30434147454789 0.820221694264907 -0.103093243338688 0.126027179733261 -0.219094141235104 -1.4941891157925 -0.662148409360904 -0.420249979536943 1.40477071877684 -0.959164985462423 -1.21518780172969 0.113396482286297 0.808208628519838 1.05420181718246 -1.93275009358717 0.496230911084015 -0.73239699692501 0.971888334624642 0.637351498132901 0.478982290310788 0.266735522748004 -0.57666263360298 -0.300589032554712 0.999929381046662 -0.16426755324866 1.93911950465103 -0.386263853988656 1.23704709847883 -3.29508071011216 0.911867185489383 -0.642272606889402 -0.566816343997371 2.7384447349785 0.187921187086532 -0.844022632791712 0.12643683433708 0.0167457853515454 -0.658728200206313 -0.764134954752277 0.291174818396691 -0.200923859017163 1.76247840782556 -0.974139957264337 1.31269629511509 1.38984121970122 0.260532977451813 -0.14301129255467 0.500071416580978 -0.482535093015078 -0.14936137921385 -1.29593493681372 -0.532112382484826 0.434649286866446 -0.250672237256819 0.0261979287614335 -1.90785309935497 1.96879913838607 -0.504393346794899 0.765795939703818 -0.683809559136111 -0.260086438003579 1.01187024575391 1.26966480662603 -0.445614278523101 -1.2396519720923 -0.318590483507914 -0.160093239173331 -0.484957191828104 0.8783841836709 -0.0196353622635185 -0.144625595662292 -0.410439200547869 0.582204942816184 -2.44106864904687 1.13036125176532 0.279827999314483 0.444749756331818 -0.0804954738013813 0.675324948445026 1.43194534435554 -2.21374941849416 1.85433812643803 -1.52558387083994 0.678476240770936 -1.62261259192465 0.404070314161274 -2.27228976459785 -1.09899450664658 -0.0303536918803252 0.878131247905324 -1.38440168431001 0.633862033491456 -0.525635929862306 -0.114345861475997 0.175025504323914 1.04450811300096 0.485273409776216 0.0803719203256546 0.896157453430302 0.581690072559866 -2.47165798514182 0.741953922686369 0.874377570258916 0.0547435354929153 -1.14351388579748 0.399733669671528 -0.286231650195204 -0.0507272627094725 -1.18200825068143 0.8107038102367 -0.0806475567335025 1.19650480555346 1.60619307759482 -0.786869700436128 -0.51736134278964 0.939855493613857 0.351745812929353 -2.05307944747016 -1.78556051176534 0.577212321668242 1.59537691097178 0.616019474457263 -1.93443212986731 
-1.08407750197163 0.408591411328607 1.14755904742382 -0.336142709543469 -0.24573784686071 0.92446232517241 0.463209501440688 0.0364112572874488 -1.18538938522404 -0.746597047295248 -0.0101447055720985 -0.579770399233674 0.651872560874093 -1.28808825924636 -1.02102154848237 -0.272147388083447 -0.505633696714161 0.259254766852402 0.0818993828335472 -0.488767183512249 -2.23199608997932 0.0836662313392549 0.322177587792443 -0.140412630255048 -0.198886107616948 -0.41399447603997 -0.464704713016078 -0.891399081908397 -0.165078194550704 1.48805231478884 -0.992639424293809 -0.611277432551933 2.09295495251972 0.73669240798287 -0.349956000306896 1.41962219717572 -0.837299085913318 0.870920707599892 -0.482417753567837 0.714432052095644 0.943019456979258 2.07380903489007 0.453920872614775 0.561239787905431 1.86192383821418 0.254298738137248 -0.597780875736375 -1.01676669663748 -0.921453416985917 -0.384776679699972 0.240895892433143 -0.45644319067662 0.203206320550033 -1.124864050702 -0.493303619472746 0.173171563841797 0.0692317194663574 -1.15308300563184 -0.613043769719218 0.495056656641097 -0.136569086505494 0.0284673561498479 2.64349931081395 -0.408530340513307 -0.0288745365165266 -0.366888008229636 0.612879701842164 -1.47761359934889 -0.45480512028354 1.81950835549388 1.27264498775039 0.149467090658309 0.885153297610261 0.28265383488222 -0.298705490110983 0.234051069071419 -1.17978204369732 -1.33245910640414 -1.45448093523839 0.947647361253028 0.297940716139219 -1.78388484773388 -1.17880855190081 -0.681119709669803 -0.481802280529847 -0.5306570531192 -1.25815762866858 -0.815637462740978 -1.18585493275952 -0.435792220306415 -1.7695119522787 1.97277478598539 0.828502903633587 -0.973218959362918 -0.0151254989637448 -0.152062073409937 -0.129937627239587 0.0449069708110376 -0.270685443906819 0.201658032987934 1.08723316312189 0.794423041766921 -1.2457235157352 -0.132747444787547 -0.119107960810134 -0.858746341403567 1.036477096309 1.12724478027214 -0.462780945792337 -0.226466658090355 -0.342985860407274 2.4278997021399 -0.413477642692083 -0.879870858460489 0.990320866426555 -2.07391433056243 -0.455094666452425 -0.175229122771382 0.568423319750076 0.150243201710389 0.474203353552826 1.77508090657551 0.0736362002077486 -0.43908084899315 -0.582612008142951 -0.914252335825926 0.625165181508695 0.487370393719167 -0.478326968726751 -0.374321697411975 -0.620491656343916 -0.711039425449649 0.294964095343103 0.706092154208997 1.26040881410863 -0.699079283679808 0.0420222248743829 1.13085897607413 -0.433201783831432 0.155777656445957 0.831704004782023 -0.494297062467438 -0.29968657890906 -1.15441660654483 -0.184750967360312 -0.837723127356363 0.000802004244206229 1.07434953991371 1.23599836065444 1.47176365336081 0.635840899322966 -0.14906521487019 -1.667260726123 -0.479548134193353 0.127999367336409 0.132448393028504 -0.792315787570886 0.246607873519508 1.46904352197304 1.18875318163619 -0.137409967196296 -0.447962761924304 -0.122410531874977 -1.38078146652852 0.595284731432051 -1.49632285075648 0.0170020995034971 1.23612741270505 0.63440803169048 -0.0662037512144913 1.41484957296958 0.0848490598598857 0.795577518351541 -0.101597948823711 0.290980008678573 0.318298729543989 0.293646666902731 -1.35518882965733 0.023566134164656 1.44927111151414 -0.723522101109359 -1.56156661628726 -0.304855545181654 -0.276554887453829 0.20728814550534 0.184470819921526 0.0939379851951872 0.894316850712354 0.734413552228843 -0.650151893398463 0.145732767359433 -0.406291001794641 0.627098460132557 1.1093950385069 0.39536862473217 
-0.564467149537581 0.184504264948548 -1.94627961806001 0.833697724137808 -0.934935445406817 -0.236802113642568 0.439857485398858 1.54653053238865 -1.36570942670561 1.30843152076966 -1.06101233096786 -1.73913983442666 -1.05128193554655 0.611006990647714 -1.74038005783473 -0.462896879284223 -1.03765641609531 -0.863387231721895 -0.716339849420414 -1.22638355429663 -0.57418480896261 -0.94938985135677 -1.33332272669131 0.696949036339029 0.0695211092208299 0.11060630648987 0.354483331848932 1.80262800138355 1.05496934755749 0.868187982049444 -0.0484006648464503 -0.324830325201465 0.202671620279831 -0.15034435706575 -1.29717045273809 -0.654086643674618 -0.0151479208764658 0.196950949187157 0.738146755162354 -0.522087683814672 -0.130091567072127 -0.110501283650358 -0.711730733887015 1.15505499509908 -0.347654731744367 1.15870374942264 1.21831247992954 0.719839748733622 0.202943512758713 1.45973862854136 -1.12654547723401 -1.78149491526292 1.33717950510482 0.00857708956946441 0.153079464139509 0.225257693604775 -0.260698592438703 0.0171888038330899 1.23508564199693 -0.172388833234094 -0.426567982833608 -0.290246537624181 0.0518557838560987 -0.123247118312775 -0.381771669874689 0.151314092426026 1.2997289208393 -0.156014186464675 0.414795017222848 0.0818613079588934 -0.0502360493125397 -1.35211583849431 -1.18000524629205 -0.310229686934406 -1.20974345284907 -0.0070688705588827 1.09656823373461 0.0204291703150352 -0.676503233643772 0.0816176142916565 1.97505802493706 -0.705275925189776 1.1317240279025 -1.38617828001902 -1.71455364523543 0.0137677616880514 0.156546926836209 0.46521802554385 0.426858636288151 -0.044780239942336 0.351527100597761 0.499316488053781 -1.24996887475939 0.444814125588986 0.339629588753518 0.583673464726843 0.413751124944123 -0.0353871994752442 -0.374276270776394 1.18426674784003 0.655641159047007 -1.28408990632963 -0.792840589265885 -0.349194987052051 1.07155674917894 0.125039823423326 -0.0947826456772718 -1.01955317193234 -1.00524405468789 -0.120848562495336 -0.249988049883965 -0.251422144671929 -0.838411204423072 0.118549270274572 -1.84228466243084 0.811683612092479 -0.61814750448266 0.555120774689674 0.103250331152433 -1.5280435569145 -0.611097605341251 0.987193855343597 0.662829970299968 0.0902898847199522 -1.42498679708666 -1.01677762354336 -0.24251915471169 1.77506016221614 0.902331517277856 1.27905980068279 2.28445454419253 -0.31295157049188 0.22975957801223 0.778137223194105 -0.604781646780061 1.66258915774605 -0.166561942188307 1.75391619711387 0.922947067927296 1.14518794277089 -0.127490162351764 -0.379968199645188 0.956924312965095 -0.414896377648142 -0.970196696032443 -0.311612293442133 0.25146690306588 0.722351557562824 0.631791531091089 -0.271694356551103 1.57467491631422 1.65582367921174 2.51502771602591 -2.54479340195683 -1.48684242819526 -1.37350111189653 -1.81986410257018 0.627979543182311 1.86246002139329 -1.08184833660546 -1.37558469353806 -0.05955377986379 -0.611462053273754 0.928568538134994 1.40869193220067 0.708907839976468 -0.985359537388846 0.690699117033239 -1.37439139306649 -0.720968785262354 -1.25002695696011 -0.160871334042366 -0.653157902714353 0.497273260663865 0.166481718569366 0.740193129287934 -1.25675626246336 1.14614483673444 0.704081010063267 -0.187938850436135 -0.0269795153334003 0.934571597934088 -0.113784142414052 -1.09272528545169 0.275401176035673 -0.686671348590012 -1.02350394938688 -1.561279923129 -1.08271249856723 2.81613185322827 -0.90633689997145 -0.026029945580359 0.647291770848952 -0.521262411619004 -0.041187625049085 
-1.31388037911351 -0.684746493855069 1.84892100712256 -0.480181925438187 -0.339709554502107 0.198312563980255 0.87882358915472 -0.206431825633854 -1.86587810353596 -0.683157258084922 -1.04770320330993 -0.714960293490607 0.233161801315302 -0.202898117039059 -1.1505712248126 0.76235381293267 1.2293481306032 2.12417249400026 0.311107493855378 0.962540907273878 -1.63293952420909 1.17723222299256 -0.521021713289698 -1.33882443196419 1.06921143201328 -0.880444676666607 -0.659233577270743 -0.712539285413588 2.44757552582659 1.35164269537756 1.92001968147486 1.36419926316207 -0.357467747524257 -0.458539323548248 -0.692884334074973 -1.99629700058009 -0.271064402682147 -0.0555500260841894 0.583942278956735 0.874981074014864 -0.67292062387022 0.93433196611019 -0.765013584204673 -1.67406564091342 -1.97322164452517 0.657261449120902 -0.519041671940968 0.181035011316907 0.0462820570620652 -2.17458950818991 0.742676812122332 1.30381939648158 -2.28158048016646 -1.84941579340357 0.74923783332708 1.00806009313667 -0.550165781375249 0.478771135375542 -0.64807286274225 -1.75730600781473 -0.225777160417184 0.439997839582182 0.0224476329971647 0.84262416385035 -1.46374286798195 2.29738603594053 0.944644410659708 -1.12139034622423 -1.55360523623767 -0.290392356186504 1.13843961046175 -1.71939114313034 -0.939270228236415 0.137741062869815 0.686027367743556 0.248119858195058 0.796218642498017 0.94851983710033 -0.410208647286072 -1.18134774675812 -1.15803048540148 -0.689910093334551 0.728903973454201 -0.999265273747 -1.06718291667058 0.0552384819728953 0.187122417938961 -0.495886040947359 -0.368504724587386 -2.0139154298732 1.11869548824854 -0.878715033129417 -0.333431077371857 -0.652204068470891 -0.493835281637616 0.429399449126834 0.148882934224078 -0.00606656888805418 -0.409037010724431 -1.01424467238946 -0.471846831534444 -0.590662953931181 0.289471625193706 -0.397801041681476 0.250054661246353 -0.426221631478793 0.723428943500155 0.439561128344944 0.478224361375133 0.645664796625743 0.246406555459855 0.142143550613209 0.327093132872614 -1.11175678088058 1.19517203929447 -0.933938947033682 0.227210395986413 -0.606951143431678 0.117219960245566 -1.1072535344061 -0.281257683685847 -1.54179018218599 -0.943739712986128 -0.444405165814805 -1.25823996578609 0.1912973649424 -0.972242134885244 -1.43682815831326 -0.110240561301214 0.21834536115264 0.354644754105087 -0.526019167261578 0.534765557708088 1.71021612188196 1.21255533297307 1.26126501187556 1.48171706683518 0.0517434887542123 0.688819002314531 0.649357247113515 -0.311944273052213 -1.40147165731983 -0.430662581510144 1.69770585684474 -1.20911716391067 1.3629588844793 0.0130073053273334 0.765809927489189 -0.279489461474263 2.52394675122282 1.19875168594436 0.156306125964061 -0.448793784745447 -1.77787511971158 -0.00050145487997882 0.0294063005797245 0.881758797258356 0.11364828650051 1.48959136852905 -1.53220720861409 0.160598303928932 -0.3518594416961 0.0680047068599982 -1.0541132485218 0.0228561965722117 -0.420422958628569 0.767888659248789 -0.219126449590361 -0.544899044547351 1.29748920816515 -1.51820066854415 -0.512332370803878 -0.390099275728835 2.29325821088935 0.267212281487341 -0.0151928948146757 0.676103600347935 2.06319075096154 0.291073024386062 0.24959058890773 -0.890373980093928 -1.06184122444565 -1.03798769093772 1.37657997757746 0.0260645812657549 0.0496843309606463 0.271381559512055 2.403046684338 1.28952522534086 2.13895656373674 0.332260564370597 1.15148615666549 -0.629511605136999 -0.471545173227162 -0.8419938364489 -1.89740115005096 
1.88317990888687 0.865137565824638 1.03352140889051 -1.7785851878051 -0.25291050649338 0.783162736283528 1.75691798614975 -1.70869840095312 -0.00912273378055361 -0.631500993147928 -1.70742162193103 -0.873303100174171 2.38090705021943 0.471561899363472 -0.367935688510347 0.597096590524185 2.26114887489013 0.295799194463711 -0.715633809807286 -1.98089830107854 -0.575878615811967 -2.62718040312961 0.313750035531744 0.516520518137358 -2.13563431019764 1.06432996708187 1.19576563294623 -1.43950514991166 -0.512562506626658 -0.518629394899764 -0.161690221180485 0.385863274915625 1.07349758505856 0.311089786262723 -2.3016178649338 -0.779823188884915 1.5739374377128 -0.0507675000374154 -1.01631040262434 0.181449153929529 -0.325037449143341 3.03251856367855 -0.420864340693811 -0.290329210483378 0.0969151962638814 -1.2104143198751 -1.52732552952491 -0.534951436614278 1.25948278225945 -1.05646957556908 0.0829754830419755 0.679914765583477 0.644410559350237 -0.617785592249797 0.0480881894065016 -0.0352707853589458 0.730379009956261 -2.44591646001795 -0.130582502154422 -2.13006565311527 1.15891130701528 0.779314630150908 -0.0314132696462154 -0.982997334069277 0.791743638704352 -1.6693345744218 1.04844862409533 -0.561465947786538 -0.851416944615652 -1.27673948970735 -1.11199552296129 1.84967343100797 0.859829789159452 -0.431377572881296 -0.361028838068423 0.425199821901076 -0.447372441676566 -1.47372722278569 0.758379538481997 -0.421449226762265 0.294422770891389 -0.420927367024344 -2.01675232356887 -0.668845090391239 -0.0970621460137261 -0.480314497003046 0.456900208531098 -0.0771590149637866 -0.915463492105229 0.263788263373287 0.161115535935096 0.651558293399016 1.27338637093357 0.640311786904393 0.0881514892904949 0.0845457034619391 -0.366713103032086 0.175405010097305 -0.565650425931598 0.321236120439051 -1.02801678502986 0.0156366217785223 -0.870768155230152 -0.71629927009442 0.0788030442929349 0.130645742774231 -1.06876027844363 0.161887196659835 1.85231900569399 0.988477101563006 1.61988563906755 -0.898762421410398 0.74662548373756 0.258787564588643 -1.23509554675388 0.20087261258296 -0.567195369381496 -1.00923209707997 1.05272924724694 1.59371705211502 0.456077652219168 -0.0269547006151804 0.838954487652503 -1.73769110048388 -0.546205719505964 -1.3032587172205 -0.402803446568018 0.463418718209955 3.03318033741639 0.543698990023638 -1.73390734003393 1.03768898909057 -0.311400085362816 1.67754394286759 -1.04279070576843 -0.997281221710957 0.927170589775073 0.631456717302279 -0.726675695902673 -0.530339917096469 -0.207536894745122 -0.354228323114093 2.38849544550064 2.00214793346689 2.11512123773433 0.272449767879895 -0.0352927383735609 0.245784713746613 0.624393594163564 1.59342357273577 0.532418086180745 -1.34134657779897 0.530642994138585 -0.596549812724344 -0.782606355052096 -0.680488414482119 0.609850521009258 -0.285532140062442 -1.67339054989078 0.201055586224061 1.29776350825783 2.49102904504995 1.21651487711036 -0.0453056931813642 -0.374921592209172 -0.307338927749295 1.2278219118757 0.642979681946262 -0.197852837850081 -0.891387826250348 -0.576629042484627 -0.514261242510764 0.710744685977209 -0.729004332371551 -1.74914868354329 -1.50412349534151 -0.280316979163012 0.146062019463371 -1.1015631628477 1.69511426700198 -1.28062553345602 -0.522359273684756 0.450188523579981 -0.942968292074914 1.67544772862144 0.184883426303011 -1.05403874446947 1.22101182768427 -1.80629268131459 0.404878832382697 -0.0229660172027257 -0.986966244812164 -1.59941447243515 1.90718080859242 1.8157774599819 
0.635695676827252 1.21181819597448 -0.666574316597648 2.35125021803435 -1.31747088729594 -0.998832376487694 -0.927618959622041 0.947245232428523 -0.0518448811531908 -0.651605692229497 0.956116226967275 -0.572172635265356 2.74781143935543 0.891261019237159 -0.0231983487213918 0.381554739933216 0.735304423291366 -0.0591615640367172 -0.685323896394671 -1.2713575967809 0.871722769159815 -0.10412632399463 1.44036013836654 1.05522561610718 1.14369554797966 -0.612702612966659 -1.37872071222839 -0.33938127566341 -1.20244710491529 -1.84573053678781 -0.459156647546516 0.809332575786798 0.560513609454536 0.159518885755558 -1.4462187834906 1.33167055584189 1.24635210126008 -0.734126141322619 2.12245383026719 -0.0164293765331044 0.124177255587854 -1.00383567318813 -0.912307979153302 -1.31875625202056 -1.34738100323166 0.595647843305337 -0.237760266828041 1.54121099126833 -0.114537154741609 0.977227339865088 0.922934725724924 -0.297915258577856 0.613266212654501 -1.23053508043139 0.0332249066517883 -0.944833233267999 -0.121418088325303 0.563827814856467 0.423858528123982 1.08902242465268 0.0421420122377492 -1.00047379362991 -0.195764528731572 -1.1821391943965 0.778090839394336 0.535122042669154 -0.153702102570362 -0.238794181554873 1.10605876637113 0.865895393360931 -1.69047503158481 0.158420697781725 0.576835269712066 0.840269414367375 1.63004559666917 1.48588214551047 -0.480683172482958 -1.61895654344102 0.179867264356831 0.770873365305124 2.35583645354671 -1.13789772798139 0.0885301934129257 0.567222695586405 -0.638260996781658 -0.102280518413034 -0.453606367093639 -0.764939485722119 -1.31532301542183 -1.08418989300472 -1.54673357329242 -0.00349701980957218 -0.397153519330188 -1.28298203528229 0.911511162064234 -0.567676546560906 -1.6065895373379 0.939732571167917 -0.84017781058408 -0.51096784619683 0.307582434080972 -1.01387394512439 -1.80757220973953 -1.14780299264593 0.107107551606664 0.0199215977973106 -0.402849318205434 0.208428359780318 -0.117606135453873 -0.103258686832416 3.17051425694195 -0.371527871037039 0.22824051690063 -0.226175337094175 -0.525086432070656 -0.235697847211318 -0.172303848681935 0.0640486205035276 -1.16243039777659 -1.20952388867041 0.996233871914449 -1.09806421762864 -2.91387869066471 -1.04048739778801 -1.13370411854841 -1.49072598869727 0.440302162972155 -0.145697089697058 0.666376167775424 -0.118047179215655 -0.921926065866896 -1.92697531953664 1.41574849198436 0.241795262936567 1.68861999302011 -1.40073715737445 0.651333255723333 0.0349277478413841 0.00623075523160021 -0.314007830153486 1.99133175526072 -0.0435579517398323 -1.10719589208988 -2.50233226511243 -0.601437553542819 -0.077337946008293 -0.182907017675086 -0.445747926026554 1.39735516715874 0.307432914121901 1.30168511121763 1.97114076117538 -0.891162951348015 -0.179246549388248 0.353002651071421 -1.60933260484301 -1.27517006936873 -1.10885905138048 0.426932970745741 0.172052498697038 -0.604322333578673 -0.534932180228257 0.565388700336762 0.351644443694533 -0.151685629297891 -0.863288718934854 0.128082647958898 0.393760423984882 -0.893731159978301 -0.97991943159713 1.17573449646889 -0.219755725911359 0.46813810946684 3.25708917764777 -0.0545725205759931 -0.738889740770187 -0.727041449488698 -1.01810682405706 -0.830246931860797 0.0799346284174171 -0.59026737178059 -0.0504915453698081 -2.00455338538678 1.91349814238005 -1.28570822789565 -0.483333267609872 -1.41271244363714 -0.192364774469817 -0.41022268421033 1.61499386041491 -1.13691585553636 0.036425354155555 0.855922259443957 -0.667371719705615 
-1.45746319261599 2.67349780565136 -1.07324667402683 -0.790740789908891 -0.367894120161794 -0.153825234395181 0.610881074198297 1.23897720294364 -1.18417882245319 0.858893588312129 2.16807963087602 0.191697753624708 1.66890205181461 1.10097189968488 0.619373881202755 -0.0183424083249391 0.261390256081365 -2.18446934257032 -2.19052868200624 -0.253372585433854 0.495748205904902 -1.68519754580513 0.341029620425233 1.60198303994071 -0.287050056111248 0.954943528615805 0.776953168354219 0.406000958845074 -0.42258109041734 -1.01532009107594 0.304188195746796 0.396451617214746 -0.271789594792633 0.269698019994178 0.361062949256848 -0.546747439346549 0.44346204184314 -0.586448219239968 -0.170929912538546 -0.886309551777573 0.489957206848307 -0.598679152726268 0.475195698640197 -0.13744152391502 -1.94376637026485 0.555194048034792 0.0862886707649726 -1.60026577655313 0.0240977254912 0.332861747952113 -0.0339200774589088 -0.693531475780141 -1.59130160945421 -0.590749857749938 0.537562058775486 0.466137529548647 -0.30981211855153 0.214625717231147 -0.0307457204789743 -1.1880073009276 -0.55377118266717 -0.0657236181155447 -0.0916631867482456 0.718224435327261 -0.86667269996986 1.26950284542135 1.18886753736579 0.147337110202777 -0.281947575644543 -0.811296562245445 0.294459717878955 0.908901485709033 0.838322947234095 -0.146623209849676 -0.14324629758394 0.487905223638553 2.51851035772912 0.231158093378979 -0.677266427215092 -0.0464412966420877 -0.0149858746049227 0.596145125846712 0.301189373091114 -1.04956842427418 1.24704545964857 0.690510967634236 -0.874515604829936 -0.631414068755783 0.0377978014147372 0.836514425434533 0.213934561309329 1.08280595552591 0.754118514808427 0.367181336274492 0.430310336908127 0.637273025534317 0.616808395404665 0.483182699235232 0.405360675511369 -0.0696730347317194 -0.660595857111471 -0.136301712831288 0.637600737707825 0.55765347049155 -0.0872984371176684 -0.984754419173878 1.58970171440521 -0.264077422957605 0.517075422833298 -1.05486798932297 0.320386347275106 0.205029677262089 -1.41915950661278 -1.03678518128409 -0.0721672918996937 0.0736103683297148 -0.135048954725221 -0.306838782381194 1.12097734372713 1.80134259780471 -0.792250445564741 0.19727461911116 0.0803845679560629 -0.74100923401372 -0.162221171267813 0.596044563324518 0.451675905712982 -0.456593673469231 0.116148948787984 0.760834679510597 0.369532184097969 -0.704738220663473 -0.669909044566792 -0.122324579287573 -0.888170578264222 0.0397050582116424 0.564422143746908 0.593780600650564 0.581107554719765 -0.0419168904992699 -0.345304831573626 -0.735929403475568 1.26773159412321 -0.030442318695367 0.459944073817612 -0.193809758317981 -1.42450739857056 0.525540202203348 0.360151881714432 -0.919709575823153 -0.713068198021116 1.29771141635372 0.857959982285048 -0.0296241314837643 -0.745359162031209 0.718513534381018 -1.02375172778875 0.201894979312963 1.35293817315887 -0.0280821816857506 -0.970756269599545 -1.33120782055354 0.085894178027969 1.20224156219034 -0.603406851506084 -0.148861712808985 -0.673079994576552 0.224220265734304 -0.552077599685958 -1.81854817238534 0.732189075742344 0.860821582746349 0.114671693330511 -0.0111275729271404 0.0977304634388803 0.272251657434173 0.136770630779537 0.215432073282858 -1.30766356227685 0.0746581959285301 0.03662894309421 -1.77968103610945 0.414266249406483 1.45884692860767 -0.126599508117915 0.0880253812352905 -0.0558870408401573 -0.128809366797041 -0.842963692590359 0.40358281463942 -1.58341710171242 -1.2576081179142 -0.518883238118347 -1.36583955220751 
0.27850637711745 -0.694579760724919 0.207784051166713 1.1812633009081 -0.0657166007021888 0.985631000623591 1.1139842893164 1.26586643267885 0.278857536730819 1.84164962343179 -1.14771221315691 0.610931957246624 -0.846412773797486 0.200993019279761 0.152828305580235 -1.61498097655385 1.96379153286243 0.354571638182307 0.624577367567992 0.433722302555837 -0.22944490120033 -0.754344439666609 -1.02464890753159 -1.27753900842739 -1.80868678232889 -0.359982329376701 -1.43835567200656 0.969759858009952 0.12954220144812 -1.55268128474507 -0.0440515684733083 -0.931232339964737 -0.544883426278688 -0.111813588901081 -1.09787115746723 0.89484500011667 -1.27155448806467 -0.0157674041472628 -1.21624642876154 0.353573458601516 -0.782265592376939 -0.609615409461313 -0.258668415367767 -0.616460822895411 -0.92653502512751 -0.780764109536986 0.191553634121758 -0.0510082251085164 2.76105669167688 0.0482380234788329 0.8214987229959 0.499088066247605 -0.244369656137693 -0.655064339202449 1.43978480740246 -0.668416615377219 -0.888350636856763 1.81393367883468 -0.788092779972333 -0.572063049220546 0.989098993805744 1.21271351300573 1.0166909288761 0.761525037423863 -0.20950452599121 0.744840618015206 -0.212890389030757 1.17626839673269 -0.440783111410784 0.631181848505454 -1.31309719909985 1.40657435316037 0.73573566837143 -1.07720484368963 1.75027198375605 0.22760619831576 -0.201920777510325 0.436022431258926 0.83829838010119 1.0867194234487 1.22212258629143 -0.505758681495279 -0.248104453137063 1.74599628278431 -0.611495817932562 -0.536402482706101 -0.0775823121031333 0.616660209292618 0.259193964531914 -0.150492620372588 0.00255575318030121 1.41800927078565 0.109259467296712 -0.280622670257144 -0.829088945724917 -1.02804394701865 0.188825057636457 1.31323462760063 -0.023192535355964 -2.51354569591212 0.325838065437465 -2.26794516554047 -0.547578666813103 0.651334693630651 0.308263207985796 -0.943932805854052 -0.0524156282212708 -1.3930720178878 -1.53987710076336 -0.257769353138844 0.298381633251354 2.00665415997601 -0.548309596783323 -1.50422727054264 0.367516711001086 0.0881416500933905 0.298894014662687 -0.419077304561594 1.55702335082758 2.39746378889802 -0.300435307391101 -0.214629206878983 -0.480782424715124 0.974503523858981 -1.02646823032377 0.947150214677388 -0.229397274362031 2.00090673445408 1.21800051224092 -1.61748653673871 -0.591994049073078 -1.10783036027777 -0.801873212899005 1.45210965870877 -0.905185213733534 0.539353922875431 -1.12783555890179 -1.03494628654167 -0.0557199966960452 -0.269101932538892 0.542891475800203 0.140209073502571 1.04074399203131 -1.42697765973363 -0.400925282903061 0.858903529405735 -0.964485151689267 -0.873756330043798 -1.33970907585325 0.827992486648804 0.773701569797347 -0.438577035252065 -2.37810339385169 -0.188166768753088 0.498698725695954 1.06745197750805 -0.437284151949883 0.897237497110742 -0.375047169497131 -0.170231630031104 0.712590740235277 -0.681704159007725 -0.0595613660568924 0.551017146285355 1.49069680154465 0.310479833337557 1.84299786953949 -0.0185475169635446 1.54654143466673 -0.0322875803649204 0.278198534735164 -0.0135796244700895 0.451875200907879 1.05352210868021 0.349948253092162 0.876633083932073 -0.913640656748645 0.31497015867773 -0.851242292431336 1.65023319697433 1.10905434033031 -0.897728517840644 1.41776795695551 1.05527586240603 -0.144163082733023 -0.892633714117472 -0.836489290842615 -2.02160622742287 -0.0994809026917662 -0.915380918678503 2.11231908213377 -0.221372471588647 -1.05527506339632 -2.84546766191457 -0.762536585520772 
-0.678466930996391 1.10744944326884 1.21358109894333 0.213482012536756 0.227633892959243 3.72570799388247 -0.355910823150788 -0.0653765797558984 -0.777275035964472 0.214676010720155 -0.488169698024125 0.31862361083181 0.190381163083404 -0.357600702771243 -0.275262381864838 0.033563470046191 -1.23915646716087 -0.527921033907502 0.13150040422359 -1.41473115541718 -1.05817882291253 -1.78430662505626 -1.52835841969085 -0.250447214133698 -1.7594590818049 0.2748542541389 -0.112272681150782 0.986485917625249 -0.678198373312215 -1.44964735260557 0.219934146348266 0.312799204460455 -1.55985927804254 -0.735015900228429 0.776663959306187 1.41564125055489 -0.228009216622164 0.216515138994738 -1.99432779824339 0.0631455778093257 1.07514388106174 0.564145504378294 -0.145694991063573 0.556760157857767 0.565305304431191 1.70518249155922 0.755887177720512 -0.350145086389877 -1.15702663486772 -0.185856287583357 1.25231244301692 0.734251648878126 1.05556244435849 -1.17857038975756 -0.321253849346682 -0.0316171020281091 -0.0424345975203949 1.23163777665519 0.133378530640263 -0.493511841380705 -0.57439665628127 -0.112284922858372 1.05845197694977 0.0760960627727598 0.537764620529084 0.54619490772004 -1.14960923288417 1.87643695927885 -1.03332223920437 -0.0381258226040725 0.0283809275390652 -0.191256342385117 -0.177978127206613 0.937228139065865 1.08080827962009 -1.10293922314483 -1.11050556022403 1.05894381865894 0.360481666539659 -0.212298273796883 0.557509883612059 0.988386066473009 -1.10623744985535 -0.827226383374807 -0.34635995503541 -0.392281994681802 0.800341318990391 1.05441328442501 0.840601455605603 2.19578847189777 0.81175416878807 -0.801009983814146 0.288168086711951 -2.69251276175195 -1.77495657352138 -1.22689420822452 1.63083731946584 -0.54203201937905 -0.1881118021478 -0.37118715083709 -0.646764223793788 0.572226120861199 0.770163751197449 0.384800621118502 -0.853065897527533 0.280767623617259 1.21030840767409 0.79845398198798 0.428356012884661 -2.19989744332936 -1.37828146998623 -0.311003664540495 0.524391487859872 0.957622845201231 0.467232175527281 -0.108083675044806 -0.316333377128122 1.39982878760448 0.298868526459192 1.26693400177161 1.87440549079869 -0.233915408428533 -0.146541187233782 0.24640593570847 1.15614647687694 -0.950793961094255 -1.55350794259995 0.164138036826187 0.489419093357504 -2.67526810561993 0.837376821663563 0.64120268224268 1.33776581044066 -2.27630660533057 -0.657760731949144 -1.4517797065131 -0.382252219452968 1.40415404166938 1.61898702608293 0.597400490583512 0.22812350204789 1.43940678042165 -1.11448241653916 -0.222999542625347 0.2518161491732 1.05555407955744 -0.251101129664903 -0.842740545245142 0.0134917195938276 0.30900415877087 0.617417586434604 -0.619410072016917 -1.21487455739701 0.251141965902034 0.398823503094941 0.417654264316209 1.23864838165442 -0.515475728047204 -0.994577795718861 1.1734643735583 0.957737108069567 1.63478782773667 1.40053473735996 0.370765139024218 -0.0233073313841045 -1.06391682667434 -0.259046379691331 -0.600121096946254 0.099401492746721 -0.287184070440079 1.06719594913691 1.856945700729 -0.706791438021691 0.390101560584066 0.996418095649991 -0.562261269197287 -0.663798632534042 -0.321296489407373 -0.113832220504424 -0.70307709960232 -0.457875920878039 0.612454207490729 -0.646711012485618 0.495234608912166 1.03442385298709 1.04735372832175 0.577568023106728 -1.11806569993603 -2.66846481362669 0.968866163042179 1.16940099930959 0.0314020296680747 -0.0617964653987708 -0.323667927217774 0.818534738995609 0.436903142590086 
-0.916592313105866 -2.49148768224485 -0.138570275995144 0.848255201327653 -1.05887866490541 0.895919549179237 -0.300642543733262 0.445080550217955 -0.624878277583245 0.109006394985579 -0.839721489948294 0.264427852135932 1.05867681787359 0.752160631368993 0.539129843030231 0.143303924350367 -0.23996232799122 0.744339998365395 -1.1798853310457 -0.690476866326698 1.01116711538486 -1.75750167595222 -1.15929601421498 0.320811322237837 1.54254042219761 -0.152485791695551 -0.282607413089729 -0.315438013352258 1.70935321202172 -0.53143448178165 1.61738118954915 0.647121546733493 0.0884859038068285 -0.722127404237522 0.810746489172537 0.0895552674950916 1.52643255811808 -0.262107093324125 -0.445612705510213 -0.135113965565497 2.35811742066079 0.670181936535875 0.799753262167641 0.0636707975302824 -0.635568761952864 0.0691815892701036 0.166797096594992 2.70803218899759 -0.916523472026041 -0.393881250026696 -0.170381752201939 0.905919654447802 -0.646581058192183 -0.918415625770652 -0.0138196233796875 0.396086465293564 -0.897036052322959 -0.139031597753885 -1.75905645406239 -0.304308721648286 1.60813547577583 -0.251708057641714 0.452763723268063 1.25714590330516 1.48999899048495 0.862870016125854 1.34212973753446 1.5944756073329 0.92134211375644 -1.63803566443081 0.118516845311103 0.0595775092170287 -1.34390908567762 -2.62007670094798 0.976118806620323 -0.782992720357029 2.05385234336118 1.47704478449237 1.04058541535032 -0.234320713916956 -1.10632892787023 -0.377600559448615 0.577325261340466 0.363804807980638 0.126378864612687 0.459293135173783 0.615620628435582 0.488061312737528 -0.857638115828602 -0.847046815843974 0.31323778581673 0.0930929677934584 -0.333688407425412 -0.320651667726904 0.365830703300809 -0.439158858690002 2.35076308446657 -1.62583193870149 1.08675862213586 -0.591741943933479 -0.485333695926931 0.773266921218415 1.01698572736938 0.715446606850786 -1.23113430314921 0.6647623781647 0.395924817026807 -0.294695305394683 0.39211487156134 -0.833512003037232 0.650888867085161 -1.23130517385993 0.766832670086124 -1.11199285685129 -0.0614135837114228 -0.65109635653275 0.408608406246287 -0.945484584372455 -1.63120513813685 1.0975708534747 -1.71113646817767 -0.573808247461686 0.216771931816867 1.19840782922859 -1.84798701096852 1.0343339202085 -0.540971379060965 0.0646836527501535 2.33202631952322 0.358993294442537 0.713791464196622 0.212751422726745 0.0845478708701764 -0.21035673791323 -1.32409320335023 -0.183886946157438 1.42081418497253 0.626702599279871 0.564257152303668 -0.799428436157796 -0.047417613415456 -0.333935243459284 0.585719826488757 0.90213385387221 -0.672012834999041 -0.569112406517544 -0.151892344490014 -0.0548681556199063 -1.79295088375973 -0.153489523121654 0.116754964142674 -0.98973886873235 -0.763880618595454 -0.959870105895456 0.85958053165919 -0.613862526271168 -0.375805531533176 -0.278231410905954 -2.45605330201117 -0.044180761288353 0.233943430115504 -2.0958475153307 -0.71295678784445 -0.131914345475054 -0.650302046885039 -0.724367207888854 -0.880538272711499 -0.126366155407032 -0.956957547201754 0.477884062402971 0.781469373518213 -0.339921685353784 1.15867823699865 0.562351297886891 0.234239030848249 0.140114214698589 -1.01813125148166 -1.74049027556157 -0.806954523167205 0.588980850520231 -1.54474361457199 0.352150117364837 -1.47583285095923 0.247151380125299 -2.181358922594 0.545152468607156 -1.44884427480637 -0.308766603017983 1.95386097160437 -1.20282247516694 1.66470771464963 -0.862885323380215 0.366551050303916 -0.847571214489951 0.599857014849393 
1.76515090121678 -0.340823388203728 -0.545653668818961 -1.32436781127335 -0.117344318216985 -0.392047737896592 -0.592720705764182 1.03260675393905 -0.869953450688638 -0.691302672537151 -0.766345419900533 1.23958685438272 1.01479081001885 -0.0790350010219198 -0.85022530002265 0.4274403164853 0.353686470936288 -0.904535938930322 0.599020414518593 -1.31473709804566 0.290767100052329 1.46588782887238 -0.350376312309502 -1.5225913774861 -0.350705261625006 1.09693511470408 -0.242123343367481 -2.64630515723983 1.58653915055686 -0.351458364922108 -0.510724949338464 -1.30445481280315 -2.2687921819794 0.0661657044584373 1.18528092077427 0.134312435174305 -0.822333799147641 -1.10829865920254 1.31989108682785 -1.63637901197572 0.806888463421178 0.424902086871384 0.526204718073728 -0.884014096039501 -0.389690287559194 1.01223683467527 1.19065360856227 0.0869934843670578 0.822965297873089 0.452823412218187 -1.65826091949271 0.812733694128746 0.0189524153843605 -0.677332544736332 -1.27999165561846 1.01524159774597 3.33977258046252 -0.844846783889941 1.06095503241302 -1.28896250885091 -0.268817899992818 -1.06806008605482 -0.163909908821851 2.5918599733052 -1.77962388756631 -1.69896009367667 0.795294525641808 0.995362497645917 -0.336639404482804 -0.0168508657405832 -1.27393744125512 0.438130928647383 0.254185010269913 0.643522652461736 0.521887342677413 -0.631127718574225 -0.797837722836477 0.347852526604957 0.415509011291576 0.335936596785089 0.984636636711437 1.17454909616295 -0.917774826258016 0.239900977163901 -0.574761760855212 -0.0163019882554003 2.15817127686026 -0.842464086621889 0.794364064980567 -1.60742367491771 1.1004727467917 0.342931316380556 0.336110668176863 -0.649402008205644 -1.13750392187199 -0.406171473934778 1.2927678250993 0.0203149389783619 -0.76605820288692 0.16544795475465 1.29235892214865 0.0622195868562569 1.56829052690693 -0.0534848373039879 0.635149452464192 -1.31997110194809 -0.262073475759352 0.271998450108577 0.605310299121362 -0.776191359103294 1.66520197703394 -0.0408855057719054 0.905669543514824 0.575953839205468 1.98388932938572 1.77621143026126 -1.21543495768279 0.584855014989515 -0.389373518266779 -0.0456411168675508 1.40174415347813 0.30235377180844 -0.915278091944254 0.242224772342785 0.938408236538771 -1.00366777900319 0.0657383206230829 -0.369170100962199 -0.427673523178155 0.431124045748566 -1.98187297591335 -1.16286068908869 -0.044113316532593 -1.10915668922056 0.497070609242008 -0.457150243094914 0.743034834322772 0.0292274194604778 1.83644090993861 0.270857575869037 0.208899865468551 -0.603132731727299 0.963599632425504 0.421557645183709 1.31998659019798 1.19919300093816 0.0637380410663102 1.60052492293775 -0.115076448264884 0.0787299045034961 -0.21464591470875 1.44690552064142 1.11396280579829 0.834800746769435 -0.325699624176752 0.788082414169906 -0.18801181669081 1.33544013610154 0.400864761302241 -0.979959221785929 0.36412566729587 -1.84917744115463 -0.986031807083123 -1.75410002934289 0.302955400326647 -1.84502357648647 -0.440994286097263 -2.53351732329683 0.280571995156241 0.48072960669726 -0.435664269766809 -1.83272330626758 -0.0758860451949637 0.476624452390959 1.38291888276693 0.18391312609843 -0.0279039862012901 -0.377713503210535 -1.00656198088415 -1.94637641773742 -1.42103424172301 0.0139301613423216 -0.637578006073619 -0.637904892748487 1.17393701612467 -1.00332473589761 -0.26332674015169 -0.903911293568404 -1.40440999384795 0.174229967131169 0.487033415161942 -1.10622597584233 0.52423649819299 1.73420728638808 1.536074265534 -1.11952371784612 
-1.79558256376488 -1.48492764252231 -0.924476408260033 -0.116038707298517 -1.35980023969727 0.713119761685912 -0.607368609560794 0.0362315743109336 -1.57485583416102 1.15952956704506 -0.346853453137942 -0.497204264629623 -0.977554245626668 0.0746569506795282 0.658936058294893 -0.888524582988652 -0.406187401592152 -0.127821006975361 1.99962830725363 -0.168473062065511 -1.39146743851423 0.732407386122841 -0.960542085955712 0.0365546539048593 -1.89820320943374 2.38741343366636 -0.0508955275075941 -0.645575671412208 -0.711800146987307 -0.750565078949405 -1.65728975492317 0.124251067730838 1.07109032324635 -1.86400774662481 -0.104400182752322 0.734321401319091 -0.808265366639377 1.7516227215989 -0.648561502598163 1.23343211952294 -1.56795329745735 -0.104321376360747 0.821235978353072 -0.811960450914776 -1.14249096298659 2.19832137264373 0.182451498333494 1.03935854841975 -0.275448563574965 -0.346037122953809 0.012379019940492 0.0141435851673168 0.986540362994867 -1.17366993342301 0.361998033244066 0.791927476911487 2.1458865262815 -0.366891376281286 0.877658095463355 0.812971433794263 0.0693829736530996 -2.20682526631836 1.40718459870589 1.50934327529721 1.07440328065923 -0.41672267057316 -1.02646167277642 -0.00542376712690544 0.104744529124507 -1.49295732894508 1.35752660093037 0.35377939726221 0.77746194866998 -0.510238446494086 0.227577482836081 0.847729889456086 -0.0291879730575543 -0.640262249827145 0.346449457959163 -0.128528260930574 -1.08369815082826 1.5405671278633 -0.834772995751855 -0.124801923485115 1.40770943302144 0.792975858742803 0.768407113940861 0.642108953779457 0.746374974131941 0.591341254482477 0.725348673639809 -0.142371932996417 -1.36254720070099 0.845778569333683 -0.779103357457833 -0.186117430354198 1.387586119383 -0.0848306727578831 -1.64312839633222 -1.53236663575551 -0.733753378362221 -0.605797056495418 -0.17514261291101 -1.18370142686434 -1.26983313531214 0.0404381697260685 -0.463963253669032 -0.3023280592593 -1.23649907391265 -0.958186257215226 1.47063065247391 -1.51565480381238 -2.0806164629942 0.289278777250731 -1.50052088223309 -0.0158547234135385 -1.29909269669236 -0.351946151350548 0.33986255962856 0.561447564119916 2.73233959726577 0.566280688968831 0.00695204632454278 1.39048999896923 -1.69466890803654 -1.05640423275133 1.58859536491891 0.678133327638688 -0.233720117185266 1.91285368698959 0.625459587587281 -0.78556496373795 1.18630821839947 0.433034699974095 -0.39669417085843 -0.756694552990582 0.721482044335685 -2.23925207782713 1.58877828264404 0.22760807349415 2.01896160896331 -0.410481817608774 -0.288941652257802 1.083834679912 2.33909331444277 -0.890598253616792 0.47606037467617 -2.23928074010383 1.56842037638754 0.115567654867018 -2.24787979789662 0.931094929148693 -0.428403727324546 -0.00142457563673805 -0.799541437020706 0.465774711994503 -0.0077549146093563 0.760850375300831 -0.408669776554166 0.74065198535253 2.29784523359844 0.252261410131494 0.36535466597062 1.64677548545091 -0.521715490952555 1.47505864911377 -0.365772090819084 0.603266790966106 -1.03910388329208 1.34832053944088 -0.575591191905638 1.18175594569822 1.43296611592857 1.82756781085882 -1.58778579192129 0.115450570527453 -0.110084293166392 0.21531025481967 -0.370548226475831 0.303844150758048 -0.425113458593088 0.709101595456969 -1.24574337064857 -0.956907743013413 0.520814808272083 -0.642685361471702 -1.4723622170734 0.311673872513947 0.0640285097591956 0.740249038621781 1.73649657460989 -0.223819557794688 -1.02866751619423 0.227861145231084 1.82529185217711 -0.254843773025442 
0.39399860386304 1.17656779664252 0.142131624377616 -2.26759673937856 -2.41943515799521 -0.377277505673766 1.8493104769975 0.0462622317030602 -0.528092009129674 0.924768471624149 -0.579895490206167 -0.0209487164996917 -1.14230720692919 1.99840783938313 2.70477956742878 0.0742635567416407 -0.759759383396725 -1.74177791036606 -0.827154003355355 -0.322501797357455 0.938820655656111 0.306662931904022 1.153457229066 -0.550967061092229 -0.223445001317164 -0.354371427280766 -0.663412943731389 0.0626867117569577 -0.14192621788393 0.641892943288513 1.7577533560888 -0.335230017448167 0.428388244426165 0.451630938943252 -0.322542798700457 1.23255931374317 0.764130147427982 0.726240672096353 -0.0511147189975696 0.397752800264925 1.06947332244637 -0.790809285268721 -1.87970657121717 -0.252360657228441 0.823218660019098 0.493471357079734 -0.297009983958721 -0.488318416885328 -0.514688673742626 0.831782329054817 -1.10944660268615 -1.16736756639805 0.170932382805944 0.0680221291876024 0.0873929541901871 -0.652210195785589 0.342869853803326 -0.0401992224349074 -1.80589472072929 0.966028321766042 0.421515169117981 0.196455137001865 -0.878367246879947 0.459733267459056 -0.270855154316321 -0.599682207745095 -1.29818854584127 -0.694937125150116 0.199276666907029 -3.10125695053553 0.631367195156806 -0.268355218648516 0.128441285165494 0.44701858014346 0.158243323359426 0.951599240237753 0.313795287737948 -0.159709460204599 0.346007196109696 0.057200218070985 0.894974037782802 -0.752876197043947 1.24375067204799 1.8033420872399 -0.0442520215940661 0.505950810855958 -0.556715159536502 -0.0928357202515057 -0.492157711838911 -1.15862337035503 0.194085210646986 1.83701579915357 -0.545041234148884 -0.536276112292199 0.102088100423996 -0.659315028940735 -1.39741859228724 -0.0384063166809338 1.06454021424499 2.07933083137696 1.35169676556788 2.69032438746934 -1.69697770393274 -0.793295424272861 0.310956683595215 -0.0806994973448989 0.443015801456147 0.937847800928927 -0.83062516728518 0.304137064788653 0.64633813053934 2.28813632522108 0.918435471117904 -1.5881338665367 1.64406134911241 -0.978120070003968 0.957307122053789 0.140544771204921 0.510936362206909 0.543278862371311 1.46999625698232 0.408030040299698 -1.36060530365351 -0.136634170849147 0.63390943457113 0.912004701547733 1.44954834272084 -0.371645455752208 0.641198061991719 0.739728857278025 -1.70828075147923 0.952363028209386 -0.0677103192764298 0.503904010284309 -0.476471936433904 0.294169681129565 0.642462574999082 0.447676290098409 -0.305340191439291 -1.14721304902666 0.245079731302191 -0.960736252916714 0.252481606698265 -0.545796387988206 0.417649043903705 -0.742086224655189 -1.18699145863984 -0.168097415912434 0.0210027700257506 -1.95132055102099 1.64802716598445 -1.74182708496284 0.446825659010744 -1.05572650146747 1.58551818489106 -0.816034348453329 -1.39717200717114 -0.337755283505634 -0.785024774879204 -0.670972775098788 -0.167541666497962 0.397223233931847 0.238055457103629 0.852295785049355 -0.385766621348593 -0.962130921422911 -0.147603006239885 -0.048680541696912 1.27298457643635 0.443273057781186 -0.26392846175678 0.0256482987121538 1.23277640837183 0.504914427508299 0.640531081356759 -0.457818467567269 0.228705149614137 0.938964624638592 0.0625221663281088 -0.530748896135785 1.40216208638732 0.405098650659298 1.5670828705099 0.18838718210665 -1.44350924036836 -1.57104696959113 0.70423023992917 0.0229774979858536 0.268902464210383 0.825339036620091 0.653272297116565 0.935339708124369 0.46193914804694 0.266447438458225 -0.512963030093814 
-0.273936534057759 0.126940672915836 -0.541517284910625 -0.046756425460948 -0.0791216296853565 -0.305276870755562 -0.00333545168301708 0.120893152356894 0.510441554376792 -0.550841459338872 -1.2426880873967 -0.18893137821696 0.840843360095833 -0.246207684452712 -0.346367481288422 1.24836001355433 -0.0317523930201072 1.2475774897755 -1.3254801433185 -0.0574689035291512 -2.14064306456447 -1.07433246500375 -1.24487136354702 -0.925623617129998 1.57502368172645 1.98470047733422 0.103220405325968 -0.143120551783309 2.27099684216388 0.382351866144891 0.759496798921734 0.412261350073189 -0.937410124413919 2.41274025970138 1.06060640669425 1.05360459626141 1.18837057946824 -2.0515179075335 -1.52227171816556 1.06783754146607 1.63524760492359 0.445768451513641 0.49631436460276 1.68573270948724 1.17455157002709 0.623499249695123 -1.48800303250405 -1.52141776943601 -0.78530103313039 0.585197499238604 -0.153705081286614 -0.280272984283495 0.953033586098227 0.800174779656424 -1.60267893398009 0.544461312058339 0.763537990477147 0.751827676809549 -1.03539676568938 -0.706726146143653 1.038126063615 1.61961922055422 1.25662685239872 1.20444824059122 -1.38098540240542 1.36283888660943 -0.30807122589316 0.180449952706772 -1.48420417166908 2.86949739989719 0.876205208793786 -0.519478856716486 1.52467620523079 1.36494349743271 1.36450543835082 0.66044641584211 2.55314019886931 0.543987746981863 -1.13220036531766 1.38224077099493 1.58333329057923 0.877510419671203 1.24542166348573 -0.775715339574777 -0.566623999425345 0.732395821012641 -1.40163220810973 -0.630840747404495 -0.0133123685346082 2.58756365188018 0.744810626624428 0.5420695985289 2.14351788257538 -0.0765337255054262 1.13381953273901 -0.510067729091486 -0.931322057218652 -0.533462369958073 -0.406730573096834 1.22944908404271 -0.349242560127932 0.703717381314067 -0.383625915725779 1.09155762987374 0.645083703463816 -1.5802149794699 0.478168061069559 2.52463950969118 -1.26973499250497 0.0193142872486421 -0.881026367361739 1.0589631107054 1.24573208618628 -1.65067049238753 -0.303816487256791 -0.281087494627772 1.0342552092021 0.551370563210397 0.448846358970038 0.946340547218797 -1.58331345013844 -0.408448677347431 0.577532680431745 -0.0307844854269505 0.00498788703295159 -0.617730398838622 0.46425528544518 -0.720713156631569 -0.0751278511971092 -0.485156368727383 0.202043599541537 1.24930400454064 -0.900164313686244 0.535059174646689 -0.814739691694312 1.00682790566543 -1.07284746817055 0.347058694232951 0.384669437967852 -0.285275616995363 0.101994233772458 -0.022079179603654 0.131598774241425 -0.268351476410459 -0.821281174895715 -0.741377549308939 0.431214475112529 0.71875950534405 -0.0541503153715438 -1.8293903114217 0.34460474106733 1.06561669580155 0.6187188525919 0.830317950914893 -0.482238992331641 0.234587107179606 0.783004822346455 0.181816606358156 -0.298594980924012 -0.588028289339414 -0.148589559579512 -0.704043026574719 -0.924778347326665 0.262575562756696 -1.68355765025163 -1.99949579651696 0.301439520732266 0.925540358533593 -0.78571628366395 2.31778978316829 1.36348112710939 1.38827401554688 1.22157596135865 -0.00345212570085346 -0.227779958285474 0.779180465517084 0.117279314745686 0.00330588632272207 0.163396891292865 0.931408616586687 -0.486050352678103 0.220769703225744 2.32164496438526 -0.326407956028692 -0.388942297199401 -0.147958816497002 0.169289203096158 -0.030049591056556 0.613210732812948 0.345047815111833 -0.409602985339928 -2.89424766199756 -1.98152237198856 -0.0576030273029008 0.412671808266738 -0.65758262162702 
2.40660612569963 -0.706716117044305 1.30943173407238 0.666521423370663 0.679400177444175 -0.0180483748409849 0.119190079570733 -0.775028136280451 -0.269780342203041 1.1316852960018 -0.161027308921025 0.601243536586382 0.0300881905260794 1.12234201622486 -0.0570898953325716 -1.80100535837238 1.24152313730006 -0.691880821990688 1.05341123969505 -0.927489410750052 -2.40431780454181 1.21039596531027 0.183125166648408 -0.727919936905215 0.836834004744308 -1.09155782777508 0.361955611940004 -0.879102420504752 -1.15984679582333 0.126773102499521 0.938553228356382 1.62871335996067 -2.07660585127943 -0.372781774770094 1.70418753311827 -0.587770159305188 0.348515896069922 0.942404057773533 -1.26341410512622 -1.37100291174533 -0.0683752228435631 -0.293682037437964 -0.350622582085776 -0.876276022398565 1.26532304846514 -1.29822806351872 1.07519719367742 0.237104041984133 -1.61016061888319 -0.125310669665502 -0.656144759984761 1.27375272627963 -1.48501986018352 0.262520805335074 -1.33233146448864 -0.991896395784071 0.532471906666442 -0.314473151896048 -0.634158467229814 0.365189609276397 -0.585944011177897 -1.35712546107284 -0.0519217897372231 -0.365724510630233 0.45774832296958 -2.2265905966027 0.512899640723194 -0.0580235164079682 -0.20315953556098 -0.98363035095542 -1.96442287476298 0.269403787869482 0.624637681432922 0.446090699300148 -0.26608740357705 -0.459389224651069 -0.140844864649861 2.30121507753445 0.786433238386284 -1.44701529260551 1.46346238515505 0.306150752823338 -0.659521872480454 0.468106031540793 -0.934357821685167 -0.745536260686482 0.755900215670307 -0.178367010130525 -0.31945087367349 -0.874564104291932 1.15541823491744 1.24913935830151 -0.281018726056789 -1.83373410898253 1.96304118291188 -0.0732750669575959 0.532440852761856 -0.480905244264867 -1.06856666869253 1.24765467219462 -0.942841395430049 -0.425724405110607 0.217886150853245 0.0656119116374006 0.834464513800544 0.499012834223533 -0.361461657215745 -0.765928089558978 -2.13104058019855 -1.62872043748254 0.00468087580035739 -0.979428073487237 -0.16725380294705 -0.844150116331737 0.996925548319491 -0.543006380099061 0.469234838077315 -0.177876629661726 -0.569352492206327 -0.0734002090325208 -0.258643962031892 -0.545666133078635 -0.624478612416289 2.27856016449487 -0.019730610051663 -1.24787808250968 -0.746446680633218 0.406127087548393 -1.30694088129602 2.35574028230945 0.616313322607696 1.3647572148765 -0.143412770732051 -0.290696606767702 -0.960031200786357 0.692008211265826 0.085439853170689 0.332893459709038 0.330042868240218 -0.380575732464575 -0.891790910212012 0.954825203678841 1.44892922533935 -0.807874724630357 0.489739674172906 -1.59304367655373 0.582460176189008 -1.52871154832877 0.753846391375348 1.13752489233621 -1.42966962032773 -2.1501007924042 -0.238928788268909 -0.0608321458990047 -0.137250901401207 0.37311292168629 0.908387356453835 -2.06461164546106 -0.466906159539075 -0.244265653142394 0.63360245125929 1.52156926589416 -1.0739291192138 0.526373852573256 1.11448602696025 -0.102974418940028 0.398219918589398 0.167820704442286 -2.58054753554466 -0.916730652202394 0.476956874183207 0.0730383714867118 1.43363071887477 0.403699228452342 0.446970690657619 0.0062459188465896 2.75331614884367 1.7570639549639 0.00563177705271635 -1.6438253592846 -1.15409712288368 -0.338712311923137 -1.3896630338153 0.966430419093687 -0.456906085678467 1.8026658333059 0.690709260950982 0.0208402641575453 -0.588476366746395 -0.303381888317284 -1.33759839666873 1.61617348325961 1.7494628130531 -0.0745179733428067 0.374498523904012 
0.923616438143681 -0.0149743668559885 -0.358292602690674 0.31670856376215 1.13639354428729 1.26376836603855 -0.2882822732543 -1.47077282909302 0.760235691722075 0.236106956261571 0.879676374535946 -0.706118702276281 1.32691137509088 -1.5280713883032 0.817409176740146 -0.0467172085282721 -0.616424711259109 0.245058949062053 0.396245879133998 0.318806382889947 -0.429692940049415 -1.70037495048113 -1.35186103839291 -0.0206116666457488 0.37084640028714 -0.694933830353734 0.0851541108279354 -1.08366508280266 0.486014080448462 0.182407908724286 1.35838639449429 0.51135940230292 -1.28712990821578 1.38103316417429 -0.815342996546266 -1.2906223989782 -0.283812469595739 -0.373978545005292 -1.12166091193229 -1.12436703156194 -0.398107153444031 -0.463058850271869 0.620676823617744 0.181495434885065 0.0814129854075348 0.240119441125271 -0.817487460187545 0.0251564960399588 -0.27306388802006 -0.238054947941938 -0.176358838432142 -0.576748679936056 0.0339111921178636 -2.31701594516828 -0.204813441289923 -0.447636439040768 2.10144260826626 0.328616660070468 -1.27192380620166 -1.83719166121077 -0.922497493054975 -1.04315184951716 0.349994257429768 0.818094778401667 -1.58470297931382 1.04112924990734 0.623306193930344 -0.162996744833951 -0.909498331011403 -0.776412920095198 -1.75437607199217 0.610153153108695 -0.741461164642525 0.626671343252872 -0.731142216693685 -0.808588668331823 -0.755132030393225 0.205632108367724 -0.219341188403051 -1.08199697028344 0.155315898841839 0.264362217659696 -0.803322015858881 0.703501284684712 1.07562415598012 -1.75146821708393 -0.992268652067801 -0.240724144650436 -0.390896248907289 0.477554419650606 -1.35259016399569 -1.51835535039518 -0.249030280329794 0.04180556207618 -0.197834002878493 -1.97248042054215 -0.342756969263474 -2.28037334383181 -0.652329343048787 -0.956446655007375 -0.213726980262736 1.55037405371707 -0.217245145669074 1.13623251044003 0.46826467521474 -0.189942942452151 1.59131413964578 -0.448992075274647 -0.264250115625666 -1.53755969451538 0.580686949269852 1.95013453302699 0.512920402347291 -0.833212033397566 -0.690527057349474 -0.534035521213236 -1.1079307746812 -0.998710396118314 -0.00674540302090843 -0.295037200295737 -0.0927151953693123 0.550013071616108 -0.726454210527513 1.56321160632883 0.304239083764947 1.1191784063493 0.0262641764638801 -0.443715477626465 0.0699425930272696 -1.20607026186335 0.757518440243555 -0.171456634636109 -1.62704659464166 0.987820078583999 0.076285485005447 1.49621937693124 0.514562309995182 -0.817758258129736 -0.302851259226799 -1.02635397817285 1.17831670189176 1.20698828416194 0.0664791242537821 -0.521255070727193 0.524769300892011 -0.607267207698719 -0.134962548444911 -1.09128739759169 -0.184423225553222 1.46618934894354 1.67875228755467 0.576883188470896 0.478203900622211 0.0945099040662724 2.10209580144665 -0.243458875201947 -0.679133505465286 0.309832721950543 -0.168617393628301 -1.06683495329286 1.07536856981676 1.41475314437169 0.664249054170279 -0.709302914887018 1.93344232393477 -0.584457118051036 -0.363363130726324 1.18180698813667 0.288924145389748 -0.741766267953143 1.40632220217266 -0.134937254216134 -2.33594803862124 0.434484326396299 1.36145347192687 -0.291747183630027 -0.538821426974549 1.2150397182525 -0.489849444474314 1.02150032686049 0.348765370957842 -0.793962883819218 -0.4569032788078 -0.569514149315258 -0.742637953078935 -0.959019459744796 -0.254340072935853 -0.504676212775012 -0.781835893286915 -1.17575114925448 -0.0904261176812172 0.556955866455621 0.713647932228567 0.386983225977846 
0.458256983036372 0.837466576878651 -0.837446038031784 -0.00685935240255778 2.20574940813094 0.308933227611617 0.999315414580939 0.883184433294739 0.297178304595655 1.3079725174479 -1.67838742385094 1.3016634371274 -0.0622789216533804 -0.183085143775078 -1.13832679349119 0.439835399610943 -0.394351717752959 0.170618756432833 -0.801397537113825 0.0412806952623778 1.84420989569294 -0.628759076399866 1.12954288941013 1.01986325685811 -1.45195501891412 0.383213484591027 -0.104602507283663 -1.54191211759182 -0.81984048657821 0.657966731092604 -1.17136217985037 0.308223176315427 -1.21366434712324 -1.87692800444249 1.21562990230127 -1.14192843609561 -1.24362584036566 1.08497507488253 1.48535281905023 0.192744398988085 -0.300545027293917 -1.44906929780851 0.0742554658402967 -0.519952636931848 -0.355231729963888 1.23744948777022 -0.273062048381045 0.6472152049512 0.362479566297004 1.41403461643113 1.21657068323939 -0.834260889203539 1.19070669612529 -2.20980473091372 0.245838237370624 -0.979995921683034 -0.171888206256829 -0.769750841612379 -0.620335896773502 -1.14648329005861 1.33743833157003 0.0362016105276848 -1.23181876536433 0.841571667063281 -1.43277785716785 -0.6560293739621 -1.92136446104643 -0.116282065245323 -1.74299651331825 -1.30300943797524 -0.693018594429434 0.877223717513343 -0.433465435932388 1.7529464045083 -1.41708351234939 -1.27807051496264 -0.805754428095855 -0.0586723598558676 0.25521385910055 -1.46805261443674 -1.21848567511167 -0.572380559517851 0.734237041049432 -0.852556246005496 -1.47585922882418 -0.130781807656349 0.477943409282637 -0.35275449892041 0.387681178262629 -0.502451998266435 -0.00965143275795231 1.00845548943011 -0.213542804321768 -0.365067422945488 -0.389390003228568 -1.88327234966275 0.624392054504788 -0.421125309222417 0.0858524588777799 -0.406650365359768 -0.519948461580103 0.55420193122665 0.637110263364608 0.872629010986777 0.25712175645795 -0.366376269000038 1.07934595011665 0.280354405446186 -0.896696860219925 -0.471064110301094 2.54736147420611 -0.403286224743119 -0.0141998859162554 0.0916283228278534 0.0725969004964064 0.163389188920608 0.511746254982021 -0.135776970167691 1.66863764947329 0.681202370091106 -0.221157692116826 -0.772980348842506 -0.235598369193327 -1.76869831141538 -0.587264918023621 0.765052664931076 -0.727209339770655 -3.17645700033154 -0.510049686788983 0.597442344218271 -0.521069308490455 -0.489022625987138 1.50338213353901 -0.153218350763556 -0.615122635658431 -0.400623348461763 0.776009867546688 2.59704369112878 -0.00164829749937317 -0.474550128864526 0.456500591938997 -0.742735553148909 1.89000932185616 -0.213322993705212 1.03462039411415 0.000411247435182127 -0.158793067547103 -0.347522830569282 1.06304507621149 1.69645160903512 0.449372658309824 1.04430916803957 -0.353294196575022 -1.18791607788316 -0.618176515827711 0.482459659737781 -1.12724255674774 0.654062491103121 1.15708718605621 0.179937556443435 0.015276095589546 -1.06452362667114 -0.324967245680331 -1.83826763637691 -0.982177549488613 -0.321036159810446 -0.777364680704797 -0.277105261926134 0.0781401542267075 -0.485073900827301 -0.332878350783258 -0.5648687649752 2.26340495327981 -1.73321578973036 -0.338727477958266 -1.08325005130837 -1.174649942517 0.62736643299588 -0.0898522664534268 0.335088654761392 0.66836690385121 0.0886604406786176 1.00451217567935 0.260554764982044 0.820274690042084 0.904559995813598 0.654942411822107 1.57315393815022 -0.484000059421158 -0.703361686569228 0.166970041159008 -0.0951328552650036 -0.096132925329841 0.797204059804174 
0.0907338253289304 -0.623858513806901 1.02871710710139 -0.22999590884774 -1.42319818791754 0.0781058472081535 0.473478012642669 -3.09356913737593 -0.0179526905453985 2.80171079410159 1.82299864142295 -1.25450483670511 -0.40368122934098 -1.61115412211856 0.265628434005276 -1.37519700796583 -0.380182014767565 -0.679437390523847 0.949968426874285 0.0161377370681317 -0.0550958214254895 0.253283070347544 1.17127723195554 -2.19436028007263 -0.796924705784605 -0.279156260469245 -2.89296420233914 -0.25194857017117 0.406958520773323 -1.00050594748693 1.40367288531729 1.71898123676213 -1.09190879136278 0.224896215329341 -1.1579445057552 -0.456622554906337 0.643953147098585 -1.23228206031077 0.756304887553891 -0.0315536311080471 1.3682712116716 1.27206962665077 -0.166543290776521 -0.818124993591173 -0.746121279141905 -0.0517032956785761 0.661279606073574 -0.541962348007725 0.0118263383769705 -1.32076573917213 0.0285071150879574 0.046956318889707 1.10285085621525 0.559963815772193 -0.0696013515176456 -0.4749751749152 -0.414935980743129 1.98923619747401 2.14301139759633 0.609126457871242 -1.3897535661613 -0.51288556129164 -0.465512584801753 0.472636752406447 -0.62989537806727 0.346427796603103 0.934485063832332 -0.516688016248911 -0.0638956885923711 0.614587230286545 -1.7527351603198 -1.62777541956637 0.0798281683444716 0.2728562230147 -1.16918665671082 0.501644082924831 0.737785620289383 -0.0171202334619958 -0.424843483083755 -1.7439383426987 -0.475946634116494 -0.267785338277848 1.12913750136575 0.836502157068443 0.414704256781102 1.1170990748547 -1.48660388237065 -0.0418996090420789 -0.00189998379473587 0.0754896712258804 -0.755014311868663 -0.816193796458593 -0.470077202788842 -0.542579268495076 -0.0933594714844819 0.912204296717336 -0.717367064241294 0.821792317696181 -1.04578785446331 0.591107027693987 1.62101575307786 0.376735481278301 1.35991081867008 1.94631958473185 -0.902617645220996 -0.058458454272187 -1.08131914362643 0.507496854800397 0.880414688418457 -0.630748200454852 0.455956013434621 0.162235636319736 1.22413519640689 -0.689986440953507 0.313981427032611 1.27165895128978 0.0800091170540955 -0.292396674892822 0.816207918636427 0.327248533550839 -0.846661073434342 0.358162294124154 -2.68968991893421 -0.174559156709045 0.260834949660987 2.05226794990514 0.205047878519229 -0.44265862372598 -0.0864566329664552 -0.155156313475728 -1.10900810735854 1.22062974108574 1.03741265941512 0.117378175357335 0.391441237202554 1.11565592186685 0.910180857495409 -1.2270483692553 -0.164310272494692 1.31509206012804 -0.759545285535414 0.564885303673419 -0.316679995694338 -0.968077011592093 0.76032152080009 0.757084479521234 0.126134986550928 0.236461382981172 -2.37219899604473 0.593841344020278 -1.32792883181793 -0.587786005787092 0.581908462003593 0.809402300773445 -1.77217153325678 -0.0458190822112228 0.202725084557452 1.37523666758464 -0.250033266909026 1.14601030798282 -1.33593310611924 -0.296780367533978 -1.32379948370559 0.114711416789645 0.831190173070843 -0.14131108589733 -0.991038820584534 -1.67014045400257 0.189295339241308 2.1833611369348 -1.46439420691658 1.83596314423206 0.0550978962576239 -0.143440547890388 0.349279652593867 0.252511637092463 0.0672822734393397 0.378326427470739 -0.165606360603319 1.01532292291402 -0.153607905246958 1.04285729919717 0.490004505085436 -0.133821307321233 0.754646802302151 -0.100177292204067 0.0549951886146529 -1.15263549944343 -1.26730664970922 -0.308523458290273 -1.31922687474102 0.244655235980266 1.77793438494378 -1.40759753621268 0.948892332145419 
0.838303753165283 0.0877705812350322 -1.10745756406019 -0.308083843033849 1.33607262685638 0.397862578607286 0.150750351420408 1.64006774330078 0.685654408525718 -0.777241236018573 -1.78462703278485 1.72791350103752 0.17516669253022 -1.03392933495245 -0.0519807444892308 0.344396685350286 -0.390782912828462 2.35695509679052 1.49312710159455 0.0993745939455621 1.01645536739615 0.226772994891646 0.968703112181833 0.493882507713719 0.147934940627284 0.0747693960066796 0.640833676854808 0.574320668337973 -0.9044713410034 1.02524378941863 0.242964948803609 -0.863085816602291 0.285303584930381 1.06299687991689 0.234668108491533 1.10430877692494 -0.726477734312579 0.495477100742662 0.665255328272408 0.0316015214547317 -0.949790649258184 0.959107320954673 0.989107181876173 -0.799735654312551 2.14827651525559 -0.056271885889141 -0.901304701610219 1.17136312447753 0.88453524634294 -1.43670008689209 -0.087387236846149 -0.923346199768324 -0.586236739369063 1.03723869498337 -1.44702222819221 -0.307895657924798 0.21570196337838 0.782448107598302 -0.368739749511176 -0.1189343223301 1.23215312394296 -2.57658107421813 0.593223932160008 -0.493065716933823 0.0696024830371172 0.462859768479227 -0.468218950409173 1.05034607709295 -1.42133191224395 1.20142849688727 1.20907145032153 -0.74391674272425 0.0612442729342219 -1.06566805166942 -2.04450343263037 0.541774046066459 1.44870979811829 -0.745871010910894 0.0139348016269365 0.0629535900908473 2.2521172167487 0.767280929082252 0.634826508525279 -0.933491928720239 -0.668541449297477 -0.374256704484587 -0.613155408028751 0.0945081124513295 -0.375328350122288 -0.726958228881488 1.0582563230664 -0.644499165520856 1.5778310047231 1.79863497129579 0.33009443577751 -0.147365823714984 -0.15705521255695 0.372856658598075 0.39623224900411 -1.56204704600925 -1.08426729985073 0.548330647655867 -1.83177955177162 1.35096860970107 0.45612703987095 -0.247299329209231 0.316490585487424 0.148468865567077 1.65441942132891 -0.801611426779598 -0.126421301756803 1.55518552393018 -1.78182715724084 1.16012500026507 -0.872187553243227 -0.190947049994666 0.655690090615091 0.259567984393506 -0.238869075699194 -0.80837792301056 -0.832591959388109 -1.15353363924166 -0.675789389277608 1.36187559449486 -1.47772678858349 -0.423549295024035 -0.518389520059524 -0.449071701436497 1.8996959376973 0.222093236472384 1.09354781433273 -0.130403271142177 1.06605943669933 1.01542362147921 0.277248189856295 0.682452775784699 -0.804879541912074 0.0509537362057066 -0.73684477409537 0.55158602635075 1.32681310885527 0.610392025543088 -0.355850627251331 0.143579439819512 -1.20969837145199 -0.687289836978771 0.306734119040344 0.787591040388683 -1.0189609003661 -1.58890187189095 1.64005369855501 -1.27708704554197 -0.754504081596812 1.1431462160495 0.244499437135192 -0.627468751755224 0.0754920818041958 -1.56885791705363 0.281029558753267 -0.774035244433598 -1.91867005208727 -1.47237357435238 -0.149922380101094 -0.617289655465347 -1.65996283853448 0.0507256884141483 0.0963150820837774 -2.21034971296924 1.49156568905604 -0.662316775991689 -0.541019224972056 -0.0452975764801612 -1.4100247701984 0.323052255617158 -0.606747660290717 0.491605440165569 -1.39918979947128 0.68951070529323 -0.00268105433526175 -0.404150694942715 0.365132196201884 -0.366607925902301 -0.84208758994394 0.943256410553811 1.12958060084093 -1.31483460117736 1.64230461088997 0.151953257918201 0.310528576200651 0.671371753818107 0.327541493761298 -1.5673138789478 -0.922116464124059 0.792830551583268 -0.157189138951054 -0.294441029671721 
0.0532741647644801 -0.29154826157353 -0.816632853766581 0.136384286958702 -0.558621304862179 -0.000678740150537528 -0.577632512864168 -0.0392160764794415 2.54774034660347 -0.784121890980358 -0.492424655785463 0.800693638771496 0.397813953709886 -0.714094131017873 0.264367567501644 -1.28977315012941 -0.059515719618421 -0.439986229880919 -0.189973818950502 -0.645769577756801 1.56259953776139 -0.16648253857291 -0.961521471384181 -1.51333195216661 0.524543560946524 -0.75153137182 1.40601192487477 -0.393578964283229 -1.35078687271062 -0.91642847413671 0.98415437860638 0.658638078246079 0.839564816017282 0.164139176096527 1.76071777869174 0.706477755874173 0.685146530634435 0.942089409012124 -0.151692006974159 0.897602606536707 -0.364335147137656 -0.103024787732606 0.0274381587037579 0.278246128912634 -0.0338802613375849 -0.0702122804703759 0.975995568281042 -0.105149913990021 0.0832245706755843 -0.528533374198719 0.387645336754116 -1.09170292706566 0.22021060894348 -1.12194935270403 0.387053337504581 -0.23802331490075 -0.91569222142889 0.0580435231255844 -2.00830593588596 0.605897378821055 -0.805409513895934 0.304000833497863 -0.84323036254023 -0.468552419029988 2.00610851049373 0.959623226306708 0.149114214539596 -0.00535437149017191 0.165211246231277 -1.1631117222758 -0.671252827081464 1.92819239682167 0.660971301215925 0.406742281021519 -0.69876429394243 1.01960477725356 0.92176203288524 0.312955587316647 -0.0104738470395352 -0.46080324596071 -0.72787764269623 -2.20904742236922 -0.760927266393082 -0.0196720160849279 -0.0375635001110749 0.382402001787278 -1.90810488241759 0.590969050623838 1.20294179400519 -0.415967578721579 -1.11823242394949 -1.26157893046183 0.418149755707205 -0.18395336105232 0.878001639916253 0.298795689336807 -1.82205975573725 0.992581944632367 -0.465244814891669 -0.111682763599006 1.52122286935127 1.61837444019494 -0.103667910580687 0.151490259744032 0.807590728350102 -1.03714943679399 -0.463141305506528 2.08181010536946 -1.15093988546949 0.153124178059657 -0.00652930605065746 1.67916713121056 0.67767708436549 -1.29759093381574 0.906265046315049 0.468006619339936 -0.153815286631996 0.309993966192892 -0.320484813334352 0.885865193398463 -0.426141325050201 -0.965224903386282 3.35376929481414 -0.511761642012023 -0.231766341270233 0.933362973786951 -1.38850059823329 -1.23475796121737 -1.15560757924297 -2.48035207760414 0.498559298952822 -0.024997613855187 -0.205026827579516 -0.520582036628044 -1.62635789978555 0.766159390775843 0.472308129472283 -0.346964942070142 1.07929093090901 -1.62797584037127 1.3251294976513 -1.1192917264329 -0.559892391345929 -0.63950806636972 -0.960323916826883 0.440305689238546 -0.464667101949065 -1.81427443701328 -0.166730592073185 -0.412708837173863 -0.126719606254497 0.930126448048372 -0.56887489122569 1.41538646623701 0.00998741553318565 -0.476603181367183 -0.431177097789594 -0.661995966088463 -0.213108441014426 0.279823486863887 0.70475339973119 0.692329304433448 -0.545548577071927 -0.0888014057050226 0.178164181164206 1.11096989258459 -0.137179534974666 -1.33083306418887 -0.334846068251093 -1.67366875779684 -0.208389882125109 -0.0711314789918982 -0.443819409033895 -0.460792268177427 0.561566448681975 0.134646509928967 0.217718735393024 0.716460398786457 1.38978085277378 1.9165256502599 -1.28622753851279 -0.843754324799457 -1.04861416817595 -0.1198131855374 -1.41201432428653 -0.385635039167918 -1.49474176062738 0.597188627996277 -0.735639868592418 -0.671053783557501 1.6205138785955 0.175243009800653 -0.345563674543766 0.680924112745256 
0.146295148356579 -0.0566375856282105 0.277773109724024 1.69344297059082 -1.05286438572098 0.821214328939151 -3.80717136987213 2.40526479601645 0.63227342408548 -0.79010307390785 -1.03536325524813 -0.292389293496244 1.42863728050031 0.0810233417228925 -0.157225974088937 -0.0701078410231383 -0.908411831931676 -0.174027308907263 -0.49097021983198 -1.44101478654169 0.2435902442019 -1.65385273423481 -0.675409128489683 0.251697201711848 -0.871335122221755 0.581571378416011 -2.15363564707761 0.880357959199536 -1.28583145212935 2.17149953318589 -0.69537612075658 -0.742564798004806 -0.603354952796947 -0.619726048861899 0.658303095314671 0.339319528224 -0.573278947791587 1.57418935206702 -1.10706659052367 -0.555718027259268 1.53517325745775 -0.499032038412712 0.211476893564755 0.187966018318783 1.43367113349651 -0.483159191226041 -1.16747989010691 1.23354801771173 1.77809771390534 -0.396272529133345 0.221500728563377 -0.905047406716152 0.295495268324875 -1.7162650884129 1.86510744423782 0.106765515408785 -1.32789759323524 -0.331675066487279 0.982978147035871 0.63414454445262 -0.414901167012603 -0.126808308000379 0.677316340729963 1.55838843195409 0.577061868299599 -0.793579384046144 -1.58162002091744 -0.83645086118478 -1.38203538660944 -0.538127230163731 0.429847892362152 0.627401445142636 0.158008413229265 -0.369436122701123 0.151411515643042 -1.32963306424165 -0.293351856691587 0.230206279490514 1.48275391768773 0.783842330498465 0.559452406275214 0.86880227119077 0.108377023885021 -1.11572650303611 -0.348216283892209 0.528514405475326 0.998415349897636 -0.931707327209549 -0.701748505660792 -1.08456398974032 -0.50127427681244 -0.445938650593215 1.10564518314175 -0.580317535014042 0.220499597359938 0.710984467886261 2.58419487663301 1.31713356408096 -1.45778668969025 0.458618245797753 1.59553450094854 0.983158493613662 0.0940786047598318 -0.0400406846492142 -2.02943744359412 -0.155742947733447 -2.76179321632097 -0.441895579315103 0.817903910017389 0.325020511648044 1.26982882462548 -0.835873261872402 -0.932050033061166 -1.23751708934435 -0.0891540700668714 1.58893234547722 0.99898576184545 -1.13832118668788 0.46887029060497 -0.478964307186762 -0.416205935362922 -2.08781821635721 -0.459804941102746 -1.52784820373275 1.17095969617543 0.911990368887255 0.639286483482595 0.21100588119082 -1.42237865582962 -0.201392511753354 -0.560031460283575 -0.539151353835836 -0.714981803974707 -0.377366981553133 0.00557204866333441 -0.203298636668882 -0.650401448419309 -0.172386302509915 2.10090653679029 -1.35235991322606 0.446520462089729 1.05047265451102 -0.902409300993289 -0.899950563547411 0.806606399173879 0.768959489220868 -0.1774261024422 -0.593432421726726 0.997069456185981 1.06595682512452 0.420162656142794 -0.59049946013966 0.0538746843502033 0.666965762211361 -0.96475983359027 0.0693870362975982 1.31220666682191 -0.430707421415416 -0.373892639101818 0.583505193877041 -1.19467754075255 -1.70712799164443 -0.851961178702237 1.96596149907997 -0.23741539710782 -0.241136967645864 0.100964465201404 0.0912683025789452 0.264535800360082 0.894187185906865 0.931820272475447 0.565739970486445 0.00771772089516337 -0.219294773887464 0.974660155088501 0.453395290834662 0.617961072016794 -1.03553858742815 -0.83967539355254 -1.52737541549865 -0.54527469152315 1.64977807938339 0.673214142618234 -1.29783650953233 -0.361391138666781 -0.139697231204582 1.30009624209308 -0.477810780631682 -0.428298956479252 -0.394343880436841 1.52762479734883 -1.68204445812314 -0.4976836489281 -0.307663418194365 -1.77579956588543 
0.307024907131843 -1.56924317720253 0.054574766750331 0.220799164618174 -0.710340428026269 2.13056046240553 -0.192689778287684 1.30625591797738 -0.275735578994787 -1.05369291944345 -1.38958078247105 2.05642225536642 0.0841376653705566 -0.109488324015721 -0.123702405032395 0.27709748348941 0.787848096143742 -0.294389658059684 0.451509535578905 -0.0315646947816951 -1.36238332080103 0.311610298990651 -1.641351288593 -0.287881603277746 -0.392720685091473 -0.990811322833287 0.367887945833459 -0.480939171957598 1.6675580332832 -0.684150555662673 1.78207000313079 0.0417187084537476 0.356969313299534 -0.335517128067608 -0.594863296371927 0.438082379855255 0.9184347884115 0.593289368860632 -1.84263084975575 -0.467576127380438 0.610783559563189 -2.43192579459161 0.353511434981834 -0.0171040526453618 -1.04391771867861 -1.40906457163107 -0.160892484077163 0.468935452904849 0.619546299932355 0.354797747090567 0.0723283015763129 0.563685631639404 0.102276115308803 -0.353240139207138 -0.225503117589108 0.819263723474303 0.479842590931767 -1.33653387389603 0.0993844188797245 -3.22125007579418 0.730842798268083 -1.15568188446482 0.160198596088698 1.86296315969412 2.36601876559727 -0.803423247656525 -0.880482547506013 -0.0454550504802428 -0.165457653308913 -0.542975820855545 0.417317288774928 0.282215654919179 -2.45096023975774 -0.556599368456292 -0.219263244818826 -1.49684939258558 -0.423238670899956 -0.203187409682863 1.60261188090775 0.441534268127117 -1.22803920253061 0.796695126865515 0.939046530609587 -0.632079364970831 0.163162034230193 -1.54557602247249 0.251597857812124 0.548595608443996 0.886526024071551 0.987698837914062 0.128886478396579 0.0104569530733828 -0.76231218753093 -0.760871914071626 -0.514305340623818 -0.62467888690583 -0.434114889796008 2.57357518333502 -1.34086864319194 -0.661897587570779 -0.881293566982903 1.01339779930282 0.568340439732852 -0.453627028057718 -0.46403734791431 2.19359377864681 0.873509497422154 -0.0898295511590145 -0.777350691249051 0.379313950960788 0.445367465063452 -1.0195818490292 0.839719954708018 1.25044964045807 1.02158065444021 2.35552798548523 1.42795528849149 -0.102349549953059 -0.551156586384538 -0.611407403228779 0.613037319857457 0.776139938249011 0.38450588870788 0.603716202295201 0.110131501472425 -1.15096274447671 0.357172052939438 -1.21094621908926 0.54746249907491 -0.251135512435971 -0.797168282704542 1.51634450442259 0.445448752817927 0.698613215363426 -0.685866045499878 -1.07814719441013 1.69290059489757 0.119990309225975 -0.796948024670431 -0.0599577416415011 -0.191974431993243 0.604053008129762 -1.0685813177246 -1.69955431838855 -0.0398676329210513 -0.459003945642881 0.410198658007765 0.810148975654125 -0.569422213681379 -0.238921281016053 -1.49865973551581 -0.556451249604511 0.275114730165586 0.689019251882464 0.324482021581073 -0.229940481805584 1.89371933099539 -0.73434792438011 -0.957180014838825 1.23652636035126 0.404399656193889 1.23892402389446 -0.660605500339832 -2.19915458284179 -0.0319162476301874 -0.0779103316984577 -0.388969359939858 1.31331994253051 0.759137603657288 0.100530491375411 2.53520942237799 0.159746650376156 -1.11774177412206 0.744387525062114 0.0884720974977449 2.95261157556723 0.919330897444647 0.389735921890882 -1.17311308923086 1.46438531294772 -1.30872047654743 0.757026318163573 -1.00255060307017 1.02614036610044 -1.15221016967391 -0.589833377619539 1.02829968856206 0.0385392155627795 0.665250527229713 -1.60601747562115 1.52625930892234 0.67942603754109 -1.51450804371543 0.856167384434305 0.66931342781265 
-0.548823961833802 -0.130929755446275 -1.51799387142546 0.297083289707748 0.463525320898164 0.904877066294875 -1.32589359651622 0.815327467766921 0.418159825157793 0.977132769047159 0.62516868056182 -0.648232768844244 1.52086978859939 -0.0667572537158988 0.509567944154344 -0.186071554493636 0.24055850858572 0.757109658405354 -0.296148596196936 -0.597242232649396 -0.304518063321579 -0.745833907286797 1.91800688307543 1.07035205945618 1.36597096157732 -2.31319479490121 0.81350856516798 2.02381334681169 0.94596307471946 -2.10167289644025 1.15445081132774 0.0667494617951416 0.366920486086855 -0.641893603561833 -1.00441663024877 0.75574155129527 -0.0902967767912835 -0.628796193165203 -0.330256198122344 0.726278659568377 2.87612186539838 1.84863103865902 1.19779013798686 0.00697345942870278 -0.567023095856094 -2.48805641991171 -0.258798023225428 0.497680299250431 -0.803638281529846 2.01310514761643 0.0999552393134198 2.16838400629883 -1.30712042051704 -1.62725084499459 0.432221454203056 -1.61711691774201 -0.301344231410705 0.814202728277559 -1.8655798876495 0.805850576210876 1.90695474288024 -0.663192777773675 0.465810541887488 -1.34746724432334 0.926239935664939 0.930529072685793 0.596471199411077 1.72029066090655 0.287848767260843 1.83313805774416 0.942715868649732 1.80860949190247 -0.758500120055002 1.12729836550902 -0.783901651963715 -0.697789360280611 0.627297984509243 0.269292898112021 -0.874409500744018 -0.0574643714271719 0.152200418322451 -0.40230491230729 -1.38674644269582 0.31591296255237 -0.908428727920732 -0.140138132597396 -0.265584803814452 -0.252685563406929 -0.0441241861857218 0.82181536480196 0.816639084293682 -1.59662944985562 0.829894104044017 0.304103017919402 1.35996468373852 -0.0585271887073698 -0.487030005492457 1.18639927116239 -1.31329756155819 -0.442674825284291 0.942311108353412 -0.127469859560278 0.851636388922201 1.21694047811184 0.512795193431194 -1.42435168289286 -0.980676210827535 0.418875952322512 0.861197343054383 -0.633056804147734 0.721997983241472 -1.37254655461516 0.218812341691827 0.566244634745356 1.7934212178895 -2.91980464298874 0.267297294470207 0.337136421095293 1.31348758101654 0.21222303838713 0.319341156428282 2.07090300085463 -1.1443012776089 -1.46730633827057 -1.52981675127515 0.331569097252627 1.22052446164231 0.324000012271362 1.20824469918603 -1.4321183161313 -0.230794077433264 -0.636317150828064 -0.367800250555228 0.15647011347782 -0.988798585208867 -0.0612083031994222 -1.58443317990212 0.808394930151127 0.0615192420760726 -0.991527428092411 -1.19911282087677 1.43314357089497 -0.0188791326733425 -0.373937985458352 -0.813128319423797 0.891641115814844 -1.73378156083008 0.320080696774501 1.16009606673238 -1.1156071995832 -0.706881158258539 0.790863892947412 0.619002753228913 1.31606981032515 0.104329827574407 0.291776838564224 -0.52286182206577 2.01166977157096 -0.710314285687144 0.493285784739941 0.994028499090364 -0.535243311092704 -0.28788066311289 -0.593691872976684 -0.720123398853608 0.743376250977319 0.45403181925814 -0.320689965699126 1.44999029783852 0.187190201325992 -0.186096023090851 0.00221125066451941 1.04621321225649 0.686994659904993 0.475412474736815 1.27550308483122 1.19222331219213 -1.72198614614246 -0.195417798946078 -0.120685554479646 -0.110371708143986 -1.22238538651812 -0.337046569249377 0.239055598049108 -1.84482814835059 -0.780277932341326 0.329473357950688 0.573098097515453 -0.792864287467333 -0.312803514243139 0.248049089399508 0.268929909425207 0.587355783070388 0.331779452948115 -0.660304177923035 0.276532888903178 
0.787738765753627 -0.419194626338726 0.605434104559326 0.932149004990951 -0.475167931359133 -0.120942550730474 1.67568674854283 -2.30881276356908 0.44714253337224 1.88399233444242 -1.59615403898248 0.431001177684745 0.583387392567287 -0.650065799171026 0.280062049115057 0.442740271844109 -0.0853080068731771 1.25371204519828 -2.00943842699934 -1.9877670187 -1.32191455935499 1.88997627179603 -0.488339133100486 1.46371641681015 -0.253394350030758 -0.0503232029448062 0.952598763920315 0.157047947644546 0.877764384322425 0.424113131273819 0.399426835209569 -0.471910536583268 -0.137607217475503 0.328897111067761 -1.22251414204602 0.470911797904985 -0.440929675698841 1.02832257780363 -0.547440660043909 0.111889478785269 -0.817707131848115 0.279298087834832 -0.276920878545603 0.673666096523936 0.277446656060735 0.493649807753224 -0.724535280212239 0.83305728023917 -0.475099229793007 0.618625174849123 1.39712046268731 -0.238993215843589 0.0876506344667866 0.465781954221407 -0.905997984083573 -0.325491931287048 0.13765118003968 0.746097945024198 -1.02773049884456 -1.54059385919847 -0.0561190418385881 -0.883429743550168 0.692007932402329 -0.261045894960799 1.60030907255231 1.70951386876796 0.877759416569685 -2.09976901182813 -1.22800835116809 0.0438696334145832 -1.06187421035397 0.540268973940955 1.75416581600592 0.599222168958411 0.580611481203396 -0.467582355137302 0.335359127029864 0.290172915621234 0.667955930561478 0.848024187840117 -0.114247387848151 1.8112351869666 -0.279231084809881 0.648502495066022 -0.201241773356313 0.934598505598491 -1.78145452661388 -0.911266595556072 0.408627784422578 0.293388212379063 0.278921840972653 -1.03187471581032 0.219945958450892 1.58009260149347 0.679001659262399 0.343110499333397 0.220896455960423 -0.03117695885953 0.120361478471212 -0.861359310140902 0.215460720522798 -0.462297031968465 0.0852623701348537 -0.132768361154905 -0.724938852284934 2.14525694364786 0.743161888629515 -0.281141472033044 -0.0445873850937766 -0.143059741096214 -1.57100400429085 0.169456557720676 -1.46524679589538 0.489967583300796 -1.1209860372625 -0.948350297629273 -0.311151984180468 0.905401287258505 -2.32279977061694 1.04394799131515 -1.56366285204715 1.34366436682731 0.389044366340534 -1.2454831551894 -1.64314457539062 0.0998950648289663 -0.622956585641124 -0.293789354103975 2.52221527761657 -0.0287826008118203 -1.45247621938596 0.345765379200307 -1.12893577358962 0.76953009017969 -1.5221211773663 0.487385878603691 1.06846984739174 1.00286939646469 1.5771540779263 0.689275108549976 -1.18387076627962 -2.27084286662134 1.03842835211601 0.687085530430428 -0.232539556645269 -0.823037112401972 -0.654410491711962 0.10269549502887 1.79412358295226 1.31922676223409 1.62243155924308 -1.25212103628544 1.63788615234447 -0.687768586258385 0.674551084351415 0.464290496275077 -1.2048251828196 1.57666152531429 0.00366123577589499 0.0617634576224198 0.772339323330499 -0.193619374173498 -1.54563550679731 1.1765638814555 -0.0598161115368231 0.998631702552121 1.1016257185462 0.999699369988815 -2.5443503691874 1.41250116669879 -0.290521201757227 0.576828839618585 -0.22497501256388 -0.665007840980092 -0.259300365850977 -0.576205122455107 -0.267361723380401 0.812294205508201 -0.853167319261427 1.80764153527814 -0.921119741274986 0.792592117861253 -0.18291781953558 -0.304350563811817 0.06110428172938 -0.626720730372666 -0.122217827847534 0.0648118739410567 0.641290449241323 0.204884134381807 -1.78830670602534 0.667371410278447 1.18941509560684 -1.29045710502704 -0.0732360678438216 -1.80478313930558 
-1.33440991397853 0.716101534895983 -0.307440299536414 -0.177361699704606 1.10833995091875 -1.10918266299567 -1.4193852403295 -0.947658818111706 -1.26912126648926 1.24151101273275 1.49792600298573 0.392491957644302 -1.24759019516976 0.0348914186670005 -1.00588768944096 -2.16993448519467 0.813848414383295 0.123914595084336 -1.06107629794533 1.57297096759018 -1.28643023380021 -0.103089390960454 -1.10107061939148 -1.39728120976288 -0.854962796505035 1.85284134082336 1.37379917542666 -0.420612385354936 1.21403191236939 0.823537154851747 0.425940947878983 -0.599370039241174 -0.114798553432011 0.714631771625958 -0.881123010307155 0.257408228450363 -0.504868479430833 -0.495044212156146 -1.388888508638 0.679437574936986 0.799394384283455 -1.09836262466391 1.9289096490514 1.78120058238284 -0.5585005093845 -0.324653731223248 0.274055066186832 -1.13606274114866 -0.69184722707958 -0.0197736022745304 0.877358229133921 1.03821059863705 -1.46368520757162 -0.271529948477025 -1.01284695035935 -0.778119792841224 0.217976001934842 0.406046442275292 0.0659014508141872 0.123590983774921 0.449911781420519 0.561871941933016 -0.752626028631902 -0.647614009739801 0.892056400899018 0.1468994970595 -2.21182162524073 0.903347465002943 0.0491771737995306 0.101694072889448 -0.988712092671906 -1.00890431988457 -0.303915349582504 -0.819325810149763 0.123941405153891 -0.505516492218867 -1.04430218095809 -2.04559525799265 -1.06838051032802 -0.38277061409597 0.498037973687704 0.80282682835911 -1.09914384380645 -1.5795419346262 -0.307521350319339 0.174130358545938 -0.971158141984597 0.90690373476157 -0.544587330573254 0.912832575777462 1.815163140521 -0.415544540781212 0.321231163945146 0.0297002671527855 -0.128242484832903 -1.65619821636114 0.873345830244102 -0.939695040200998 -1.13235998646915 -0.0117654406931712 0.924180363893362 -0.272203052207508 -0.412546622762879 0.400288667767904 0.611979530877938 -0.352666536766804 0.299128951000862 -1.93727393995669 0.536559788662748 -0.697239647746861 -0.525919689530882 -0.372257716407438 -0.547585727116811 0.0115111643489543 -0.294761238773395 -0.114831503012453 0.104182093944366 1.52763331376401 -0.297407797606155 -1.67566956899419 0.376484592480021 0.298412550561828 -0.653529381957721 -1.45802983351844 -0.172294844759841 0.575257894505814 -0.301678258194175 0.510111626670444 0.20658209686549 -0.901042360405403 -0.0615420762752473 0.410496312993654 0.454800671044218 0.437540848707069 0.414853792496589 0.00025196800258322 -1.31373988674289 0.584257050276256 0.65589568246315 1.00081412283276 0.650850939015059 -0.0110752163300213 -0.190316611563763 0.238245825476929 -1.03950113723386 1.72444875425024 -0.89554639642722 1.42922328562641 0.513575270720776 -0.738393473898065 -1.14343921055123 -0.416870295629586 -1.10704632172499 -0.203949864747612 -1.37385791352481 1.17952832162751 -0.416360602107866 -1.71307099485224 -1.96540435463421 -2.17415438272017 -0.141857617375202 -1.05020141455142 0.44439894113128 0.372892033041996 0.0682032869707509 1.83435839346538 2.10521592798401 0.434996903192003 1.07428990935723 0.616975168585183 -0.319563363940399 1.36115526698667 -1.09604498989436 -1.94963025057965 0.262940050774648 -1.17877525135272 0.799494368164117 0.188262258599123 0.0564270317637231 -2.68664613375061 1.02595646274866 -0.608596453083701 0.771795468237427 1.17115829131332 -1.63589790817962 1.01561253983528 -0.608200153812537 -0.187423263683056 -0.0152338612554408 -2.18624347579893 -1.58061862935317 0.391396321040979 -0.62916465706315 0.30350386654338 0.0400314197432837 
1.33874751141998 -0.243152653728466 0.216605615768976 0.688762818474425 -0.567205764290555 -1.28885255148714 0.664205617668148 1.73554456095156 -1.01757965155297 0.936272756357221 0.378448428295102 -1.19240898337114 0.971087346221944 -0.0104786370305994 -1.02380297298538 1.07838105850309 0.604838426689661 -0.524973812927531 0.342774432703184 -0.431411519596276 0.0825955048656101 -0.272867589575475 1.08307174136961 0.908716359955959 1.07431669999911 -0.185329756419565 1.54680384918444 1.24661276785963 -0.650395305970205 1.46315845904657 -0.40360786845437 -1.36032797001679 -0.556679930524838 -1.6019780300299 1.29459903101352 -0.36042678848128 0.609168446582383 0.541423030466923 -0.346923359336107 -0.493061635862825 -0.946011189777337 -0.574035075669533 1.32148726343824 2.09095851236882 0.349921414193099 -1.00399116334225 -0.29685350851978 0.434339070337856 -0.53089099898975 0.381798892097224 0.746185003110468 -0.464826357457698 0.0393600357521253 1.20958432531918 1.17818568624951 -0.557749612576791 0.334847482913977 0.955816253844194 -0.492435777524491 0.610338456132026 -0.870195353491905 -0.520102160928339 -0.381736501359476 -0.909733229504559 -0.431333518120792 -1.4598426553663 1.05871425998987 1.32778013908916 -0.271918171982587 -2.15253806939278 0.672681036603622 0.174225345837724 -1.31988952446051 0.142130557750974 -0.611982944052401 0.205127563690711 -1.55567931369534 0.459833918488239 -0.845672870205598 -1.977501385424 -1.54966739530527 -0.416375149536577 0.173706294503112 -0.716414129474361 -0.798756338496311 1.53010678838223 -0.0497494737109916 0.238801313360246 0.412522338140789 0.668806219648599 0.345774468960271 -1.13891311209509 0.419859312084005 -0.99855011645594 1.81561957487704 1.33789830205209 -1.03923835345225 0.332598796861858 -0.895504991765493 0.35087274487399 0.216131568336822 -0.619905305463345 -0.944631687145515 -0.847758360589849 -1.32126690319292 1.89428735502195 -0.449297503471409 -0.0479424424812033 0.673372183735467 0.41533529180924 -0.896468653885257 0.206470885555577 -0.106618776589287 -0.321181953677461 0.17473618146867 0.216590486785686 0.55064132259476 -0.860838048202404 -2.5489383600727 0.997660499668279 -1.73585977004001 1.35958555578258 -1.68922538465813 -2.25665292689132 -0.90958357143652 0.692130191336433 1.25624958908971 -0.363553377607642 -1.45130766683928 -1.72741511482883 0.474588121753873 0.0446845396076932 -0.400907422741144 -1.6881460897791 0.092343250711954 -0.47964326229895 0.703028584268917 2.53430081421574 -0.676290898225453 0.916855684994217 0.4685924969635 0.632719681257837 1.07675817633241 0.467769604371542 0.426057362162303 0.982866938576929 0.0903439969010845 -0.597574197824952 1.38163213602418 -1.06754850686856 -1.12418358792689 0.678459720418835 -1.20904103808908 2.3483100957517 -0.0541828429288335 -0.174629145087105 -0.0124353648983844 1.28164544098354 -1.52899194429031 1.13726743898915 0.34871471457854 -0.721363030200215 0.696157536519147 0.249154967409322 -0.707577529304255 -0.555901525130615 -1.21308556313239 -0.229994119568746 -2.78962550475305 0.815015813425857 1.83226644360842 1.35634377708712 -0.337189631521863 -0.78283039319994 -0.080272396108965 0.246035912958891 0.123066593644852 1.02963970330626 0.574589027700348 1.14831559460515 0.95603548408395 -1.33242017426925 -1.05324627425194 1.33265927901152 1.5029489674238 -0.787686381645374 -0.29015409788803 0.0933968448995028 -0.275451707082901 -0.315662529788088 0.371779820733273 0.0408279523479106 1.89762731088407 0.14454790906535 -0.426305734656958 0.0197152467754163 
2.5265859686095 1.88654898751348 -0.72192844500429 0.430785039824548 0.952934412492814 -0.330097663320903 0.393417526378993 -1.27368400292632 -0.418498347181812 0.852334875661187 0.935175823998798 -0.553920379890757 0.782568913432756 1.77496205431479 -0.930115362878944 0.42310058315979 -1.36264460574331 -0.701835360660187 0.558287428554188 -0.94819672760599 0.439235788396558 0.726851378577855 0.234773064543932 -0.420701969037681 -0.0485058704734304 0.669082179899203 0.442238248470451 0.934433912884042 0.580430598350318 -0.704487903869087 0.448272347934663 0.311826125129563 1.33881446237135 0.680206329654638 -0.688626888354309 0.145512133234336 -0.182066387702279 0.653692073735001 0.00415804801714079 -0.643209720912451 0.446176529118007 -0.571303385470889 -2.22956513757192 -0.473306870244262 -1.43509870052629 0.537782134751646 0.572793319332662 -0.199471322261433 -0.837440857891577 -0.263144522412629 -1.15865990879457 2.57246643109016 1.40180633149802 1.53938132081749 1.52702933513858 0.798775339309905 -1.44911721526067 -0.0771042932836661 -0.965991350096582 -2.7238123661715 -0.196170188579622 -0.727128692343901 0.681158629730023 1.19473530953591 0.72691588985421 -0.692567772841346 -0.232384752327298 1.48738399740132 0.0548916281477479 0.24940534804283 -0.100422782285365 -0.208088489807717 1.48394513553028 -0.882489867373927 1.01598773631984 -0.292462429735955 0.127054480466424 0.990310789418628 1.15039830360803 0.167311926177668 -0.695550369488453 -0.343909954633727 1.49185067039185 0.532114258337328 0.701866025034659 -0.15729854791653 0.0836225667727086 1.74835928513409 -1.80221443895206 0.0675600682996948 -1.89140135307778 0.645360073874307 1.14120536915185 1.5054416629252 1.67366194977232 -1.55866069214085 -0.906634245106553 -0.814547278671751 0.391076717001439 -1.11814126641394 -0.666818675888453 -1.02711600805246 -1.15381690600675 -0.359958395072398 -0.291016570814615 1.59960212296902 1.027603389805 1.72904693004598 -1.34697325824167 -0.0576612328367347 1.03168244532444 1.13554816992807 -0.154710969628199 1.43038563582863 0.131938678991007 0.45924756835131 0.409309179366764 1.03475896869364 -0.906413949135279 -0.545099217455865 0.0944185049865709 0.587908301086571 -0.360829224179541 -0.205894495311759 0.166588211572725 0.0406881466102047 0.143748514778874 -0.352673574818929 -2.08895812869124 0.13630861973945 -0.524231734010818 -0.215226518958972 0.553490786587541 -1.57857921706494 0.177696246108426 -1.18640646303802 1.63680707827092 2.29587500521363 1.31162152451711 0.160151914214084 -0.465067661702403 0.309641970890842 1.06413756798646 -0.520592749913884 -1.25875968876296 0.776709108658678 0.468946862896412 -0.161947132452988 1.60251947488206 0.322386603039046 -0.474729285388034 -0.225237841453725 0.642481759781552 0.539141923417281 -0.810462354507638 -0.362443523711389 -0.793522368377818 0.0714776837792712 1.0125832214746 -0.638829365614544 0.847235058488497 2.21299387253267 0.653808555269787 -1.0703979461772 0.851900800504598 -0.336589399625379 0.811439958584311 1.01277387430686 -0.532436260064299 -1.14894828897462 0.302822415834409 -0.769192851019463 -0.782798563191853 0.732559560553744 0.855933497490891 1.24867428322376 -0.629213748003744 1.4340232546533 -0.242530659343201 0.382004792859194 -0.0620241034466883 0.823789842445364 0.0966055766654464 -0.17005301535618 -0.910565699675069 0.159049598965484 0.0412550844634348 -0.285261028568531 -0.568822389519272 -0.99455051950371 -0.384575099823741 1.10429220666406 -0.234190816688418 1.11689892704955 -1.92799927964097 1.17441168509909 
0.390880351467513 -0.613613772182575 0.749392651210714 0.539373120561385 -0.686791142142285 1.76114484051411 -0.293522663607718 -0.283147511504926 0.632477743634284 1.37210063366277 -0.529151979441865 -0.806976405494407 -1.59021754478097 0.904104268031803 1.05789297618047 0.0165739661411375 0.683907353416683 -0.960593287076535 -2.30448116689513 0.0277338297070273 -0.0435202648434086 -0.607897861029216 -0.848304275158921 0.0028781470281538 0.0258035401061607 0.114723533825037 0.595696507347655 -0.0651298654359156 -0.526752845646763 -0.462295976895281 -0.107718794477992 0.427361520593689 -0.165989925796801 -0.0719610973879664 -1.28204776897642 -0.0652032411466803 2.87945807442814 0.918623083046708 0.315910438898409 -0.546693016874911 -0.20337836428166 0.465266626603312 -0.0357509151444176 0.116981503521059 0.392987182085851 -1.13656722930174 -0.167614885839719 1.43847380585617 0.0530476292434148 0.00243251534823049 -0.364654023499672 0.344384366500488 1.07047748747263 0.681016734512255 0.189894992192459 1.21080081630119 -1.26984065552941 -0.00278183739421816 -2.65505809304924 -0.0152821187223636 -0.397250429660337 0.768346841524972 -0.723650829449671 -0.306667320599729 -0.994618003141148 1.14338648337497 -1.5450273234565 -0.793168439076133 -1.33438648795657 1.58852297651317 0.0829376438297431 2.37586469646351 2.12544211708984 2.08332817299549 -0.0873964895197169 0.418363456732688 -0.789144433185867 0.58603748203625 -1.59435264204998 -1.91255664126596 1.48887493376291 -1.11636508214386 -0.753727952216282 0.291426515761842 0.94899622717295 1.03525309932974 -0.109658217943535 -0.649253944065953 0.298099325916307 0.466204327376824 -0.00450126676567364 -1.16683210843266 1.72929975489438 -0.595224453835014 0.141506630828186 -0.866633238288696 1.37950657276629 -1.54142736742234 0.857552142148824 -0.572050249935636 -0.144982043940856 0.122581619887157 -0.455472901461148 0.681593358439952 -0.84134354600429 -0.842046941623649 -0.434381131265374 1.28092066496495 -0.32864949758339 -0.692132580953624 2.39075167677178 0.393789973080112 -2.53798437290024 -1.35325970094162 0.127175993005 -0.203871148584871 0.636283186874763 -0.422356721538856 -0.492432577866744 0.359605065205992 0.149710864528427 -0.982887179920615 0.640012494384679 -1.25451533228644 -0.0626433373164928 0.0117902175001763 1.31740977740875 -0.934061798517813 0.48591541361249 0.334282260053902 0.666246307621385 -1.43270595272902 0.0894909382187994 -0.549316505056247 -0.835332561814054 0.21920429747383 -0.000922208833094971 0.847574475681687 -0.930256158819726 1.76864832701219 -0.090167785907412 -1.81537074502017 0.219001021662049 2.16476466828642 -0.454822316961153 1.57820140036126 -0.496808909576315 -0.0573133394498831 -0.768496081191487 0.204894745287096 -1.41745328583036 -0.0449020617732001 0.648719178508959 1.11311538948102 -1.53320849666337 0.126299170171933 -0.461207507381101 1.40195919677628 -1.75170818360413 0.839090667973536 0.87710018485929 -1.6383569584402 0.194066957884834 -0.470608269545046 0.731616072940889 -0.467632208713086 0.982643620253801 0.984564818468665 0.35191767867945 0.0253881518073777 -1.72253856892491 0.583552883607494 -1.42642007050119 -1.40380493522833 -0.558072846245343 0.35909692494151 0.0849374453385438 0.932988181090742 0.473899945948626 0.52049359177964 -0.866983214397762 -0.35730940110641 -0.546497220522359 0.254901175616059 -0.978368189119918 0.418617184889345 1.16995907959545 0.669524504325547 0.46816847462305 0.596098378283663 -0.67797291876492 -1.78025565917708 -0.742330262693543 0.151207634170567 
-3.27534650002505 -0.6138755938539 1.55453424113883 0.0284086552169823 -1.26313470176353 -0.110695742454859 -0.0171922472984403 0.00803627985005378 2.74576099725726 -0.564495409045549 0.940705228826437 0.265378042485209 -0.502077496460474 1.15770980032609 -0.628456060790464 -0.692971001227558 -0.92265725276487 1.35266129466083 1.04374600072374 -1.36346460373216 -0.812682119572927 -1.16292036992999 -0.607282362896794 0.394861609080923 -0.168878484741374 -0.564127941747607 0.764657946621354 -1.00860054297155 -1.16204238438463 -0.390305472830613 1.06907538802112 -0.157734584463931 2.03442932957575 0.688627852548778 0.733482520261622 -0.330525416814844 -1.10112447381488 0.358636731842739 0.870888505135662 -0.228494873450894 0.310978158882221 0.212068205970269 -0.28574538820057 -1.15657255471495 0.000864077733588219 -0.0669810087715076 1.60174387087891 -0.145620976410483 -0.00495111721576477 0.797072318598274 1.08634185430243 1.84868696972768 0.0810920663367534 1.61680151701454 -0.233475516586999 2.65168088059172 0.0905855293835613 -1.38279563243069 0.266068013060301 -0.918767808521892 0.0141883261647103 0.920523736647892 1.13437566521345 -0.0984656273844896 -1.16167368199195 0.405756457882889 1.14851625465098 -0.399357158289332 0.591239492071269 0.129090065705114 -0.173900516697996 -0.344162535708451 0.568377718349578 0.760839522482253 0.565953388930957 1.16347605894727 1.54108110598796 0.115726266480989 0.3825792627175 0.916899551258615 1.42990275890494 -0.527287168841865 -1.62547311238069 -0.0625880900855838 -0.0123275487962428 -0.480785347674271 0.78142299982113 0.638095944485235 1.01695233701938 -0.58986243992655 2.80636040662513 0.751173377266183 0.038314864016518 -0.41196214992961 1.82159312068755 -0.214799313882297 -0.926308910159337 0.706376582958355 0.0842952985848333 0.717492826921647 0.247142959666469 0.0148096449058371 -0.0499242967452645 -1.55320662045843 -2.22138610225642 -0.897235617889353 -0.450589894581067 0.98528171496969 0.538828407627598 -0.820415456487463 -1.33175828839567 2.15733004554791 -0.468311992000674 -1.00307868859501 -1.98082409929913 -1.17762375950328 0.161124166700743 -0.204894009451195 -0.798766316869156 -0.0804710558074739 1.33653686480722 0.720818193832097 -0.989280551443105 -0.287556799144514 -0.0480253533891086 0.143026974522312 1.06559591904019 -0.413894577283044 -1.04661313918892 -0.424193171155767 -1.02376314184857 1.67103168173912 -0.0957727793965131 1.33916986895131 0.455880689819861 0.665170197298831 -0.200486500698299 1.41824563418971 -1.80272229664351 -0.215196168095262 0.844532321062619 -0.115611513781941 -0.829893023909849 0.569939046570773 1.8695634453177 0.527782019903362 0.169851374480175 0.23984391398399 -0.606927261193858 1.17752071293707 -0.705843670029003 0.594993911612451 0.0421723611378749 1.74253281522889 -0.791703218395688 -0.614503832864188 -0.913376829945741 -0.455804325381575 -0.330253033149594 -0.233882787134392 -0.0238911675376078 -1.22050069257981 -0.974081078455056 1.58483626608932 -0.521061748155909 0.0435686431006194 -0.337732976252389 -0.0965567979371086 0.671556887829879 -0.449637691959959 1.32930974901776 0.972309686075239 -1.43768244159392 -0.767773511363673 0.207661076500663 -0.986463824632324 0.103863650055649 0.49789632719085 -0.532078400157837 -0.195264747939321 -0.64298020851827 1.14935007014628 0.495882358086569 -0.0910680083209538 1.03108200322664 1.31894595831517 -0.643559310630941 -0.859818383416563 0.9728828336732 0.0904123807866006 -0.694739763687159 -0.35535815925445 -0.338795053641978 0.188801963962867 
1.35029397790659 1.01182001899171 0.532519679622801 0.536903234818726 -0.490938317147179 0.963468633893735 -0.989646082983127 0.141559224328092 0.759092357972654 -1.09871574105311 -1.12100939852251 -2.23565818900195 0.843299044893063 1.61305443576428 -1.10262566845074 1.58063139106695 -0.0201271894708499 0.430164584789253 -0.838851201306786 0.949834544788152 1.30230194094071 1.37246741111101 -0.485577882539757 -0.234468267063754 0.116832588885761 0.626304513834318 -0.518893550996986 0.478095391715504 -0.621700768684388 0.632912863227403 -0.825787854243209 0.974968860190377 -0.272810203167358 1.03922370599932 -0.342487059990379 -0.111698257520222 -0.405490927090068 0.154100807777209 -1.26359916143241 -1.3456869725001 -0.300879303711002 1.41982392848196 0.274790862602674 0.346958787882802 0.678099740775829 0.468545394220443 -0.226455491311451 -0.293026305995168 0.674991353362982 0.128670725444082 -0.932523179566714 -1.38379943476967 -0.0511064490566615 1.0738791376762 0.249475872994799 2.46366688542534 -0.471472053906656 -0.70317950725729 1.13590858805789 -0.885702989167811 0.084908782951282 0.0268337484856782 -0.944317558886148 0.103410241030682 1.04193650437491 -0.972561535017848 -0.432137552824061 0.703267632698695 -0.291660617498556 -0.639592798783762 -1.26433619529244 0.999302094594348 -0.389992425508042 -0.51795817910425 1.00464658004827 -1.53132820811713 0.548416437588917 -0.22137143126969 0.441602675375845 1.44882804864948 0.478455307557498 0.251823658647646 1.27561722497636 0.641522997664849 -0.408242376005419 -0.656751763744307 -1.02594324916465 -0.400599936896076 1.9275335540326 -0.230937591835529 0.200032462458558 2.02102669444167 1.15411480252942 2.2528648371759 -0.218136145029299 -2.52140313277387 0.0132266141644835 1.21575622167381 0.150314344600037 0.10329425016803 -0.041801619828592 -1.33234593871119 0.0104642423066356 0.0719840603443657 -0.187272004779356 -1.1455495985817 -1.14501180913214 0.900507782761513 1.26923413916574 -0.783552537082451 -1.02832638551447 -1.03509048724777 -1.47423207780196 1.1567094797207 -0.135243788008128 1.04463172890066 0.90343676110851 2.49206112975759 1.3179695766107 0.548474495180747 0.258190082393996 0.837725254709251 0.168399050480875 -0.736397810416785 0.383807785956475 1.32262422428739 -0.47267551143437 -0.541510848597344 -0.537186807883855 -1.31024042138299 0.842553912301685 -0.656384302044428 -1.52016136062266 -0.660650294496375 -0.41868709185556 0.254908688023992 -0.366988557774694 1.50461334915251 -0.979738336316524 -2.10245080247532 -0.0757210330903025 0.975910878570299 0.288050275202971 -0.209316977322497 -0.791345827917445 0.0840461168898585 0.325986470008376 0.397011561371887 -1.98695779922253 0.46482268943439 1.03983812859997 -0.476049002129867 -0.0809995258237126 -1.47920764359602 -0.0163571716399989 -1.12537767688426 -0.48127263878819 -0.649221435026728 -0.778405127689253 0.2336959071487 -1.58107189483119 -0.260522344981779 -0.374231190206119 -0.165335125050814 -1.61364543581106 -0.150579826077483 0.805555895509873 0.792817811474582 -1.30584154733416 -1.03131656730747 0.52922349689049 -0.515148444474471 0.056405623212568 -0.00151233648531737 0.0685971841881167 -0.549033030255373 0.710071565571301 -0.765356365436625 0.167613814620624 1.71528799129695 -0.711529050326992 1.01075589161478 0.776862544878748 -1.43977884444112 0.471773685138515 0.0934572556414282 -0.240696584921792 0.914027938405957 0.75417228514478 -1.23449534471849 -0.491871139977735 -0.291977393675099 1.40718733837795 -0.350155070929482 0.335476894321904 
1.34151082198748 -0.617230384749405 -0.983937620296898 0.421009103128071 -2.0922524887473 -0.864226500745397 0.389984490163131 -0.876539336870844 -0.532897057062161 -0.918827721592232 -1.10261973820345 -0.298881104843454 1.19530046719045 -0.440563613755946 -0.09651849234716 -0.650351699481631 0.342021128149363 0.602341685352688 -0.231071964999162 0.0736826880837671 -1.370510437658 1.60656817547164 0.680568924251098 0.223947177954555 -1.69436629533735 -0.259889895262265 1.19685984262014 -0.782543376934049 -1.44380509948794 -0.595512753504482 -0.02354955881715 -1.16948320338362 -0.599581105186558 0.0385276312293605 0.0337984054121613 0.460594219205494 -0.242577965206591 0.440726497292072 0.647649757080909 -0.295946422638123 0.599576979892534 -0.78638306865547 1.87457748220435 2.58769776634314 0.34534236303513 -1.08796868525965 0.509364641910858 1.02190180558591 0.463644386975016 0.144413491259212 -0.200000285658675 -0.319362430243264 -0.0861587895488914 -1.08983970185781 1.12099912564806 -0.357313586494811 0.0229015069361483 -0.666297743152546 -1.49640873018699 0.818244402589725 -1.47542721544656 -0.729710047030187 0.43831685078649 0.523140780168471 -0.154854513601763 0.218759137866106 -0.287850233905239 -0.052372040032341 -0.249354423626617 1.37561596826351 -0.751523199441206 1.26490494947581 -0.0320429298074421 -1.70069463529312 -0.671007491414435 0.627543363496721 -0.164539404697783 -0.323763147926051 -0.162404963075109 -0.459040394321644 -0.385386377944296 -1.54720194481358 -0.177377991871415 1.01591710927838 -0.606061176349686 -1.40913009586173 -0.772948324003578 -1.47499235827419 0.324128256540452 2.61774411791833 0.294344733005282 0.987002718518078 -0.29248988512208 -0.0528659545372711 0.398927438300398 1.29472770784579 -0.155871324819631 1.85116377402249 -0.235606753822921 -0.773384070124947 -0.911563222924176 -1.13018164279303 1.72296528651457 0.780576180356411 -0.0495182091158544 1.0882616722255 -0.0960915068836024 1.26393022241075 0.666138640011003 0.0516783931749567 0.239102545266958 1.03782840165765 0.635342718713325 -0.236810662144041 0.00426131660889351 0.144560775880365 0.102916028849751 1.71423844924511 0.77869619270528 0.177179531499574 1.07360811202581 -0.283628142791583 -0.427635398053262 0.60520782958695 -1.10798949493329 -1.19827439819045 2.1284306252443 -0.576008290756923 0.348123737009162 1.02389628775935 0.184278673260589 0.570549311186952 -0.471714118797556 0.0815209136454515 -1.27110150403685 -0.564227225925896 -0.615999717493513 -0.196766438421252 -0.958034873565567 1.38243390952978 -0.559078148282493 -0.56282181705008 2.62688085543423 -0.386206643245332 -0.792621500030266 -1.86497172300775 0.0758608743954296 0.508613382479958 -1.24884714708864 0.318430987344996 0.88082744867904 -0.0387599831738561 -1.10939868939237 -1.32026679135309 0.192448928698992 -1.55106451752905 -0.443127470985364 0.654270341027351 -1.14728788038171 -2.39365962852291 1.35375224558336 -1.02806131751679 2.81628596087358 -1.25801887481588 -0.878691645618907 0.707778729842618 -0.659525283019529 0.646826233975709 1.52741311058705 0.406665759615404 1.24934434304358 -2.21825973295059 0.350152092432603 0.49106247964974 1.17564905058519 -0.183799624100884 1.14006143099152 -0.871238786947965 1.28696612429987 0.280285339049381 -0.653600204535569 0.861421490777591 -0.000645603734778069 0.429757333322726 -1.00796873759073 -1.69835159188979 -0.328928353693571 0.28499750697373 1.58451346376021 0.57251562253851 0.182780060870466 2.08081171140024 -2.00978364801989 -1.78637385739856 -0.860897041731253 
-1.20910937797098 -0.767436810356724 -0.226801597104655 1.94062207563063 -0.0290373972579643 -0.40803584549473 0.373760994887189 -1.45287317154899 0.0167803397541903 -0.293258583667134 -1.07720306804977 0.190607360604052 -0.599799189511574 0.677048564487266 0.0591076365763369 -0.820575765320291 -0.11906157616512 -1.3561360868717 0.940586523936913 -0.868434881796947 0.503441350737494 0.222257778072063 -1.61443134462827 0.747581911428497 -0.206713621861832 -0.0868331877663163 -1.35891019522522 0.010574214977011 0.132361726279908 0.187093428749618 0.451211248325164 -0.675186574787216 0.814004052986168 1.68463186371107 1.0870946415918 0.769497549028569 -0.93797719488543 -0.987915967580941 -0.219421923430728 -1.37471524432574 0.277614129980192 0.319382596952842 -2.08856286604524 1.32114767788042 1.05701006281459 0.876230736667198 0.725294276180597 0.834664852318892 -1.05729643926253 -1.17223935159787 0.204204759260464 -0.18498637085169 0.35478372610144 -1.86342721280891 0.168668848835879 0.0480053571437803 0.467236215614215 -0.0463000564341056 0.800973890138816 -1.03426936756081 -2.09732068685029 -1.72851495792849 0.0254785577206192 -0.741307745675074 -1.88484884164951 -0.366776544282693 -2.09906124228829 0.341937710475902 0.354994488551958 1.07759030461399 -0.166515441513604 -1.3636980406315 1.16815471540882 2.37211941776047 0.579835437702058 -1.18103022127791 0.915312940088062 -1.9583254274333 0.160685887050929 -0.988666130481498 2.51296751785862 0.0471690452439202 0.318391179913185 -0.27235267033076 -0.531419251175839 -0.331603438021856 0.699176049114218 0.455737189718061 -0.873671586269601 0.399706242312808 0.176940211180061 -0.11853605034177 0.534948310355344 2.84749499873218 1.93749065252617 0.679045294058288 0.29739106808418 1.46161238477715 -2.24671595563101 -0.738280276961082 0.939666468492416 0.0439297983311331 -0.0327034080263182 0.545157921358995 -0.850225440145722 0.499823001575882 2.63354314815697 -0.234631652320424 1.01176885043413 0.238752037346787 -0.977050134891531 -0.104804471078464 1.52357023365313 0.622561524829281 -0.738790895609533 -0.424289430504205 0.016000066176309 -1.13770473157563 0.130032055409759 -0.252466234139902 0.515064290617956 1.12005440251766 0.384939065361426 -0.610819295238787 -0.203406738695103 -0.898628806300884 0.294023312828676 -1.31044873854108 -0.221173733842902 -2.1214774125262 -0.174277407979081 1.0968256080952 1.00461194401949 0.688110376754027 -0.795333314926777 -1.05749334612085 0.953533282743228 0.765495436089659 0.845622395272908 -2.15409148408876 0.541880624376617 0.911900295516159 -0.971357347354405 1.96194029237354 0.686316034945177 -0.0492133353439073 -1.45017911402357 -1.52217589514236 -1.92844199018677 1.09454342749716 0.180176163741934 0.202193498936763 0.975462060894018 1.08061348940997 -0.714112442372185 -2.59777262952508 0.525122354759734 -0.509161797232425 0.401932180749686 1.28866688336616 -0.70901067331443 0.475210253347632 -0.973204733833663 -0.461384836943944 -0.310405117361193 -0.410592012977426 -1.17560390655819 0.529589284218311 1.42872145774489 -0.011530941792732 1.77230082958585 1.65813427540587 -0.916689458884643 0.257317493743043 -0.429422635498706 1.67645997671782 0.155468746419825 0.391149807718564 0.248310854355786 2.51104855149026 0.0663424265404922 -0.378923879806993 -1.00454129012264 -0.261296264022013 0.35902843833759 -1.36706865100081 -1.50919262382089 -0.504297395436634 1.6433395281194 -1.7474890359537 -0.400436671732257 -0.353005517250598 1.32516634359532 0.217173181151845 0.125891535895757 0.517024885057092 
0.588345061086317 0.574586171347614 -0.26134041544333 -0.567418173696742 -0.677295649286568 -0.0301348032096941 -0.416373155340017 -0.906819007803384 1.14093135586788 0.210901075090082 2.10640741453612 0.529414402700491 1.49693568874506 -1.68095019484861 0.474884773648746 -0.495648601983284 0.119601512960107 0.32808535094299 0.440898491151489 -0.1671631584199 0.806135851290906 -1.08149683791594 -1.10962201933977 -0.632481248059936 0.736172796888084 -1.47794708273892 -1.18897798100281 0.996313111711988 -1.03093161895122 -1.19179729822097 0.944280077096815 -1.11666236464608 -0.181609388897052 -0.574973849363052 0.282803212253733 0.610869630420177 2.62722834670904 1.33345187052413 0.828421028467012 -0.529336354751164 -0.0556617243021158 0.262270187496393 0.10927758824696 0.564642187800648 -0.548390765065836 -0.42944807292844 0.0249670715748424 0.260829904305446 -0.676539653921562 -0.261845812745097 0.0287804234106582 -1.75223243283308 0.39230502920778 0.0546410392804641 0.590373157556472 -1.28235397565852 0.94013701353643 -1.35803910347643 0.802375764180476 -1.60185326901182 -1.14183714469849 0.404425653550597 -0.550109641001719 0.642833589344145 -0.00310246659821017 0.106363408084304 -1.51370001763493 0.178086142822259 0.866547659500005 1.13421221259269 -0.462722934572432 -0.747054217091932 -0.0817056579765284 0.873959959960761 0.117948370797245 0.438282110805548 -0.088993722203188 -0.635964572705193 -1.44288461665407 0.0514934180588756 -0.595190627108215 2.69799932964235 -0.129595737833799 -1.15954793467517 -0.796579563207168 0.0602138132210102 0.91446066813251 -0.415896766076478 -0.936870934614781 -0.976545958641673 -0.648106223324159 1.3652105904584 0.245349308681966 -0.605422852413717 0.121541799401869 1.14463324977151 0.112576936050247 -0.211380213016787 -0.731263032637687 -0.148586703560677 0.406469833773656 -0.646146692002536 -0.32918253530662 -0.389492462226115 0.109363886816962 1.28291448492422 -1.83012838545631 0.472205982954478 -0.46267857283458 -1.12357715291073 -0.999730235482785 0.828174272473399 -0.204374831283823 -1.18892894848489 0.266081251907264 1.85283584605653 0.87370769941484 0.486283620172437 -0.528705721711733 0.471520999971706 1.89879750629615 -0.998104773986659 0.398748261273009 -1.30949799737821 0.0772365589271675 -1.32716691081228 0.70858344386677 -1.14634767524283 -0.203825645289519 1.02343625395902 -0.202420677111393 -0.0776476808160362 -0.862843504895346 2.31341452506366 0.595190109838426 -1.30492839325995 1.375389138159 -0.507352794599205 0.539905413036511 -0.0488120335235867 0.0464188257982068 -1.25831669813602 0.246772459174106 0.373361070815954 0.354792815323912 -0.5383670174491 -1.57430555292422 -0.680053512543474 -0.266365678680861 2.27407268927861 -0.916569307843465 -0.861060538704821 -2.23589800228778 -0.997948935542932 0.631514221820579 1.70997519286431 -0.400447839652745 -0.721639020928088 0.837427173471715 -0.922881776690162 -1.03774266238516 2.67418923718081 0.639226029665085 -1.47806902182335 -0.229258191326481 -0.273829786186736 -0.517851547720218 -0.838073152692729 -1.10561272103136 1.09803250152841 1.11145436590642 0.224515868563896 -2.27240842323118 -0.596286922697773 0.312022169760633 1.16744629386767 -1.78640028653353 -0.39836019443999 0.63864246601236 -0.418040432341555 0.571714072736048 2.58242211283669 -0.175032109305357 -0.0247064393320726 1.6184507069476 -1.31215612332703 0.117912273864533 -1.18424330752107 1.04109623986962 0.795148742023973 -0.0146442247041298 -0.278553891298517 -0.148906885350164 0.0329686196704569 -0.679162465167366 
-1.23075106260234 2.09467515664585 -0.415948527495611 -0.462890249453945 0.0462959769430822 0.238046080370344 -1.15611438080399 0.000653309164085472 1.31393286343323 0.505634336839351 -0.259861370624777 -1.0073001103557 0.664129430677829 0.302755026968425 0.454381683923004 0.348901384326134 0.27314434677974 0.458322894722699 -1.02866345152944 -0.523923954267928 -1.09983226966702 0.875625643320104 -0.208891269640337 0.610202352810481 -1.54531425937588 1.97832168754745 0.858973012036356 0.376188280379914 -1.32165446855001 -0.345591990457074 -0.72885620797426 0.195044993414757 -1.12927654489734 -0.323590143463034 0.209995367306935 1.41701861601849 -0.840395706523353 -0.208188411167222 -0.219892118569363 0.0600286981433392 0.0186085554468923 0.385507965785957 0.0636348150863522 1.89977354713128 0.14426808723548 0.388557549064034 -0.232868105059455 -0.177992776655665 0.567134268907557 -1.37380883187961 -0.885632001384755 0.526589346010224 -0.442493182849199 0.429555905254853 0.300262695883114 -0.810579463889064 -1.46515756157631 -1.29801456652435 -0.00904284159770289 0.99690990451999 -0.0719897622618894 0.117310953570361 -0.838186600042888 2.00858284779976 0.465053280259201 -0.0128741398146803 0.245265676430049 0.231248409945934 0.679298022924539 -0.295719755841866 0.316568119475426 -1.8003121441767 0.00245543760157766 1.13805693421957 1.74608520891142 -0.218550580755086 -0.340043382363979 -0.124879824295527 -0.180376224758795 0.583259845957915 0.377453859067974 1.39591668893756 -0.355363155753715 -1.1349296285649 -0.334240609299699 0.759747178113751 -1.33329479355825 -0.188807414278263 0.196611221201962 0.692301896665164 -0.468334742520685 -1.3686129952634 -1.15096085514311 0.673938054935202 0.086959345382858 0.710353753601089 0.724006741009153 -0.109303795069583 -1.11129748929691 1.1281565568277 -0.0461758876553274 0.869045133543494 -0.963712529972494 -0.416791908198217 -0.614487151954777 -0.229032509625514 -0.1511376679356 1.12818473782144 -1.42342096437166 0.139661176360818 0.404829059112266 -1.3805216649369 -0.33002172021347 0.615150975309845 0.858778811915888 -1.74700313554543 0.563790943665319 -1.80210725342624 -1.00344994651506 0.43337538581285 0.861516306324098 0.590125833984148 -0.48028161529012 -0.896155259470697 0.303205981630685 0.563531953790067 1.05047689661206 0.0182995001057119 0.800983955371091 1.65050758584431 0.975162082393511 -0.595497664163254 -0.320236979695418 1.05645131638249 -2.00141502579631 0.514364896986471 1.61542302571304 -0.906671385856358 -2.03278407029129 -0.375848306483052 -0.40195421355355 1.15003258260663 0.422709219240162 0.0747526335400558 0.459179080414577 -0.559243351268928 0.228860864469399 1.06811426750526 0.308506677826238 -0.143958780623101 0.345030690684674 -1.03118641641543 0.161851782919349 0.518192236508929 -0.74629536351254 0.552740363304363 -0.677031734944659 1.16700629828776 0.0568002036505394 -0.768528209126985 -1.40946990766282 1.59062929747422 -0.206999964722343 0.95430646705832 0.646001651905239 0.0200316571947603 0.874905069502498 -1.06999448046854 -0.194042679154372 0.22933601793864 -0.797872103227691 -0.144086726297545 0.194446388195501 1.2185772392204 2.58914198755243 0.963349789951667 1.11907809261648 -0.861341380833346 -1.78077869481042 -0.287106696425427 0.389888518395388 0.631611533432359 -0.610459078155225 -0.699097404040541 2.35373277896932 -0.859455950222655 -1.46481002717781 1.44947518781204 -1.08574579268701 -0.833583548524532 0.783101733644614 -1.11468475745382 0.64406740406677 0.805749685457689 0.0341586203347556 
0.276330215951186 -0.37363639787727 1.37749134561413 0.968146298458139 -0.369584752423403 1.25900323797024 0.759084606565685 -0.63715578174265 -1.64115930316865 1.67196599284286 -1.32443348734459 -0.0196907828301819 0.437753823365565 0.695895637191799 -0.693290320180198 1.33954311182447 0.874501282442734 -0.172270563892154 0.292489859775468 -0.121937591856789 -1.30828566797125 0.452154358221696 -0.196648895877793 -0.698368104843463 -0.8678734988592 1.97320454730319 0.33460468753383 0.01921601217096 0.611167690735648 -0.232025607029359 -0.0832424575817039 0.694162695216831 0.208074110285998 -0.437056063312257 -0.780526089813084 0.898914427384808 1.93886772568908 -0.0775752775449419 -0.375465096019308 -0.521556601212633 -1.51919100516843 -0.00803826726849604 -1.27405611230008 1.70099250430986 0.139826582119639 -0.779810069628689 0.533984584900314 -1.00387522365949 0.200328497845171 -0.597378589133177 -1.53941135032986 -0.245653049194344 -0.179135491033924 -0.527762733043949 0.112906250589085 -1.1112205594785 0.477414838521201 1.280957777275 -0.0578691449309611 2.22907962518684 0.650009427212306 -0.599688061723024 -1.81222128854602 0.106982405314913 1.85660661562513 -0.870171119754026 -0.217318611878953 0.107162339846306 2.81355871171355 0.241980337826977 -0.0731755181478452 -0.893695836116271 -1.54292130897982 -1.154474662377 -0.481833718878417 -1.28503640006065 -0.928664244218292 0.633743701896172 -0.461348479056181 0.0668064090612408 -0.24306279568705 -0.932729355206428 -0.521135045742623 0.541253573813153 0.94059140709549 -1.13214380529072 -1.42028162349735 1.60951642554429 0.0135086056397304 -0.987835130716359 -0.118767401478916 -1.4155786056952 -1.75475849803496 -1.00379102885171 0.948698527869534 -0.5359013378171 0.837676918404235 0.0897215953510564 -0.926392564941326 0.174671618791764 0.273681382900052 0.994987551173764 -0.325487700538476 -1.62921038683153 -1.4006530065578 -1.13744345452025 0.136589883102958 -0.564026347461956 -0.0135247403674182 1.51278495428309 -0.767366007903815 0.265256185319762 0.610824385244805 0.901724465724415 -0.673911439562596 -0.381074265864113 -0.0502093024876541 1.47782199723429 -0.278166572776515 1.40980110281393 0.779773975091861 0.681284959351341 -0.625397421275174 -0.632332932071907 -2.71076046620437 -0.133011130034082 -0.493333455938639 0.0612137915050116 0.409073568101262 1.51518418415228 0.0285590980877485 1.06579578308693 -0.585999299315984 -0.557227262906315 -0.0802578300018127 -2.10370971518015 1.18539482614131 -0.284057469622579 0.368614675510855 0.561389110771723 0.505744313501438 -2.43591499642048 1.19972681756138 0.516998198875642 -1.62976043349046 0.751104497603766 -0.181864541920237 -0.0894738844634696 0.275734272565213 0.228581996402349 -1.93505131218056 0.187542845111383 -0.331688875360517 2.38415844527927 0.166616416603605 0.776848368399144 0.94450160879962 2.19444405203266 -0.614047479139762 -0.0200162512876818 -0.123621769744976 -0.550102979285789 -1.31460849444105 -0.887451688856427 -0.319126587517515 0.36958822852585 -0.958663733718488 -0.0816737098687295 -1.13262758135279 0.431682590560593 0.144376665217376 -0.667696349089472 -1.67803006428889 -0.685886689545329 2.66023139094879 0.066701707773499 0.813574203897941 0.21808316618407 0.061388844849525 -3.30605799565807 2.07050778756616 -0.762292952075033 -0.474081167941408 -0.830321486908455 1.08808280964625 1.10748971739523 0.501565359802863 -0.790141340983523 1.14402062974778 -1.21275532399637 0.140358714358605 1.46176828210628 0.898741713242479 -0.773913212103044 0.191431599876385 
0.626153168380099 -1.5906558560786 1.14988219357276 -0.0252058875961475 -0.489797301547639 -0.27616582856685 -1.14927137821396 0.623162343850131 -1.94826964430901 1.57913241199435 -0.974427010655532 -0.37728238353813 0.383493569922858 -0.139719503729397 -1.14381878213594 0.706468204443735 -0.35073451800793 0.516885934628057 0.267215243660733 -1.16628733690614 0.584236091976655 0.119818190959509 -2.41603756464046 -1.01783659441377 0.444291566124163 1.66807640665706 -0.0624451539097659 -0.0205267117217568 -0.0832198061059433 -0.0524923121673053 -0.625001684555401 -1.34493954007972 0.54020246570849 0.802700012129055 0.822296971671518 -1.91511794529187 0.690823632213313 0.478448698230138 1.25294247791065 1.44049102736802 -1.03619166418477 0.0743191743038873 0.750740724326171 0.897865598241968 -0.667880471519357 -0.679168788691376 -1.34250579223694 -0.188150152042645 0.500946267430862 1.38584848090618 -0.579137361543594 0.380257712971183 -0.370697526999435 -0.176227143754297 0.752298693120818 -1.99728868183935 0.10662878355532 0.387568012617324 0.0738513856369552 1.31778254486348 -0.644293069383247 -0.240195857234763 -0.298178335088763 0.29125678824625 0.66657777939664 -1.11057490024637 0.804061827725986 0.750237107836882 -1.44228822945314 -0.608088612035398 0.983787233771763 1.29375573350063 1.60721378842695 1.30599771651558 0.699594900215545 0.566723866655927 0.642367512278225 0.507331078145853 0.33118028592888 -1.92658299279184 0.428784371375511 -1.34999389653775 1.27631567968068 -0.89900784518164 0.840640537427748 1.46082267581568 0.490414454438817 0.975357072303655 -0.12963023580959 -0.34419331990439 0.0447842268471689 0.291001248980841 -0.738646539429388 -0.966659244204977 -1.78772959661877 -0.497260189742561 0.265348213236691 1.77887423254788 0.318330770229018 -0.885365900409087 -0.744277629743491 0.640006730399603 0.249517969627831 0.239908852427323 -1.23436187930659 0.65568507504729 0.183797673830324 -0.282337607608628 1.39964587233045 -0.144137100139408 -1.86898105628023 0.100332877773378 1.0090320322632 -1.15731674897134 -0.108091822068536 -1.18464569506188 1.79894820373233 0.538481460807158 -1.06872610400386 0.601099297428429 -1.94643922713932 -0.125806185314018 0.703559057476251 -0.991966715982324 -1.89185461407379 -0.604343489163375 -0.612488812628723 -1.58502680086246 -1.06983500872687 -1.44366586044633 -0.829075327544792 2.22479246102299 1.66698057348643 -0.689813700579331 0.156344911931058 0.633387718114565 -0.423000171613403 0.577478773346684 0.824596681334571 0.478961590510673 -0.157413792840289 0.312884301330165 -1.33296516836088 0.650501049687386 -0.683714837085026 -1.08071724250707 -1.09287475430921 -1.09679650707853 1.45670568157606 0.270029353624379 -0.68353547979269 -0.986728619922426 -0.664540867831385 -2.01162583323579 0.389066380305624 1.0349212380755 -0.617489103630007 0.811979059109394 1.42700432624872 -0.119342785809027 -1.13287995184185 -1.94379483187525 0.0823278640785619 -1.78580212329102 1.12321005320898 1.08675619744031 1.64188401251026 0.0924829461154532 0.654827668183792 -0.418045351037882 2.72262823458522 -0.127904167491381 -1.44175305417831 -0.453132129327856 0.772906789840894 0.685483516980779 -0.474901833381807 0.0432010778935388 0.224906033883109 0.0926726883847951 -1.84119069421305 0.729528552104243 -0.48897751633958 -0.396114157704361 1.59603382698601 -1.17766906305352 0.392305591765253 1.22250989925696 -1.24701612766933 -0.711735432494215 1.42660572452412 -2.15809917123208 -0.310217998291161 -0.586295143122665 0.770427623650273 1.78206877990694 
-1.77555416667731 -0.868448033163839 0.929299316084915 -1.28886323959696 0.540902713465989 -1.09198377306868 0.638984731739342 -0.415125501434419 0.478419357753821 0.025791560028534 0.046675525058685 0.253522592932935 0.198429934797753 0.720483922905033 1.35057068723859 -0.148196076507592 -0.562482716581842 0.10770802165935 0.82503485584563 0.103297424416313 -3.88374322022945 1.33346453647961 -1.14694742273291 0.558956715855858 -0.34790243509106 0.6771156509837 1.47454552629754 1.11914107805623 -1.97732020456452 -0.0130172728464496 -1.92718898060051 0.848272956025759 0.533123611265112 1.43301364286812 0.558975321334369 -0.708079629275784 -0.0174558444712675 -0.686826241088654 -1.51026380719093 -0.146957456932335 2.43476899136393 -0.746636964939647 -0.430727818232374 -0.250247760025379 0.0298723995174376 2.14090241450162 -0.851433715355975 -0.818222822155406 0.0393692584375114 -0.634057442428343 -0.403994513995888 -2.38618918282105 0.0461153667152412 -0.63326201454741 -2.00168708161626 0.171334918107573 0.0306305412877546 -1.17919413223721 0.314655188804788 -0.762649453195519 1.3574270710203 0.516255485946801 0.949052122986968 1.16302239336871 -0.828538995018354 -1.07753687021239 -1.63436540757351 -0.112452368295191 0.419191590867244 0.93650949958504 0.58013900033612 0.44268550914832 0.111119197184774 0.962749298446224 0.621826742647393 0.561600542112836 -1.25199963751525 0.775287815938301 0.119313635605822 -1.05300293367472 0.73251757220266 -1.13262912199697 -0.817280705762422 1.47621474868494 -1.19803534392608 0.140172759531856 -0.759397016678563 0.114282273832506 0.630702945760991 1.07593933181244 0.958894496291087 -0.405438102998694 1.40057874034471 -0.586024796528855 -0.11854589967984 -0.0574437061246435 -1.47301460989864 -0.0848810213111087 -0.782332508673094 -1.77630423479698 0.962457494172487 -1.12920089373183 1.96731591423342 -0.699035141646181 1.01943768699503 1.01081791038502 -1.96623189810992 -0.23668879406379 0.182492385171463 0.136022388431724 1.21498810089231 -1.11662752652125 -0.131084563624902 1.50813514138239 -0.850618515455366 -1.00021145503472 0.620023126743295 1.45605158477574 -0.414344508358076 -0.920548704339737 1.31011196842655 0.0849304111450795 -0.604420869860516 -0.809439416605609 0.771265358917851 -0.0793953056685316 1.63205984261436 1.34683614069152 -0.0545463676788431 1.21574849249427 -0.791922431072748 -0.358349313687329 1.53214718802385 -1.46985410630351 0.668923119735329 -1.51217809649672 -0.838203220145469 -0.546258509996119 1.09803733251261 0.696238998114308 1.75947779326017 0.615667760996203 -0.495825246882597 -0.345374199203562 -0.765719901897992 1.24172998269672 -0.801563543924739 -0.488714372324825 0.429287361859588 1.11665644979459 -0.376822509567347 1.19238231844116 0.339729692091267 2.48881749161319 -1.06927345192532 -0.531952565386668 0.818947401694 0.885272982638437 0.262156910529484 0.61715708834619 -0.268140476253095 -0.0499128042293383 -0.995744118871003 0.174066301520496 -0.16516846302627 2.56820396002141 0.373087114757805 -1.44092395198273 1.37022720117598 -0.150712826116033 -0.671828212194323 -0.826761971622472 0.160281934673643 -0.296961918624877 -0.560534871240079 -2.43414044686635 -0.270992343635852 0.00866073004782755 0.485969826032447 -0.106778807987599 1.09116830919322 0.933422864334623 0.903652836779897 -0.0288743484985004 -0.892988785184013 2.8154326704876 -1.1581336928208 0.559566098670699 -0.286631953768626 0.0139006651622798 -1.36201081511621 -1.12931263377914 1.45517244041016 1.15038860090297 -0.614190337489825 -0.847485370037424 
1.03750369593119 -0.955500892241896 -0.7589800578919 0.406921160054805 0.767975427088964 -1.93750352167086 -0.0294268948850579 -1.27953798077585 -0.639902198077615 1.62225091465431 1.78026606226249 2.37877557660121 -1.91000715382524 -0.373483574923406 -1.42240134840563 -0.295396529244034 -0.114899369171603 1.57417774237557 -1.47773532562632 0.710542299285197 -0.648043561195344 1.91220666394086 -0.507526529598099 -1.28928309132848 0.0735553694837124 -0.542439535074427 -2.41320004032503 -0.282876593668591 -0.418990429406848 0.980827784186334 -0.125576365709594 0.149113638142089 -0.883059411334483 0.198283638981381 -1.35728914398652 -1.17153852487565 0.897391102592634 1.19041257236112 1.63230562290221 -1.69719887360728 -0.0291773471067775 -0.0689722614425723 -1.03991975618277 0.250369881865832 -1.10792063270175 -0.322404739930461 -1.052299384203 1.18235546623286 -0.138645484532107 0.129158781363745 -2.21340684395449 -0.299571549086763 -1.81506083904492 0.472020569071606 0.0578208785040298 -1.82673951986301 0.949254750063935 0.145314752643466 1.66230934751712 -1.34612096919958 0.660261916952679 -1.26355141263788 0.733494914488376 0.812623707588097 -0.853933533057084 1.0254601075704 1.26234732788511 0.422780629215407 0.201922537601686 0.650719518319629 -0.665207048967185 -0.147344541006205 -1.0913288871772 -0.973165984711445 0.77343738820124 1.60838619613461 -1.63879990749536 0.095565772891056 0.0135534985939957 0.335136727826856 0.251887992208323 0.330694695848117 -1.11815904804272 0.00851845710862135 0.469498224561465 0.0325044247941919 -0.369611675374054 0.719031798753633 0.332821246586699 -1.07432921105198 -0.928349540698306 0.225137415634096 0.300923682814784 1.2204036370445 1.33453130550005 0.292483850424579 0.958065027205255 0.156994618255227 -1.30906914560656 -1.07536493487374 -0.833251149546648 0.115147825462372 0.0872966570043256 0.727947147225389 1.61342823910902 -0.48047832681574 0.705421817143121 0.972134570965759 -1.3390665766263 0.52751445353132 -0.80040261141876 -0.965274242904422 0.72323906789658 -0.207043826650556 0.316877410494174 -1.22323786123274 -0.170534613220684 0.0399747346530881 0.70937630856239 0.465615243224167 -0.721651981101169 -0.812557434820301 -0.187246455355095 -0.148108177454048 -0.571752967495861 0.845214523627268 -0.174030532829399 -0.344957893450374 -1.24403242896481 0.690657883681499 -0.210317143203912 0.642476151996503 0.733683600402815 -0.714258950849762 -1.06886129301179 1.40970785924702 0.0559845003028715 -0.505133675646136 1.25091485640389 -0.28051863904734 -0.00122688181288642 -0.497437810708866 0.443137584565845 -0.531739517639452 0.653881850183772 0.558036076799891 -0.811245045061184 0.469762134683625 -0.534727950575255 0.971567603174089 0.177965061863253 0.163521920663404 0.327166754701231 -0.115039352543763 1.66396367133224 0.715372097477487 -0.783513083515828 -3.17194992551923 -0.0990921663073052 0.6753598622053 0.888450511819852 -0.151406652591133 0.77914171554905 -1.41658339896909 -0.12515623840823 -1.26345093697765 0.491055136216434 1.17520820353497 1.25788956348324 0.372294745847243 -2.44446849543225 -0.0280622291734681 0.0618585520435839 -0.267353008498361 -1.85157263570943 -0.199595351417403 -0.815711340429428 -0.867048523650785 -0.959913527276546 -0.282790500152225 1.72747789027696 -0.453576540060227 -0.506452386912522 -0.166808642198667 -0.169974102481819 1.686263731182 -0.410640854185787 -1.94154094823314 0.0560321724045318 0.36263287419051 0.110260556534004 -0.140511311049645 -0.485115277546148 -0.476728482490809 -1.2094983493541 
-0.274452546676921 1.34993759272074 -1.76186441998406 -0.70309836036058 0.855989442782243 -3.60849414387043 -0.118471389945945 0.340661140726858 1.39505990509367 0.516172961222258 0.273746398602526 0.560112859755045 -0.90122888897897 2.19812647418738 -0.458556506866098 -1.92014477619726 0.450164327888981 0.187764430255568 -2.5405176437796 -0.440806766087006 -0.248850498052152 0.246099816381958 -0.413300647569332 -1.13082041007454 1.18289017750066 -0.832382147262956 -0.573892176541356 0.542148018701259 -1.08798657728235 0.629567931734792 -1.72410172436967 0.544805702285149 2.03630313596545 -0.849356632410571 0.552394835224544 0.319111122611143 -0.19506766219645 -0.146022698079729 -0.484395380111008 1.26868732501976 0.850093432470433 -0.534533821181366 -2.49038612447059 -0.519812272799663 1.38341271725558 0.937560159148685 -1.14564300982072 0.417486604386879 -0.583592433488322 -0.123368665351186 1.08140436193304 0.663226486425432 0.07487618783104 0.900081963339204 -0.818097130183254 0.991821058357758 0.571901673518236 0.759816830478461 0.0423727819442264 0.639831303413856 0.0383828734249001 -0.945123597278391 -0.889513622162889 -1.49510109724888 1.14239290913373 0.154675729673705 -0.812024433597321 0.533863796280966 -0.326584521253774 1.77598956688218 1.19632022327634 -1.47517022231469 -0.439231193344981 -0.65233083632939 -1.18516978808493 -0.30941410888261 -0.504478409788245 -0.719033256479448 1.6108506386419 -2.32174390301326 0.63005642193985 -2.58581302090237 -0.13334317747493 -0.93753894321462 1.30551546176242 0.608526963271984 0.79201007307029 1.64509180828303 0.292909181409146 0.534051253476905 0.5282514421416 -0.516279445518576 -0.687351054826911 -0.509762452311963 1.39638051198845 1.002228711856 0.144961546967402 -0.671919685770313 0.205818066701629 -1.65426263912728 -0.87274837114419 0.256861537074478 -1.40658832292615 1.27200273965711 1.88006449618698 -1.42650679594214 0.271782262379357 0.0984642457301565 0.754431053812766 -0.571128513275831 0.215751225454286 0.0640790991757981 0.0310240307677721 0.297221764157034 1.63718327557601 0.846691353382389 0.108065083449075 1.41528799582057 0.724505406233851 1.35795839944352 1.12571241605563 1.04077528054807 0.632171395147678 0.63720973940818 0.855132553346636 -0.00756854124687301 -0.128574058833579 -1.34043886365762 -1.0133917390138 -0.699102442127636 0.472738803467612 0.270163310052185 0.907220088245251 -0.994104450356581 0.565081955425876 -0.64920848292402 1.38872559974512 -1.87427826241006 -2.38880045263488 -0.0622745002331107 -0.767724235107251 0.270070039516238 2.20280578697507 -1.24325841529301 -0.731229746203014 -0.445273038494024 0.907123724044917 0.778059598356705 1.91762366446928 0.461744193675182 -0.337484276367163 1.01669073266022 2.22797524476672 1.49301743046316 0.261884455989884 -1.87537521993278 -0.77145636843026 -0.837536587111895 0.0717477421641399 -0.241585740860459 -2.13088609356326 0.624787334945924 -0.311041565550938 0.336123316476641 -1.49892498235225 -0.723033579273143 -0.812151534107656 0.531963804082018 0.0461388339020903 0.250863901864168 -0.691183726856588 0.402033975955522 1.90724025267996 1.20948474512633 0.334006412288642 0.934813531003461 -0.385909469055293 -1.38508819302458 0.00121135114011172 0.300307649585454 -0.99409485337144 0.439070903047395 -1.7153293990511 -0.166930999402771 -0.331898522142235 -0.338963459715254 -0.417513797801659 -1.36930801870719 -0.0737994314548596 0.00699547535633241 1.16304726286301 -0.112609025612138 -0.764733445834267 -0.518324537184039 0.844506822089817 -1.22848590268402 
0.0647131388740817 0.543813317453199 0.320178480653621 1.29463492270716 0.635583009580207 1.00232334666487 -1.10889852633287 2.29203040638316 1.1302351787324 -0.92798342520818 1.03392309438831 0.32627272454568 1.45791260461414 -0.189507128033552 2.11106256862274 -0.0453813527355394 0.178571876060849 0.631450849415325 -0.514690584720389 -0.218187672922794 -0.596758920575524 0.490846480236598 0.480811136591624 0.23828387073086 1.17045443259543 -1.02958822950059 -0.955152280136288 -0.501820247209483 1.52883513284236 -0.567163615342806 0.665456798527626 -0.842592158622907 -0.219912481597515 0.12276431638581 0.611794537156656 -1.39363649839824 0.577266343956349 0.550326670161686 1.29497568788859 3.28402486458093 -0.112875766027027 2.46343374501248 0.0219202923463802 0.374562724029227 -0.0120403721585487 0.0758301086288174 -1.75315351507086 1.1555256196025 -0.800785811292921 -1.38104484350107 -0.643636737200541 0.473309212662136 0.15193679643147 -0.291367473106529 -0.593220526907667 -0.428278840111298 0.217161436988056 -1.27599980085775 -0.346462989710141 -0.138397482236123 -0.7440192203628 1.85306948606385 0.456120850943624 0.423470028757963 -0.904341864371881 -0.820532680862475 -0.536267299005882 -1.34313721126157 -0.235607740060303 0.315509337260947 0.441474116508196 1.97021758694706 0.513801650033191 0.357743671933319 0.278875949500787 -1.43813703206874 0.104983386858893 1.19181451485834 -0.781033756758105 1.30878285472591 -0.214669833544845 -1.43873425390387 -0.355521101693221 2.11768061175892 0.730318896536259 0.0525113435714764 -0.532020015289196 -0.267681985893518 1.12866080810699 0.115324735752316 1.50681179991258 -0.874009056852335 1.26724472114973 -1.26419342909048 -1.33965528276584 -1.33592965700705 0.843677733196743 -0.411909924649027 -0.000206694573954837 -1.13317643724888 1.68387960503017 0.470447473802785 -0.546245468437597 -0.943563053862753 -0.0553965422264319 -1.84189562309162 -1.29648857000683 -0.550384609551515 -0.0361264992201122 -2.13228566640484 -0.11934917589542 -0.105049653368918 -0.579522426561759 -0.735287443201416 -1.04589060421712 0.73357159371675 0.918985889821838 0.769097527066173 1.6116612072095 -0.39787747291877 0.425740792179797 -1.01451945575031 -1.71484834551936 -0.229084528447963 0.200293055349274 1.82414098694061 0.157818512734408 1.09824920828442 1.01673796150267 -0.0439942037232318 -0.534262788393147 -0.616336213549733 -0.579752667509171 0.325178573969836 -0.0990615489798119 -0.484764382501908 0.275244943375589 -0.655874719072469 -1.36430417971151 1.32176230526705 -0.499851940541155 1.04041237250391 -0.88338235191622 0.435316829975631 1.43057143172571 -0.201205568556449 0.909725258730998 -1.92007754959724 -0.368385784189106 -1.07766572742248 -0.461702924990524 -0.959185926500786 1.22909554847406 0.686713108481018 -1.36452309654012 0.345248135460626 -0.122721504733627 -0.561762126616389 1.85167656339443 1.82893725501544 -0.590129494426408 0.191574345133366 0.864503425650094 0.260984551704674 -0.88017185781298 -0.45747437293273 0.00317692655552979 0.992258707955779 -1.28411489715244 0.173855539048426 -0.672933624192565 0.151801212151166 -0.529895060696682 0.902312955921887 -0.501663564047972 1.76520896189753 -1.48999838638329 1.69534874392653 0.789201250245167 -2.41571262202218 -0.617005043903441 -0.164019942229836 -1.88682111241577 -0.295327339357297 -0.52260079432109 -1.33506226837791 1.42795430023859 0.73322978601797 0.472046961637535 0.877472007793835 -1.26550244657396 0.823020921727898 0.165069979607652 -0.00516889165458744 0.39925986191818 
1.36730494566659 0.384344403728139 -0.862867116401618 0.266809551425056 -0.558289741009786 0.512446614178215 1.65041102401337 2.3027849232418 -0.544264754927802 0.84037658495392 0.0792395883375505 0.783048154630909 1.69339002865671 0.172094874519819 0.255405998567121 -2.40139307537022 -1.29835053466789 1.92722185092813 1.89994048559467 1.80701760710835 0.293940645088292 -0.165639924784434 0.375359206017834 0.634431652223524 -0.83747034147429 -2.04189262956313 -1.39339328832874 0.582829448720471 0.482303142271676 -0.247164827976385 -0.255523902417932 1.93288280489537 0.510448319800232 0.0418386232880911 1.22419711719576 -0.196334651825039 -0.515257763724576 0.877585735103042 -0.570364151466985 -3.14642388787374 0.676188355966632 -0.726208120692391 -2.05342597155015 -0.570734859997515 -0.0632726149399605 0.687763053548179 0.321532707687207 0.548348673506371 0.29318783842607 -0.278043189132061 -1.56544629090883 1.25068929824944 -0.581931242763443 -0.403503716427653 2.11619109611877 -0.995353731951313 0.130843668717069 -1.50881700752839 0.995234811621525 0.46731954934378 -1.35117337789003 0.414597261965694 -2.20465310414463 -1.20710481923154 1.50173924359562 -0.730312052141655 0.817967993435539 -0.835504981968327 1.98856908202378 0.634606884713242 -0.361194882326625 -0.833633255185312 1.55923164903782 -0.341529478306862 1.47314843151254 -0.175988867614668 0.735991153109155 1.552040342751 0.162325641799443 -1.89442505715833 -0.174557536488217 -0.657832497364725 -0.148759224858622 0.288133791126544 0.0867897440845975 -0.640031550504746 -0.112643197580407 0.379816004718849 0.153059672432139 -2.92431696216302 0.429282349934488 -0.707933037951995 0.61558496837646 0.150423730851819 -0.598655630666356 -0.884435714504128 0.413375367812897 0.369130352569232 1.74066814700323 -0.118310933295426 1.23527085747861 -0.729859040066539 0.924977971704453 -0.589128842034174 0.18252559443117 -3.57226394947384 -0.207372218191104 -0.923796825435917 0.634138906735135 -0.191073022291 -0.797023626297265 0.226592801840874 1.39306072208665 0.40985879881083 -0.859823053054569 -1.34186030713495 -0.911536223035533 -0.49547468895234 0.113408673817109 0.382319650094568 1.29388107727717 0.87986372299676 0.0922221428636907 -0.976725108257486 -0.861745495287988 0.31233617675466 -0.16126244119088 0.463557808634625 0.363834363808035 -0.353653260605209 1.64964320408752 -0.774477375786708 0.198239513126522 -0.00644241037513409 0.451817838605666 0.977577644571577 0.307226647279057 1.43788672135238 -0.0712075478938497 -1.49230231976963 0.152537225085827 0.358403770833812 0.48398275469308 -0.788960755417961 -0.588030945428375 -0.722330074272689 -0.377426300257385 -0.549682437611262 -0.656559625401748 1.08611510410005 0.152446600809374 -0.726027016742733 -0.571012856820558 0.951072664453407 1.01081116807509 -1.14288972479152 0.584305549426307 0.163696080978756 -0.903074702720252 0.507776185186518 0.331331693324505 -0.0990086282145586 -0.712452885313754 1.24874272151336 2.22971483174977 0.813508276277955 -0.33785887792736 0.0217315747061298 -0.786905504457859 -1.34907644666293 0.300468888108411 -1.14187951227394 0.479133454530107 0.27870792632265 0.82889388601784 1.35900736272361 -1.13485674374412 -0.626895746552469 0.271731392182597 0.840343978856863 -3.04762292074116 -1.40446698295123 0.887050224961863 -0.0406914779961064 -0.775047821425013 0.138957773057757 0.795342252131694 -1.1250889479857 -0.146745740416116 -0.140315593732487 0.582981106488221 0.147589039613389 1.86200822519112 1.04976053820341 0.275166419546825 1.11939490178625 
-1.11564323607213 0.0838302551406055 0.190333345049169 0.673620573766554 -0.265536174908585 2.39223593939127 0.565933019587073 1.11909827755873 -0.750823757535588 -1.26704377832947 1.31542178538145 -0.806776881694561 0.617807367510943 1.8524513419249 0.55062212133703 -0.512124897639375 -0.420110849196929 -0.30096228196524 -0.92039588667272 -1.20708440141131 -1.13925553061266 0.0343554154118828 -1.89450413000615 -0.662879819695281 1.08536299988838 0.788283638612564 -2.23088238392957 0.528962242795045 -1.03596856788118 -1.34142574188352 0.0504218019201511 -0.390271337810127 0.34385271551878 0.309999715436728 0.0666249830441521 -0.325013184842399 0.754338712484467 -0.0358497607195433 -0.298692961598845 0.212061632100093 -0.499064044039027 -0.495829135645132 -1.58012543473789 1.30792368499298 -0.250068536100967 0.140127724396872 -0.212273093507039 1.50213260288513 -0.180868725831793 -0.636515875850952 1.32429972350664 1.14078143017882 -0.247446825078462 1.05529201851138 0.0118891828674468 0.178873580198304 1.27951266636935 -0.0727299877813671 0.825957069613162 0.0837978900548146 0.705223021239909 1.31156367284246 0.535313400967436 -1.74383775677155 -1.31694081415286 -0.0232703286748343 -1.58924583214864 -0.902048445060217 -0.894168668624188 -0.709267569018671 -0.494121822885972 -1.00859812880474 0.383429768222104 0.0156564430835184 0.943597378895837 -0.492833640544611 -1.8998885468329 -0.154190232331366 0.10043345759458 -0.620456755669588 0.850442922345715 0.729413503099781 0.300649837564221 -0.977024523200396 -1.19355511904924 -0.635749382400107 -0.0309381740340526 0.12009499520042 0.282579413420776 0.228097778085193 -1.41977964303163 -0.984836104464847 -0.236587139999932 -0.712240741649948 0.484718907172851 0.00871469424836399 0.616605456525407 0.34574865696152 0.103624815637655 0.838167128066462 -0.049329270815801 -2.22945628680659 -1.11942334233287 1.06662793947186 -0.597113832124916 0.668326444491958 0.315100014848628 -0.613341978403498 -0.0964808980206862 -0.409177974525552 0.182927419501093 0.863152144249701 1.05211254846452 0.899723257168779 -0.0871717812868195 -0.410589647301876 0.015561371173198 -0.323592142916448 -0.0348438931708735 -0.437047891284167 0.037537505787059 -0.687185825615976 -0.433394750642196 -1.55263667521781 -1.29747741690378 -0.896367181133055 0.260987640330839 -1.41026187806902 2.46042910238951 0.0850774316105734 -0.758624099705415 0.914032497374789 0.458225272393268 0.585787056582573 0.673549470742315 0.686188441119055 -0.226995941497359 0.427785671209024 -2.04739876097912 1.09681104129263 0.95865320455044 0.011936280782299 -1.16814663776667 -2.16972918444012 -0.28924689940276 0.0687535076579922 0.49979426680268 -0.694776484417421 -1.24458252974694 -1.56682646684482 -0.843505325635001 0.887969861904354 -1.12441643956633 -1.9314535756311 -2.05210209926814 0.504193554007832 0.704625069325941 0.901560149255411 0.156631643642315 -1.69059864718553 -0.170537174557473 0.322948006928216 0.134698655652534 0.793506765064196 1.23047465621529 -0.874108471500253 0.387255539083911 -0.57995711122721 -0.254061015840415 1.21972720376081 -1.67489145769919 -0.85061960559145 0.397939500348431 -0.56941149787233 -2.11366511314649 0.490979783270232 0.372631473874891 0.196709673552401 1.1936760850202 -0.12174848852891 -1.29461549908642 0.756244767560337 0.743333182964509 1.65230567438914 -1.30188257728537 0.494348207301629 0.248401909391531 0.334637875143348 1.44541580998465 0.566820729515072 -0.587369964446591 -0.547668347915637 1.94958062061867 0.769440140366372 -0.966995315662397 
-2.02073407620338 0.238136759804359 -1.24830196754842 -1.57035795122429 0.176034328531392 -0.0432200121809123 -0.231242518817666 -0.103110671518685 -0.452810246336046 0.160699222760169 1.53575300842425 0.382016702104691 -1.25739947319917 -0.920743990351861 -2.74870807474135 0.559321964973459 -1.70559709935 -2.19041710192936 0.0366290112017698 -1.04998347167559 0.473979699117288 0.218495483718198 -0.439406294430703 2.43096043433942 -0.783665774708492 -1.80995025845156 -1.46510955731992 0.666751838777453 -0.632806349360056 1.24043451614867 -0.415295102107628 -0.950812440552662 0.352558599986425 -0.939556067499391 0.259550264997419 1.86917674508491 0.0957288264516357 1.06594225566888 -0.460966224954949 1.85833912767873 1.25055207781623 -0.701300372356429 1.58111885516622 0.33664939233714 0.0253582055093335 -1.92807673863622 -0.421843622173689 -0.0115775385822814 -0.53277254897892 -0.0672041824081782 0.860412760357898 -1.02307353268206 -1.62553721909903 0.0167853885494208 0.229272580765983 0.34700108092369 -1.20469944088882 -0.710160994400236 -0.361562045671189 0.910072819990188 1.18183728069638 0.571333185797264 1.76532787632908 -0.668962742028908 0.320314014915082 0.927762800996352 1.13890153354083 0.211787108914072 0.823659875523702 -0.283362267006488 0.167407371871887 1.28882587366571 -0.730962070096039 -0.889337123659733 -1.26504674760729 -0.614303331226797 -1.35763699037957 0.259086351015804 1.56018383078885 -1.51353744665136 1.00391939536123 -1.43713746646509 -0.952608013597442 -0.498358781799774 -0.160491928050733 -0.876939944810515 0.82266014941896 -1.1537077520349 1.1178650651966 -0.473076756022517 2.01794878284461 -0.0969357121617416 -1.23730443401594 -1.70360149107304 -0.620967808613417 -0.278862733848006 0.990286575358276 -0.423163471284083 0.841594421267289 0.706456468451527 0.0453447625276872 0.928943432439532 -1.10629975622067 0.205824668978941 0.519864867983218 0.0939911290148935 -0.534461736538857 -1.24221457730725 0.988302811134053 1.1241403617453 0.090113613827811 -0.103900660156018 1.3960190970102 -2.6444408823503 -0.0748833632506436 -1.68063129958903 -0.797908498842693 -0.199032260290006 -0.809166540613056 0.102263654965838 -0.316069996522277 0.0864178459163137 0.525191452873576 0.902117818024528 1.96670001939582 -0.889696380351261 0.621846421570834 0.723925882510947 -0.407150720846005 -0.0739741972264425 -0.308287473593788 -0.857545781420649 -1.77864463294331 0.664155620627919 -1.09881492859446 0.486173226520644 -1.13624553355268 0.4154578813606 1.30814238196607 -1.52819196312522 -1.24465994896711 0.169496645097526 -1.30239721750977 -1.17591434263168 -0.896103306609165 -0.957313568051644 -1.33001892952836 0.566216439058043 0.403892563400387 -0.104148234654097 -1.60838828293161 1.34339988688047 0.983603414051633 -0.899418879696619 -0.699443287951364 -0.466377780895049 -0.871796398244515 -0.683443896316159 -1.51479738159279 2.02722472462136 -0.755207147248998 0.456202227393929 0.665731759675573 -0.545896118145785 -0.557964974808115 -0.124729234878778 1.31834448332714 0.840298928810414 0.77514600299058 0.20958229321258 -2.59741339402261 -1.03177486881029 0.0460812254703404 -0.0287225489035297 -0.859116438082864 -0.79673485912147 0.931353627013709 -0.949396033699497 0.423689801565852 -2.1457319080692 0.0074671437512299 0.0122013160823086 -2.93122256803952 -1.69488070837888 -0.41771803530474 1.06769511548455 2.59336595848091 0.0268841812379851 0.380497989497921 0.231361171626434 1.25564099142918 0.667963789629113 1.33530578452905 1.38410535531616 0.676784617695435 
-0.305465169180304 -1.65925559039473 -0.601377572205507 -0.401604572038612 -1.16101665765924 -0.928017895144589 -0.915557698208324 -0.0390786918132826 -0.736767657072544 0.273955388819466 -0.773750772755941 -0.448020973630498 1.03357648089028 -0.822628645165114 1.60161923675393 0.59238418190126 -0.481135855933112 -0.190103833859898 -0.204991291512894 -2.68786511486379 -0.572399223384282 -0.840011120221617 0.949266364409903 -0.895107394947129 0.314490971574133 -0.397714603803357 -0.954123742577606 1.14134818869733 0.271152731565523 -0.748988354986831 0.660037456645926 -1.4635666488725 1.7321222191935 0.31559661314376 -0.640849836686229 0.671060688112108 0.767749713108078 -1.37327360185911 -0.166791232190706 -0.268691654836914 0.27228728219571 0.602430208644558 -1.26487777930878 0.517838086017136 0.854023806479415 1.90455292152512 0.305548795078501 1.64898031602051 1.47904727787428 1.41415181128892 0.176945170604863 -0.476060381613661 0.160304192401172 -1.37017688254463 1.65392992605726 -0.769162147137426 -1.49569656661117 -0.481934786041017 0.631833245905578 0.302351073664366 1.23809634597847 -1.09599524766779 0.394273106773906 0.200466215179627 -2.21618408096384 0.629670709688624 0.476960431796571 -0.138794545053747 -0.49368002175027 -1.74845748916653 -1.4908314755875 0.587174438845683 0.691269801202305 -0.673508722300814 1.02257558729032 0.0548638830485298 0.4327485906753 -1.67569885650523 0.55775685595636 0.0589791446168722 0.162628776469247 0.0584045932453224 0.353714125936173 0.718284701645173 -1.78997759861577 0.30777371270306 0.148973856876669 0.707623613710442 -1.1684600825943 0.0546384828230709 -0.456037384900921 0.958921282631283 0.161929326473221 -0.930392547200287 -0.185870540445343 0.555346886703221 1.19833699710851 -0.662848343438821 -0.311356403311084 -0.239622582774699 0.346005914640636 0.331178435185317 0.112619823503037 0.521982369001779 -1.35344769832504 2.12942325556725 -1.59834028796881 1.96602089905726 -0.753409696547766 0.416072941944258 -0.378795402920176 -0.0309680773384556 0.376375294415139 -2.80162437614513 -0.65619600314023 0.384202040984694 -2.46273161458337 -0.539236259311833 0.303354510451579 -0.931998950983014 -1.0605423629044 -0.799102881170576 0.0342722555763193 -1.28599397912152 0.355675736364059 -1.15586109990502 1.07790038663933 0.630046347359919 0.0463814499828725 -1.00844049692158 0.893183254203266 -1.68028055691004 -1.26335301464675 0.102350004047667 0.204480752469083 0.274098974040353 -0.591698711748217 0.892755293669551 -0.833483901487202 1.74116628269704 0.359690345321061 -0.685772169761136 -1.70934373480736 -0.36546771754617 1.80260723670865 1.03463372544197 -1.40562042940697 -0.872899225000831 0.83484475464045 -1.42834895792205 0.62913775424851 -0.714359192700594 0.301687162139699 0.0623009151593067 -0.523051501073992 0.748645967315892 -1.2385028984126 0.0163877244691227 -0.477187761373466 -1.17157753845521 1.46857130907961 0.220138168983423 0.0981416156639356 -2.12033669884306 -1.93921266295323 -0.119323541662468 -0.480243848870297 -1.10585534271295 -0.403390593832733 -0.534604124531138 -1.1739789751054 1.07926789449888 -0.531773631170508 -1.328637507871 1.36718456537374 -0.958733893609444 0.927873839939591 -0.196493215910731 1.39701527596285 0.831677919911278 0.200604021592524 -0.348482090927122 0.812202373956313 1.02624842486394 -0.517917144597059 -0.672950538697735 -0.0588266327345373 0.572415973253919 -2.14747876857991 0.483193337628593 -1.45738838285639 -0.910229426845162 -0.0898774949827017 -0.0702046662320121 -0.0949415151815137 
-0.531931949262489 1.01272317818328 -0.619932004064727 1.01995643239961 -0.926556254443579 -0.873618397757696 -0.843469010137188 -0.308665979715013 0.28046583516448 1.0774015925934 -1.31710731515335 0.229396968699605 -1.11593220317881 0.833205817671962 -0.296163062332504 1.60196337969512 -1.32624944838507 0.795155134103778 0.378355503103812 -1.23810167178843 0.984198716570902 1.74186552530552 -1.37855020260136 0.0397625261629816 0.386964605532901 -0.325945152884914 -0.447992941141976 0.475911076812387 -0.374643536682118 -0.848613664753909 -0.874355480938669 -0.00334276398431294 -0.812415496485456 0.501466694302591 -0.604596812923245 1.44494101526107 1.09486761681233 -0.518209746236487 0.687918647542457 0.771406693833321 0.499830940911916 0.071256519455697 -1.26455001924018 0.0471067936847202 -0.0306407953618761 -0.39453278019654 1.29622527684496 0.709251805756791 -1.44625043313418 0.36774140559977 -1.41560905912971 1.02130934840393 1.27670205789051 1.03833909063682 -0.688090629636143 0.649771783822511 0.673228470857534 -0.671584447405476 0.313326330770938 0.840426086107981 0.697081105450307 0.258074005609161 0.905077761759635 0.816440954213518 1.61005867568837 1.33514587395213 -0.146277244883752 -0.245292938090967 -0.352839468604277 -1.0687940410991 0.638093900372818 1.49172358173978 -0.340743781627187 1.85417979527861 -0.904259203910972 -0.646216698138509 0.122841128657923 0.229216057769351 1.37555594954548 -1.28293867320837 -0.401326100116583 0.0321245952373377 -0.462143411467714 0.00686087784813755 1.61146303234862 0.315322629636586 0.374058516721923 -0.438160699535804 0.182734309357929 0.999658998813425 1.10244373192099 0.12647706669147 0.552566329583934 -1.39661716351564 -1.58351338546532 -0.0998448527108893 1.60049447325697 -0.192010792032793 -1.49655551935678 -0.248465733210808 0.655836066506005 0.186610547222889 0.516191184539458 0.271040553117698 0.693880303780523 -0.619250552725758 1.49631965319382 -1.83138278733076 -0.496502334913829 -1.33428140845185 1.537212980919 -1.14247942011881 -1.01973978608804 0.7033868646378 -0.646184892303736 0.0517028419210115 -1.17891360814366 0.337668026200617 -0.414732077074999 -0.878879537325268 -0.161227147276914 -0.537978952352497 -0.456406281419114 0.0546862292133818 -0.0286606043872391 1.03472857211572 2.25627375021165 -0.157443602987039 -0.386559601063884 0.222687039816612 1.25447116972956 -0.876611597627913 0.154460603596613 -0.259035440752475 -0.0352506197112402 -0.184539937890477 -0.0307195492318882 -0.190486764263158 -0.542942910805354 1.097966084031 -0.0825677840397933 0.346588920389584 0.0380677987957903 0.776159638528149 0.949619882425685 -1.36072619162906 -0.759593028215251 -0.418505422964702 1.79589020880727 1.14189223181685 -0.60576533774014 -0.426794600052424 -0.115787131395055 -0.750671020855053 0.194627780193644 -1.2963074640735 -0.260990354317869 -2.19533444378544 0.44983558963329 -0.498700198782251 -1.69252908425988 -0.0978779769021284 2.06902658931701 1.22196210376711 0.698806707390191 0.229020275568389 -0.391137509593975 0.484622368777939 0.19303235828073 -0.270256648313982 -0.306858738838858 -0.184471631763595 -0.111043237103483 0.0712803138261464 0.069033104941729 -0.646186785219398 -0.397026680347828 -0.213784861996152 0.146498817103751 0.633874899948541 0.61090602050562 0.0752228075038087 0.754431503774238 0.525750827420398 -0.0863860263363114 0.798450199499518 0.903878844965964 -0.119781822101254 -0.219991532592134 0.919585794895185 0.754833635270645 -0.151943742599781 0.437697959874156 -0.909556314941523 
0.62275017525898 0.271118084537816 -0.963011625362281 0.73154118260578 -0.441791817143739 -0.263834195471115 -1.49523899301833 0.400170841477103 0.600517663012056 -1.02226442790114 -1.29451991194294 -0.831087644781793 0.342473548985028 0.666979606789405 -1.11287053523836 -0.343114799839874 0.846670419596464 -1.52477002169286 -1.26977036959862 -0.854858177519682 -1.1649822639149 -0.85339536674172 -0.515034721067575 0.014641942413204 -1.01547985294488 -0.015719180352601 -0.13329688877327 -2.36016130207052 -0.047973751658871 -1.20524331348529 0.105950415129029 -1.73966004210813 0.419199149301665 3.01547507334776 -1.81194125611391 1.22721541474992 0.818676523965525 0.218669234879436 0.203144714487787 1.3773407578409 0.334790250577886 -1.3693747570933 1.24401881897786 -2.14424353835503 0.168975424622086 0.96769589429201 0.158361873740408 -1.29924750946261 0.753245602586393 -0.701948436708245 0.891241546238679 -0.566752769437973 0.582477041155945 0.0398590138660649 0.609776253856289 1.17507437075026 0.759921279228494 1.28344337358784 1.36899519300545 -1.14911407694881 -1.86033077447983 -0.0280812913289978 0.344701653516693 -0.763880674664662 0.35803351823574 -0.344215806697265 -2.16830727778133 -0.055700546376217 -0.382171651200261 0.812943099212833 -1.10843658695404 -0.229748008546884 -0.973203524554273 0.802405459092261 0.0237383259953921 -1.1285928179298 -0.113532065952552 1.04697904724663 -0.301118030037466 -0.508040082613724 0.705858384948704 -0.542826034880792 -1.07467908269081 -2.07703089361876 -0.488920116144998 -0.0572957155283995 1.51299712631693 -0.681892699708449 0.70829241901191 0.969762804157673 -1.80596833410821 -0.956879847383055 0.535412481331494 0.00146422181726288 -0.768745876149014 -0.492639851816736 -0.118266395450466 -0.956539252583613 -1.38102082283662 0.936521070715863 0.302473721575712 0.300991940159431 0.78339753810048 -1.60701170876208 -0.0808115151298767 0.363482252545289 1.12445698794434 0.805201640345494 1.28508221098627 -1.0969042256566 -0.00945546482665793 -1.34256590632798 -1.01956194895116 0.487919248394673 1.48579910100967 0.680274926957445 -0.590223556011866 1.48617042978391 -0.509833462448595 0.235043397014514 0.425321118046265 -0.408713718254528 0.34412493731984 1.95187771603679 0.0538268727940987 -1.36357107076147 0.29257989855801 -0.404897062548899 -0.245635875558434 -0.227195448696963 0.185850918519831 -0.89844600060159 -0.0942910681635744 -0.733992088184412 -0.404441013524356 -0.570221832299294 1.68349278520495 -1.27293466356443 1.00602982486073 -1.84176068768124 -0.329231125167677 0.866315112952886 -0.0429795802180816 0.193159678673313 1.29370279592315 1.14588608843715 0.414661571750457 -1.08238134201057 -0.00510822550936395 0.0525908365976578 -0.663337194423276 0.260457634419389 -1.36494347786096 -1.07908463640492 0.0150618085017747 1.02218961473433 -0.125713729371727 -1.34148639432856 -0.588698690880871 -0.0212092972302265 0.523047830550293 -1.71635539813448 -1.16946657898034 -1.21115200389433 1.01826023474973 1.43701272760741 -1.43612163357814 -0.938158957858989 0.227991559455109 -0.824946402737414 -0.591588810014524 -1.07058988268223 0.340458589467576 1.04390961562972 1.32718884913627 0.67243638277859 -0.127958773781748 0.264971449818468 0.20368077364809 1.27511836397907 0.473322143082079 1.51771185985427 0.756173526144792 -0.331876082748698 0.73538350929317 -0.351241446277963 -0.978735484779297 -1.73423832415133 1.30558267116535 -0.111331755974486 -1.87379127918285 -1.10117117367878 -1.8416615318718 0.741552841285384 -0.7538098090248 
0.748221791239653 -2.41811956221108 0.180680633757187 -0.496134036740987 0.56954356995789 -0.843104447386121 1.18135675987223 -1.2185874609297 1.66906260767279 -0.166371256350343 -0.716115301726982 0.511430380969399 -1.20160920966468 1.14999778932194 0.595412789902451 0.548115349933673 -0.588762238575276 1.71154513005813 -0.220517501299949 0.646473054022367 1.7228761041688 0.872331926198592 -0.754643682041172 0.310443295367279 -0.0934201730403879 -1.11275922687313 0.114513766814518 0.0518015714979205 -1.34920344152006 -0.886137087702024 -0.0798805139328585 0.628976954402294 1.25016574662839 0.491710364971214 1.17031886232086 0.89328948311733 2.00361526330016 0.213176091034556 -0.0866871525348129 3.43498269080474 0.523927161522343 0.319643949619999 0.0629345988836868 -0.868546332199505 0.749367011324662 1.36639763764299 2.11448063657546 -1.18098810960722 0.173953757213452 0.814118048078425 -2.03116799227735 -0.42248231316775 -0.894200576562481 0.476965439898133 -1.99488166885382 -0.332439616999108 -0.770045879891371 1.16834116056081 1.41735523215098 1.34791274538182 0.335147022496459 0.575665194598652 -0.225100140250186 -1.02242693382982 0.754077839947584 -2.66237010126708 -1.99106326211619 -0.588826394882368 0.193617189422116 -1.02487908071992 0.00466117051292695 0.49648547314093 0.596873670029405 0.864597260075862 -0.299469279698144 0.000617334249447421 0.132322019702073 0.214425972905629 -1.83699492265879 -0.024659790293112 -0.355194466247361 -2.03405143682684 1.13685900225515 -0.737827559645479 -0.41126876592358 0.782214731626828 -0.351291729994155 -0.322491956696544 1.80504017833305 0.479240129549324 -1.56493649214917 -0.236067224567193 -0.145115148222524 -1.25005436904285 0.661543936177309 0.377910909864976 0.591162323905745 0.715083372046493 -0.0179450203373172 0.056860929585664 -1.71918305759116 0.852727705273625 -0.798280411023442 0.258177865444641 2.30389941085541 -0.21757773182363 -1.60196229579241 -0.21587870060705 0.637622386977874 -0.832165535344982 1.0118538298696 -0.991912482137275 -0.773113701204675 0.716198129493362 -0.81638293176527 -0.552125401307718 0.394402788253815 0.778508454320871 0.523091972764094 -0.731679919992709 0.192349199425795 0.189613727656121 1.25514078493246 -0.134655837528613 0.18083848742221 -1.45654401411151 1.77750722760883 -0.304945952050366 -0.303600984404584 1.94755216047914 -0.226138203976102 1.16422773498688 0.709495314511717 0.111465598458508 0.90954954134642 0.17951175611952 1.43132995171431 0.142923226431373 0.0839348640496984 -1.07649047533341 0.536598428844172 -0.258083582242791 0.339391744106919 1.169345285502 0.84633663407181 -0.64311667863638 1.50349631459485 0.738458177993977 1.93592917147893 -0.501517108406376 -0.0972924585771926 0.504423844558716 0.791901349680134 1.03145494550711 0.390708688924612 0.712645980527464 -0.522802731267028 -0.0906587713287921 -0.667538130664226 -0.582405319007352 0.584881046103428 -0.85264068142372 1.38291282369089 -0.756109113034768 -0.537703593878252 0.0788201543771675 -1.2481488906956 -1.29105638392069 -0.149029299701626 -0.76926370237739 -1.17604582238906 -0.0473520221867161 -1.19287448811269 -0.00869925060030655 -0.934525947888595 0.665465338113558 -0.766108468608549 -2.7004832913489 -2.39823709956077 0.555110739168469 1.03046918810912 0.452993097145835 -0.758649392826739 -1.03185509750288 -1.23216174999279 1.94905008430867 0.286743971581369 1.34503327075207 -2.30492305806731 1.60166637611316 0.0653953971131591 0.379683359970061 0.231914837900383 -0.110793624644176 -0.687869891391577 0.775452774979291 
0.684492382963594 -0.82001336710439 -0.930333490191486 -0.822668632086463 0.277961875535394 0.24396463594663 0.412417483249606 -0.560999878422445 0.411445622556794 -2.00287495681599 1.10942034396671 1.51504443918345 0.610127681968952 1.17326718968237 1.06055987309664 -0.479933868491819 -0.0921754422031442 0.202068503165094 -0.136127341317702 -0.791166921423129 0.234318355525691 -1.34065512654528 -1.51083512685395 1.60236154128956 -0.820700071282633 -0.417547104547898 0.0360487202189537 -0.534782807310439 1.80267506455308 1.42237344491279 0.867792576022782 0.0174693416519392 0.39087694084746 -1.15765175711267 1.85936422629661 0.163421788636368 -1.82544606970335 -0.880801818823394 0.571395610509115 -1.49721686223883 1.38781745214094 -0.0172809529359821 -0.64709129863154 0.154774463851514 -2.25274683703329 -1.14691203878031 0.438703083657374 -1.13031491180536 1.1247403931708 -2.07062801415678 -0.62196159350287 -1.74460663778642 -0.165984145581933 -0.937598691681324 -1.91812741493118 -0.740141401602699 0.734476705235371 0.865179458119276 -0.0449954057192177 -0.129269344673738 -0.180693546372263 -0.135587313711395 -1.14248739998832 -0.0106756123825917 -0.0963302427994979 0.0608778087539228 -0.402960366244288 -0.254922641896379 -0.46758286676813 1.77777627051526 -0.565359354039681 -0.375787879976937 0.202039336073145 0.478857415899995 0.07241946545173 0.209865322628364 -0.669485126089484 0.818312158522764 0.170708704494128 -0.495393599781484 1.20069499286522 0.26264954008921 0.189666798387116 0.348707188705322 1.72413767177599 0.228247642763471 -0.482162601133374 0.570934952966859 -1.2041282061967 -0.72674501555122 -0.893021011255649 -0.259098026980557 -0.31134564276695 0.979719164902613 0.0528775223036379 -0.593285047685136 0.123213832354376 1.00580668620567 2.10489299252358 0.851649953383079 -0.239565062628683 -1.18595559084518 -1.41172098075035 0.0716603429926489 -2.62899918760252 0.830284190997249 -0.79018488093806 0.370290762609154 0.914764848213776 -0.255857160769521 -0.556341420143332 -1.11185995442934 -0.521558704262844 -0.784624132163584 -0.403662759562948 0.219660770050305 0.424158248035184 -0.537681378953854 -1.22126985571583 0.500738940416827 -0.0700149862787212 0.597106388136574 -0.995585607150378 0.448813205848226 -0.481474699273457 0.440483002344633 0.629081806905909 0.364081872498657 -0.595360801707501 0.146191604375095 -0.0231377546793305 0.115251907708472 -0.0412863887131626 2.19074529398041 2.86024603766002 -0.22518347882246 0.84037903349145 -1.07105705254685 -0.145059861879199 -0.00628839925653846 -1.4432062601431 -0.0342044067111823 -0.188524817015531 0.422136173035388 -0.578529658330841 -0.639493746847496 0.429547211851807 -1.60774408957633 0.556551391615459 -1.2481685115401 -1.01337279361198 0.470856306257593 -1.69973242562099 1.2626740314251 -0.769925468305284 0.416591753027046 0.99851374008211 -0.151999041703136 1.7580278474692 -0.250661591147263 -0.331670259773158 0.182446753095673 1.56503798893072 0.35262080190035 -0.301787024991532 -1.33532195216085 -0.0493882562815689 1.64982115877259 -0.710497478065919 1.33690453841363 -0.0813137101426762 -1.61655330688455 0.262197247056908 1.38995192083266 0.736594361517073 0.987150076391294 1.03202493602998 0.812938459541518 -0.469534120555618 1.16860612773437 0.984783605955873 1.7798513653154 1.42797793586493 0.107567103320739 0.370532692625583 1.48568758799301 1.46716526458957 0.750238366540506 -0.825081282268988 1.06171778688741 1.03703636394351 0.902628519645254 -0.380075651374964 -0.252434760833541 -0.943175460708771 
-0.431060588730806 0.80612541076859 0.236544937911869 0.12458933583541 -2.4534594948051 -1.48698537199419 -0.140928611729749 -0.645464829413122 -1.77193760400654 0.382003368652727 0.351440115393212 2.00367212885929 -1.34443314558747 -0.49071341421314 -1.01655131134881 -0.116589995449917 -0.0202605224122543 0.126705331203893 -1.29876390298057 1.43901832937522 -0.110856066404622 0.823915422092099 -1.01902741085063 -0.160679639599816 -1.11008133687342 2.99297860683278 -0.0588698016799305 -0.832184960455889 -0.296384916725575 0.801062720551661 -1.44363362392572 -0.478206549869718 -0.291777131415334 -0.215420644855226 -0.598673500359856 -0.656675727682718 -2.03158194195931 1.3457880394693 -1.0795046249445 0.0308449496673655 -0.696231233513476 0.30725774796052 -0.45907349765509 -0.627928635302002 1.68109569939718 0.861451321020083 -0.123404155188038 -0.371296220317146 1.1295187189634 1.42649508605668 -1.48697566638162 -0.759595614709104 -0.429548544359833 0.146290536291852 0.963938552579197 1.07737033517124 2.05188796373155 -0.250531190922992 -0.0386402614629527 -0.36971796910216 0.875639986320499 -1.28334370515521 -0.0809699302696715 -0.32487295086279 1.2493992428824 -0.182456102983662 -0.718470714854989 -0.106067606148618 -0.0992227084518148 -0.759581916412414 -1.22605463070723 -0.0396097048397725 0.40942048701314 -0.789306720657814 0.403688698053036 -0.743117870607754 -0.773495503561071 -1.33091447700863 -1.68652402945598 -0.972929791020229 -1.81325678854221 -0.820863399712262 -0.48436559882962 2.18622193672009 0.376161673047942 -2.09486262052172 -0.156132436608405 -1.71866729451001 -0.870655704313858 -0.771486117858205 -0.704761969316723 -0.0535110771715369 0.961160313292503 -1.61367155398154 0.686434780361957 -1.16683422097568 1.67140541431998 2.25582338956993 0.12874436433892 0.242934512471144 0.88216298620952 0.342168814369135 -2.30109941205853 1.22611164283197 0.342229293424449 -0.361938739398383 0.897518333513383 -0.210748247836383 0.371136516967616 -2.17586183278946 0.555548194247625 -1.04431140736627 0.393406784258389 -0.962986668116165 1.00943830763827 2.20165993653386 -0.1674809579511 -1.05834625482758 1.1270948039685 -0.290302597536519 0.0425440010082632 0.69844714583424 1.88580851489457 0.150081602154306 -1.00840220926926 0.0372721739980501 -0.490216971101606 -0.97012467395793 0.235670766924593 -0.2306690555127 -1.02970703798806 -0.441914580615758 -1.61996214073751 -0.261282812809135 0.067416822567321 0.8635179218597 0.102113617294483 -0.420742366608756 -0.0719567088446299 -1.29743211212435 -0.318422288152349 -0.307855399831922 1.34473190569188 -2.89217850741819 -0.46407824374859 0.0360857980934581 0.21615277951135 -0.468364229527013 0.548250816150301 1.0438456001897 -0.346439567006843 0.750338949665176 -0.40184118575084 1.07687578445355 -0.447978443880255 0.832101480058019 0.933985970373706 -0.256956509871445 -1.16175077047755 1.10176086624337 1.49461113310622 -1.26548070847356 2.17702874474757 -2.27574754925364 -0.065338527047491 0.471422629086625 -0.621473410979056 -1.48717301282076 0.605532855758061 1.57627820076822 1.46262200672268 1.29661955135011 1.22229182817171 0.75663973213466 -0.678637710109585 2.57882444966684 0.255403509879281 -0.000132465198989654 1.68459622046669 1.21373032261535 -0.0660558733682857 -1.13317303993582 -0.224650683544592 -0.515241718298969 -0.580351283792805 0.304671528574099 -1.13866561739651 -1.73683978627426 -1.49976069475513 1.32829501392498 -0.419839713965889 2.18981967949742 0.346347721653246 0.638491140049941 2.30101469428892 -1.52574076083903 
-0.344787848564663 0.00689112388134058 0.238223436313484 -0.0578963003270537 -0.401800367897681 -1.59175653490392 0.87118596747448 0.44011920425863 -1.48108669837408 -0.442716565135188 -0.341714115851299 0.906060820473228 0.540814016149442 0.986197793705559 -0.800303753358966 -1.28618771881231 0.83152377432021 0.0356629243723075 -0.148232787354284 0.219316270185573 1.14823587606071 -1.15346720837672 -0.880210077978141 0.2050663527814 0.0962543418166912 0.173513391872907 1.15923067157189 0.178092265242544 0.788840465745713 -0.892337752565096 1.23415347290948 0.0858157791203835 0.931234010088605 0.166589651137956 -0.333707117323564 -0.522312225158472 0.457105295542314 -0.0386445739967236 -0.77715206579468 -0.948495963672669 -0.896997431182786 -0.402883650181762 -1.31697515921046 -0.907167450218754 0.944389008909978 -0.181163676546473 -1.21534087869098 1.85550044904448 -1.77872927609421 0.526503367429042 0.398747417530334 -2.1214937776354 -1.16371181174336 -0.345219430542608 0.339633555187756 0.88160996635285 -1.19384062181957 -0.502341292147252 -0.874980901448902 -0.491398964750302 1.01091989370603 0.254614071641846 0.514934372590366 0.305863035763144 1.33811957595358 -0.543699986972388 0.943828513354875 1.12966920834117 0.110491124850352 -2.37406031280651 -1.85147455598366 -0.573253566900617 -0.55008171898254 -0.0517943075205226 -1.3423683929975 -1.07081864218927 0.989460512044846 1.14552717703709 0.67111396431559 1.77066829652133 -0.0394337378596216 0.628721580196268 1.02145162561459 -1.53189947302505 0.398933189250683 -0.579641103559734 0.768568049922278 -0.0370300367015848 -0.439729653897271 2.56446322003109 0.528443379468662 1.21151655864216 -0.940432813964309 -0.162947940101101 0.382888821976288 -0.791739136176106 1.39229511441435 0.77475650725973 0.31365644759944 -0.365057720679972 -0.212224006165248 0.2117737005704 1.81496703155731 0.488128461776252 -0.884220252684031 -0.22579181694392 0.0107980543797992 0.992856655188675 -0.936316514016049 -0.405307937266788 -0.387411762752582 1.26389385149848 0.364520010669767 -0.97921835521704 -1.04909490959868 -1.24612520515422 0.680210691151802 -0.670449128211874 -0.319844794598341 -0.252739155116599 1.37713576999167 -0.119026416519924 1.05934282654959 -0.116820689137268 0.928430394883889 -0.975023433496462 0.609569040879611 -0.807808853395322 -0.952621758799204 1.84693837726709 -0.0679264484962744 -0.718234789153001 1.49840463898738 -0.434658406956111 0.0667056193208705 -0.947808544911014 -1.6096287508205 -1.21171058116154 -0.869975214200431 1.68237269955269 -1.25828381149734 -0.343720578362198 0.521694881876241 -0.411965154137033 0.00488428742720183 0.914410303453646 1.16537358510105 -1.56203505262012 0.4330563236677 0.40859306210024 -0.627633733248294 0.11326515009712 0.610676604031848 -0.857020843580027 0.232844250197538 -0.377442970099452 0.298887924917386 1.56380322725243 1.49464350975016 0.784440775555646 0.0713692970607294 1.42971835900278 0.627826211248218 -0.516933373674698 0.571100296692623 0.826524440188451 1.29721910747418 0.910325768534967 -0.528253759131301 -0.139641559247241 -1.02381999355582 -0.0692476542226927 -0.474837534060361 1.63160907922075 0.176881723440991 1.34600364498518 -0.865051525627325 0.783873467695583 0.511542534278065 0.499641398391678 -0.673752786288019 0.335505918867755 1.15738117856777 0.980753115617902 0.364252534239679 1.33532848773117 1.3152523043265 -1.12243959043638 0.67332330291759 -0.444713965627083 -0.659268347788192 1.81336005525774 -0.401136158368958 -1.17594005019048 0.714476596561446 
-0.742614393899805 0.30291671206793 0.429237533329881 -0.586381515489838 0.704012811378354 -0.908093152471665 -0.434749143893917 -0.653171234738767 0.0846737868794901 0.569747315101298 -0.981124124065336 0.173747185409748 -1.21713343104688 -1.20822185672769 0.507549260781435 2.60558760435574 -0.470002161741543 0.653223574424228 0.849207998319089 0.908434199558987 0.0290181807227394 1.25796441898702 1.24935489164938 0.72975037203204 0.602642206429522 -0.0316367783463063 1.04290482533747 -0.333642402460327 -2.08518422842097 -0.839834584561613 -0.0639819627547058 -0.833468460092843 0.0919335407870971 0.185502346068012 -1.62245829453317 -0.65444775195492 -1.58494038800115 0.0371261628729287 2.03317418946069 1.29119178014884 -1.09759985618712 -0.688145697214626 -0.847639917795698 -0.854986398346138 0.354315203507393 0.526736817523456 0.916914918835006 0.859632133720443 1.63376394505469 -0.960481162496874 1.04317916205815 -0.559718548424746 -0.24386106651737 1.06831147375448 -0.35469081996684 -0.52994431331725 0.691295092610971 -1.29800921457614 -0.767617337707225 0.198019043688973 -0.0875477961032994 -0.42581504959757 0.668171122147046 2.11420385050998 -1.46423820699225 0.0398139468162512 -0.332766892544513 0.950204535159405 -0.299810733074574 -1.24192217369527 0.405360931993654 1.22368247856691 -0.0817583535452337 0.0130029999856791 -0.095206507628029 -1.23833793455122 0.655702126462303 -2.0755437813162 0.626773433178063 -0.357619756282317 -1.19457832050981 -1.68524485282151 0.682691907252386 1.14760201025032 -0.308062980850811 -0.25268923762367 0.501910310723064 2.18853368253956 0.386737155619659 0.26412637358452 2.23504930605247 -0.436388203989114 -0.200431527855394 -1.53643639005562 0.369179390121845 -0.167665746728306 0.0466962994614553 0.542599007251414 0.513354238595885 0.822145539028912 -0.281559472105388 1.96891510153359 0.203279130587799 0.148107036060622 0.682674593635601 -0.131919352579093 0.00184243913530916 -0.294586700505302 -0.0269460238616774 0.217124079249248 -0.00923827367091901 -1.09851087030603 -0.172399409451789 0.10695768933443 -0.52565462991822 -0.328307215656201 -0.657471338799436 -0.00206804153683413 -1.10094143245126 2.38865582426007 -2.21710714017583 1.03547315088277 0.142272558198286 0.222159517549726 -0.309547597172235 -0.648869606045788 -0.227730295933063 1.35464103079411 1.63831584361708 1.83102668304475 0.921349061884728 -1.06514268824316 1.10001771292631 -0.607182467702266 0.576379323093204 1.92190851964911 0.748173124707126 0.33890215048365 -0.0590921664353387 -0.98527092255738 -2.52777441796406 1.53039987771557 -2.00162308337921 2.24212392489803 0.82069379421832 1.01284607800743 0.86244996274128 0.955833802129695 -0.564957811591916 -2.54673581760326 -1.16827281516648 -0.410921739952984 0.216165023187014 -0.199683459778405 0.995467359899756 -0.40710211286522 -0.0865914400052819 -0.642100850421122 -0.257777651878459 -0.253274091969378 -0.756100220039478 1.45888694334556 0.146233692883115 0.0210672701662957 -0.656715441288628 0.702814582211407 -0.396722645338847 -0.0866498173855444 -0.933206355023191 -1.12441142364579 0.816196930088608 -0.512152328101113 1.37225975747577 0.883677023986234 0.0362085825107632 -1.78919851912921 0.916414607038256 -1.73613323994681 0.840951821686363 -0.233103725955414 1.16986332343175 -0.201184981609894 0.681454206307549 -0.574363020073788 -0.565166537610302 0.0721875214679257 0.321505218831447 -0.294973966066026 0.541715373079405 -0.347665360492003 -0.082259707838504 1.66734153281466 0.515846783955644 0.631117188333124 
0.458095692937228 -0.57049508915847 1.0479740722056 -0.76142824349458 -0.670434408638812 1.22480770098337 -0.638098348661982 0.287547834185286 1.15145978527909 0.26982701801281 -0.449368581664478 -0.846686119214923 1.41516253106812 -0.534161553748926 0.413115595505915 0.556711812210935 -0.218266222657739 1.56865165773038 -1.55754487161031 1.54127497287866 -0.835201218322152 0.733261284201203 1.54203650335945 -1.91206032186795 0.485013242486201 -0.472740190770049 -0.712578292560477 0.271124220782475 -0.564392525116512 -0.157656736319248 2.16189358711572 0.871978702651509 0.76479641169582 -0.708753471159535 1.51469816215363 -0.442838804609494 -0.935384330240676 0.914097322985048 -0.575350511462101 -0.240039828670309 -0.371299681484638 -0.795656711067688 -2.34344230851379 0.57756728727821 -0.855903516335911 0.353860577939248 -0.702301725721938 0.0676065243241213 -0.114027413017685 -0.584334655525029 1.2238693045886 -0.664282557553314 2.56800258145159 0.448687424650362 -0.823376009226942 1.03756005233291 0.518060345170846 -2.1198063790098 0.124123890656807 1.52101051045324 1.14333015217503 1.48750047485285 0.716831258548963 1.10019723100422 -0.35638896504498 0.573026317876411 -1.05290491471598 0.55381286046508 1.09614228003463 -0.270720843450755 1.0901732271542 -0.516143744595755 -0.734855090537652 0.550837041362013 1.4731194198017 -2.0773809079384 0.571209386113947 -0.716381944786543 -0.849329151007542 1.72406865810258 1.05525230063183 2.89406271836571 -1.04384863947938 1.00659193840261 0.579434906446999 1.11356284143497 0.257726106584607 1.10816070836529 0.926990944831456 0.279718741199342 0.476561874043513 1.6450606059836 -0.0292244533919338 1.53019736723923 0.065012300865971 -0.48041010134121 -0.336362724368351 0.96045414166075 0.899730595781847 0.628621655429957 -0.635155246216865 0.436737826177283 -2.03851978843662 -0.796691127389461 -0.240980396300584 0.189192303184482 -0.342917403556883 -1.84266138259406 0.0603517686712998 1.06425992519353 1.06386154308209 2.07733107404459 -0.00720171803477467 0.29118105091179 0.0925954681182777 -0.22109521798015 1.36338114631389 -0.467970506094867 1.04150539176858 -0.47778557564093 0.196021213751726 -0.524158468317511 0.492973544462467 1.50181179541305 -0.419902359549218 0.164686297866783 0.948883171818354 0.494679911340249 0.975252030018136 1.2375729999823 0.0945291432499092 0.263733829889386 1.4700510411714 -0.526717727611693 -0.0494888951095234 0.276209305896088 -0.249410643317513 -0.0443513588064464 -0.776525405310867 0.382383571424605 0.446744778806444 -0.22277268440701 0.899118267366066 1.51980690526097 -0.0417037348621796 0.528461919743009 -1.07487282769026 -1.02878556877874 -1.25316094060638 0.0364871415796308 -1.29198556135174 0.280269580442165 -0.932959562132428 1.66501325442846 -0.611552795320316 0.877182627459838 0.0824979373989372 0.211538339500031 0.135840545408625 -2.10156780907952 1.83115850375593 1.34933377049618 -0.239312342682424 0.582850710315004 -0.642224231480598 0.91209926996414 -0.282602642674029 1.3059872428339 0.763937680994021 0.128991735337776 -1.63651316346495 -1.23577530879212 -0.457455634034839 -0.931257918369294 0.884005375881233 -0.465194557293719 -1.75241512240087 -0.084155802085979 -1.05870273895894 0.197982775058401 0.837991920925561 -0.0702984363072338 -0.425406419372169 0.868722793502189 -0.058845912050455 -0.187693625299886 -0.0915658803559103 0.453370423473812 -0.28670195118675 -0.125786857883193 -0.822784632005262 -0.35309937561404 -0.151921290648034 -0.196453080781659 0.988240195863558 -0.780874064130498 
0.131791816141727 -0.223059825536576 -1.12984874000778 -0.589723330920194 -0.318525974190596 0.80012843362935 0.952888517643319 0.926502920816676 -0.684744544800252 0.319974655319292 -1.60160212328911 -0.40377782737642 -0.277736705306579 -1.60019884178907 -0.00675516787492021 2.87395403006679 0.706778672662282 -0.81622240074756 -2.06516998293344 0.220289326713742 0.391177893382166 0.355540454680961 -0.410728786772474 1.21711190295274 1.90032993179716 2.0046897042145 0.167834019804946 -2.04179184428473 0.3935937995556 1.44965886955442 0.970022676404806 0.577888917852618 -0.908569759457535 -0.317572145969755 1.3180878803855 -1.81117721137224 -1.25715899974499 -0.63574400590866 -0.265272408892859 1.01715214639092 -0.516001265326358 -0.390979562734766 -0.28900588772051 0.300754602338655 0.460304726650753 1.36677870431997 0.312192217686138 -0.51211770986485 -1.46218495650175 -1.27038715938206 -0.83769426185661 -0.970038471291885 -0.546401740529617 -1.25502321297796 -0.0137520629079386 1.00795916779314 0.594965063549804 0.231081065533227 0.816934794647009 -0.941610316444897 -1.17854038014605 -0.290624604335008 0.357318081416922 -0.881112239048307 -0.625451951121001 1.45972694060522 2.7154510720647 0.654056127508484 0.839727227421772 -0.823400022958067 -2.7342657859799 -2.19550621602518 -0.227691713106882 -0.547279063945858 0.232052550642366 0.722519929798971 -0.332147382954192 1.35259080534326 0.785143982859432 1.35470195129312 0.775149456572774 0.361283859934649 -1.06781850658341 -0.0323767169887386 0.899657706073029 -1.31691602067816 0.796571004302485 2.46795670682119 1.15755692925857 0.777713416737713 0.492648039812274 -0.533837239148275 -0.724276543088514 -0.231347307829337 -2.73816475111189 0.19032616020124 -0.344755761552848 -0.462844455793042 0.284995754909706 -0.609114080108698 0.611719730162425 0.0658651238981605 0.508778394319195 1.71326379974389 -0.975153016124479 0.691767573646118 -1.68875589492545 -0.0624531234479089 -0.531136561118524 -1.4965538815572 0.25285425477772 0.758267918957488 1.53546463086861 -0.575279852346045 0.897588171013088 -0.482692785879724 -0.0991045440271982 1.06337291744746 -1.18569497805217 -1.57533707103849 0.758811794673787 -0.893783245513015 -0.147748451296382 -1.37570620379823 -0.45893831936321 -1.51156041070117 -0.404567353116677 1.45350127584919 0.00462356097003827 0.273131841596875 0.974169098959776 0.134861540499628 -1.9391214423795 0.880915464947027 0.310083447452272 -0.410772153017835 0.274975609632133 -1.2861291264672 0.990735114734517 0.791608351566091 1.60344157594805 0.808775785630004 0.625673552814268 -1.02515389427827 1.61359214671585 0.991142524954401 1.8845027323643 0.0650460193312748 0.160321768632952 -1.53914849152439 0.0121026978794591 -0.201610669954064 -1.48984417184519 -0.603901719031291 -0.556961558669305 -0.410654591092058 -1.76683182184692 -0.204791915246247 0.79520860070465 0.745056386925273 -0.699106260818179 0.958165390181786 2.67789465251605 -0.305072103460919 -0.589923864049347 0.254915621529697 -0.0415779341606365 1.55435397133485 0.244011159029143 -0.465562367036882 -0.52837128434259 -0.495720604685031 1.47515851897488 0.679041948642956 2.24817575631605 -0.0281996378627599 0.0761392929008162 1.53083092563097 -0.447734560417328 -0.40869637738312 0.877976216375942 -0.555874447580762 -0.209851879074764 -1.34913506805225 -0.186962548122296 0.209855667711914 -0.665867513775665 0.36025488831952 -0.720778925064245 2.30425872877633 -1.90860043128539 -1.27084332332676 -1.52639235128269 -1.20899361101364 0.362540475258514 
1.40143516879255 -0.123627763620654 -0.864346712690631 -0.514121802610704 0.0482800669784834 0.238912525519036 0.404037437777874 -0.362931781969507 -0.816801224819697 -0.638735851119245 0.629046123729127 -0.497723127322151 0.708205341611706 -0.95809741171614 1.29082983436639 2.36236543083418 -0.319233039854571 -0.809859323345497 0.489297424924213 -2.06536156690498 0.861451987838872 0.293113780266662 -1.08244008306655 0.828420312459143 0.741495942540149 0.590303576012731 0.333810892492534 -1.12164848946611 -0.00387900028206997 0.349157085732737 -1.48534130475504 -0.24749699171198 -1.33329698856629 -0.616288025040708 -0.609039176292098 -0.304823723559552 0.468200489021911 -0.57409182799523 -0.0211008871573175 -1.88725224520571 -0.895496169934278 -0.132086958955634 -0.356437541092476 -2.6886497714572 -0.970253857634358 -0.231592099343786 0.589903061639824 -1.69870149387794 0.284552464139889 -0.420564125976766 0.0780960268393513 0.116699021311058 -0.0747156319521243 0.354677558625757 -0.636218633150462 0.303860715169021 0.905731379412357 1.41665015781546 0.155993603260365 -1.17661225255574 0.0748262807653498 1.22099236654765 -0.597462713485036 1.53064871960721 0.794345893605611 -0.0387271508174214 0.72943532438617 -0.0856786017735447 1.27061220173337 1.20420545854658 -0.775525157576716 -1.30515180378664 1.59194213827391 0.632032198481904 -0.592816264363023 -2.26795118785009 -2.23699180123799 0.793200315923068 0.0779203280287803 0.144746763109618 1.2726170202442 -1.65289772649229 -0.563004807229131 -1.17508068140601 -0.903933672561598 0.636593216145441 -0.199839809667901 0.0822878734251179 -1.67471473877703 0.140734412915828 1.50030285645827 1.1232732507932 -0.0120260529592893 0.662476453266831 -0.330499834523631 0.145973475633336 -0.681695170032688 -1.48363456105752 -0.145885108356127 2.49959715612395 0.333432972903552 -0.0756587068933304 0.00655355844945002 0.613345155428579 -2.5248360255663 -0.0325526047130933 1.07978769583265 -0.0138765767282683 0.00916934668221137 1.68819215268057 -0.273063913043217 -0.625484703653296 0.712531857480345 0.508192199585068 0.0940117374781352 1.12314570933907 -1.66650749031685 0.167698305993295 -2.11627502892365 0.77893648265739 0.227720393510918 0.907361502268536 0.610341958789074 1.08274930163952 0.376189917831439 0.600262267123466 -0.259317219491681 0.474695516660039 0.838324868578509 -1.03352869590957 0.035121412954103 1.56828375490541 0.589580102635721 0.822813081794019 -0.37727217508114 0.403762503192584 0.251133954794746 -0.233172208961145 0.02230996835097 -0.635014914748733 1.83266676106025 -0.963670580382379 -0.264067585226001 1.08070700693135 0.358885841090099 0.585533511978011 1.76770832540837 -0.784935926363395 1.4569399343466 -0.85314714573555 0.511569951420645 -0.193301475709725 -0.163467671693943 -1.26204729486832 -0.539698324590554 0.451065022472923 -1.06402189619099 -0.0622867048145779 1.06569405493703 0.571013353965613 -1.50679168050571 0.490777662511096 0.245516081944653 0.361471761721914 -0.621487594665261 -0.925411265578157 -0.660628886989162 -0.517938874418631 -0.468062292695508 -0.543285679096768 -0.505211584044982 1.32962414267675 0.184181200060273 -1.12845781704082 -0.282191971770909 3.59836674656936 0.334068495292728 0.0572385563836475 -0.428089005319304 -1.31595012520689 -0.9597478468612 0.568292760900484 -0.177645777574491 0.0934932846878224 -0.807167051635478 -0.709684245376809 2.33853422784774 -0.042861692497472 -1.4421736901891 0.1797736881204 -0.556884147404694 0.542961830560802 0.582742387632836 -2.1670468295557 
-0.966663667227964 -1.80054512306617 0.940091298353648 -0.470226641848731 0.585372611662859 -2.67384436273235 1.16434511949812 -0.181194528138251 0.878001848170052 1.85713609514143 -0.438515823524022 -0.216652776847408 0.703476162063725 -0.698637321816607 1.02334921221117 0.407797201015888 1.14929661339988 0.626051879437502 0.204201889896323 0.911531069240434 -1.51256531384326 0.388686903146617 -0.391853533699858 1.96136088162417 0.702387825123919 0.626352837966699 -1.36323964667781 -0.46175487000385 0.0618212784176971 -1.64196987648601 -0.212436711798454 -1.50321739711607 -1.19688793408237 1.03346567894217 -0.0855406626182112 -1.61818616865118 -0.459813311237905 -0.680556686863647 -0.183583140690168 0.438901429324621 1.78340860504107 -0.182284905877152 -1.30466827250959 -0.453364772800482 0.171476065524903 0.644296274817623 0.34710710006096 -1.08734895757727 -0.753259484035448 0.366548701682631 -0.567304967111665 -1.59991972885559 0.821954503147778 0.445784984272121 0.319091556533105 -1.48876503806322 -0.32658525174566 0.219262287915184 1.48340528967276 0.401521993881281 0.141594364310692 -1.59015869915253 -0.152773276228678 -0.889633519907915 0.0941085246129424 0.536769178352173 -0.424077390722876 -1.4857820470239 -0.0792843625182888 0.941514958006786 -0.0279226551039579 -0.273082600488398 -0.220991169889856 -1.12342712735436 0.574558700101556 0.0107423132306948 -0.389393912178633 0.873992182391399 0.965836953262622 -0.318090685757789 -0.823267364689593 1.26984613311686 0.655139577947106 -0.183236582219213 0.347439742123513 0.293229926355428 0.693758033208973 -0.983099186015255 0.231837784014449 0.39312520646177 0.903571089323566 -1.2011447671277 1.54054523721457 1.26810112301427 -1.1807357556839 0.757953909407252 0.206608405764112 -1.53645877502788 1.20613201874253 -0.720093517810767 -1.06522226793932 -0.299785911387322 1.70335771769012 0.864297023386082 0.175883264858758 -0.0305834549124674 -1.20647389234014 0.516853430264493 -0.388463803545152 -1.29833118055964 0.45683986303325 -0.0709670514564084 0.835391293253413 0.202848655965996 1.12074748511558 -1.90254062003316 0.339203444699473 1.07003805775644 1.92803204753962 -0.224419592616437 -1.54156651595364 0.862425673219733 -0.179586566087696 -0.795300447111663 -1.57281174426607 0.850082255717735 -0.135749634149385 -0.51109445647723 -0.484390230955396 -0.593430710348855 -1.77056512009086 -0.74918740048359 -0.276122262848666 0.181521887485128 -1.16039362843504 0.789694935640303 -1.8489270369214 0.12217580270271 -0.548038365926281 -1.34032281964742 0.106119867162092 0.161053627153454 -0.68600143558857 -0.549586164410123 0.145415127391549 1.42946286507157 -0.738847528910131 -0.322841480134539 0.955587164239863 -0.218714513317376 -0.194081692877876 0.765055141435531 -1.57993951543339 -1.20353821817794 -1.73273723909982 2.53481801092666 -0.2770437731315 -0.962061960415488 0.0873056786848684 0.17153731053221 1.63606558979407 0.150963289369799 1.35653888625305 -0.943492193336862 1.17252696415654 -0.406142428274389 0.870289632319152 -0.136358163406124 -1.78548950628934 -1.10280575460702 -0.531922861433922 0.774963158320099 -0.658150251058337 -1.21146815801785 0.204613309160681 -0.190894502137359 0.390678047627235 -2.19681486506355 3.56469870346185 -1.04536245067793 -0.0310394409912035 -1.21019400202016 -0.449935554424039 -0.0406568847363286 -1.39222762368263 0.323155491209282 -1.57256277686622 -0.0473806638357706 0.117679495227979 -1.15152089191089 -1.11842659380235 -0.411101981080252 0.873463160990814 0.245184503505263 -0.588001886484842 
0.226115995472716 1.07342130923165 -1.38009237856701 -1.15359430291839 0.00402573794600368 0.233697002026866 1.97739456885736 0.147234231936812 -0.293844570528323 -0.0223229795571849 -0.303538022991914 1.49110210311767 -0.547083273786323 2.16878638702619 0.430057343310529 0.125582982427165 0.857106146839755 -1.33507068728721 1.17961083238004 1.81363856823262 2.18780564043276 0.791597753316789 0.958907330852103 -1.91405316194691 1.38331759960323 0.24088896574296 -1.31200610130127 0.861763282199723 -0.938124863531328 0.307732090469078 -1.25301430066312 -1.08797299463922 -0.141496420160072 2.10690856499272 1.5030010867659 -0.3657993940556 -0.708012500075116 -1.83592416788058 1.18135339451835 2.93577228339744 -0.435581341811353 -0.0490708509041978 -0.155580460791972 -0.549700306212557 0.517545393709299 0.427278410196171 1.33696821990187 -2.6079511463834 -0.0393942541090741 0.812555363137834 -0.788939766721229 0.100633639976487 -2.52497718789921 1.21454699153901 -0.494843524967164 -1.05974246986474 -0.213482346373157 0.442884679247906 -0.138211793266126 0.994044049375832 0.197754671327737 1.88154311960806 1.52799057545181 -0.448656399062089 -1.41068406731743 1.61365647598522 1.05887035710452 -0.648199105051201 0.0759418864707083 -1.76106169444135 -0.0326937629965958 0.663174194871142 1.99590370551385 -0.563653817442196 -1.65269502350098 -0.0322559491330951 0.407768599591702 -0.925907687445417 -0.124935676852356 0.0343806078952106 0.961947467773503 -0.713518105406474 0.968279999975188 -0.122383300216831 -1.56395685196163 -0.736527559472835 0.618063776097324 0.466920910676507 -0.28150301222598 0.753858948332047 1.41726864037432 -1.5072014473221 -0.699676311884173 0.851193205184487 -0.0605312023042188 -0.943985567535374 1.15721140787685 -0.099629051852015 1.90371721891151 1.82336502330301 1.00073649992945 0.501512021381676 0.599231244263478 0.0797598086741595 -1.64020086851705 -0.13932519113427 1.07720837810284 1.67762499007835 1.95767516821931 -2.53354848625642 0.766143606347418 -0.0761611725687081 -0.862198898003854 2.8700826541539 -0.359850886016284 1.69322252547072 -0.665733362978159 2.12332024497962 1.72238088218132 -1.01452326520999 -0.448427035906777 0.359751228080752 -1.40894612624582 1.18620131357438 -1.85141394620529 -1.69972124382294 -0.69013259737817 -0.556591139036045 1.25267102089639 -0.985789279078256 -0.242399160355649 0.228216723491355 -0.0456337772677659 0.344971001137842 -1.17805308764751 1.93067985021817 -0.0806432149963593 -0.542429572740475 0.663616033010178 0.493851264759579 0.830441827006545 0.0367628105970184 0.200653234545016 -0.336100235719408 1.11947604627399 0.00442149506455493 -0.968383727130266 1.32400908865678 -0.0349494804948608 -0.420425108987279 -0.598576482632135 -1.71468216348314 -1.46802777769655 0.16373832952004 1.4488929051749 0.822716446266154 0.127961473555707 -0.937434148413189 0.363386506593787 -0.0309064113938869 2.27602476788476 2.00783825514599 -0.263814635786672 -0.9137435749839 -0.90752299205335 1.49307223547876 0.327553287747479 0.83141550145293 0.161987437275493 0.183213940381525 -0.950372626568836 0.48566069085265 0.467737116493501 2.03420096698005 0.991539455237584 -0.29545332882258 1.01726533697241 0.270973417234298 0.0716935729893051 0.174415469710853 -0.95500712037148 1.29759572948346 -0.677160810674917 1.31923957434652 0.23557635356557 0.396561977004471 1.46196376442463 0.454973669532811 0.217963275563462 0.217404116701556 1.861475126046 -0.368386221251342 0.103715927309093 -0.295863898849972 -0.309809853489301 1.36498603153545 
0.581919257576405 -0.0779343494131999 0.936957829580377 -0.908405292378964 -0.10815664414456 1.5800773112011 -1.73839405065758 -0.668777033860067 0.895020576584839 0.515318795912631 0.828192544948664 0.238528658204734 0.788415009925315 -1.24658018776579 0.47088252258133 0.143880971008274 0.0370492523723518 0.873703488289878 -1.28715791351796 -0.685032800541569 2.22326544248254 -1.50522155680946 -1.13734275839651 -0.288301635281063 -0.779881363180152 -0.171843656376616 -0.927853976462273 1.00228684233552 -1.1545546365892 2.62933766830345 0.725746409747575 0.0929139559563449 0.224524032785838 -0.608305740850353 -1.37852771108806 -0.32498215509477 0.730946837506223 0.585968690451631 -1.25622478643602 0.102180963886976 -1.7524175304653 0.96367522494322 2.03616048928461 -0.0506094565034537 0.0824570379530858 0.559175517585843 0.837445877819924 0.500392996530437 -0.029410014211447 1.0706599994092 1.52054113248363 0.232567008679614 -0.581973738871358 0.98053773341506 0.217291907969533 0.145849317442028 0.494663452709616 -0.417054875057912 0.279544249114065 -0.460756763491392 -0.0717501395451915 -1.29797369882413 1.2113606068687 -2.08188389303618 -1.34444898717008 -1.06248206902984 1.33214114566333 -0.183244617724943 1.06221890083713 0.285704795327966 1.34567022998301 2.21745324738321 0.539556462375909 3.28328179364677 -0.409072602208636 -0.474919036518892 -0.352402749067795 0.0946984432924429 1.30196439000454 -1.40925763846516 -1.2522738220056 -0.6309481762148 -1.69515260833637 -0.524115148257855 -0.470941808861366 -0.387964021353118 -0.409635894556247 1.02417729656014 -0.402450027185502 -0.273532880202703 -0.224555529681328 -1.08518153784326 -0.150364615929183 1.37178513865195 0.0585221665275876 -1.17708206773214 -2.78433330393822 -0.679183109684733 0.218605022099878 3.36153389065846 1.09924880022684 -0.448506046025218 -0.678639396573956 0.476007942982342 -0.242324721731632 0.490966695850829 -1.78877323869443 -0.00311240778707608 0.975798589241748 1.63665849951329 -1.49614661563194 -0.883759241765294 0.211582215726558 -1.50479500652589 -0.0256383629434054 0.362880365509075 -1.1361370957654 -1.31935705639389 -0.437866880908415 -0.748917135264176 1.72133932628132 -1.10736982214326 -0.492194950826945 0.21681975751394 -0.313052162613576 0.871818674302284 -1.57501894494939 0.694243719803792 -1.47555051276665 -0.983874435059721 -0.436465404018063 -0.273498372935533 0.0910184824984124 -0.611875852423394 -1.13193864715429 -0.71172859433304 0.212093312220183 0.530537018698009 0.48775295345998 0.517747570625665 1.74974813247687 -1.71591107346182 -0.265167180588146 1.02424839912396 0.0834707753416246 1.13081359662885 1.55794413165773 -0.922349516963831 0.332897716976399 0.530677292255172 -1.81514511242173 -1.26952178356064 0.556922113348222 -0.406193171792063 -0.0469559739158169 -0.161915042215605 -1.01082555937073 -0.84719668532927 -2.21614844441138 1.37711502596053 -0.401906057655795 0.647026751565294 -0.371728890515308 0.369819499584845 -1.10910340782314 0.535623244623752 -0.998928066664386 0.854046033131702 1.41471155274388 1.627598945816 0.587783557704131 0.97031098663876 -1.37691092517482 -1.92665154513765 1.29368069366204 0.923308319494675 -0.11058440134778 -1.89236640941871 0.647583000252996 -0.16595090655123 -1.30927700556178 -0.945933216572158 0.497779429863234 0.518785772503535 0.235632509359668 -0.422946090083062 -0.56227643564429 -2.54397938126402 1.96587493898727 -0.497794479344108 0.726163818447858 1.70115465521124 1.21757740497423 0.96506022503075 0.248046087391808 -1.04282873761509 
-0.458522091724742 -0.278549630139828 0.438034860074673 -1.24251098172077 0.361801480808207 -2.26211836930248 0.363461318103466 0.844912646344807 -0.0801564664838664 0.852924734155823 -1.95346785899828 -0.854092426181688 0.369278013075117 -0.758069874882184 -0.845584270267056 0.87034937795332 1.19645354753766 0.401462785615719 0.397918274223295 -0.460356437230777 3.099545095234 -0.470429079949977 -0.879899397577171 -0.056008432749752 1.14705135438026 0.331465481105747 -0.107613608749503 0.215677330041064 -1.52933331038544 0.615550221468623 1.02396134733664 0.776512866754869 0.166617012386687 1.08288167559371 -0.543266299560641 -0.130656619390358 -1.29239207303341 0.119765262344619 -0.267604412012214 -1.84278661177115 -1.09293641131901 0.696894936044907 -0.595359689700577 -0.13905092825673 0.998943676043539 -1.64440507321023 1.61263041460782 0.535970917738814 -0.649629112999844 1.48657983361753 -0.259631787560641 -0.401304818923389 -0.46946147570382 -1.25796041612205 0.197308736041243 0.18711859633737 -0.412337716267346 -0.0810550462345074 0.380859939606442 -0.0119837497359389 0.233457380720423 2.05943515271123 -0.453122287257903 1.73558220913885 -0.924131277963135 1.1870997045204 -0.287821164105675 -0.122984908188996 -1.23572238276141 1.27722921406124 2.21814581600473 0.598033012630937 -0.418525652335859 0.0110538211488166 0.345059664444164 0.622319618484348 0.630883245395388 -0.175733179388052 0.228513314284387 0.402191178245465 0.138178693159402 -0.394046111780955 -2.12588898095831 0.360750985313892 1.24399182080255 0.612191454193768 -0.679820050224465 0.165864914609286 0.612255859753929 1.18948844560203 -2.01883880746256 0.0616708321226535 0.588069423236725 1.29762271364003 -0.0581219734559169 -1.32489150103357 -0.761499255571535 0.340953753640448 0.665624105890752 0.177524430612629 -2.19867921672195 -0.524367965261407 0.582548209100963 1.18452411630265 0.455510936437296 -0.233007071553038 0.327011529139915 -1.43327345762856 -0.526976807893921 0.944396669679551 1.11818605029803 -0.448124506871894 -1.45526791998925 1.51436131242835 0.882001570840572 -0.481637955742015 -2.01814061355571 -0.0422982097899968 -0.881662151528459 -0.0594832503565945 0.927983610468536 -0.495979341823142 -1.63202394970027 -0.821462962614267 2.11416922989959 1.6736115222079 1.238582043724 0.7346442807925 -0.849466973661426 0.00474189471983219 -0.929821745881856 0.718854170481676 0.841553080293802 -0.652162758720636 -0.588860899154409 -1.80844687603546 0.0521401718075524 0.967770482541142 -0.470285007329405 -0.0236914396422196 -0.880822671570642 1.06498675367589 0.316137286984047 -0.632682106714448 0.0152366546185121 -0.301146324112196 0.671426618388944 -0.302075953003912 -0.0995695817220368 -0.0790470197633516 -0.208635399495741 -0.564263410482616 -0.771849327812244 -1.09207311987088 0.779178844820348 0.337120054957643 -2.1991905983479 0.948490301725021 -0.189698165114838 -0.161090386459004 0.074898532868909 -0.30239725144738 0.105813920002041 0.675408457197419 -0.901715305006752 1.25235582722266 0.116950867197295 -0.519192700650443 0.15380217281214 -0.447976868775173 1.03449481007936 1.96017788008389 0.63437979884484 -0.27066424331592 -1.39411288580713 1.07080839195048 0.412442459540543 0.779992034007557 -2.42911381392987 -0.597042146072396 -0.642767132039128 0.0242434451662664 1.02365680975731 0.513077936124843 0.637711938841509 0.776130048610638 -0.887271548888866 -1.2240409664843 0.738329251295331 2.85139736152989 0.918030345130239 -0.779357779152387 -2.63512015045769 0.832288963727132 -0.338553257015139 
1.65701310576812 0.118706526332187 1.66326372545829 2.33294748796414 -1.0798851303541 -2.12038938560157 -0.151280616170086 -1.16239581953524 0.349765298374872 0.918859497199134 0.342849175056539 1.618592427037 2.41527406695011 0.00451214675491105 0.536259552201373 -0.331051483772922 0.715278139708521 -0.92697442430263 0.943468747561946 -0.0234771230582964 0.93196381431731 0.864478275593955 -0.390164600799079 -0.0428717705621292 -0.817524256143995 1.57999949599957 -0.649544941372016 0.0770936659081101 -0.810411468110718 0.412229253033452 -1.15439009804743 0.0564428128453563 -1.96715950723702 0.894322275803651 0.0614918725352215 -0.515401765402705 -0.339046834847735 0.024399824056424 0.294239163520386 -0.860418162189377 1.31329629234029 0.241802294750692 -0.444802456788945 1.12477383831616 -0.47221458104947 -1.18071131900867 -1.17717549856775 1.7369858079486 0.449335380605518 0.941040056716191 1.72826388869388 -0.37558966285735 -0.328651945176957 2.39101001566915 -0.714546162330724 -0.998302514988229 0.910788664741567 1.33134348244858 0.273362870737121 -0.877141781686279 -0.206114369087971 -0.671395325794588 -0.66603232708733 0.892493330939806 -1.02845919565055 0.83709535999185 1.06270348886336 -1.62705598598824 -0.000811566809742438 -0.610452861329381 -0.506333749815391 2.48701660262366 0.692348964005374 1.99185605569447 2.58940122250144 0.272215319789073 -0.536711267183537 1.57931175960771 -0.353040647598571 -0.204046586336367 1.50428238258549 -0.429372880301326 -0.777267512949974 0.602409506558577 0.122887085318485 -0.501134060329996 -0.108512585845594 -1.05299465106963 -0.403634777308393 -0.77306208435265 0.752436640814661 -0.347614350038579 0.255353529550091 -0.548221345559216 0.0970127989284281 0.0144190386976394 -0.247291936495064 -1.20419897789652 1.08334371284711 -1.27173647626568 -0.967519163081747 -0.895343591071632 0.245086756770467 1.37585842566925 0.412871198982382 0.931139073607787 0.443473698202598 -0.269386158937587 0.31150460877612 0.762396051914723 1.24241110403496 1.50074041675952 -0.47029849070278 0.662447015491166 0.247459616498681 0.594570413958824 1.07511715102352 1.7180279396898 0.598969325469918 0.752328150993272 -0.258098111976643 0.0525387134864121 -1.12342467294236 -0.142309549355562 -0.321923919790413 1.62585044089205 -0.387125814566443 0.723354331796995 1.34506103869716 1.0918743858752 -2.27513182602663 0.432014422213559 1.15983353754697 -0.128285560289074 0.338084886111541 -0.158426957838227 -0.355355394940559 0.643151271433627 1.09462992835366 -0.186799044023607 -0.393013668173586 -0.716130008224861 -0.923289431222351 -1.07877938862693 1.1157521208807 -0.332248388553788 -1.26588672338325 1.27165737242792 -1.12622349481324 -0.269216769446573 -0.179650382107393 -0.95763067787984 -0.462229900165583 0.580510029767719 0.332588244189394 -0.126487669951316 -0.25577309669617 -0.528275818334898 0.898936315346308 0.372363779789298 0.339746971601686 -0.434983258267488 -1.14024138183148 1.76628480577307 0.997788294809096 -1.05044191626172 -0.0789818798832229 -0.000223929992913904 -0.843278696877711 1.51277131972135 -0.26280099371921 -1.29789651931509 0.929294454006159 -0.711027923075589 -0.624952026179651 0.331118701375529 1.36700192701385 -2.40554569147294 -0.740960563696435 -0.678981446286292 -0.22438162047869 0.180774170299754 0.0269338770399284 -0.190469771750497 -0.583960200055032 0.0511525428833281 -0.892172614263645 -0.367093196162206 -1.02895974790542 0.684435471419445 -0.0996065684263089 -0.145247465903237 -0.299924678522883 0.5165904287171 0.893477672241818 
-2.01619181522341 0.225607224498195 1.41114722829529 0.904169871150047 -1.18353232759204 0.025364704417783 1.50384577649969 -1.07690932314777 1.77945635865356 0.242003060616123 0.01987667783348 -0.294603399536039 -0.0700034283208949 0.791135961377662 -0.523242930938623 1.67921482201848 2.33249798011691 -1.18686035693006 -0.299802253048611 0.892618155392172 -0.284650165859007 -0.801092794417895 -0.299029959308362 0.170293402118512 -0.0105243472640615 -0.292619772933068 -0.45538023411819 1.77718217830449 -0.783601717608516 -1.24947046405017 1.38086319672436 -1.4654301856148 -1.08497834256588 0.178587363468706 0.621914872498245 -0.979594283182508 -0.864088513890254 -0.355371489710049 0.917083076114149 1.04187374642895 0.383978672107304 -1.85114526997877 0.421621149981496 1.5061528149133 0.319452205092662 -0.715642043647044 0.396468003801436 0.810667240514945 -1.76470675497646 0.123884593965564 -0.918451751725173 0.461477021414077 -0.202550803296892 -0.60276103123834 -0.283732681152421 -1.01266277125503 -0.308757189867484 0.772944399452889 0.365448757075218 0.659667605626246 0.188990648011007 -0.704204756005816 0.516916694025014 -1.12151601210497 0.230723266527487 0.553917977092757 -0.183924863552832 -0.0521710663427507 -0.318969315136649 -0.0599952813226321 0.957400050093771 -0.0265463882084863 -0.759419653188763 -1.47382067625407 -1.50164240482275 1.01291917366052 -0.312411769741991 0.88661697795708 -0.0314898069226012 -0.00248404439273142 -0.789800823085258 -0.241340688174328 -0.0207900748841582 0.745382364110092 0.646319499533928 -1.54171877351745 0.900141927110038 -0.231609913673761 -1.1580340546511 1.83759743160871 1.10760894532649 -1.36470627760907 0.126763503495165 0.739157219085545 0.155311584993743 -0.185215728840105 1.04937273953901 1.33812003853223 -1.17965363680769 0.100617626774449 0.232237360740584 0.547333900520828 -1.86179238289685 0.39305369211268 0.411506296236179 0.0155975158465706 1.01820901632331 1.52949948297301 0.931792287281878 -0.284978419553594 -0.432644428629334 -0.888083849022574 -0.00825571996211479 -0.0744445401759428 -1.0214567696349 0.0861998021359691 0.458725809715328 -0.254962517172353 -1.89837068938051 -1.05982584097826 -1.25568372865672 -0.0718512630201132 -0.663188743085356 -0.733305950740168 -0.459562717115934 0.0961091722657528 1.56449105347706 1.79891583965744 0.2901065100239 -0.30799988787416 -1.96367749373311 -0.9402746379664 0.292464756728331 -0.340624259379089 2.63482234898041 -0.264308112562355 0.500766725785637 0.650184497068632 0.972016450851554 -0.559341695909604 -0.333034599715605 0.808433953094309 -0.815869467524643 0.742164437550277 -1.15825569004132 0.923679453161881 -1.29168902491682 -1.29724927838583 -1.35138335472292 -0.0892053047074557 0.292118848093484 -0.410764585609949 1.79725343420708 -0.135412193532348 0.685137371060364 0.726426284365358 1.83501472403258 1.12385542610639 -1.3805039928298 -0.379744789147738 -0.259833748254629 0.820448366741526 -0.608125245124715 1.09681480966901 0.370595380326063 -0.798181301126243 -0.688661187415743 0.316622552052461 -0.0431857788966901 -0.814558728110318 -0.183955985375308 1.1676338060897 -0.125212476104777 2.66005507332328 -0.677370027574568 0.0682837182512862 0.733109114072074 2.38847880758763 -0.145141641599741 -1.81765794140993 0.295780878023491 -0.854660841035929 0.266082128307637 -2.01415101297006 1.80326622209458 -0.519103985498428 0.545993042301416 1.09491717048345 -0.859023269049203 -0.883257240628 0.405405442195956 0.610137258560133 0.212644676293486 -0.717512754139545 0.0463405139956835 
0.759882564623725 0.0893201636313759 0.133128536358533 1.61224079948432 0.39754365375143 -0.165410792871021 0.341052320393856 -0.390234387395245 -0.22021597161911 -0.772931729564516 0.132220253813398 -2.98256170403233 1.305630051571 0.00581645436075928 -0.239906235081031 0.311439725025127 0.0274912518371319 -0.70816446316451 1.32795516716021 -0.251468802082074 -0.825147917063302 0.445212791823753 1.12717055921758 0.309525787774853 -0.475484688929782 -0.35025252348452 0.368866617955253 -0.777421820394184 -0.731181482855223 0.161114655144579 0.582720476601238 -1.42158402353881 1.48220188634873 -0.785107704352554 -0.793734409654367 -0.384160894203558 0.926875907625278 0.983747928351894 -0.404403268164038 0.903084285351686 0.903691908316344 -0.0610590643368135 -0.788333112142763 0.758677358136286 -0.596763503810239 0.420244407987802 -1.62196758669258 0.612569398750813 -1.0546174546503 1.74361176237512 -2.10960996814916 1.50329465960344 0.268464201146037 1.33503730648968 -0.795457574767075 0.439355369830314 -0.43799757853545 0.768170676916103 -3.35571331522646 0.0678931293371069 -0.469030111084909 0.914329618215277 2.67662331789943 -0.328993999192855 -1.83792808860092 0.770256219359799 0.00681092359569981 0.69918300427921 1.13258043325216 -1.50024959061864 0.572453043971209 -0.693149187907528 -0.852735065390689 -0.601873383297381 1.08647233585193 0.6077142049299 2.17079154744862 0.35709535679358 1.17812690826574 0.948845231718582 -1.23922630329655 -1.22653616766062 1.59235653226779 2.43373807009353 -0.592594580929058 1.97949078596821 -0.125852224558934 -0.073499839780047 0.220821928456627 -0.581510614890349 1.66706107422833 1.25275572758257 0.021098257898244 -1.13575448926672 -0.594419919927958 0.0398158099930166 -0.0593051483571547 2.13061955120784 0.605650373977868 -0.365245509132964 0.161651421711805 1.57201198890439 -0.124678921541689 0.0648508977083124 0.907131884217969 -1.79776725008851 -0.493649917485068 0.316480115441762 0.565698901223921 0.300962633322415 2.59160710116367 0.44117485828113 0.411558152957336 -0.983650640000599 -1.36592855806614 -2.75410981169819 0.101615555524457 1.18641553003148 -1.12358298867988 -0.204065145716071 2.60353186685571 -0.948115134992327 -1.22670263992492 0.136210753461122 0.312915793785597 -1.06292603331176 -0.6475357235262 1.21593066701244 -0.853379047732964 -0.135651653178593 -0.966725910438379 1.22113062950847 -1.10162164978393 1.43964533108292 2.41327043170015 -0.926920045698686 -0.370822263592642 -0.438900237454298 0.423645797768398 -1.30328451481479 -0.150436148078657 0.576010686276752 1.34739203165739 0.178371771475954 -0.831842593412463 1.27055442160946 1.3220034857234 0.346605773160885 0.513294903186786 0.604326939255709 1.07608747517347 -0.170295188203316 0.254261986422858 0.59534122992711 -0.295316630911256 1.7511104401353 -0.246586299762925 0.763007075691059 -1.01674558493788 0.0780943336364122 1.07531866446389 -1.81836515472901 -1.42266103308254 0.487370273865756 0.326754190407931 -0.439482919751785 -1.10446124935692 -0.672839299459558 -0.465294129540996 -0.194668043420764 0.881683204336304 0.183445668464069 0.631792281827124 -1.30854015750767 -1.63439807338591 -0.930935296708735 -0.663914080350557 0.100124487298448 0.980143152067833 2.46128437764789 -0.359925943755208 0.388956804279804 -0.291154543520943 0.0587116445311588 -1.32611936620559 -0.9231383941388 0.723011737702178 -0.637206030197036 -0.748492038066302 0.490793922891785 0.495171762919922 1.64802850978532 -0.43825276149195 0.308095821424853 -1.76185551541997 0.959936821673241 
-0.00459968458761156 1.67573047313762 -0.88121540019894 2.32546364343416 1.06789774157486 -1.38160481142037 -1.53764473567812 -0.172140431297098 -0.475257870662206 1.28034144182469 -2.71901365058666 -2.25153350349473 0.948571868058247 0.0751171864497251 0.423756317320343 -1.04118523654307 0.864413321112886 -3.62167550925528 2.52861835248356 1.44217394544834 -1.47609739954365 1.56370852388809 0.0852811146718108 -1.65793861751746 -0.0161409142320568 -0.190391414521841 0.795642721919709 1.03804768551525 0.592019921007874 -0.654631673036721 -0.809853209059694 -0.321363367506513 -0.58553390700529 -0.881436234361421 -1.57972264151558 -0.13554045092796 -1.31338777974151 0.923893806304209 -1.55270466310683 -0.23153599563447 0.146303040018031 0.118947911485121 -0.3735993385411 -1.85640079157917 0.46203214502994 -0.173556205187223 0.62965268632332 -0.676421339292244 -1.90479978284472 0.47269588572471 -1.02850046940314 0.236421321445832 2.05300778482908 -1.41548614266197 -1.39782450624216 -0.122961922430224 -0.0739936923712304 1.02491992022657 0.858722671764123 1.2628292761291 -0.475805017999163 -0.202530765735766 -0.859913415717614 -0.835422250805205 1.36639404268095 0.400806139390329 -0.372883514632261 0.0842676464763932 -0.549095008985468 -0.0664361536543377 -1.20050692693334 0.0768553403225724 0.861933165113576 -0.536261916108249 1.10689181413643 -0.302538102752972 -0.698772132792385 -0.823111676827748 -1.57542527030439 -1.40078803425642 0.482796866476859 -0.1515652715032 -0.746153806243216 0.143654606637073 0.0361601337445026 -0.0365757553178581 -0.604080271385838 -1.31078466797117 -0.852952069504466 1.96620722437228 0.757910770863776 -0.313693455633364 0.0938043449060655 1.53957497734871 1.04946865786328 0.146113248986012 -0.332995625875218 -0.113845548220672 -0.207937056657722 1.37832002463768 -0.872994520043763 1.05453468188213 -1.1773440128084 -0.0273620147829887 0.037200636356115 0.659844396107599 -0.860478821933209 0.854106203257261 -0.568978695990242 -0.251120031682365 -0.133890021643266 -0.946169199788904 1.22653684609861 0.471833273849611 -0.158392723356798 0.865402607755126 -1.20286418254682 1.32271149609491 0.037280647773815 -0.750022257257824 -0.129979439878806 -0.477096740605986 -0.0126233098186351 -0.500696586955167 -0.583439919732592 1.42086753708748 -0.675828235229697 -1.57165838809746 0.0357028904609025 0.388951498578668 0.970638944004762 0.965118088634133 -1.35447430880115 0.553025251104695 -0.0915826687378426 0.718264710755737 0.544896338281351 1.73657150052595 -0.690167778617321 -0.539308400534018 0.797891560993992 -0.931548955169196 -2.01895650828262 0.625256802739982 2.321432668795 0.676716039233058 -0.0947697819414008 -0.794104795586698 -1.01152714359714 -2.45122394612786 -1.00083663728725 0.874761136450458 -0.496894447720444 -1.7610841988485 -0.11293956955003 0.762421628323815 -0.168193074687968 -0.550016366259656 0.636830412280461 -1.33973060795844 -0.161446420102852 1.54954903343472 -0.556215388779432 -2.75121639381908 -0.972008834257777 -0.367050741139341 -1.40089973444768 -0.780143889564394 0.0248726752347513 0.880701635615707 -1.18757682200302 0.200833591680337 -2.34556057590745 2.44254613589249 0.0729105971966459 -0.932018072217192 -0.378728149288432 -1.92248665318932 0.941051314778772 0.433070639151367 0.873026523225801 0.152433886436888 1.09515166239551 -1.06313456123375 0.157489370645128 0.223444403321485 -0.1118177555264 -1.09722792627794 0.381945369496329 -0.0141647416254496 -0.719384930576292 0.230740172295485 -1.21045822884604 -2.01132019388076 
1.39879943898311 -1.27730635222676 -1.01879866209364 -2.08456735364917 -1.62126679505542 -0.384480674566687 -0.169775873818583 0.147631194039277 -2.99863497810414 1.34305775314836 -0.663821763335313 -1.44191308016078 -2.28115489121157 0.988891092748694 -0.901944054054278 0.0497097719233895 -0.00615677550952789 -0.790471179650428 -1.19796582834955 1.3509207418925 -0.239889565825251 0.869833266195817 -0.0598974173844072 2.27059116538674 0.938211140302191 0.450850295002393 -0.366808795907178 -0.800238626011962 -0.341494943257687 0.175711843055033 -0.562669526906653 -0.823965180228165 1.53463851738693 1.91541717087404 -1.4445933058858 -0.0303918226870888 0.361463413101563 0.28839528487175 -0.500567288395878 -0.129706396078701 1.40143595750317 -0.750966525922507 -0.0277078285972863 -0.482376932243871 0.545398267616834 -0.303199018759196 -0.793058372128494 -0.569377106410686 -0.669863073749182 -0.367356197639686 -0.305903294339427 0.10103319401818 -2.53315670457422 1.23211635406084 0.519372569731966 1.65119642913265 1.64942410598604 -0.787738065041244 2.24903401042376 0.161916989345874 1.24531245515239 0.143256555739713 -0.266854238871089 1.41085917095421 0.288239879215476 0.568744462985992 1.89822102339509 2.20439193478905 0.236120218715501 -0.631959804124913 1.0680960432441 0.710085937377992 0.613142862893398 0.064450529416188 1.17358715682384 0.405903587267809 0.9261651929366 -0.280366618572515 1.40329538781514 -1.47446173516152 1.12856988970063 0.563656756509283 -1.41928254195586 0.0614501989492635 0.912738408823835 -0.493006304472403 0.108273833570213 0.541848742790054 -0.976462789980838 -0.926761250199065 -1.65959515013496 -1.68749705798047 -2.30546760068322 0.449591857550724 0.770033452026566 -1.39071565706705 -0.65993190055407 -0.411833734955233 -0.713054367444319 1.15126721725884 -0.114052435419987 -1.08810718232244 0.641767165787583 -1.00827926743336 1.52052370712251 -1.41662526596312 0.0386171463991461 0.947992699861075 -0.359057341110699 1.52776048359045 -0.664957187073254 -1.2394611835817 1.56228412901019 -0.675936069884659 -0.459790617704797 -0.827699025779936 -0.227372839713426 0.471943004198216 1.75679340725871 -0.467480816156195 0.887590813526058 -1.38882161909425 0.64925407043909 -0.537249836399781 -0.164876705435168 -0.0139278865244309 0.0946534897264754 1.67584709294927 -0.211838142792892 -0.149336896562557 -0.579183180363978 -0.0371569443853721 -0.287174850327574 -0.0622390131415889 -1.67283867069612 0.0640738702353421 0.872930360745533 -0.480325295028642 1.18879939103608 -0.248996824369337 1.08397647784894 -1.26639770574193 1.90832762228455 1.50488847889386 0.829930134599295 -0.656882293402749 -0.622716410443952 0.845862822933081 -1.17253000754947 0.443639633349858 -0.565360691377922 -0.0941979799026995 -0.557261778416614 0.0752015954465569 -0.917877712168407 0.317446308413493 -1.70865186686396 -0.117784431447451 -0.0712222252458445 -0.604106015332209 1.05646326812357 -1.01805462654873 0.43583770858449 -0.278543474577771 1.31431770028281 -0.0559987499127391 0.901294922892968 1.23857069737419 1.2859722624849 -0.882360349193647 1.82652108601794 0.897879333304124 0.544402622885858 0.99181121543571 -0.844062981794215 0.450532074989158 1.30368098761864 -0.371969544557285 -0.545866811193394 -0.229249068514149 -0.077212316156778 -1.06495084968959 -0.314512622500546 1.05118028000791 0.638454066426246 1.80859633191888 2.28335284191932 -2.26885941218509 0.251244390792706 -1.10490408662562 -1.15964534299338 0.496947790615305 -0.0519520720669056 0.293734314638071 -0.174323873969695 
-0.748654053823909 -0.156362312697122 -0.316703768720983 -0.10819455137061 0.153852362692122 -0.70161660653288 -1.93935981432555 0.0410190542282542 -0.232968880520072 0.571142411360365 -0.96053897202896 -0.97786273619486 1.33584110792844 0.00927338682001823 0.350316272702107 0.165057899508498 1.39620657787314 0.885470067085163 -0.0796452541738671 1.62558266815534 0.0530035668108698 0.102180848986165 0.412800056323797 0.867689613423194 0.660431883923802 0.467093501853716 0.513818265197903 0.697629049696979 0.654907078496984 -2.41621958161709 -0.989143069847705 -0.39212060523174 -2.62535734601288 1.01438367900166 0.597060034235689 -0.63737854967076 -0.357760490100548 -0.530009217229494 1.89794066380435 0.531617494510512 1.08842741875782 -1.1799699532059 -0.733725837048397 1.75069882685798 0.267509114222269 0.691438737186722 -0.562564953359034 -0.291964989281756 -1.46404882288891 -0.173903683411329 0.53644090423117 -1.47137947321143 -1.43571256436341 1.48407595318407 0.386550381470754 -0.155927513858293 0.682784889432251 1.24828499785248 -0.577179608379173 -0.541254753960156 -0.0704779981706632 0.676559918570658 0.997732086628881 0.789998171891312 1.29210345946814 -1.13784136362921 -0.387985812696852 0.395298840876471 0.160829927481321 -0.537280722382177 1.18845876730409 -0.599148771894074 -1.03985445616165 -0.0712713683731024 0.244196606544525 0.360064393624753 0.197730341701306 0.508627369436684 -1.47234190986524 -0.0225598352328631 0.469197248526909 -0.6651144513552 0.174106530811553 -0.239577633932638 -0.485658885210858 0.572873369547819 0.763844584329103 -1.10423888399109 -0.735490754268887 0.831849936561138 0.310179184712156 -0.062978399122162 -0.648558574516268 0.25959463584685 0.264776490201386 2.23379424277011 -0.468768881523872 -1.00305233446181 0.34815846267072 -1.32461258726116 1.36984886902658 0.150348100848457 -0.319993621859436 -0.218077696806333 -0.0322589219197765 -1.24318683423329 0.371622951185061 -0.937558807930447 -0.642570558715618 1.00117469844737 -0.0046121529606768 0.556162621688943 -0.469778083592394 -0.599916332162734 0.993021429227849 -0.71821354352638 0.469822369179183 -0.609266776414363 0.555782150290424 -0.492098691229464 -0.199918243024579 0.161913318973096 -1.52666148567048 -0.157644677401389 0.162143214066691 -1.26423316727175 0.547974088065311 -0.798847335351298 1.22713308619038 0.41854571693142 -0.557126441640213 -0.857434630537126 1.22678764528362 0.476937998120073 0.503724079972003 0.0336742826679843 -0.371397762873115 0.628687193551349 0.124777276602787 1.07351674137853 -1.16252153178514 0.00651560354779174 1.05144852352203 -2.49367180991957 -0.322827776752103 0.627630659398357 0.322220205173911 0.500147635765203 1.00783796627164 -0.135297516957062 -0.762903306029673 0.863924429076208 -0.36582428125962 -0.42459302710601 -0.550971189118302 0.383733658287598 -1.13902102673107 1.42261681344548 1.1881731397765 0.0858547545420569 -1.5057063255932 0.648012747962487 -0.106982168033497 -0.640672417579672 0.789858074690603 -0.867985946631208 -1.20514890157879 -1.47896095523293 -0.555625790076959 0.479902496112053 0.109508129809283 -0.159140743222701 -1.05306554543803 -1.40476288146315 0.815647004547232 -1.12334508589961 -0.135759526705643 -0.805964879687362 0.106499364489658 0.0878058634204834 0.61943862280096 0.502354935117552 -0.00318232366918443 -0.738740398540578 -0.831627500780693 -0.448360324701731 -1.93060535753869 -0.351415033948739 -0.0512224746553083 -0.21256670922399 0.558523503827361 0.774540247955446 -0.948619279219249 0.305139517350978 
0.269856402777674 -0.799362435884333 -0.919240799167444 0.631868097590917 -0.625930485083309 -0.707597484694945 1.4369394372948 1.42808757100627 -0.843042054515583 -1.12509053519751 -0.770197353001276 1.03795075009843 -1.1452060849609 -1.11306760959816 1.16092031619583 -1.64849063224919 0.295936522922907 0.193736303915385 -0.606490636304776 0.351379650476455 -2.58316823691269 0.931085339802945 1.28480686820061 0.284018093537292 -0.164260392652456 -0.223252549513953 -0.820841820903776 -1.22150002529372 -0.0242608117372599 0.984240319293763 -1.44524150565445 -0.224876551551507 -1.54673294142786 0.3143422268116 1.061866136999 -1.61069717808048 2.15244935298799 -0.332785573130329 -1.09371085168546 2.13349215130687 -1.05997289400868 0.903708945084422 0.708341187983413 -0.954673052458575 0.0719782733975542 0.549879067178294 0.648504773258042 0.0792487134556445 -1.71394211080158 1.01734559613228 0.78201310540591 0.407900776709749 -0.594629196411376 -2.21798745368056 -1.46068365522631 0.715112143741949 -0.121255772153999 2.03171551134557 0.673149980385707 -1.18382285797566 0.646988767798928 1.16088407450272 -1.43928683658095 0.643480923105179 -0.031166355393287 -1.60539292971819 0.0908029785889366 0.799529152472581 1.68374252300716 -0.227580042373514 -1.34212510397749 -0.160493149731326 -0.711600786259103 -0.477380742075005 0.807387683109384 0.715754342183955 0.471616179985988 -0.352964433227653 0.748269954575497 -0.475417582488295 0.557996227258678 0.531373282444861 -0.734972734317696 2.82349744372548 1.88887792035816 -0.766550324742778 0.895728384300564 0.269065116315068 1.00007656573682 -0.180659973650412 -0.123532378720181 1.28478921007049 -1.16528060665688 -0.693473630543389 0.866948637458814 -0.211172271271968 -0.448173205444454 -0.220545060189136 1.22015983446133 0.899473346278142 0.172769765574 -1.85721501346801 -0.681133080207694 -1.0450561769605 -0.522063591080177 -0.0945239556237654 0.579590808388517 -0.79753852722575 -0.165380903271713 0.257428363540149 2.00347192412519 -0.0006020193956636 1.66318607470751 0.698774698664878 -0.186723548798786 0.290172182963104 1.31548553726487 0.545181216099562 -1.25093482560583 1.35209323758885 1.05938659591092 0.144154233353193 -0.207127548503183 -0.304657036812061 -0.431068704991716 -0.928363991347145 0.49241398146718 -0.940447093115583 0.527789967314015 -0.350908295434402 0.53660518228656 1.26042810708495 -0.452807479602172 0.696727548117823 0.995369367696925 -1.07627848702379 1.52608130734865 -2.42531582873194 -1.85504302376086 0.51347650660946 -0.399599759453928 -1.31767413926156 0.477683681866336 1.37800844404657 -1.30593137459912 1.13096181301848 1.27892496600693 0.888076755594307 1.19721495365927 -1.0646844707926 0.920226327182484 -0.686252460674514 0.460889592909355 -0.0569445438308981 1.64778253177256 1.36221977300074 -0.0220675132393141 0.27228818438766 0.02496472672858 -0.1794623597464 -0.149157734884899 1.59246225629504 -2.20366692349037 -0.119651498993073 0.0964195101089941 0.150332752885624 0.315583930308996 -0.950843928825342 -1.20596681015425 0.219873877786316 2.67051423894879 -1.61429598781266 -0.54560594463884 0.630727724079719 -1.2539864448792 -0.235629694536888 -0.46795637426841 1.73679619840595 0.548925445620611 0.310976713602279 -0.745753878098662 0.901651390154673 -0.817852298260501 0.968642198110605 0.85415019525374 0.284773282805121 0.233666455258025 0.256299595661431 0.536975248431135 0.263233910746939 -0.248551550691573 -0.988603492660776 -0.754038780575571 0.367560772438334 0.788019416857277 0.760326878686672 
-1.11325020616679 0.845365255243958 0.313667258170978 -0.405019225407846 0.154813943582985 -0.727243565008621 -0.0991292141686788 0.758562598006598 -1.60488041710962 -2.06620954544034 -0.216656389488509 0.661647658882569 0.54101744412246 -1.31964325003766 -0.692152377018911 1.05477066885175 -0.398076548887699 -2.2806579646864 -1.01214325666369 0.204671231603262 -0.865530916432082 0.70646573666943 -0.907792070316211 2.25749590094019 -1.19804212358639 -0.842022140250428 0.0161771245098185 0.616401468383859 -0.274871711787017 -0.236751923605252 -0.105176747887057 1.0103565331707 0.313445440636135 -0.238842286729601 0.0684877797222846 -1.86963787445623 -1.29987189601302 -0.189651808297903 0.143960146463226 -1.25043652090988 -1.92359672224178 -1.7313577667306 -1.33054598981911 1.42778649703653 0.491371271365256 -2.00524659397608 0.567231310044974 0.452016269048106 1.12548129580426 0.541373118028814 -0.190291895501018 -2.17427145637105 1.50606551725427 -0.00705999508215035 1.69399841599593 -0.276893352338614 -1.18646392907696 -0.06194320290316 0.887702554156626 1.50158213862423 0.282077452503052 -0.141429522364566 1.12213470464118 -1.02326695911903 -1.11835928077351 -1.51292169255832 0.456410616711446 1.51546726657375 -0.129684441576661 -0.0464627673330741 1.17509568694523 -0.120772109728836 -0.828771593044151 0.0739956373708325 0.354907094594843 0.676686421780912 -1.09971846757474 -0.817458002497099 -0.656886181272924 -0.330526775611109 1.60379186914524 -0.457805217299499 -1.78174969014681 0.0670327636938318 -0.58831054128434 -0.266200657122089 -0.931841737799837 -0.858920421207823 0.284368299897288 0.364905329265138 0.521428742167899 0.434319468177891 0.658181153692931 -0.212208751335539 -3.62620009096601 -0.439092144811565 -0.578814822446321 -0.290288616687577 0.213294060179334 0.545762556516433 1.79554459034505 0.596605800569595 0.739761349494708 -2.21548290304073 -1.56862898824819 1.8194892802011 1.00831710500645 -1.94762870164253 -0.417404221465304 1.98436590186386 0.892774952120522 1.03934036761925 0.0819122754464765 0.921881763706546 2.0120540531483 2.35641428819617 -0.171044032144701 -0.0439704214052755 -0.474136894958572 -1.04754454652419 -0.952095758436697 -2.44092611762025 -0.912269843397023 -1.31541379070517 0.866637457659847 -0.849747300964368 -0.479598229669884 0.130640842012507 0.514553258485801 -0.120259943243476 -0.819750503545506 -0.662428793745136 0.992771950909495 -1.37545072267795 -1.0865305172168 0.0881419849600862 -0.543577804285913 0.650477554373228 0.189230647827634 -1.41208247981669 1.20733923353128 0.0308394078740148 0.209002956944172 -0.437080070037891 -2.10903391997774 0.906293085571792 -0.292422009812915 -0.368640346709745 0.383805720825994 0.544172856270459 -0.472497083306905 -0.0690547678578439 -1.19715314288144 -0.429976300512546 -0.101273739590355 -1.08153483154996 -0.270661325316722 -0.17069340738681 -0.556117213237022 -0.261514326837172 -0.117136727703628 1.26856670154061 1.02060073795481 -0.36093889920488 -0.618955702512029 -0.399571775561949 -1.08361812645515 -1.44589722675503 1.60663926466531 1.29343835641916 -0.220789583795908 0.321663318440701 0.0637897683377621 -1.10898553276418 0.393832399575522 1.42999860454679 0.188153215876415 -1.34739738545818 0.294518916265306 -0.166541384916598 0.739082891485541 1.66666481330091 -1.3849874247961 -1.88434632770741 0.15282391393034 1.1837602863696 -0.201333651283734 -1.04245277350241 1.17278664277257 0.950057666496714 0.373744021831527 1.54562116157856 0.205899971771044 -1.71358369331538 0.255672978395096 
-0.574743760184329 0.660846075689147 -0.0494140392923896 2.89968768114598 -0.100733024830392 -1.25555855614248 -1.12805352522051 -1.62285836054951 -0.299107439989713 -1.33258754463843 -0.543992429002091 -1.07429868285259 0.236123740451017 -0.543734723108905 -1.00758702069802 1.37326630127195 -1.66391900173292 2.13549778859905 -1.46459338202057 -1.08061296811503 1.92134771966819 -1.73933232208452 -0.778847462421839 0.10119276214459 0.193068434000404 0.13826490973517 0.764487785254068 1.53662417548162 0.120566675718847 -0.401121078761832 1.26177225470123 0.229599924341592 0.653439128333936 0.50425640232427 -0.437592431248452 -0.46727460253464 0.952930300726063 -1.18837204374339 -2.4763227108629 0.0699913316782759 -0.710736037788558 0.846567963943703 0.247966323067629 0.208072131393432 -0.466568425626508 1.6623172199939 -2.02499706624202 0.928380852201159 -0.308501053351402 -2.9550857765682 -1.57764801622609 -0.223407406050785 -0.43494928665928 0.721817237749953 0.906975382611472 -0.292449202916663 -0.811978508507613 1.65283443021443 -0.677689333462611 -1.35366700082782 0.693962982644675 -1.07074838407594 -1.30513166253905 0.976393026883955 1.86279081043632 -0.988395345517739 0.531054935450948 -0.77540338245109 0.573707017274976 -0.248776855420303 -0.240807453272652 -0.436001342890082 -0.419046187377502 0.682307419071681 -0.286683041020721 0.23259458164607 0.913376260253548 -1.19828049923781 0.263973612113189 0.454806888258169 -1.17639607945861 2.08191769218557 -0.166874323960052 1.03335581024041 -1.47108523445932 0.924534073484574 -1.2158073192093 0.0288471762541359 -1.44182999881313 -1.92205535839751 0.16666276210398 0.859478436925471 0.530124360203245 1.53110495710527 -0.251709005186947 -1.2084427844097 0.609409299821395 -2.42636168860286 -0.51929774907522 -0.698043238759183 1.05750550611015 -0.8562359070199 -0.717989617422142 -0.393600399920332 -0.279506076711902 0.117204829743388 0.845771510821395 -1.49286698072435 -0.0694584728392713 -0.00843727585716846 0.0903129277359957 -0.310987810011473 0.72879185729765 0.998560448743133 -0.202760899645641 -1.84868669605075 -0.574066916454254 -0.00926281645478874 1.10879376516252 1.00481582802115 -2.04949082821674 -0.215368211442911 1.81544944049013 -0.0859805625621278 -0.411270174191617 -1.21522520976879 2.34948066025497 1.9309017955086 -0.105084150172933 -0.183107632657083 -0.880634024368076 -1.21447645616295 0.558217307800182 0.574462147567364 -0.162913967736487 1.71794613793681 -1.14321826035987 -0.825246356716606 1.10690422523508 -1.61926642701068 0.797977272359774 -0.123349400241154 2.09553458949153 0.709155086153732 -0.449206061832682 0.158253743706963 -1.27629211479354 -0.390020972443614 -0.283956251055326 -1.26579937191216 0.827392912024489 0.53754480909796 0.198833354416071 2.20771994379425 -0.0418192530253688 -0.0835492055811802 1.8998816907115 -0.341517756450161 0.637011925194512 -0.596751197703786 -1.16881656709693 -0.414632557077742 0.616348791136961 0.396261162728876 1.04854373302562 -0.200096618965682 0.734152705700648 1.1714966081138 -0.354884156561414 -0.644897464481226 -0.323530893980059 -0.433255879671444 -1.19811959596173 -0.89274048529635 -1.62315102384389 -0.112357010286145 2.19404082097735 1.24345575518387 0.527394967852631 -0.322240537618626 -1.16314659783457 -0.683950729329418 -1.35950312668373 0.554331629827763 0.0369757067882182 -0.661684450010986 -1.41731213431294 0.346531480931359 -1.45207120956548 0.448826225836055 -2.60142025663875 -1.28167354035538 0.156603717691551 0.0939327649910132 0.889019276414596 
-0.988972784667795 -1.28907250008049 0.677260343969593 -0.257785654883696 -1.13279536422548 -1.37262524573478 0.633083216132946 0.816275243622425 -1.46832838722338 -0.936490854923081 0.0579794566995287 -0.41641644638257 0.111000641132572 -0.0143672149039388 0.595535257777994 -1.60723095929001 -0.324275786364173 -0.953591407462806 1.46490828765201 -0.284932060250937 -0.479657886432859 1.58191094599202 -0.0809840824018386 0.250049826308235 0.0602924117753771 0.145501054911605 -0.829182114974749 -0.782757369305593 0.591763530106363 -1.10860951907995 0.765639149748008 0.507883100258826 -0.4388136004822 -0.409266473951247 -0.741489735481478 0.542650460480436 -1.52286066635768 0.0650113128289241 0.556864710936922 -0.417481186801316 -1.2170033878415 -0.423317252290532 -0.564416512626281 1.39734464089762 0.0404276420968656 1.00304802986491 -0.731160636585178 -0.21633589122563 -1.48607400424633 0.552084555260855 0.40201184001161 -1.36237006487127 -1.14290133985195 1.71527017785393 0.795431661683996 1.0306954224586 0.635393395345194 -0.0179482803169234 -0.684323162069417 -0.420508832767108 -0.49768459160394 -1.2827869422789 0.168846078116453 -0.494152455109268 0.139613197318454 0.520648314275551 -0.909763369600261 0.756659709491497 0.485677850041569 -1.14220274252287 -0.178420717498777 0.557024083094736 0.931178257731748 0.108198260630227 -0.628332457032189 -0.371894094872272 -0.458672543372819 0.217350687111805 -0.625063473925143 0.937338087440774 -0.515614492525644 0.968639297437741 0.140529964738889 0.737747925487955 -0.00970699651778177 -0.639812553264731 0.15541877767063 -0.742147412676396 -0.0251272314576876 1.25852833943688 -1.00518071761394 1.38170915109998 -0.785277269140255 1.41370634219118 -0.944125713951035 -0.847091456605343 1.52894136674075 -0.0472517142583173 0.466566276881321 1.18173012687818 0.533651006915928 0.560724739531609 -1.54374724054434 -0.795081432286435 1.12743816642635 -0.625238706639651 -0.247943219267468 -0.411200624230963 -1.28242588813394 -0.598928691867507 0.0050779804924158 0.499873595801796 -1.74638518498407 0.0449699893091606 1.55972744384062 -1.26179915740385 0.0970690916549438 0.236258524888823 0.713942103940038 0.361750021063126 -0.320434948720291 0.428383780588565 -0.562771372346509 -2.30842971129277 0.53801083725336 0.435789211895343 -1.61577830774831 -1.04003184682512 -0.302355271197597 0.824310242446983 0.208528765546527 -0.834276048168764 -0.265859129336236 0.000371930437652071 0.587771697744543 1.05940112879364 0.306665293112774 0.830484772549036 -0.16409232075715 -0.0893083646217569 -0.356716610939434 0.536539698493072 2.60064813983446 0.264439206656756 -0.162085314532427 -0.420154647024892 -0.103018679627705 -0.628292457474441 -0.7370540287334 -1.07765049904521 -1.37613380763481 0.180069107121203 0.837680118815095 0.580605241296925 1.63972857175821 -0.39094806157269 1.53861396624956 1.43067940387491 -1.24805232966397 0.0236877173488938 1.25809894446457 0.022796162489332 -1.63436835610018 -1.4036531975122 -0.124923984807983 -0.00475867507470257 1.98300063035649 -0.120344273110428 -1.69461274879484 0.864070188451048 0.831164218248534 -0.471002022511409 0.980544800711768 -1.04518817690164 0.634182534656996 1.43750098013342 -2.63789766677815 -0.398546577042807 0.991733243616441 -1.39652307814383 -0.885139837635701 -0.164109941009164 0.38426929272069 0.944691462360119 -0.345449781867037 -0.135899268477541 -0.317447839669642 -0.249987778558689 0.769349535660002 0.598450997429168 -0.106249976967088 0.87980349531155 -0.345814239435676 0.470721773515019 
-1.50835167094677 -1.65099587586082 -0.218656404259912 -0.167416037882536 -0.426339185802743 0.515924726212838 -0.670471215169517 1.91699376498569 0.945531595137362 -0.171776850602151 -0.888865805522317 1.00744967899607 1.91035763477555 -1.37683064923581 -1.43630289799412 -2.06739000310842 1.3096052882056 -0.151168428291268 0.232492796637096 1.79000794197172 -1.09940535529609 1.31615525026458 0.511468060431916 0.663632619299033 -1.3529225724037 0.503798031269489 0.756807274688 -1.28439145632859 -1.39967909499924 -1.0781757851503 -0.0624424807729467 0.802233064468754 -1.48766793185631 -0.729554134503313 0.565940473236571 0.238325921931822 0.293910786422686 1.35472583017882 -0.870367018868522 -1.95671258875154 1.39253826072518 0.959640273692305 -1.28163259995582 -0.475499985308881 1.93821972914194 0.584000610899356 1.39134072846595 0.0844304657269107 -0.984353702852956 0.47805455887115 0.313648735030697 -0.936519457802427 -2.12367574057957 -0.213327558718244 0.374085022330487 -1.95520879176911 1.84384420894395 -0.620255955811445 0.0830298432707596 -0.301280273125672 -3.26009300732639 0.705940485039357 -1.60130004675171 -0.547759206681339 0.0764286545120718 -1.29183909742479 1.17895408206869 -0.234911433867788 0.875581240843228 0.253152696427465 0.0425648031224899 -0.760713804629085 -0.562337885288337 -0.405807888328571 1.01813235593852 -0.293229998652898 0.987564447789454 1.45290953444732 -1.65380184398061 -0.281740877143037 0.677368410784143 -1.72518654924229 -1.96952160738025 0.943143786403175 0.28693525488602 1.41765554902009 0.362774842175513 -0.00540159561113578 0.255351284061944 -0.87082253062258 -0.409435986809531 1.69659506077828 -0.861538625499646 -0.0732673501874157 1.83780217598257 -0.122174779964017 -0.702913425553536 2.18735352258712 1.32744528212377 -0.13572704009291 0.550535824909451 -0.993849278630331 2.46919509988025 -0.87723637061274 0.140704772353182 -0.108695372935866 -0.196414194620408 -0.979713911483941 0.968875051969007 0.330920939825258 1.19434455348228 -1.4677336937052 1.04773725087957 0.170970240147604 -0.921936194372838 1.1333953045796 -0.927007781086875 -0.695542217863961 2.08068202344865 -0.542871014469842 -0.39945555366013 0.92545982595174 0.492354941058898 -0.146027473194684 -0.261035400206327 -0.977405044142743 -0.0187795349431881 1.07542530269362 -0.466748729047433 -0.656643663812462 -0.163304872647734 -0.453530132597759 1.21233971526526 -0.69731815154249 0.556016220940275 -0.166991520506581 -2.16997698872083 -0.299789198587608 -0.474958305190704 0.317141686717139 0.674272388362364 0.463914822104726 -1.24007601129142 -1.36291714405496 -1.65795878272468 -1.38487788889857 0.509882367315287 -0.293414184574461 1.44338422524699 -0.860404656615527 0.634239624018897 -0.592305863498917 1.28583899675622 2.08054528295692 0.654170708489982 0.377128129140545 0.599374783618571 1.22739590504954 -0.304579736293299 -0.404555036904862 0.489534650431837 -1.04655099971632 -2.46817009518818 0.673317870052707 2.19318474812429 -0.208968428823805 1.97739367831692 -2.08119902900781 -1.07525953547255 -0.354555551015617 0.0659275309525406 0.539398570025696 0.390019171547949 0.439096018721345 -2.03530430682707 -0.649277116114311 -0.39550791765686 1.07395868532085 -0.0105460423229409 0.545506506419462 -0.388814254827355 0.229367397473304 -1.82773803079365 1.66212324515446 -0.879744986985682 -0.0291053031459951 -0.461230495415817 -0.85363029133003 2.10204272935631 -0.626433482534404 1.32769015433223 1.39188637981832 -0.0201897623845293 -1.64620160688365 1.65105278273129 
0.0501059593572546 0.552374847631361 0.372552110579564 1.66757830678979 0.408015871468397 -0.554174016379432 -0.0821519395317064 -0.628306298884068 -0.554423359515768 0.546820512315322 0.38461733436549 -0.281659103733106 1.19217779107655 -0.3860749218529 1.54067723780574 -0.721520205158021 -0.495667006821478 0.211075124161956 -0.14135558077775 0.261563963955174 0.572007122880893 -1.9224400609257 -1.97899838670172 -2.24498875803133 0.458486268475992 0.0882817250322057 -1.82764982902713 -0.548374354276706 1.27194848973728 -0.643948653730217 -0.867617545184652 -1.22495276347147 0.198776385722917 -0.904657300852715 -0.0320898856191692 -0.536335630906281 -0.00175803182059764 0.599077500724961 1.37223917325351 -1.25452173859238 0.228217842823899 -0.00529167966068118 1.37242051487993 -0.360961720458232 -0.943875441472501 -1.04146105759153 -1.13040846992446 -2.48623237220671 0.882105621064909 -0.246452622469469 -0.224251641891792 -0.917629179098351 1.31582189218621 1.48733693292253 -0.0344383411322336 -0.0195978098617929 0.56315011230444 0.88965779374172 -0.615934576417789 1.3499211065944 1.14321381256431 0.0241727846356393 -0.137637929176326 0.376886532555578 0.374814793536012 -0.318721054431005 -1.76247767587756 1.07299134982123 -0.00725589624743676 1.10776735615648 0.0387139878009007 0.458111258165835 0.749362392896939 -0.117633064690678 -0.518245793742587 -0.14760005107226 -1.16083664873377 -0.549549609996987 -1.94132025742486 0.0999711026420318 -1.71422166173057 0.184354752074041 0.205379973724411 -2.6634520891305 0.586934721998169 1.05917966803159 -1.72311489253091 0.944219602184013 -0.732003956817408 -2.15585435310337 -0.487745731502482 -0.106402459429654 -0.59367622209282 -0.721956516582048 -0.899655124985096 -0.853206020577284 -0.150029433765977 -0.0911727694217952 -0.367618662549988 1.41750042360038 -0.806266955031051 -0.9899275724344 1.02728174662168 -0.735736626630506 -0.221352334307654 0.546725065388953 0.245161067047748 -1.70360346534838 0.53515205154884 -0.453880411172159 1.19622460778312 0.229895055262314 -0.062653232315758 0.61227903489066 -2.11983018895322 -0.121613108746217 -0.109625679006277 0.547472859382546 0.290036362753701 1.15719338113876 -0.682934558138504 -0.379148845175847 0.731388870113897 0.00177936662554061 -1.28930287764819 -0.959742859852453 -1.25810827933556 -1.89527110031177 -0.778299223901564 -0.15329778470483 0.881406851417597 1.84719518785483 -0.0191368888511123 -0.126232381820343 0.14421763942793 -0.268586413957374 0.442468739519671 1.10897159430928 -2.00993392970961 0.147672928259695 -0.655187245263673 -0.800717063339121 -0.426552851697945 1.07205020820438 -0.404792711296651 0.647500692076058 -0.908693408504738 -1.2001250854211 -0.393967396263894 2.34545842438146 0.0258704414695723 -0.984427399170288 -0.413822645314177 -1.2982845583577 0.351766104497125 0.548230335232957 -0.12006176642995 -0.170630004822836 0.807137257662916 -1.02524760750788 -0.172160439243944 -1.18550118027572 -0.913466923093459 0.117667977498413 -0.327801796062563 -1.85965098294103 -1.63168318516929 1.80519657693419 -0.440069214477749 -0.621803121049609 -0.318873442198903 0.287479901227466 -1.48058400223278 0.528597007984266 0.307467375675673 -0.771470846805043 0.355993201902456 -1.06691370930587 0.519056086592085 -1.31998823232471 -0.636171518341125 0.691826462472988 1.40931422008546 0.20242824635315 0.203116579802512 -0.809601105790838 0.0902071269164102 0.29163450679985 -0.0320870297657033 1.83938864434231 -1.16111630937117 0.126544235602544 0.368793259693039 1.50090656144591 
0.144587281524891 0.506027452226707 0.326969593151471 1.15880720760531 -0.954900272635041 0.368382777912031 -0.0807744787500067 -1.31799535454806 -0.168277671078612 0.247764449126785 1.50055703268144 1.03216548327029 0.475562870115839 0.975787275286348 0.574395071827977 0.178900760270328 -0.201033996471577 -2.20160402427014 1.01054755323079 0.357518129854007 -1.21872627463201 -0.895355478317004 -1.00829830153017 0.399351427042412 0.52361725221432 1.79961099902611 -0.735722451248865 1.00090820189741 1.01772799627329 1.36836003547543 0.339299226685846 -0.160296048998711 -3.56391743379306 1.45220091172387 0.212213737138599 0.899237630163507 0.635631056693557 -1.68872452878646 -0.0303064793296069 -0.433196946216926 0.371866952563608 2.21616745942681 1.08798944986151 -0.303474810937313 3.23003027876031 -3.33647790107696 -0.490322836462391 2.08787118652377 1.07728657801464 0.512785664056241 2.31521747820477 0.42655404615162 -0.779140057369081 0.105009529243508 -0.43023065169521 2.21764158352673 1.38344159327933 1.629503920143 0.550777984447502 -0.595916803019591 1.17069175110831 -0.203765831924131 -1.38701545998537 1.24235244444022 -0.722018374018794 -0.893061322933032 -0.277697592414535 1.38144714606319 -1.91485718161693 0.730748837490443 -1.24272390370651 0.934008538297358 1.61109608882115 0.278095820761901 -0.0108922722611476 1.66743674458521 0.989672462036698 0.710357791005352 -0.89171639121205 0.927530359760434 -0.238455589773582 -0.066684673214246 -0.700343265336604 -0.95656777766493 1.31963483781366 -0.130072056002615 -0.50602905691246 -0.932271880994765 0.134185386518373 0.960304809565721 -0.756908419698454 2.13299011022314 -0.083960870872156 -0.361777242430814 -0.284889403088628 0.694397360153315 -2.13471472389621 1.4416824317643 -1.43839479611584 1.5858228074871 -1.43464933635676 0.705126401130031 -0.644167061763821 -1.28335264753348 -0.590259102167194 -1.18291696739648 -2.02302232618498 -0.161257568633482 0.623566470999851 0.0404562776966335 -0.448930824741145 -1.01205561691209 -0.346701536846081 -0.500095760653047 1.66027026025506 -0.143045083361638 2.16380531049898 -0.540745902151652 1.52983555959852 -0.502825402552458 -0.23565173325341 0.185902301315725 -1.01036887128849 -0.396333099189595 -1.67017802154445 0.126217336861428 -1.2557917704196 -0.843250680895298 0.198932403634824 0.238539862782101 1.37978052353038 -0.95593133382435 -1.31280388403216 1.55788561304759 0.723490434016598 -1.67769698883884 0.884278319036593 -1.41400386626969 0.413744938161369 -0.973620671223628 1.15192986969828 0.906816141071901 0.611745796500798 0.0432147021768866 0.523241120842983 0.839467465297327 0.749395686880285 -0.795672354585974 0.833923630699129 0.767589879743415 -0.861466682591863 -0.131368406094177 -0.69336376991996 0.554024666017235 0.625289106395806 -0.364640565942858 -0.450467258173105 -0.621134540520282 0.818635685008351 -0.249127332375985 0.662601427127522 -0.0285890625392028 -1.34414675935322 1.1191402196925 0.200698903216923 0.354883972594885 1.21027509849133 -0.991456271892832 0.119502281744515 -1.13562750400889 -0.988848960477642 1.09773560217294 -2.0998171523199 -1.34566113112057 0.457173677051568 0.171609827295963 1.07152429291374 -0.951815862491976 -0.879584240948123 0.36072556208281 0.396013545297108 0.596429743595931 -0.157332619822575 0.0171314106056783 0.373898823018514 0.343150446792298 -1.12433276635949 -2.55535629333008 2.65588570459049 1.09637440139567 -1.3169444600908 1.02134241679003 0.526434154560998 -0.496833815801303 -0.0383329481135213 -0.293360028802733 
0.0231288682246561 0.583357395606297 0.519666009788858 1.11508560326319 -0.0646835427910787 1.29821297484794 1.9190309010209 -0.013006795545908 -0.571773090550011 0.0767343408320994 -0.324242069340274 -0.706021070840418 -0.298821303002167 -1.1264352516099 -2.01665866943423 1.51003270571167 0.59728271606213 -0.235171152142608 -1.25866782741984 -1.14352401273072 -1.07772969836122 0.31155456990084 -0.347652661099377 -0.76382414357635 -1.24738805019186 -0.0825710256804357 0.0171858254439676 -1.90364465906981 -2.96031848445984 -0.482318736350768 1.24505824433033 0.0411016214897073 -0.184651212396775 -1.05538024975455 -2.32817166857446 0.000702368371584571 1.48865298233974 -0.357048933050413 -0.801698995090563 -1.76533507009711 0.614434494383055 -1.54983806927161 0.828282722057073 2.08075272052307 0.696745536169066 1.03681202135879 0.479372066286534 2.45021080379218 0.843177360395314 0.567150878187475 0.547304165190387 0.619590964621988 -1.11344049458562 -0.107238052886038 1.15215227659389 -0.118986283917159 -1.13135791830109 1.08621210564976 0.196255457950419 -1.43782054101429 0.814298068663129 -0.526782674815982 -0.34602619283817 0.859088083187848 0.480995156248175 -1.60656917294682 -0.575006461518547 -0.187400500192521 0.656461388863251 -0.574609371341722 0.153894556390815 -0.625358729014802 -0.34634412272087 -0.0159036030114054 0.116458221465798 0.269324080591629 0.31519626245578 -0.226901802667126 0.380064289007592 0.953276010387976 0.376418005408241 -0.221705484486813 -0.511227151580818 -0.24467356108264 1.49136228074997 -1.54058393491061 -1.17040544261842 -1.28718986499888 1.49900602191446 -0.215891126989985 0.193057046140162 0.468402577154075 -0.790984390987406 0.0988141261611883 0.760745554198538 -1.50466613744152 0.173627996706236 -0.34500011694886 0.948551717529496 2.30149667649144 -0.580441180154499 -0.251607913192654 0.279344444067023 1.00380132067966 -1.55963849347868 -0.639770592828734 -0.733983198056977 1.15088328700619 -0.587992729147223 0.291053784086129 1.07067559530776 -1.39865648183401 0.633613758992964 0.097716578315224 0.284078060021274 -1.11585188760896 2.06787552910668 0.39166237268055 0.285212491194984 -1.84739657407337 0.70510357767133 -0.546743194683744 -0.484054478152415 0.47836402810092 -0.564141900778401 0.272658773468453 -1.3071928274297 -1.72163385021259 1.16974719541357 -0.829237738799139 0.693617613941664 1.25978321404783 -0.950287905197962 0.590067803230168 -0.449870024654737 1.32887575523106 1.06700646846477 -0.304656265654632 -0.63104189453131 0.0301336799336869 -0.530970724885694 0.0409397072193138 -0.595423454446745 -0.17418344400709 0.461276110977514 1.05671452435817 1.49183368062846 0.372717769240931 0.611901752275491 1.83985360051678 -0.164663080509737 0.803876968579916 -0.314644780946315 0.968942856229537 1.55619212805779 0.747716019979018 -0.524638152796737 -0.0545650504191111 0.322831246967529 0.0103457926015176 0.0723444579928024 1.6885283876181 -0.0748035777543096 -1.65036629073732 -0.245205938802827 0.281696685457662 0.511093146306872 0.796251245727026 0.997756943922678 -0.00532032243257892 -0.824055213692943 1.06579731769265 0.523670636563972 1.95201341104213 -0.678874583477327 1.21712061840729 -1.00044230064574 -0.395644072845303 0.0206483784575429 -0.10317751025216 1.65400055554227 -0.426699181394195 1.66475672671825 0.242274628209037 1.44011082280951 0.539993456446863 0.511250388577482 -1.29542751659885 2.01131104020062 -0.415740694942028 0.875898421336071 -0.575215543492253 0.173696205446592 -1.95235834433992 -2.15238258433914 
0.249933590870519 -0.0274572645572105 2.05376230306521 0.898233977326249 -0.140297174378089 0.294563325795813 0.236926881286342 -0.826498877528655 -0.669693530404495 0.479927673879731 -1.65827207703821 0.0567034521253638 0.743872149410205 -1.56437028745112 1.59608732772918 0.900391157945533 1.29566243645513 0.0176840772349548 1.07359071353232 0.367975373310407 -0.762702827571376 -0.553424261607313 1.63538290574129 -0.976514674410866 0.740077811129352 -0.111785076289789 -0.628990866052654 -0.203315157930184 0.0952200599504054 -1.90255364090772 0.381172233105915 -1.49058876241008 -0.806711266342555 0.197781449654153 0.555228076965117 1.66247946887613 2.64182691256198 -0.129587962213786 0.589410743010797 0.172854117508603 0.160166464772955 1.81989091980022 -0.302332836737318 -0.743631055339244 -0.279793275744619 -0.106182860044095 -1.15390644121591 1.1671726855376 -1.47325697106906 -2.17253424047883 0.270939560690627 -1.31255138307997 0.012529720592949 0.994484280007926 0.989842763509498 -0.151418548198981 0.36703822641123 -0.878652597818673 0.469296768853848 1.46884668011483 -3.01782003095345 -0.420901803907365 0.368091229326788 0.103594283434899 0.960468343489533 1.90990291040579 -0.436972602379005 1.02684886349251 -1.23447487727634 1.1090517960036 1.13526037475427 -1.61835786464424 -0.225244972364426 0.943332810813781 -1.2894146837505 -0.69799348038514 1.29121176670703 0.449048358098025 1.84025496376005 0.284216235122059 0.36635396114359 1.46318711857976 -1.68049975033696 0.260069868112784 0.907761484504851 -0.937980301361132 0.0861639588504398 0.150896367169567 -0.974286004306245 1.11406295012787 -1.57326151828482 -0.148862930922056 1.12109187988214 -0.681027675392306 -0.431466371218625 -0.92335149844071 -0.525509300855644 0.362710754864885 0.0935161757941863 1.13141621393212 0.390754817577 0.384588935407395 -0.220115409466031 0.141040546933381 0.985417707779978 0.330503308737455 0.258385721704117 -2.37391209200873 1.1780714045478 -0.123329555723897 0.307239792676825 -1.75976028973407 0.0535418517923621 -0.252862127598344 -0.802487129276201 -1.21075557612747 -0.773630502929547 -1.45025867970853 0.842181683285421 -1.02339965525316 0.729184188296885 -0.367901476340298 -0.368182622730997 1.85791236135939 -0.0517041147033548 -0.210775541195662 -1.49486264155059 0.428725549860554 -0.19297236609283 -0.33548598037583 0.261120368341016 1.01826619253881 -0.680779679365951 0.0751645938559643 1.34557536089946 -0.086338670149509 0.423370003756135 0.00581161079210779 -0.790146878765623 0.155295963039513 -0.323892511373611 0.897590282081921 -0.562378915417653 0.444549826418084 -0.0757560122203057 -0.544623543613113 0.00410893577661908 -1.06796003915589 -0.527277717714272 0.875968616010388 1.47144167922318 1.38258090099381 0.804885677157762 0.262385457656259 -1.62130372038848 0.549620412009921 -0.294183344966474 -2.00951611875383 -0.593190539225861 -0.122599892273745 0.335307794621251 -1.74040063466417 1.01553066734126 -1.25096913335111 1.71168357742329 1.44923986861435 0.254944912995435 0.53065517582862 0.404022273536333 -0.83095387100415 0.948874379641869 -0.375548699252608 -1.5773107495311 -0.510895720128527 0.178418192404218 0.0335352491554104 -0.18025505467206 0.30534396350395 0.390041187103371 -1.44311864089977 0.968545441554133 -0.274643936951856 2.21901809869067 -0.391379867794819 -2.16129557792737 -0.0268336365055759 0.602587947179748 -0.680810340120486 0.625711596804254 -0.494044052429994 1.67515007806423 0.0624423213884336 0.698972728992358 -0.199848490236787 1.2245651563239 
-0.469372399157044 0.454884759407582 -1.40438798397152 -0.38163199169365 -1.01150548002493 0.376138561792016 -1.45861873261722 1.73562169085404 1.44000589119711 1.75455324157654 0.572415701989553 0.265449920897207 -0.674212772999399 1.08866499033064 -0.972199899970864 -0.780258552705313 1.23732118020751 0.408047463276978 -0.664681446738609 1.87202608730481 -1.75369891950016 1.20092786227583 2.1443523041164 2.176127270008 0.970637924065159 -0.16122693061283 -0.42116474880477 1.15979063222627 0.467379949101582 -0.447212530671374 0.141422985891516 0.0141755125160361 0.992708983569319 -0.0657251348529642 0.00734279439501464 0.0852179402964659 0.383986002719273 -0.403516785960204 -0.0681801527808254 -0.746605780367625 0.201873779985932 1.53139237484311 -1.09010175316222 -0.194123626439886 -0.910370310432019 0.513109856195489 -0.986038700772811 0.42702375838002 0.975075493074261 -1.03642070393003 -0.0911762142648676 1.85294348593428 2.08175516461587 -1.45784025491927 -0.33352530623418 0.39587978397261 2.13682033361215 -0.222104279477697 -0.11942070776327 -1.90917525747447 0.411292258019698 -0.98004770267316 -0.0558412854317306 -0.853955271277838 -0.74587523858388 -0.0027309676359195 0.608269962948101 1.87095055471628 -1.63891404011441 -1.4289189857793 -0.2246082795679 -0.358066331584807 1.62475199590663 -0.264951669071637 1.48464186997232 0.141834643313375 -0.869194960945463 1.22384272596699 -1.36370714824995 -0.640910664906367 -0.0391671608068995 0.861495854888092 0.680593247162199 0.479394102652222 0.311424542661684 -0.384424378785388 -1.64016749207243 -1.09137900534574 -0.551261496818367 -0.657943788084226 -1.5213922626991 0.833936068559964 0.127448416040899 1.11632349002203 0.0272537515773201 -0.188273216361618 -0.889940168458256 -0.977300088533164 0.55569084037916 -1.63436315943483 0.237301610392204 -1.13199832274968 -0.414390698776231 -0.111400821233087 -0.598478918828625 0.0762713368322147 0.10611550411954 1.29986121532977 -0.506719471343763 -1.17446458459568 1.10663961121495 -0.680501447643274 -0.437286841557754 -1.02143715694561 1.3624420312268 0.156719875840035 -0.373999079438083 1.01829319081435 -0.765124121087196 -1.56181590053006 -0.94726839928952 -0.20706078961276 -1.89455370083848 0.982084098496765 -1.26105966840896 -1.32798519868495 -0.291617865279606 -0.197449596045678 -0.721368637916312 -0.470595088882773 -0.80201224819348 -0.102056899107498 -0.0143947377254386 0.0784219687371638 -0.248914929738114 0.00432534739311043 0.0313969928801869 -0.0419796230858012 -0.308835688360393 0.45624856208728 -1.25131029506464 -0.630808170290668 1.37447548132492 -1.52312458509423 0.414160424787353 -0.389455944915927 -0.0586311278528056 -0.691306232019161 -0.0655747563117267 -0.0207446615847355 -1.16205570115779 -0.929147654590198 -0.420470604030288 -0.398173907453371 -0.705755211682651 -0.200771520334942 0.68733514814192 1.63010730356162 0.66752791824589 -0.892095664819021 2.07827164610233 -0.457495886080102 -0.365808404362332 -0.280189249200094 -0.879961879928085 0.114395682524185 -0.676965324278496 -0.197355018325134 -0.235229631758669 0.361322621538253 0.438917672555317 0.103170292460201 -0.581010783358207 -1.52515799916046 -0.782555235732997 0.288713774844775 0.184253856275391 0.889464944749342 0.0152290423097086 0.768042017802572 -0.668729111387625 0.14426737234803 0.469153965041398 0.305878822766231 -0.812194269637454 0.165518107609184 -0.279961201344294 0.732484759893664 0.382217273564483 -0.822311501258541 -1.33522231612366 -0.60150260202511 -0.992649622377368 -0.798993695850575 
0.822664349778921 0.298953128281322 0.688646027494917 0.10214376609705 -1.64347022867034 0.34149913409872 -0.269429260796497 -0.255001333085486 1.54468294290595 -0.048525164690187 -0.0309145663755001 1.78050623426113 0.537666472583749 -0.400622234119942 -1.10762037193379 -0.306326267801934 1.68469159291959 -0.570800513476501 -0.230736534387824 -0.0222286599115364 -0.59743103931907 1.72754729047898 -0.967903958312448 -0.0715541572257063 0.358292885028058 0.681464423742495 -0.217243320861973 0.617803714433863 -0.173460422625171 -0.793071533806298 -0.409485975350162 0.200411886423731 -0.264914571615375 -0.430759939948352 0.873094615549513 0.112392155761742 0.364262348286645 0.562138230659643 -1.17010508064679 -1.24699570131467 1.51825941670025 -0.151284336193081 0.868973465050819 -0.205026835554264 0.812173168795404 -0.100681350189074 -0.822779893751985 0.162642937548425 -0.0601724268552336 0.336672849203856 -1.29894593178504 0.459905186058407 0.916057779634593 -0.804753728533704 0.249032890650988 -0.779512040058245 0.271872036051572 -1.88856722549585 -0.431474964564533 1.36494021432737 0.534981494018966 1.15999840976034 -0.188723600523157 -0.646368241765965 1.55545374984289 1.29952881948981 -1.26842981198375 -0.985590008201258 0.274833998612236 0.138224012866311 0.893168773222113 -0.263665524305608 0.702575538953117 0.74941294505828 -0.34860274522765 -0.712909221788893 2.48855688019606 0.953543764540143 -1.59633868956732 1.80372267535367 1.09415390398389 -0.0992913795152203 2.06315833364213 0.331720895207323 0.491032239676711 -2.25966551849286 -1.91945491297903 1.61287367674497 -1.49072753614795 0.61075640199536 -0.363445207124464 -0.724691236217535 -1.09223140356732 -1.1387524430527 2.7882667802559 -0.608856062698758 0.377897987141111 -0.0917550098729746 -0.792860935193508 0.0728511966615178 -0.999288325685302 -0.625157575120322 -0.562524198083683 1.78345648900152 0.706807925956867 0.263140844771208 0.428557868406487 0.437690297563576 1.25440333698428 0.973615784139732 -0.588938991082731 -0.139596009420087 1.20357748736272 1.19444417283375 0.608049849637124 -1.0437283890601 0.761587249879599 -0.487462472458238 1.61707057372153 1.80749523007058 1.27942579978418 0.355228141030102 -1.57591255977218 0.318598773974345 -1.27751195763531 0.262932197731979 1.8645179242679 -0.913826431337778 1.52415440160875 0.0658315839606149 0.253460841582541 -2.56616638628894 0.688246567378729 -0.0571882916607364 0.0793305392208512 -0.297792101838824 -1.46668352070434 -1.7574083950425 -0.613340336965837 -0.700726933716305 -0.145951778882214 -0.64676702960805 1.30283949309537 0.805880495270114 -0.12169372543097 0.522545457929213 -1.08192544228741 -0.00446964075815921 0.835097975607055 0.738784874579024 -0.733929391010531 -0.922509824097287 0.209293189188554 0.351025925264691 -1.15785299124512 0.701016937415791 0.282622699322746 -0.869916503943439 -0.143988230813082 0.976258433808557 1.52756345977794 -0.406247871416745 -0.463035106765535 -0.296901324619663 -0.569572114516502 2.16901781284581 0.391694661522303 -0.132064602697109 -0.448296493315275 -0.832277930749499 -0.441853376790558 0.370429825938938 -0.561941341129417 -1.3436473334795 0.743829509840578 0.247547844793568 0.534222665429682 -0.607008549512264 -0.389069806179194 0.0798023233254259 -0.916153444045465 0.000964539355893942 0.368708953856833 0.634086763974954 1.02919844126769 -0.64082701481607 -2.25468354026091 -0.375281956192166 0.298883612609414 2.31092141444906 -0.976067262572664 -0.82683626159371 2.16001089592333 -0.903473842279864 1.45632088394322 
-0.815570544147417 0.822279541652072 0.162911822829194 0.0436657739685278 -0.330097438563123 -1.01650986249354 -0.200908534951932 0.355387886265698 0.424872790684608 -1.67169797917702 2.20726398266 -0.796123099588502 -1.79876813317648 0.298717657709731 -0.18149000014754 0.623889861336059 -0.679314241581645 1.04610126666764 1.05153603418722 -0.0485002789058896 -0.333810691658759 -0.726271543697796 -0.252440445092926 0.182710611314493 -0.00408335700749205 0.10892455914855 -0.62603077207844 -0.469557173947159 0.0548326643354476 0.652444572019059 -2.02026511056869 -1.01166558288007 1.07993399998439 -0.138391658918071 0.13292387349204 -0.999559909125953 0.722882606359898 -0.138545676488744 2.33361090776177 1.72494975526782 0.35972446005901 -1.23692631614741 0.199189459302078 -1.71103757589802 1.85199406791108 1.14467743786923 0.899280241501724 -1.48623326993013 -0.731457263084678 -1.25403635606816 -1.07652518123932 1.15559534775623 0.173854482755714 0.182598242452407 -1.15810020441508 -1.74312495596657 -2.16428572127505 1.03645956392594 0.893601775792835 -0.0145281483949285 1.15986046040938 0.41415188736043 -1.05357359202085 1.03507644913375 0.897658102969288 1.6082734001373 -0.126737307626181 -0.811440735837829 -0.49407830131919 0.422821175370531 -1.34587637044875 -1.52166840326007 0.730087105867063 0.259027842107595 0.563467377395876 -0.637599296538266 1.0543543183725 -1.46703041053347 -0.063006538258401 0.952515401035356 0.209079431751766 0.381448588668543 -0.515830981352403 0.535870311344101 1.20743467391515 0.975932769940907 -0.797997386642279 0.422746107177356 -1.10085364458453 0.313145002376449 0.816675795042008 3.10996894147795 0.271814468131331 0.0353174975097241 0.883830358372691 -0.613030060900426 0.791773272576265 1.00605188958331 -1.11179174974099 0.0916941652994528 -1.65724498949333 -0.0277145934019539 0.313917385181612 1.03568439568562 0.0289304614415002 1.49395770742507 0.672323847612382 -0.950786913472217 0.0558688719337717 0.471357005835259 -0.127899714405564 -1.01374118191215 0.0694041985276521 -0.244059704766334 -0.0109220558463 -0.170912609365534 -0.689587333397624 0.0191262113754865 0.787676230533674 0.0198257032573827 0.94939543102528 -0.573807442659799 -0.513118699047949 -1.79200090180193 0.567569275649366 1.85398524993451 -0.32776481583472 0.133624284383051 -1.63164596786624 -1.91538381132055 -2.03250828807839 1.62300980157401 -1.23203216757427 1.29615025047575 -0.191021127147318 -0.438829475321709 -1.21664185336083 -2.4915601970366 -0.362911599620403 -1.01272403113158 -0.191921295289069 0.962023220398004 0.909586104477487 0.69006057342525 0.135198526032581 -1.21862543558716 -0.0643050799619974 1.43791441134337 -0.627822775380711 0.120007608183207 0.353106342599292 0.18044807057231 -0.310069938359788 -0.404171512593563 0.97317914107215 1.59163165807599 -0.134511448226901 0.822972419352344 0.828594107223639 -1.39150248447985 1.1151687562421 -1.00492768098305 0.69875353675849 -1.43059960177021 -0.601288028753735 -0.33692196965542 1.82470260860216 -0.32966552543185 -0.148874684940715 0.935526384530988 -0.632271437083383 0.678838039685163 -0.786167297943762 -1.35247035660481 -0.512648878346853 -0.615357200508473 -1.16998691063177 0.951900692326615 1.64335209008068 0.654964277023112 1.09751619463406 0.456261932700256 0.477331810672944 -0.0937464317685107 1.49513454592732 0.180735812669728 0.321130472169364 0.463578195318168 -0.381604171529782 -1.16582864777616 -0.447933306889827 0.314847323267972 0.1614562775813 0.556412081468101 1.45162683224101 -2.20190192853336 
-0.0510537974202618 0.0153905677885014 0.534987513156897 -0.666629025642582 -3.27900715613033 -0.271977615811476 0.206535125300117 -0.683738439084563 0.0788222264720575 -2.17218906627734 -0.314633784717057 -0.17311448344195 1.8774637037009 -0.125203074289792 -1.47743346310224 -0.254789937373097 0.814521472725333 -0.234274500374992 0.214290705856514 0.673786908525264 -0.384422901084465 -1.16108357098052 0.885671099747961 2.42565901460699 0.850567629263721 -0.752105094232791 -0.41212332476554 -0.50424260320515 -0.99558015401199 0.233337289988154 0.137127342958817 -0.161075817996259 -0.544573224482637 -0.912374152470185 -0.243896058796578 1.37047898185987 1.37869329687417 -0.362613539876185 -0.185698279665251 -0.928012146716357 -0.471947794496586 -0.0616424324379145 -1.37482502217079 0.383968519201908 3.08591570336216 -3.63867504679154 0.431049183650167 0.428683312535383 -0.191279871437184 -1.06658100483493 0.154153326123461 -0.49237273271983 -1.17445009411507 0.473486230304977 -2.21403424667786 1.48465828992244 0.82703759450815 2.03106733168491 0.954249466325978 -0.575978717905308 -0.668599895602851 -0.549688064734661 0.0157076196146386 -0.419507160431436 0.924742781216554 0.700448658604109 -1.94291691643959 -0.053457354233177 -0.271099326558424 0.890904069496031 0.995414319131966 0.705139038131689 -0.637569733098669 -1.06685863515611 -0.0528835776250186 0.622884294059059 -0.306645741356465 -0.0310655120474982 -0.146782842236258 -1.01288318564329 -0.538074274926069 0.436122830645775 0.420402567922639 -0.846296818606844 -0.0311492229298595 -1.15434000666039 -0.730784555269756 -0.691784154378744 -0.809469845899289 -0.561138498018561 1.42172363467674 0.63933468770244 -0.577848428517068 -0.0311500174705549 0.673663649752257 -1.2264834619774 -1.36662725064063 0.152825506521725 -0.912444324817506 1.61725243363845 0.0976756051576424 0.38676252659336 -0.390051009664504 1.41693702858867 0.894606544864913 -0.299119502938062 -1.7222043298237 0.826927834661017 -0.393245033725485 -0.744733755179457 1.32880995342923 0.767009226018908 0.482138687256486 -1.23975563113545 -0.73870364621629 1.62375234196384 1.4928544638415 -0.566484011362562 -0.403081750822609 0.311322874052697 -2.00217005084404 -0.574715253955125 2.03084015300125 -0.887404591367094 0.683838403258243 -0.767027598593234 1.51583122958165 -0.0698900462433186 1.77888475503057 -0.23819039480115 1.73079909318859 -0.474841586652778 -0.533785537271482 0.135327301494239 0.0269274351878412 0.308961458967471 -1.93758287944755 0.776812878555548 -0.872778468607149 0.325220999503457 -0.616432226236994 1.29299663861158 1.48769251464191 0.161275747242024 -0.908611333842521 0.169573601946295 -0.489653609915515 -1.37862055427278 0.911770182283085 -0.930970442770414 -0.415226670822927 1.60194158076382 -1.40854034394297 0.492368540734167 0.593298427854848 0.398597244052176 -0.36541035287017 0.929913261335696 -0.799928996637331 -0.186226163550157 0.327376454524808 1.22787068341562 -1.62171378082195 1.17973148165905 -0.373059731087397 -1.11333924459995 1.64419586831326 0.852647347186452 -1.29406707324668 -0.209650370766629 0.659327164203617 -1.50407213882848 1.00865148225891 -0.015232869877712 -0.894373509769394 1.97281264267949 0.0862813154822795 0.0708676630089405 -0.972892253599717 -1.19217680258866 -0.743707069990103 -1.09310392001281 0.394394116722921 0.274488573116727 -1.07624101151144 0.547643040835046 1.48770273099624 -1.14949468997667 0.895863999821613 0.833854232885269 -0.0136186261113644 0.959335177903089 -1.83962574796191 -0.550787595316095 
-1.58875427779271 2.26913483783306 0.827496299307573 -0.304534602850518 -0.425404722904312 -0.360336065031154 -2.04545804271536 -1.26417780706038 0.676042847786574 0.87817853452609 0.0199941937846793 1.38017492548364 -0.83249087418052 -0.191013344061123 -0.689172807761883 0.192363964354353 0.390793462758449 0.916636866250013 -0.426010661840891 0.357885154873951 -0.327015713761741 0.585435614982035 -0.114921735009938 -0.421435489651051 -1.58577801078956 -0.84731727141357 -1.49077195723369 0.0434471303614421 -0.339156415268357 -2.0205523865505 0.611785190569791 0.968590171302753 -1.46329210966701 0.469766772870189 0.0979552130573052 -0.656892765193208 1.43790069446702 0.241188303436738 -1.29095494507387 0.897122250684049 -0.540135661648813 0.0902401567260638 0.609207129595509 -0.812257882680341 -1.17095487746697 -0.212534215840169 -1.90396386231772 -0.315209166405858 -0.465488156063119 -0.821746735400197 0.151302821991972 1.29875519826529 0.277145446393391 -0.215150789291249 0.939907139054884 -0.259751854786543 -0.204383161543066 0.543447337266943 -1.26875595423065 -0.756908561900359 -1.78215023697487 1.97415883105422 -1.41940827822133 0.0268972587396431 0.00959520441848252 -0.0637900476622639 0.463831499766521 -0.597264126351251 -0.176350144106984 0.992215159776752 -0.196018637620144 1.00324543438207 0.0249890060962069 0.634385405936845 0.787956157363284 -0.345482593914542 -0.732982722474826 -0.151911492427834 -0.123565740823978 1.71904310696375 0.905262856542223 -0.0823337706153381 1.01857275690456 0.970253866941255 0.318731029978749 1.31096133471295 -0.753928343923054 0.433765719915822 -0.705995411983849 0.340547381727549 0.531613235096799 0.0827609575977954 -0.809356849953323 -0.754068570174272 -0.357802221136758 0.93401629777476 0.10910601462949 0.203060636489691 0.567701706028095 1.77415165531605 -0.804911904645863 -0.517934233709661 -0.880516823737878 0.664713714868142 0.396498202991194 0.447301126293949 -0.390564988627178 0.734798210685778 -2.12726528037612 -1.32871819389591 -0.178655188290661 0.332988731247888 -2.03494218272693 -1.73474420691641 -0.133436245974959 0.772427695347807 1.53771276539579 0.397885120480505 -0.445439481610603 0.0544040259959135 1.55718863016677 1.1635016371363 1.05104249393722 0.902781069905243 -1.79065649156989 1.22954026812817 -0.375531552085203 0.348199398129451 -0.032276743995613 -0.0538927772410033 -2.01554418533396 0.57581230298567 -0.248074133047169 -0.512195770237043 0.492547889247561 0.563738252519031 -0.279760032350738 1.39193042167503 -0.857074089457438 -0.930160691366907 -0.414824022265187 0.854461365263187 -0.621690608317256 -0.408868456510263 -0.552605434871755 -0.97103010214264 1.19871927643536 -0.000858955092953057 -1.65738373114282 -1.79391736255677 -0.32402554333801 0.275636203738843 1.12083709993658 -0.97844064779662 -0.871935827958223 0.116361618898955 -0.0438865409782731 -1.04306708783213 0.770087755852142 0.783397436014948 -0.22193608522993 0.896168843143754 0.255733136635639 -0.672409257537658 -0.783604275037257 0.930599787949066 -0.940991812686742 -0.14956181900338 0.858323540379262 0.349194898363104 0.998689501748138 1.21202541335155 0.244599149489973 -1.3621968589676 0.703040297483543 -0.906322322895226 -0.0488123089373261 0.218028350254071 0.188454821371859 -1.34223874212932 -1.39977407874498 0.215563112338744 1.66553887445543 1.62736367869879 -2.55167907361229 2.42373860624784 0.248995875148107 -1.57801186543492 0.997841998519647 0.41195518085962 1.64649232425505 1.20035739621438 0.727799050761192 -0.217749790579145 
1.04112067951891 -1.88426546497464 1.33674825174783 -0.683211140525911 0.73842367816548 -0.0591459824103451 -0.721905899355362 1.29581644910553 -1.00742866680106 -0.883379161457002 0.795208566639575 -0.533448784106822 1.44302077432345 -0.820495555904574 0.902460373303119 0.602446789569726 -0.650401759340883 -0.203006085001757 1.21029023788084 -0.815112358115898 -1.05830421257951 1.25731365123799 -0.557899377220486 1.56744862921773 -0.467846348515487 0.131686587419481 -0.470103570288254 0.0097847730381862 1.65311805355083 -0.227098390025212 1.71894544775549 -0.753955574302747 0.0121843734153289 -0.565237119964793 -1.13575626330651 -0.521194731814941 0.559175659021585 1.772869840631 0.0938424416688324 2.14433570430804 2.05027009720296 -1.88945790815882 -0.464556339167963 0.0147906991373962 -0.815371280779985 -1.59331601270859 0.525532242072746 0.464775390455249 -0.181045711880516 1.44088317765291 -0.208906637504706 -0.308443875110956 -0.597288108300049 -1.60191834701104 0.198701336462054 0.995368047714218 0.957834408062949 -1.66745463503208 2.06448517025738 1.2235515038195 -0.639029653156548 -1.40227900959279 -0.46834836564317 0.159230434614192 -0.583952531827207 -0.785098133901664 0.299358903442189 -0.696341432869382 0.390033550927786 0.361976053927333 -0.199223668181744 1.08958859470512 -0.872800310823458 0.786222968822743 0.1644173468077 -0.548224682343518 0.33651995458898 -0.161057056615825 -0.200356846791403 1.38294204968887 0.688306431588269 -2.75590660161846 0.563530107675772 -1.05333108278106 0.73104175907532 -0.0291362828711312 -0.0436697211615718 0.234720067558008 0.0866295199009113 -0.390796448503231 0.30707311957715 -0.461264372553865 -0.392413324754706 0.960077015875477 -0.428952081420962 -0.93001171292919 -0.787757101509776 -2.15816384565758 -1.07964665127526 -1.1819144366002 -1.49926980856015 -1.09978142760052 0.960027763648004 -1.43767351281129 1.70774926422371 -0.684832083313061 -2.41420361406265 0.230070836651803 -0.0810452834494405 -0.366446733828636 0.909030452243794 1.58933175272074 1.07075618532089 -0.175709209477724 -1.92886327604326 0.239197308345434 -0.0717155770752095 -0.185223995257173 -0.695734150104201 0.262722276973292 -1.62612833156528 0.617656843185207 0.877083021876392 1.01348264976238 -0.36867403181514 0.389777398111566 0.70702781782047 -0.324960926066483 0.335316598231183 0.25665760449432 -0.646299367224931 0.466201768636082 -0.0977387485488743 0.303095338675987 -0.472718798493405 -1.37171135703496 0.60996849841025 0.0124330309995032 -0.627773695708888 0.977449027416925 -0.0151765627490998 -0.356678077700332 0.220133504984019 -0.811272760160383 0.14854242196795 -0.354378652494039 1.54859214143919 1.9904063722561 0.686617717193474 -0.63543448995067 1.24014708108555 -0.357414781862555 -0.414185003960797 0.131247598639694 -0.152082057208601 -0.160828875888255 0.403181568792916 0.243276284222105 -1.26919184199365 1.8287717516462 0.869643993025898 -0.117678725805771 -0.739575104957405 0.368710616966585 -0.106617601073999 -0.931855115901487 0.364023934007026 -0.989509740277857 -0.393400439867028 0.539353370968464 2.03793935099102 -0.199563062185651 -0.828528146222512 0.320643225773631 0.439285343551 -0.180885492697496 -1.06547331152844 -0.896844503868515 0.656065152535514 2.60846354679884 1.7172976431222 -0.810014965362224 -0.756647430128838 -0.447720852952175 0.343548639971305 1.33811172223933 0.299585526671107 0.880354812966558 -0.858359954149108 -0.0728688173858025 -0.633828102111974 -0.685101181071095 -0.233448141844748 -0.770291837200189 -0.271119034868047 
-0.187726095970692 0.604487466217442 0.0925537704899878 0.690597113494866 -0.677432064723695 3.00287892970254 0.00772680130444338 1.15341622153885 -1.25401358582274 0.44001055059091 -1.92975364218389 1.93465097235241 -0.711976594020592 -0.415248976851502 -2.20896322743506 -2.0374075216994 -0.550209331671255 0.260612940366926 -1.27841919477007 -0.788392575144381 0.190313880353643 0.457820694772224 -1.75250029445568 0.397670253458769 0.272375432696419 0.408419395452737 0.268899983570898 0.419857989297316 0.75161265386663 -1.7401207767952 0.327026730916234 -0.12686591857393 0.581575390374463 -0.944515610864794 1.58875877867716 0.101824783813417 1.71494021385854 1.13018305680788 0.350884835863051 0.46291944224317 -0.0138610526763306 0.803582282804193 0.657931146787506 1.24859559313493 -1.01039873114774 0.0383362374375192 0.533795464758799 0.0384413653435403 -0.0788915639426547 -1.79365617648978 -1.10889052425032 0.635167589045156 0.800775070931183 1.40704444376476 0.0889691648553766 1.3123065749094 0.565618064565764 0.466534627632708 -0.140369161140644 0.550107820738618 0.678359485369758 0.0915082419640583 -0.00286148978544924 0.884626566015121 -0.085539748961533 0.435213466731956 -1.19396698372374 -1.51256247535815 -0.267911934116778 -1.23113290524138 0.997793046230869 -0.767231222826454 1.01200824840643 1.22420105956805 -0.765154824089989 -0.794801655917236 -0.331257473901095 -1.54546106132064 0.518709690756087 1.50084041027248 0.465516090444389 -0.81842320112856 0.170482567219112 0.615829097385812 -1.19886501088587 0.777232361980468 0.146984190818217 -2.09189121603509 1.86834956686373 -1.48077353932192 -2.91373330240087 1.45601130195557 -0.310254985016809 0.7417310194405 -0.472812092357061 0.560912426533858 -0.593624014273012 2.63699480852806 -0.175956260252605 0.774929162988328 0.761580185190295 0.375614325743274 0.766129766194126 0.469030900908156 -0.0882039026274465 -1.14181258083819 0.557081671779583 -0.110655598145463 0.651868128062302 -0.474192884915332 -2.23606844156122 -0.531784176316408 1.27575923270503 -0.323269053976799 0.915620586865624 -0.0560990872219845 0.22998243832128 0.913158548475235 -0.0233369321181626 1.202570527861 0.314765039735885 -0.900038581696257 1.17989557846808 1.48600390864459 -0.122639156597409 1.12393094882211 -0.134436879091674 0.190541565431926 0.416360056835927 -0.147582864931575 -0.911714601669387 0.871373112100155 -1.23096936330989 2.1048035572379 1.03953323304983 0.830565940085004 1.5157940841731 -0.57715406403649 -0.144130983744753 0.993984752485225 -0.115411575728138 -0.788122047046384 0.242688169115859 1.12751896166592 -0.971046744113704 1.13496934950325 -0.0445954932219292 0.242234357257185 -1.16542745223372 0.176649207113642 1.00267895215961 0.894035409614297 0.22154762548868 1.89793021850625 -0.783410645646826 0.0882996130405064 0.355747118933318 0.35608956854899 1.60872616047114 -0.23572299930449 1.05452838879337 1.75123895512429 -0.612855028448516 1.22553296094169 -0.49402934748175 -0.270505912745827 0.551891225454457 -0.334784935362662 -0.792991962286828 -1.63795033772117 -0.447640552820795 -0.360448371666365 0.16163848589248 0.0183369418627201 0.61242754338767 -0.210692243799196 -0.570289542413943 0.0988052188423572 0.546127642759046 -0.00337515283784934 0.783582581858814 -0.780305949259607 0.443304308770119 0.262026311804995 -0.0133872517877664 0.443513530349887 -0.707196274503986 -1.00658931449314 0.371480743571878 0.776231228692919 -0.191070497069282 1.1200698450527 0.2227278350342 -0.785258887292038 1.11399337654414 0.609961084129335 
0.246615029483132 -0.325320985549859 1.51294682224071 -1.34619631613077 0.207680410384156 1.139362239528 0.0896180192541552 0.561214291119201 -0.702223920586045 -1.40910486132896 -1.09192152874669 0.886649197111424 -0.0839635984204201 0.43458175095237 -0.348986909533042 -0.213154512853063 -0.202152365818733 0.418599989605839 2.40804829093482 -1.10906089558028 -0.118438878685633 -0.810984797262403 0.296460880511484 1.9763116287688 -1.19257651176543 -0.29011319448149 -0.171512930707088 -1.41642509796934 -0.355107037238427 -0.81747137530879 -0.179756349584446 -1.25492694772648 -0.249548233518884 -0.340435626750687 0.0397254796411995 0.327522890834672 -0.0700488925281392 2.93966967857259 -0.0575209979381175 -0.0430643334117516 -0.344304670622237 0.994425982148918 -0.93312838813529 0.475846174549824 0.0203748649040148 -0.898990962705372 -0.403581514973121 -0.185023302853378 -0.218802192793178 -0.0879630498819332 0.759031692886142 -1.59193301827698 -1.89369943385009 -0.395463404786897 -1.63182788615834 -0.847251773221977 -0.0405145713440552 -1.18666357625365 -0.0878375455507006 -0.418445156438288 0.327323404812666 -0.147325207252353 1.71039558014673 -0.168280378631448 -1.39582379514796 0.767790414617128 1.71905612721228 -0.35208754704296 -0.149865169975933 0.0340860072405288 0.56463431157206 -0.160307698579893 -0.290434007661349 -0.637148038329991 0.648366020358335 -0.465357841823387 -0.361138694004523 -0.329913142793524 -0.690931944277778 -0.827615415870113 1.40819416242036 -0.405764454342039 -1.17249777358583 -0.387323379476878 -0.103367536281654 -0.656377009356766 0.0287386514597753 0.661971041161972 0.441198558974245 1.5032056850718 -0.7423616351054 -0.333147933520535 0.102532855104232 -0.278087911074866 0.201673690742679 -1.75489558493889 -0.923701369334917 -0.783454402178475 0.567330781887464 -0.768744239967083 1.34816446362527 0.564929199917229 -1.97713722406508 -1.57609311175154 -1.34783943284325 1.21354818434061 -0.0843127437387489 -1.4245859018845 1.41870273908424 -0.603730557271689 0.373621892598061 -0.0213610080784956 1.07794914477255 -0.0170703947862743 -1.24643316747619 -0.905608312909672 0.839974488304728 -0.790033873825982 -0.622810994808801 0.699363246925126 0.386608844958892 0.947657777363936 0.816224129551663 1.3330363181704 1.14879480349154 0.108277737449467 -0.858193341814487 0.951939584433105 1.49450471026198 0.460414067013716 0.295450479293647 0.00749644646758593 -0.177216862458186 0.343486142416905 -0.341518915868554 -0.76667155548046 0.492499303290185 0.6278341695808 0.980273435801139 -0.692667847245728 -1.26716772932132 2.19818229652488 2.39314868084197 -0.266523239466591 -0.417115483286575 0.218659732394855 -0.727402543402573 -0.0379430485848057 0.439159926385877 0.338787613141683 -0.907624287589284 1.60026231905672 0.00988702429887554 1.56315075040805 -1.30961518106939 1.38912747824339 0.508388612374985 -0.479223553077312 0.900841608717851 0.558784879905236 0.48112410403928 -0.0762397747119566 -0.46238064174899 -0.964653055425487 0.180566252474835 -0.201005757414316 0.049246090099928 0.235750903275539 0.00804920567889927 -0.555018256000539 -2.56719807155644 -1.26197599435953 1.96703215918222 -0.694612305071951 1.30342222032398 -0.395297749041042 0.410109356536657 -0.855555279339237 -1.06752882838848 -0.609760717705729 1.21166871256512 -0.653076189178812 -0.357999929557408 -0.00217502994490712 0.531077000895534 0.0312558388281035 0.251196620248824 2.43383151654097 -0.0657717759098645 0.2002858228052 1.52124431647314 0.0210096633878742 1.04757593096894 0.164863982386497 
0.107868871766022 1.28921142134502 -0.422601560207113 -1.42705674516271 0.678569732514803 -0.250833591347835 -0.062970149309485 1.14240553022742 0.125074193844022 0.670255478933863 1.39366649145917 0.853620086653803 -0.204338722288884 -1.44650579755985 -0.549908013781088 -0.35882140916091 -0.0715315740471957 -0.739130331534115 -0.0430274884188805 0.270582495006454 -0.199035425442673 0.854538775022754 0.82371593200191 -1.06816854461533 -0.485097471745556 0.981595595961223 0.356641076742257 -0.210203096850888 -0.725843196216998 -0.68375248412328 0.834009228592273 -0.539044427793056 0.602423663209183 -0.0906882045970736 -0.863968018441365 0.508367873271105 -1.80265947828425 -1.80882216994102 0.326501874887705 -1.76142615506746 -0.254960799008113 1.01180184486647 0.0969084143326612 2.68501812007048 -0.148170811831856 0.567085316396756 -1.51398642935202 1.06942220088194 -1.45714737283405 -0.596047045361757 -1.43380972072002 -1.90496312164919 -0.711846084012448 -0.530536762327949 -1.59172801446276 -0.0542419923529434 0.259180432356259 0.0159959553315797 -0.0870735599726026 -0.556260412471174 0.238491554521147 -0.79528533312598 -0.0442213530176966 -1.43923307863062 0.914989754203296 -0.345015080480347 -0.318072368331484 1.14743261062099 -0.639404871435678 0.428803412064232 -1.59289505253387 1.32953826857769 0.477310997713314 1.6348586710727 0.550518550156432 0.649966215363008 -1.70011315166079 -1.17474897869019 0.0388834409335096 -0.604003852378367 0.540929167846474 -1.27951151635391 -0.140201880163801 -0.164698023003786 -1.5372181978904 -1.28858767317708 0.729075783277293 0.905959050766495 0.154118315177545 0.90021802588281 -0.514069542348074 0.374370295070147 -0.131636611341182 -0.668566385502608 -0.59013519364077 -0.0378692375166926 -0.333372096689607 0.916764379627202 -0.317715194847688 -1.77004049511762 -0.202689224905925 0.203230040783732 -0.529433744215613 -0.140276801519559 -0.903050874002207 1.82078182507332 0.420538549199622 0.541327690458568 0.86955319167013 0.0388533590884739 -0.454481923550707 1.18413235242723 -0.0731289286509812 -0.145512927271473 -1.40372095231573 -0.900051470728621 -1.52552420647963 -0.39339764918676 0.369279200172726 0.915262146846526 0.876025501089994 1.29297986240931 -0.565121525681489 -0.965718005727549 0.0191791351880404 -0.611872904620749 -1.18841525677139 0.31308888206386 -0.215016171536978 -1.12346227285336 -0.925732228887179 -0.928230328877633 2.62197348865134 -1.36974119421097 0.335101533461249 -1.52042674314714 -1.327231850146 0.346992540447979 -0.852074885455711 0.0901907002254698 -0.875200150343178 0.422947883853457 0.36164378304886 1.14517397213937 0.730244431275924 1.33471058100034 0.356132910682004 -0.822519305107442 -1.20531289304605 -3.25182171865913 -1.00426045304529 -0.676083224962795 -1.03563482928404 -0.0186269020829179 -0.344754048128908 1.19815500979261 -0.109701547357426 -1.33303479909854 -0.555321418136679 -2.09076613474725 1.71912047559548 0.00248502234998233 -0.987236855106793 1.58783476107175 1.59673818561384 0.450590692224355 -0.935358688544494 2.13604643967283 0.529361575962146 -0.204945969846328 -0.242743031303194 0.9408552051663 -0.0844066889438898 -0.0709198203521496 -0.571591567098284 0.527243445662671 -0.159400559179283 0.795783449227073 0.0391366183095399 -0.961348164612003 0.888733644246033 1.06168284361176 -0.29432495884188 -0.776396520029715 0.282764305528401 0.537455269519881 -0.515172690001502 -0.759581725478614 0.2809887234609 0.663404658384988 -0.0897958049345633 -0.218439402458333 0.509958055112364 0.359985613500777 
0.834914356768147 0.524433904063499 0.362057995559081 2.3081489516967 -1.35575164214278 0.454713751995289 0.805794627242203 -2.13080853529089 -0.903012702769815 1.19935550547656 0.0313818484464259 -1.73505595659142 0.357977939167591 0.384473655755157 -0.970306316965032 -1.61812472950179 -0.860426613077645 0.331052779579986 -0.880759349069617 -1.01402088853189 -1.04263632799753 0.461989322370843 -1.51277329139605 -1.4017806681555 0.971173193220821 -1.07972572130227 1.39506585934499 0.186634646506357 -0.139248293674154 -1.61167765927688 0.557289857711483 -0.134798187605373 0.425299271940753 0.472958878829709 -1.42374580101003 0.637313172276053 -0.340577504525301 0.0822668523828486 -0.695015204519978 0.372993401138773 -0.333833069974735 1.23461590120466 0.411224756999974 -1.57387968230954 -0.729050288583299 1.92349778233829 -0.280393005642532 0.0192674048581813 0.640113115151774 0.869851576049518 1.44448709081601 0.23764001762595 -1.45675300717857 0.331855583727131 -2.210475847167 -0.246582576538208 0.334424990122139 -0.164499376786853 0.133440917803784 0.917944150216853 -0.40818243073446 -0.737828272864092 -0.824097563161152 -1.04022060080994 -0.844867884661767 -1.52235005043986 1.34733775132313 -0.687788863903738 1.3772162299923 0.273487479989157 -1.15260903365754 -0.728034897830872 1.13780355055504 -0.689531660244284 1.563646288796 -0.147240884232259 -1.82619430747846 -0.713094215213084 -0.699078487141701 0.295651192371739 0.877863952830359 0.541243664196552 0.678716433986463 0.854949314231745 -0.683698266395803 -0.761653153615332 -0.171022096155363 -0.718363944266758 -1.11997753572761 0.825283387374875 -1.21292194950249 0.455213475672723 -1.00104973833525 -0.482218436303916 -1.03354722805778 -0.786427253337984 -1.62014440169809 -0.062229907316086 -0.0458973407195039 1.70527817405805 -0.0795084143775521 -0.43147409941113 1.15863628838412 0.139279402970348 1.51549111947368 1.21989605879448 -1.13840811820925 1.0128517045606 0.551359122354226 1.20097673046384 -1.72569273133929 1.3152501370107 0.698135163419673 -0.693014687818411 -0.663718624207805 0.0634008749594396 -0.946725994276185 0.403472424178839 -0.718741200719483 -1.31767058232349 1.85947764849987 -1.18561272244634 0.755936868431971 0.304944846399679 1.21220178581168 -0.324860873552891 -0.547333068218387 0.491345749981314 -0.962678439026019 -0.869797434502676 -2.89181903243124 -0.315113336687584 1.14939421667546 -1.10339783846823 0.133827529073198 1.28281656260046 0.0216876813550436 -0.636205995619448 -0.877818463727292 -1.4211237985884 -0.096488025698347 -1.45212046651594 0.112527738683101 -0.656837663519413 -0.00592478505927195 1.38048385004435 -1.35892956381687 1.56299596632267 1.73896985460406 0.0328894022769348 0.175560597714919 -0.376082335629997 -0.37327412688263 0.372696002447214 -0.711187831919064 1.3407528219475 -0.356246494939205 0.300623859587703 0.499648942244063 1.06591925979433 -0.87879254449426 1.61780380202275 1.03702909514607 -0.74901632373054 0.209744909627673 -1.26117267141612 0.763995940135895 -0.162707171119333 0.985138082902833 0.593402116184903 -0.987398916509319 1.04127205984428 0.298488361211344 0.465245346256725 0.73333693843567 0.881414772193258 -1.01382879261186 -0.499840540975964 -0.348747219631706 -1.32489065591993 -2.30130681535485 0.0103858447091065 0.288369535302839 -0.533506662159599 -1.89585982845408 0.963110916223008 2.59144787189856 1.05457661783269 0.00113405449389952 0.129784227980984 -0.250533744519724 -0.209224621580718 0.881552996708825 -2.1599027040709 0.275491817501922 -2.43739758344191 
0.501755036693542 0.00391308259674138 -1.69930187821023 -0.0488544334964756 0.569230405601064 1.09423860845466 0.917936841738275 -1.47795376855889 0.344969066452814 0.65489713823541 0.451042137113555 0.354748763068334 -1.0363744697543 0.866707113131876 -0.506462174627863 -2.34480261268827 -0.789873642860264 0.127147752342372 -0.265028419884738 -0.281367942107935 1.11046047849707 -1.24684161953113 -1.30122918626756 0.561960985714792 0.560122523274236 -0.0981383537608579 0.270285744532598 -1.61513845527837 1.10802946887299 0.623564331859385 0.219867955921245 0.855955668269527 -0.230069009153269 -0.461290120631804 0.369271968659943 0.25494369362376 -0.679771690069468 0.538713912423871 -0.168609658593911 -1.71152192009914 -0.387338668646896 -0.399897028423099 0.073923745167842 0.514179816099122 0.914235333526719 -0.553417288171272 1.28619389988495 -0.252541452513544 2.00810055778057 -1.07553825052662 -1.44314909484896 0.881769618274891 -0.254416510467911 -0.971053564420088 -1.03876660613977 0.525119960174455 1.18344235815928 0.573959340897127 1.14627583068527 1.08342619729566 0.182930460193982 -1.9256160727665 0.219054231341729 -0.187992584137274 2.45255753657622 -2.07848253839776 0.680556876004692 2.01689797170937 -0.849789068109859 -0.582078001329316 0.704753454168128 -1.46290505925124 0.915189303695415 -1.34024646560379 -0.232061123358034 -1.10948971964045 -0.29073596082169 -1.10455450055571 0.635506936635274 -1.4529972462989 0.41953716093152 0.175930377132895 1.00334411745347 1.77145371027779 -0.31716087564213 -2.28959096240834 -0.354217209358567 -0.39495288291986 -1.95249028374629 -2.43082198752008 1.43410035673875 -1.07583427977277 1.07157852765204 1.58653468736005 0.445333180510729 -0.43097639249327 -1.70328350420208 0.7452211509161 -0.667008622757001 1.36985499441733 0.0469447442954361 0.112246471954049 -1.03047551891388 -0.0924937147016155 -0.038701226728012 0.451446075139748 -0.857237826100851 0.480017560229774 0.918777172753156 -1.27037669748718 1.46711324336429 -1.75278344311097 0.796182838284867 -1.20606114143868 1.36880330634764 -0.640860161797202 0.944543900450102 -0.171550658501596 -0.228690068321077 0.806532270772499 0.195635491570194 -1.23262189459738 1.52498590057056 -0.334444177919764 1.18766116886419 -1.09878085309363 -0.431214949854084 0.948000869906743 0.62551370964218 -0.543409588311133 0.288114233821841 -0.0946699810939708 0.871454751826139 0.7044885672293 0.471774778600576 -0.987584560172983 0.849074992775922 0.373419770253757 -0.147634613391948 -1.59642036206635 0.317464311481098 1.09873280092265 0.527142078973169 1.3277903671891 -1.01991591859718 -0.346838381764371 -0.126626472553156 -0.683582973540711 1.67291884167221 -0.238417320036786 -1.59336965736292 0.844969180830779 1.53889677042369 -0.949160111327132 0.682522995759353 -0.332382023531531 1.73169380024148 1.02546446841149 0.775075204000718 -0.072372017628879 -0.646331340652073 -0.423511380210453 0.979878289971918 0.0566183998418348 -0.135850182939364 2.18019116570775 1.50823465046166 0.407897645890206 0.228675945675609 -0.0488276614632522 -0.275577014942594 1.06170104979294 -1.38729675718847 -0.643235973812001 0.375189864238424 -2.5941489007558 -0.736505171046034 1.19467437880128 -1.38149041055728 -0.474181488484064 -0.607363308819353 -0.243680366654426 0.328799153721846 1.97824029694848 -0.344564342467944 -0.772191859256222 0.317166971883586 -0.693357889069593 -0.712120452416703 0.507220237966665 -0.829749876440807 1.21435903458263 -0.634238304429138 1.0891331525225 0.685253746877564 -1.25480886069627 
-0.244091595691154 0.753806903000533 0.752063051592542 -0.218426144827309 0.463917540384632 -1.87403195107621 0.864000516475761 0.831009466870357 -1.40257871822714 -0.0827129141904072 0.680272221155326 -0.484817969346674 1.67380810156781 0.064424412611828 1.12458578144432 -0.77956947005025 0.70797023981788 1.3626203428128 1.09937313609176 -0.0360087839563574 0.782709913477075 -0.335089812511742 -0.651641738789792 0.305538532820188 0.448072128937716 0.516400107050335 -0.211161577142616 1.1315172075373 1.55720396198637 0.513588664297858 0.135603934711187 1.70179337850598 -0.464921810360796 -1.70870742983813 2.29496380528256 -2.00262397476359 0.371549007002404 0.304054107918048 -0.0135767103618875 -0.972549907932335 -0.137967224472617 -0.0709820052014896 0.482730583166355 0.00896928405825197 -0.0774818963553874 -0.0194951994850143 0.0681516079583798 0.249666806307661 -0.2770988871063 -0.987351110139605 0.0505527446761692 -0.382839111628094 -0.432524946433796 0.415183718634413 -0.669996240228642 -0.584567301740029 -1.19738636585273 -1.29668269691306 -0.119394227652919 1.10824931927631 -1.0735678622859 -0.781934052701303 -0.689137204261289 -0.913288783467238 -0.28004874614194 -1.45466727862171 -0.199650223821265 -0.524957230358246 -1.74375277701454 0.948222782923268 -0.34190794947677 -0.479405368128007 -0.550063099627202 1.70223637870726 0.205590690838856 0.768166507788878 0.264378813191803 1.58860242928895 -1.48379814021326 2.0283837395869 0.264013861469941 -0.315534837445066 -2.91399886877565 -0.79125119177063 -0.299945916184341 -1.01988148811961 -1.66205864133558 -1.49861162509748 -2.9156624691233 -0.206970781636356 -2.30376126402311 1.58748269135028 -1.72186810998182 -0.132018539195059 0.537130443981138 -0.763538837680852 -1.66291263915455 -1.4766047057038 -1.97376188379753 0.693963585089352 -0.426179472617023 0.973940146954137 -0.296779745625751 -0.427832352900871 -0.376087879128915 -0.079978056473533 -0.866777061771931 1.16703563595395 -1.71047900457147 1.21275627750149 -0.754722958893166 0.566396681315843 0.0737747038039756 -0.142981543567021 1.20093720153523 1.16029688686039 -0.636636487444346 -0.136349317013243 -0.598400950624487 1.58595592920621 0.0345517817924619 -0.651853058333135 -0.153943991233202 1.41186411247875 -1.73623746117576 -1.38007757243461 0.615791074533511 0.287381914237082 -0.396100733862849 0.835513955867366 0.00946448290204991 1.37872386401377 0.0215424690702793 0.539811788653738 0.385646235506001 -0.504193949162954 0.185952193917176 0.489617097678816 1.72511195792672 0.160105815292496 1.08861054497403 -0.039946138434288 -0.5775066757997 0.146454603206923 -0.725551272665197 -0.751903510370428 -0.602733313777747 0.528932570091545 -1.27691444417531 0.293157177921955 -1.19153665703437 -1.14756587110099 0.624999963893811 0.986900319969976 1.00667995619509 -1.84478877088422 -0.194452800808347 -0.693499402006019 0.527298923746737 0.25568378769552 -0.409295637418142 0.0309277045007396 0.408573496245903 0.294812489222438 0.0372067216388139 -1.04447069981042 -0.132051330187232 0.531616260892844 1.32058003166085 0.597465516521757 -0.24995493960673 -0.500089963948799 1.19209394401217 0.443523900440366 0.250409616878058 0.19570667010301 -1.16985468339901 -1.47166116039073 -0.19293307611197 0.137710220535051 -0.926249655643167 -0.065638358060293 -0.138446129745392 -0.658186273202931 -0.555335268469409 0.489762144329986 0.694158307587619 0.299189719590088 -1.36415814963288 0.318676181264857 0.0684859391771551 0.852198977624094 -0.151481196454878 0.318768062022988 
-0.0356849235008914 -2.10465222666473 0.182017462397271 0.331348205585369 0.830151110506368 0.391293983232778 0.633209739795289 1.53126043029081 0.771937722655071 0.79618272904196 -1.83428475992451 -1.44815596497115 0.26862754237938 -0.780640619095986 -0.264531560079135 -0.412175606607939 -0.260462159331057 0.569141457341307 0.957429797830872 0.689655547946241 -0.471412964131543 -0.872108023768944 0.960636813573702 0.45324893286913 1.09031561142602 0.305555068538628 0.558029078141325 0.539167725415778 -0.634754073233485 -1.03892984675988 0.927558284054844 0.728520461722184 0.80401432517809 0.905417589592769 1.00144553112777 0.153798618022267 1.11205449305128 0.451740764595161 0.651127602333396 -0.154872257417064 0.0546235535173404 -0.479351687164536 -1.67270411457118 0.247144096998628 2.63505326061577 -0.171918781787162 -0.757790286324084 0.625139183108849 -1.13440685103483 1.03813838275338 -0.683099734403369 0.738130591206904 -1.18403271028765 -0.293538477852285 -0.728962883528454 -0.825915015359224 -1.53664638839168 -0.0402517445796901 -0.066610604445505 -0.808984425566333 -1.15042609061792 -0.668800245578899 -0.395436519171756 1.35813898405752 0.690355276316986 -0.313182126168992 -0.983499699294798 -0.771015768129655 0.0180686480509422 -0.164106758992948 1.37790226669228 0.0398810222933823 -1.92229947378605 -0.824974122998083 0.2234539089318 1.00831954233558 -0.811686022783369 0.729206638557592 0.755851113078017 -0.417088014958454 -0.624344970008869 1.53001522084095 -0.00409989523264333 0.311845811779529 -1.04163104989602 1.12730555242054 -0.510815252870277 2.19062222708735 0.262297793053757 -0.788538702494798 -1.13964248672538 -0.195814958389186 -2.58676578523712 -2.90382605768038 0.987973087803342 -0.667857095438102 -0.0238672660329239 -0.777595479008554 -1.16804165192259 -1.26026343327741 0.588989679029313 0.66268407836613 -0.177155134973496 0.221896006963516 -0.464880077789678 0.525439264513503 1.5405862493387 0.296942340966981 0.160800218392905 -1.33171564763697 -0.623194415981658 0.762569572327598 -1.19302519347084 -0.00364106112440469 0.345569917422251 0.559083539741458 -0.043420741648935 -0.249647799542426 0.257557646770962 -0.413843249966938 -1.73669734878766 1.53236020382675 0.597193028078628 0.077727021836946 0.0455226370472784 0.197938467512937 0.224667810722407 0.904135943608547 -0.157011554559729 -1.57652092465717 -1.16590692369506 -0.85186834700753 -1.06131736297558 -0.344047442930944 -0.543931917488819 -0.684402437918533 -1.22052171804135 -0.191966964959409 0.255145207595181 2.53683277064983 0.367917452805997 0.502240507145884 -0.0261576406197472 0.532739240623738 -0.860340376945042 -0.227354285408937 1.50089126785184 1.13692702149159 -0.587801723107043 -0.30312904719681 1.07152178915742 1.1428923848745 0.0829701446250572 -0.512991978651345 1.75365979920514 1.08943026811926 -0.303512282039259 0.483848568217405 1.71661257386062 -0.518497019802236 -0.605484346254378 0.508531368627898 0.261367222455276 1.11432458018334 0.109520263190971 -1.08988264613502 -0.205776963445823 -0.0411899123906211 -0.870770011789111 0.312862046568352 -0.833801399250174 -0.4272641335108 0.77473023271802 0.427003899756574 1.60837089029083 0.713412279498389 -2.59931132917978 0.335106612341981 1.11006559139529 -0.818950116720737 0.934825021600104 0.915283166249414 0.579095396033068 2.40514687679974 1.59519832791092 -0.849627446429476 -0.246487850271402 1.12876695454199 -1.2798007744259 1.38980547847773 0.533078544230687 0.284542135916491 -0.551642954414078 -0.835666394513286 0.64387563372492 
0.44728903003621 -1.80425832165902 0.466951760800259 1.47292263174567 0.0894688943717129 -1.85761108036602 -1.78508281427081 -0.0448631460323046 0.511612737495774 1.12352985578898 -0.471586856721845 -0.990128249862265 -1.11916123493661 0.350099248091758 -0.950930966755058 0.919432905280379 1.51324715345728 -0.847857953054096 0.52999296868296 0.239591871454175 -0.048983657243556 0.732345015015346 -0.702168346027944 -0.512864655259391 0.368651179532376 -0.326318083568 1.62323206499687 -1.65245086606771 0.293846632338112 -1.70867873351283 -0.040282679853802 0.648753832264303 -0.117419563334531 -0.951036502912566 1.00827894333324 2.33097431786367 0.436392105697664 0.0472031017244657 -0.573538859547979 -1.04830850801365 0.0878773714135018 1.43726815891224 -0.795235673038097 -0.107475187140771 -0.361724431424252 -1.40544637698729 -0.852222118254347 0.214021208446225 -0.00354185814808748 -1.35202767019671 -0.580643937533896 -0.320746788936173 0.529788875479806 -0.479002932845897 0.366006068897848 -0.162061693751871 -0.628033066110045 -1.5969218862643 -0.418425954855617 -0.446186289108555 -1.28995242550845 -1.34875678780887 0.362094978716502 -1.63775890265845 -0.564632206479861 1.75409610387822 -0.858279421334491 -0.693636902478437 -0.801904342964079 -1.2222208088906 -0.244613522593801 0.137773047463373 0.58162415334397 -0.0519722684890923 1.39328619561275 0.668346783657004 0.810288002599661 -1.26360883766894 -0.975099294433368 -0.418491773269385 -1.30106996660701 0.415510371828276 0.00280052540007106 -1.30413221696293 -1.0192246164113 1.63288554545753 0.794736355034598 2.18415266464131 1.55632754472585 -0.937335643906415 -1.96031126881214 0.704533792842441 0.848119610082783 0.240253713118564 1.27468271527917 -1.55719938420161 -1.89059576718433 -0.815767866878618 -0.419640009467991 1.16722738328411 -0.348945126796566 0.850358042965771 0.821991766872852 -1.14575938687616 -1.0232344098304 0.954559266015629 0.855478184438456 -0.901702736147472 -0.0816157242294023 -0.962120523235725 0.176821801889253 0.274223576838069 1.83070152413702 0.832514172292147 0.800379640302185 -2.58558801514329 0.633357767146104 1.09930587238562 -0.224389029137724 1.16898287545449 0.849761670680176 0.255030677186001 0.736367528379837 0.901998958133688 -0.310335110257509 -0.366642816650408 0.425474370735768 -0.928962031981767 -0.980530289015941 0.16011253507307 0.0810848780210305 0.127857133350974 -0.037094799997327 0.368104079352219 0.485485886324466 0.2335925170256 1.46297733347188 -1.18871219110188 0.992214319553329 -1.1905115031223 0.378942986007927 0.334101245956747 0.276609143740822 -0.1415612966259 0.456556634309902 -1.55843069739034 -0.921476786404231 -1.11603167122381 -0.790737697080827 -0.970056282287214 -1.76806163189113 -0.446393156743812 0.867840740752564 -0.615868862731753 -0.10221853462551 -0.90318651194722 0.480453146602396 -0.835588664902099 -0.566866127122226 0.779000593033475 0.346696131383703 -1.19897589641342 -1.6804961142565 0.243632645368923 -1.36710692792139 -0.970240735455444 -0.584161885226636 -1.20534926477118 -1.59994636914396 0.871758174970649 -0.116190550216675 -0.484686887011478 -0.549310707104047 0.566362067612767 -0.0765782629287872 -1.04702740588258 0.676579808852693 0.105421044571418 2.46077201213586 0.685227432514918 1.77197441427599 0.491516311718875 -0.548206143022587 -0.114782381533712 0.816963869647545 -0.20383930652981 -0.201550824402686 -1.25552631047654 -0.42984459430284 -0.455891357342117 -1.89324124920166 -1.55168889164159 -0.25356364096742 -0.389857829034751 0.0734410507256605 
-0.633076812879715 1.97492548237898 0.23249435285859 -0.184336654941001 1.79540535534217 0.114751774709109 0.708828930290625 -0.481900172398901 -1.46238828306249 1.29960528800775 0.633273865352429 -0.199191678974361 1.59153889274242 -0.578824992866592 1.35368140735639 -0.904592670114123 -1.13014474183187 2.24808401241484 -1.48553762635228 0.407750646033308 0.367939709347046 1.84669454586081 0.30073718639575 -0.142595372134851 0.589967531976808 -1.00502512391747 1.48712014973218 0.0270834078085547 -1.18234016707741 0.257247993246543 -0.202730255290386 -1.32824576484378 -1.10071508988308 -0.539448934479955 -0.436925119601929 1.28968959092406 -0.788961904726493 0.473816725661053 1.60699756517146 -0.220123110428035 -0.284744268300333 2.18014013991756 0.501921620389796 0.0126985076961748 -1.08653239537199 1.16878965393007 0.697672379407041 -0.571484971535546 1.55090545384639 1.18851713195636 -0.604809229064522 0.865693365433342 0.808292895843327 -1.19104435211532 -0.991748971724144 0.0414179857959749 0.727009668214419 -0.531039160565736 0.542976875360696 0.577544642735094 0.416715780366291 -0.135632444643211 0.0264439722488726 0.945416971586085 0.876637484673804 -1.36602789926938 -0.226537769637328 -1.12756187526035 0.322286722357017 -0.289108600252239 0.502855622617944 1.34494047663249 1.10565216740989 -0.718824861781831 0.518518602876008 0.0881017756870783 2.06193864491228 -0.127978562510712 -2.01270238860331 0.539783407110213 -0.674923976191159 -2.07431636903306 -0.183954060102166 -0.224667504707565 0.0633689421944358 0.604283755148417 1.49735379121111 0.965810822132635 -0.670191842649146 0.246795809002857 0.393844057869484 -1.18004540547597 1.16247295211706 -0.199730045550748 1.53439004607865 -0.983845250669475 1.32789246724614 0.187601680064479 -2.29536092848781 -1.3893106609074 1.84393712207811 -1.49499478763029 0.639077661263915 0.265742898238616 -1.80794657842438 0.485724355465226 -0.729521498085104 -0.948749241585744 0.408842588701344 0.459045171210974 -0.126748945768942 -0.311825336675981 1.09603124674162 0.123801546224122 0.427044012626553 -0.882289175644303 0.943574793322372 -0.61439034286095 -0.24035632479847 -0.253476244674889 1.11420369300022 -1.01306085606093 -0.644438004932147 0.480578390932544 -0.492305113515774 1.62757017398768 -1.24525166126551 0.618778334786665 -0.149156637179301 1.1708014918898 0.666397908523508 1.61227777114007 0.248412947930241 1.227859473112 0.415436408604961 1.52612853704764 0.324380103926838 0.535798488828816 -0.35308021020606 -0.537453544770624 -0.655197973603479 0.865511745478268 0.898360932780989 1.12758021930773 0.874653454188868 0.212551786292843 0.0366615991569621 1.30706511206612 0.468933016411052 -0.541575016575183 0.667394419778247 0.552864745176442 -0.398550484628821 0.193112965782355 0.857830708623646 -1.41440933733792 -0.474276379878212 0.0381926021300374 -0.129865419290237 0.796715362751112 -1.60564497297617 -0.64558970503345 -1.40491446070053 -0.37560989838866 -0.0937701862392961 -0.0716245532070942 0.869906275198932 0.729749348363606 0.413597758257759 -0.930501335638848 1.05429000104235 0.0201969328047722 2.3834031635612 -2.52417573691153 -0.465814429252627 -0.144347132424242 -1.04712435714474 -0.451362358238004 0.014958184949555 -0.759385650554015 1.65745382006057 -1.09383178275788 -0.565873100845544 -0.0435880452874448 -0.866411429660908 -0.733508122706103 -0.59282884690403 -0.0371319803975147 -0.446200313952913 -0.858598495370817 0.172107930694147 -0.707060405932601 1.26189097279611 -0.11959818761458 1.07128759793454 
-0.932440033286508 1.04618162412644 0.116447006559482 -0.157434855198144 0.55214656357237 0.927414264897983 -0.0768188463450218 0.205322756270951 -0.375957241279385 -0.594370772365287 0.282511823568599 -1.35256581132782 1.77034359729814 0.289839595464636 -0.868175880123393 0.0279476452055629 -1.06465214047979 0.989584768002589 0.58221346247432 0.0279854605965633 -0.324630378332099 2.10925031717664 1.39536862883468 -0.533525101412616 -1.11037988429499 -0.130650375368322 -1.08001612355095 -0.782065788957377 1.7838508104007 0.511988272650942 -0.282171825806201 0.835261202562483 1.18073817130494 -1.91278335880614 -2.00253266326929 -1.17781071347854 -0.333692195867415 1.07947524757897 -0.245436964028106 -1.03875566153534 0.257743060925885 -1.24109196340202 -0.354817383332485 0.546567359538794 0.131084037401419 -1.35853382875522 -0.461947153636701 0.932423642520503 0.75960785101174 0.58596617259103 1.56712612835984 0.0274359079153776 0.131185739239656 1.46910834900271 1.7621746372229 -0.512876813440312 -1.08264712001209 1.16766950210891 0.89940631893428 -1.4139174137719 0.594486098949521 0.938604503920376 1.58761656387839 0.128279336019309 0.199692821966494 0.511755575418276 -1.34201692069597 -2.52165332816843 0.528046404932421 -0.0861895548650458 0.00541546019198625 -1.51336634546566 -0.661389302218007 -0.239214907874348 0.873085301521754 -0.528882503525814 -2.11860148771436 2.22445160387535 -0.95246192349158 -0.854775519030163 -0.215819550779801 -0.692718504695581 0.31552116793537 -0.562080355596697 -0.227814866409142 -0.586866201455416 -0.98705580188463 -0.556393870589509 1.91474616738284 -0.113691245020596 0.822899023849223 0.131420428740053 0.14715891458697 0.862060645215706 -1.04945417201033 1.831771537875 -0.791378427172167 0.478217829363303 -1.66045202592595 0.933597263705203 -0.519603167108773 -0.315681353177679 -0.488131125775522 -0.210416072634108 1.06609036016171 -1.07439427586297 0.944208344733929 -0.819572918881083 0.0330535056752975 -0.26494351292207 -0.592787998963482 0.561584683883958 -0.365233079427155 0.236990599205114 -1.25976054850535 -2.08011438333763 0.376931882673037 1.71472838980501 0.815212615553956 -1.22539412285644 2.37484251039407 0.624944287660518 -0.474643735234758 -0.284363427320347 1.80926363871601 1.35395714045962 0.937616898661155 0.511667465906112 -0.688559622084959 0.498244570476707 -1.60528467486341 1.54920391434404 -1.87515153702705 -0.579514462484677 -0.567361020793451 -0.0289761933252192 -0.0238985671952205 -1.38032095920779 -0.367772486616759 0.130255505076622 0.171666645597864 -0.73510452196882 0.712766679592407 0.360618956406876 1.98916767202743 0.220139484117829 0.773453319312642 0.0503342685859913 1.51140015394971 -1.33590622081629 0.407285961231503 -0.819337558421701 0.325275859108564 -0.195022359005199 1.08440507526934 -0.343352300389844 -0.543299001021429 1.42128316617743 0.562772341618128 -0.4750174112134 1.74926318387457 0.481991975943709 -1.71000100195711 0.226106131760666 0.275058375466388 1.52581450685596 -0.124815768835598 -1.59980833506925 0.0114221723371457 0.329229723194936 -0.024325609941291 0.951411493635783 1.10822604378538 -0.811302344059166 2.2340677362683 -0.827557720358614 0.049564018420691 0.719986634481025 -0.620080028300095 0.0158416460239107 -0.620662545108545 0.907608558628063 0.379025038808471 -1.65683403161365 -2.14340093311265 0.42260999102904 -1.00026041870565 -1.97071126333822 0.445308360454035 0.0361700283625755 -0.498179902888113 -0.535950375870042 0.573016592462512 -0.986662965420217 -0.0265253094837827 
0.889773737696435 0.949169610234474 -0.0973246970992727 -0.350206897709821 -1.91703437885816 1.23817958212668 0.530070981600426 -1.53120486886401 -0.170185764172057 1.22783001067641 -0.141157463628088 -0.21266741039698 0.247540692553138 -0.0188245348292022 -0.161044517718641 -1.7291213667387 -1.11143983745594 1.46597169091234 -0.723884026674198 -0.422819692107464 -0.225232317154209 0.505610519517601 0.700592218268291 1.25834769331246 1.86728585662925 0.723322377302231 -1.1228349636521 -1.42812657984789 -0.211765048102664 0.53026583513985 1.6707582195901 -0.781273433320629 0.224844544386958 1.48330430963523 -1.16763228932902 -0.769703791232195 0.309428833996474 1.56894939670777 -2.10188762377264 -2.21048619935309 0.728276661793719 0.493604968920348 0.552222820816001 -0.443668592590116 0.704364177458338 1.56731549489969 0.491103274537662 -1.09579430548105 0.472372503876858 2.03845480660381 -0.181009813390562 0.762418240772816 -1.06079345526971 0.766146577194971 0.0156050440251391 0.785758032602851 0.828646643306637 0.147042806786104 0.0744024298700584 -0.0491744310547155 0.106440524141313 0.580484739799673 -1.69154784273998 -1.21099841331056 0.151575804959921 0.0758659159362105 -1.07920190570094 0.313369287704581 1.17565143442186 1.63728479508422 0.365435677791156 -0.748218762595375 -1.52533017535644 1.29111291027823 -0.292043876952895 0.400616928525069 0.992491422837479 -1.56575385468714 1.29944739893078 -1.02740771967947 0.025040067406717 -0.395981301320645 0.109035274046583 0.0370044424409956 0.263274073766743 0.586648586942006 0.0956671486256045 1.60792779087997 -1.16151383892177 -0.190519351459499 0.843981778911182 -0.0896879803618706 0.5774087945772 -0.360123460533733 -1.74619407940435 0.238571265367167 1.00935546197224 -0.5496327124821 1.14856574272848 -2.28293880347316 -0.513739451223072 1.29052219011063 -1.67541755275544 1.07445274731296 -0.45074240534235 -0.544094818316705 -0.167799762536009 0.892449352522628 -0.717088962910113 0.139792948750762 0.964399177688254 1.21205932742579 -0.0697038806593711 -1.43837949128919 -2.20384960260734 1.16773888412573 -1.28325472556144 -1.18211850564025 0.115483017761549 -0.444998952786489 0.217331902182186 0.447017212275409 -0.946641117749091 -0.289615790801901 1.00578615087787 -1.10690239409799 0.130379679540564 -0.723902622695853 2.71125876400198 -0.510406623826616 2.44494500273138 0.862861723061699 -2.28578660399299 -0.963876485806102 0.661330335817411 0.0212469556683074 1.10088117526079 0.109910784590142 -0.899950512072404 -1.34655309530362 1.0709297616759 1.06198058937237 -1.17559412690722 2.2073166878582 -0.041075585411734 -0.721836725216804 0.759258340671817 -0.389038768274165 -1.75008588980695 0.665169027204747 -0.539548230021963 1.16317129685469 -0.13400088359897 1.18685345956055 -1.45797995405286 1.7026256774816 -1.29114559429067 0.954110170944837 -0.600458596549606 -0.00968277916973262 -2.1384905340454 1.19568507913056 -0.447698948685753 -0.337412032404128 1.17321021179032 -0.114092788444259 -0.181477242088699 -0.292353603967991 1.87439010234372 0.565449632248901 0.152886365759381 -1.35388176786385 0.13117178968053 -0.458278047977947 -1.3852767147685 0.334770710353814 -1.77086258646464 -0.39107634522754 0.803495448031588 1.16267320546165 -0.0549975857957975 -0.873984022948517 0.154476273592659 -0.621749511893556 2.39161381922472 0.859052536852081 0.812189320075639 -0.53960888134547 -0.49671740316793 0.127988763313197 1.19684274951022 0.100685005414538 -0.615666630255076 0.788792365041842 0.959601471597288 0.627016548014726 
-0.763281213214587 -0.98700275457255 1.14147586293625 1.16109041109107 -0.114202826765688 0.391953848291208 0.167977934394612 -0.940206721480835 -0.175304551331247 0.697349261980358 -1.02649449265875 -0.217148729532499 1.68901050910744 -0.141654163523152 0.809171787610401 -1.08607383914849 -0.801188406967855 0.16720592450629 -0.350865681631923 -0.917895531461107 -0.206107187772786 -0.436961192955983 0.876617341709378 -0.387746793280821 0.354951250501711 -0.00522899560095844 0.643914075713671 0.14540625305691 0.612782024679713 -0.0293146099639398 -1.15578977699618 0.201353070622463 0.388400626189123 1.13788806650211 0.659255584379128 -0.368737834159087 0.247076944704957 -0.0612130334123382 -1.43445584676867 1.68285386697309 -0.934267602526545 0.968457848178813 -0.535323560368194 0.027729366538298 -0.242233555649441 2.37914242019228 -0.182528287603172 -0.376374204616069 0.502132862137663 0.0645903385638131 0.940182151125562 -0.205417545695367 -1.33985044986203 -0.378501417484976 0.709434100109315 2.0046911115215 -0.858672987337654 0.555715028572038 0.851831394877137 0.341252686197789 -0.0615323141159435 -2.06807883735785 1.73063843237221 -0.88196490339917 0.345532372368412 0.626552786717882 -1.70217874360259 1.6314411592636 -0.349176036168805 -1.35913083043328 -2.22612252777881 -0.768075754966101 -0.139663408234392 -1.13775402116888 0.189269653730388 -0.336501146508718 -0.289975815787594 -0.922067994193458 -1.49481946448137 1.46972142927751 0.698648935641564 0.000583457021882669 -2.7517518603763 0.898937854527498 0.433229613906053 -0.597775172077665 -1.74917197255978 0.375254037800465 -1.06467414687086 1.25196554933177 0.0189646090938009 1.13278104844237 1.65341864296841 1.97288060040724 -0.504959459509291 2.54689420212398 0.334552044617657 0.512693346090476 -0.286488920622102 -1.10633124286374 1.13251731262112 0.841280772819972 0.663423423513778 -0.223638603361634 -0.127334051149731 0.256455713435152 0.374352875341823 -0.509843039804233 -0.860835756379342 -0.768514897913726 -0.303345551517536 -0.32523855287636 -0.473321845254918 -0.415989378191097 -1.02897024173103 0.797202455893466 -0.445118860891812 -0.0702809747360337 -0.207156757657237 -2.3683058176826 0.539460246891056 -0.378978044839125 1.58209938548487 -0.111543788858833 0.704488919075444 1.04534263752265 1.27033826124848 -0.0355390083777013 -0.291605364084569 -1.83992879848478 0.274950322544031 0.0598273078243544 -0.918423319345351 -1.78220747370792 -0.499952966112771 0.658572846672357 -1.94913878824641 -0.155818309191148 -1.93005517470788 -1.36290712558905 0.2154721693957 0.982314964109131 0.272607942325067 0.335564372632675 1.360950726725 0.234149361151699 1.31405292697128 0.177682700146794 -0.995911235965793 0.666325008752125 -0.337748711049016 -0.183861208878457 -0.0512065327858366 0.211281417006621 1.08641811935292 -0.0760885791729353 -2.73931017913467 -1.68276937351978 -0.456842866484533 0.503939141302444 -0.691987221759461 0.908434811632492 -0.21728337635697 -0.847200935375863 -1.1327792803934 0.394327898919325 0.828150720429935 -0.788299406480166 0.441399943574916 0.858192253129346 1.30259645238077 -0.994314429858059 3.09544236057513 0.923427068359658 1.50293139204403 0.00924970110021663 1.53303331366458 0.928902968343476 -1.20915305021753 -0.859318358487596 -0.261865386457305 0.428002257709775 0.0443581102403239 1.58093544939175 -0.376981363007556 -1.42693446235732 0.806298573941804 -0.207194692425568 0.934672947500472 -0.570997944360274 -1.50392552674275 0.295812295055273 1.9302230389232 0.838534268183803 -1.1233015052474 
0.0842341562641195 -1.09051664947336 0.135862768515647 -0.312407012745611 -0.831351292884703 0.366953628698086 0.182696481415636 -2.06461677430844 0.624884030134004 0.574266057624186 1.13981548826214 0.471539228466964 -0.79672305960054 0.400485033394167 -0.74499842844881 -1.37267624860092 1.32874860399482 -0.609882593849525 0.244068760436808 0.329448268027178 -0.103964907828511 -0.424086527996151 1.04124571640513 -0.310664120068737 1.13685640653007 -1.36714847104564 0.375358585275056 0.50721217622463 0.056620368782447 0.183986515661672 0.388410850487659 -0.870646280350527 0.447663392217461 -0.585402154462524 -0.237547860184624 0.641680272365205 1.6710965118625 -0.165844957873649 1.62264706579069 0.978956448505443 0.0691526861952696 -1.08127465283414 0.381036671174231 -0.816964678687736 0.680590649614494 1.25141999497574 -1.98444873321986 0.567866940159157 -0.287698747381147 -0.944983146135433 0.11049364191316 -0.111801422822776 -0.623718215228028 -1.1755085992446 0.508777302311212 -1.00806827855711 -1.44176681617631 0.434647607383599 -0.103325062464092 -0.657112754578832 -2.42010880461453 0.605833348008538 -0.252396766471245 0.867535495408942 -0.628413971773783 0.677221847358896 0.215260791815941 0.435842040869112 0.139236277646567 -0.657235811898817 0.530883560917937 -0.405803538396054 2.10132429744529 -0.275494024580443 -0.420443747193904 -1.57348224851033 -1.19627093208479 -0.231094588760859 -1.44675001545592 -1.35406207991441 -0.546893187873362 -0.0852075900905077 -1.19578118156388 0.458848662651893 -0.203263640658134 1.44230218048884 0.852848104263355 -0.0120203570475303 -0.58526660384248 0.501559262527138 0.443306892424051 0.786637301876334 -0.633674534269895 1.34187600931204 0.576459466608725 0.205832281433539 0.912384135811153 0.434205920238832 0.520552651881324 -0.253015342746408 -1.0212222437588 -0.765218363251275 0.59652072325716 -2.08172459379471 1.19384922053451 0.221325940887873 1.03432903354371 0.142899423595829 -0.536864709715615 0.56360471489119 0.0656001407639621 -1.11908836938808 0.00731743208771916 -2.36586433828165 0.403802240701746 -1.37050444133425 0.0982491640494384 -1.5130111452289 0.64001359203558 -1.68336681071115 2.0895584936129 1.01204020400644 -1.04175524675824 -0.0548191771104542 1.96151672208768 0.737597266570336 2.33485392484953 -0.468881165002085 -1.80512077325629 0.738622684309353 1.48418914199645 -0.799225442943861 -0.932559649796436 -0.62193532269453 0.612158228058202 -0.229744122734053 -0.363557282476294 0.410241465770855 0.211752715992407 -1.36846123839106 -0.747388846450776 -0.808982608181504 -0.845529279916105 -0.852059039866614 -0.141174555900184 -0.818193787548086 0.401777634440809 -0.542687692454942 0.964318794424063 -0.256969722192059 -2.24301885677823 1.12446952476455 1.32211669078671 -2.08252815617407 -0.0191466861695599 -0.12968271086862 0.207833926774629 0.5272637033608 -0.393777856730463 -0.0461993286204503 -2.4223612441232 -0.718051374948013 1.11064574847451 -0.154651921202928 -0.68252403183271 -0.010871264100315 -0.848066270183063 -1.84545785464182 0.55367222753322 0.378534991193284 2.72432632958866 0.926174752866701 0.50966665684972 1.41159886953383 1.4715919152293 0.276946596635773 0.637640814081216 0.204211700541828 -1.04305973816251 -0.664660997803639 -0.917787930491888 -2.07610610632605 0.925004883058424 0.0335256126876598 2.00449005138325 0.409431499752198 -1.96013127442282 2.07989436197749 -0.555329040193275 -1.43936706593123 -1.62696391809849 1.02876325442171 1.31725095844424 0.601251752961596 -1.51028798674964 0.254504439266685 
-0.836162410363533 0.330065219759059 -0.170250484686324 1.25352015539449 1.11199304368788 -0.782881328608773 0.562252908690649 -0.526938935184744 0.186286871390174 -0.0782701526213065 -0.0070217138094833 -0.320152430356213 -0.414522915839517 -0.0138974755740198 0.906989827447134 -1.15204393557538 -1.36758082605627 -1.18692859600468 -0.795532048955271 -0.491564128011038 -1.60774849608015 0.298438211723136 -0.191335967568503 -0.838066626638759 2.38662613703316 -0.971432085479462 -0.269795442389455 2.06276537673074 0.0985234737572823 -2.34707770515598 -2.62113144769286 -0.71517610846712 -0.9560074648956 0.346085633280752 -0.596771148066189 -0.38496024062841 0.175972243801468 1.9978580940141 0.952574980542333 -0.592320015309671 -0.387592707295108 1.22712280346178 1.10383702393971 -1.28339766846252 0.0349223729525343 0.564548518282168 -0.109532717827598 0.42291516171024 0.771556786449598 -0.372465535027576 0.441160995710412 -0.592887770571107 0.39566228272391 -0.508249935654314 -0.145144417123376 1.21899672550655 -0.538044395777591 -0.199899133181678 -0.0672956441257115 0.661984856573963 -1.42243564083506 0.0792709889951797 0.0309903396514604 1.3061909231812 1.97586177171144 -0.05135668933585 -0.402670878381301 1.12921433213126 0.646556819111846 -0.561033199434311 -0.306891938101535 0.0162614572131622 0.656838561660356 -1.23507452987701 -0.815667191517726 -0.0428294658165598 0.922768202154136 0.451927216716363 1.75893477195624 -0.972145167448965 0.588842640450563 -1.05224213102834 -0.0799615630626712 1.89877287721854 -0.682223572624342 -0.53047301184124 -0.221538570415508 -0.28787502929027 -0.0122225659964041 -2.03459472707834 0.842706803216555 -1.35933900700386 -0.857447698576143 -1.22677142227196 0.117530567097446 0.160157799300385 -0.571988421539916 -1.07841984437865 0.705653155824877 0.582409300000941 0.269236201658875 -3.41996831114589 0.880573438411197 -0.455955700175582 -0.231422741216475 0.748039606570566 0.202214787973097 -0.58991988685024 -0.774151517801782 -0.735107152578305 0.0268939996048441 -1.7580927545118 -0.781528232498291 -0.318426972723879 0.213274125219386 -2.27768212210004 -0.227284026392077 -0.391680139060389 -1.54013933970245 1.25233132942895 -0.170014272090409 -0.150241736338278 -0.188445561461027 -1.66265056021651 1.62381122531441 -0.569182634789907 0.265359471163746 -0.138174972999585 -0.93190284721052 -0.131981617655852 -0.898782003220197 -0.443315516349601 -0.312620668370782 -0.209876675161293 -0.799608183016101 -0.556633531764011 -0.0664610458873944 -0.351443836185406 0.38027915538049 0.470729479605383 1.60887363370584 -0.0894453077539464 0.711164900928847 0.200505521612412 1.21937755591515 0.312403229692763 -0.115607093843779 -0.805172516549137 -2.54353004139148 -1.92803074611771 -0.275810147523135 -0.265716294104657 0.233014131251657 0.930450370014162 -1.07536789147238 -2.0803333179205 -0.383190305424632 -0.373870643369332 -0.671178262682432 -0.839400471482972 0.63666907384848 0.263393882042935 0.831899283015475 0.467377016383012 -0.673878133343157 -1.50452029322574 -0.509117727910604 -1.46469668234259 0.232074436805787 1.15676116705352 0.365932086771744 -0.147763161941504 0.196816320518284 -0.864564966549994 -0.570966965015507 -1.76062879347203 -0.555413774492072 -0.442859260496076 -0.0719962908901556 0.873856292393014 -1.23750567436668 -0.210070424915498 0.368623837249635 0.0524534366844191 0.460213528872617 -0.93430346239631 1.88080692106283 -0.0718848170075341 0.18705555122265 0.111756125538701 0.0219185845430046 0.518458830177638 -0.349482395327223 
-0.846948228567552 1.56084695749989 -0.819225049622458 1.09584577801949 0.353185062365713 -0.313342254018168 1.8122438601221 -0.576549243536531 1.64737114522893 -0.247126409580573 0.869238649501725 0.733824046330671 0.663752372706625 -0.0245072757232279 -0.226776433453497 0.441538988209017 -0.229366881449928 -1.55401951283395 -0.941633189282837 1.8443710244586 -0.520638089232427 0.880088697838298 -0.528107862163266 1.5428415281254 0.149750375511589 0.111027133031161 0.0505167752589381 0.149027102354529 1.22560820106896 0.399328318844232 -1.41269368137076 -1.83369999793051 -1.13484594955316 0.409375109514368 0.445738126508513 -0.454078471339966 0.0709936056251994 0.44512239643458 -0.369010599450626 -1.50371765071895 0.187551783772069 -1.65009930936279 0.243794759359437 -0.0875825025225137 -0.0234245342527157 -0.848838563532253 diff --git a/test/unit/math/laplace/skim_data/lambda.csv b/test/unit/math/laplace/skim_data/lambda.csv new file mode 100644 index 00000000000..302d267c5c5 --- /dev/null +++ b/test/unit/math/laplace/skim_data/lambda.csv @@ -0,0 +1 @@ +0.253351917008522 5.08028655377911 0.470554831187597 0.952876185999496 0.0381730026879363 1.16982668833308 0.248606271959933 0.426812033146948 4.0616454775278 0.474643625210567 1.41940407084377 1.14918584370374 3.83477605936498 2.70562135901228 1.00487481465585 0.819062083664094 1.82285069954345 0.886241109081898 0.017020501612509 0.344370867000578 0.501346359800498 9.64201254492636 1.05983278255086 0.963626471569915 1.37666055508732 0.550049927860912 3.04860114077779 1.25049007314262 1.07120470874354 25.9827769012597 0.686507069575709 6.64163748142612 0.336905460589031 1.73100398501967 0.404252663667494 2.03281367076391 0.093759442644535 0.0783980165633819 0.163088563819333 0.130259518761584 1.09869113994516 0.771490025487267 0.633540013767062 0.524321814979983 0.858498656217546 0.0418192259418327 1.49617174459307 0.526464614186384 0.259853863191551 4.33486504106047 3.79342484635084 17.2842563539866 1.77800576036763 3.2420056515655 0.652771373377247 2.4335137493504 2.20058198966286 0.354249154473303 2.96778523322172 0.991074809610352 4.3228964459156 0.160597199856191 0.345933086404611 1.25317833513144 7.51366926337177 122.767062773473 1.2641813525563 0.750982245758845 2.51905838813804 0.394298275868825 0.848913539820307 5.50673145496987 0.618490682941814 0.0239608707263848 2.91255430003983 10.7239294915719 0.65281597067057 0.16806517670028 0.7802324050705 2.53646153276248 1.88947616595387 5.94987882005797 0.1822947210149 0.32296470642041 2.10116605147111 0.652940121841609 0.238875132762393 1.46228145776141 1.27750667653423 0.459620144380175 1.06680051708528 2.06077113215333 0.725277191548322 0.26250693881502 0.472683409525948 0.448915483867334 3.02614246712959 0.0133987364480264 1.22164081943419 1.11351346348172 0.8412715813494 1.42465590957516 0.446140772207973 0.779777425619714 1.81321949352775 0.666021868933562 0.185601748341941 0.445118008997472 0.576232751352688 1.10477200721566 0.18694437633578 0.795920731118584 0.197932156498638 0.523587078076778 1.46817674208106 0.540112037218709 0.434459180927525 4.00990516464597 0.747609688660345 6.22899742983592 1.8698916206957 0.234238950211591 1.70097015771922 0.977326903985942 0.69944694669086 0.312477894967422 0.218140081993534 10.9300762099385 0.468199471272976 62.1790274530438 0.688100375569718 0.700661208667674 0.278241620548263 28.2112748054162 0.433343654418115 0.44682765940688 1.21992534654594 0.846247959978655 0.0686693684846319 1.93342078149047 0.920806485456274 1.16136899712271 
0.734454763981076 0.70412053513296 0.426728781573929 2.2566950228572 0.569475633426559 0.106619960397479 7.06567759109791 0.93688540916471 0.228752083058376 0.484271487128555 3.54020143716896 1.69115142691062 0.0309180619115363 1.20112782170397 0.261697480378214 0.112335423385942 1.09316577655215 7.85742697544779 4.32897479814329 1.1976503438472 0.0619441211683711 13.4080600221768 1.80215136745973 0.410063734822417 3.08932196940621 2.28148107343458 0.00599943421184098 50.2413322726076 1.84622224770351 0.596536912838238 0.159468026413487 1.06517943625862 0.0422554176259049 1.94847564226453 6.08077208442287 0.0272684679832843 0.846878907070055 0.588189226610945 4.71743170122685 0.169693937375684 0.999457022983371 8.69848927027348 0.848320633835168 0.683886832239808 0.298125484593825 0.554169294211133 0.722169318169745 0.627635123943081 0.407245583639018 2.89284783750101 1.16394698572387 0.316584861239233 4.50595220036843 0.205386028209901 0.539978920302429 1.05427610863193 0.857542638859003 16.4679728502088 diff --git a/test/unit/math/laplace/skim_data/y.csv b/test/unit/math/laplace/skim_data/y.csv new file mode 100644 index 00000000000..a72a842036f --- /dev/null +++ b/test/unit/math/laplace/skim_data/y.csv @@ -0,0 +1 @@ +1 1 0 0 0 1 1 1 1 0 1 0 1 1 0 1 1 1 0 0 1 1 1 1 1 1 1 0 1 0 0 1 1 0 0 1 0 0 1 1 0 0 1 0 0 0 0 1 0 0 0 0 0 1 1 0 1 1 0 1 0 1 0 1 0 1 1 1 1 1 0 1 1 1 1 1 1 0 0 1 0 1 1 1 1 1 0 0 1 1 1 0 1 1 0 0 1 0 0 1 From c353fa4b4bdfce477725dffbc16547c2d56e700c Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 4 Mar 2020 11:30:44 -0500 Subject: [PATCH 02/53] Include laplace.hpp in header files. --- stan/math/rev.hpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stan/math/rev.hpp b/stan/math/rev.hpp index 3b56ffe3abc..ca1981c8d74 100644 --- a/stan/math/rev.hpp +++ b/stan/math/rev.hpp @@ -11,4 +11,6 @@ #include #include +#include + #endif From ffec2a61bfd6874e2cf4d6619824b523a388feba Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sun, 29 Mar 2020 13:23:49 -0400 Subject: [PATCH 03/53] Add rng function for bernoulli logit function. --- stan/math/laplace/laplace.hpp | 1 + .../prob/laplace_approx_bernoulli_rng.hpp | 44 ++++++ .../prob/laplace_approx_poisson_rng.hpp | 11 +- stan/math/laplace/prob/laplace_approx_rng.hpp | 20 +-- .../laplace_approx_bernoulli_rng_test.cpp | 146 ++++++++++++++++++ 5 files changed, 203 insertions(+), 19 deletions(-) create mode 100644 stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp create mode 100644 test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp index 3aa0a88d9e0..86a2693b7d3 100644 --- a/stan/math/laplace/laplace.hpp +++ b/stan/math/laplace/laplace.hpp @@ -4,5 +4,6 @@ #include #include #include +#include #endif diff --git a/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp b/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp new file mode 100644 index 00000000000..53be5084565 --- /dev/null +++ b/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp @@ -0,0 +1,44 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_BERNOULLI_RNG_HPP +#define STAN_MATH_LAPLACE_LAPLACE_APPROX_BERNOULLI_RNG_HPP + +#include + +namespace stan { +namespace math { + +/** + * In a latent gaussian model, + * + * theta ~ Normal(theta | 0, Sigma(phi)) + * y ~ pi(y | theta) + * + * return a multivariate normal random variate sampled + * from the gaussian approximation of p(theta | y, phi), + * where the likelihood is a Bernoulli with logit link. 
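 *
 * For orientation, a sketch of what the approximation is (this mirrors the
 * brute-force check in the unit tests rather than introducing anything new):
 * writing K = Sigma(phi), theta_hat for the mode of p(y | theta) p(theta | phi),
 * and W = - d^2 log p(y | theta) / d theta^2 evaluated at theta_hat, the
 * variate is drawn from
 *
 *   p(theta | y, phi) ~= Normal(theta | theta_hat, (K^-1 + W)^-1),
 *
 * where here p(y_i | theta_i) = Bernoulli(y_i | inv_logit(theta_i)).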
+ */ +template +inline Eigen::VectorXd // CHECK -- right return type + laplace_approx_bernoulli_rng + (const std::vector& y, + const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, + // const std::vector& x, + const Eigen::MatrixXd& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + RNG& rng, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return + laplace_approx_rng(diff_logistic_log(to_vector(n_samples), to_vector(y)), + covariance_function, phi, x, delta, delta_int, theta_0, + rng, msgs, tolerance, max_num_steps); + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp b/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp index 1dcf6126fcb..093e6062be2 100644 --- a/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp +++ b/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp @@ -1,5 +1,5 @@ -#ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_RNG_HPP -#define STAN_MATH_LAPLACE_LAPLACE_APPROX_RNG_HPP +#ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_POISSON_RNG_HPP +#define STAN_MATH_LAPLACE_LAPLACE_APPROX_POISSON_RNG_HPP #include @@ -8,12 +8,13 @@ namespace math { /** * In a latent gaussian model, - * + * * theta ~ Normal(theta | 0, Sigma(phi)) * y ~ pi(y | theta) - * + * * return a multivariate normal random variate sampled - * from the gaussian approximation of p(theta | y, phi). + * from the gaussian approximation of p(theta | y, phi) + * where the likelihood is a Poisson with a log link. */ template inline Eigen::VectorXd // CHECK -- right return type diff --git a/stan/math/laplace/prob/laplace_approx_rng.hpp b/stan/math/laplace/prob/laplace_approx_rng.hpp index 4f05015685e..bf60b35eea9 100644 --- a/stan/math/laplace/prob/laplace_approx_rng.hpp +++ b/stan/math/laplace/prob/laplace_approx_rng.hpp @@ -17,16 +17,18 @@ namespace math { * return a multivariate normal random variate sampled * from the gaussian approximation of p(theta | y, phi). */ -template +template inline Eigen::VectorXd // CHECK -- right return type laplace_approx_rng (const D& diff_likelihood, const K& covariance_function, - const Eigen::Matrix& phi, - const std::vector& x, + const Eigen::Matrix& phi, + const T_x& x, + // const std::vector& x, const std::vector& delta, const std::vector& delta_int, - const Eigen::Matrix& theta_0, + const Eigen::Matrix& theta_0, RNG& rng, std::ostream* msgs = nullptr, double tolerance = 1e-6, @@ -55,16 +57,6 @@ laplace_approx_rng theta, diag_matrix(square(W_root_inv)) - V_dec.transpose() * V_dec, rng); - - // CHECK -- which method to use? Both seem equivalent. 
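  // A sketch of why the covariance above matches the R&W form, assuming
  // (as in laplace_marginal_density) that L is the Cholesky factor of
  // B = I + W^(1/2) K W^(1/2). With V_dec = L^-1 W^(-1/2),
  //
  //   diag(square(W_root_inv)) - V_dec' V_dec
  //     = W^-1 - W^(-1/2) B^-1 W^(-1/2)
  //     = (K^-1 + W)^-1
  //     = K - V' V,  with V = L^-1 W^(1/2) K,
  //
  // so both parameterizations draw from the same normal; the form used here
  // only needs W^(-1/2) and L, not the full covariance matrix.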
- // R&W method - // Eigen::MatrixXd V; - // V = mdivide_left_tri(L, - // diag_pre_multiply(W_root, covariance)); - // return multi_normal_rng( - // theta, - // covariance - V.transpose() * V, - // rng); } } // namespace math diff --git a/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp b/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp new file mode 100644 index 00000000000..bb5c7a14638 --- /dev/null +++ b/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp @@ -0,0 +1,146 @@ +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +struct stationary_point { + template + inline Eigen::Matrix::type, + Eigen::Dynamic, 1> + operator() (const Eigen::Matrix& theta, + const Eigen::Matrix& parms, + const std::vector& dat, + const std::vector& dat_int, + std::ostream* pstream__ = 0) const { + Eigen::Matrix::type, + Eigen::Dynamic, 1> z(2); + z(0) = 1 - exp(theta(0)) - theta(0) / (parms(0) * parms(0)); + z(1) = - exp(theta(1)) - theta(1) / (parms(1) * parms(1)); + return z; + } +}; + +struct diagonal_kernel_functor { + template + Eigen::Matrix + operator() (const Eigen::Matrix& phi, + const T2& x, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* msgs = nullptr) const { + Eigen::Matrix K(2, 2); + K(0, 0) = phi(0) * phi(0); + K(1, 1) = phi(1) * phi(1); + K(0, 1) = 0; + K(1, 0) = 0; + return K; + } +}; + +TEST(laplace, basic_rng) { + // make sure the right covariance function is computed + // and compare results. + using stan::math::laplace_approx_rng; + using stan::math::laplace_approx_poisson_rng; + using stan::math::laplace_approx_bernoulli_rng; + using stan::math::diff_poisson_log; + + using stan::math::algebra_solver; + using stan::math::to_vector; + using stan::math::diag_matrix; + using stan::math::value_of; + using stan::math::mdivide_left_tri; + using stan::math::diag_pre_multiply; + using stan::math::inv; + using stan::math::square; + + + Eigen::VectorXd theta_0(2); + theta_0 << 1, 1; + Eigen::VectorXd sigma(2); + sigma << 3, 2; + std::vector n_samples = {1, 1}; + std::vector sums = {1, 0}; + + diff_poisson_log diff_likelihood(to_vector(n_samples), + to_vector(sums)); + std::vector d0; + std::vector di0; + + + // Method 1: brute force and straightforward + Eigen::VectorXd theta_root + = algebra_solver(stationary_point(), + theta_0, sigma, d0, di0); + + Eigen::VectorXd gradient, W; + diff_likelihood.diff(theta_root, gradient, W); + W = -W; + diagonal_kernel_functor covariance_function; + std::vector x_dummy; + Eigen::MatrixXd x_dummay_mat; + Eigen::MatrixXd K = covariance_function(sigma, x_dummy, d0, di0, 0); + + std::cout << "K (brute force): " + << std::endl + << (K.inverse() + diag_matrix(W)).inverse() + << std::endl << std::endl; + + // Method 2: Vectorized R&W method + double tolerance = 1e-6; + int max_num_steps = 100; + + // First find the mode using the custom Newton step + Eigen::MatrixXd covariance; + Eigen::VectorXd theta; + Eigen::VectorXd W_root; + Eigen::MatrixXd L; + { + Eigen::VectorXd a; + Eigen::VectorXd l_grad; + double marginal_density + = laplace_marginal_density(diff_likelihood, + covariance_function, + sigma, x_dummy, d0, di0, + covariance, theta, W_root, L, a, l_grad, + value_of(theta_0), 0, + tolerance, max_num_steps); + } + + Eigen::MatrixXd V; + V = mdivide_left_tri(L, + diag_pre_multiply(W_root, covariance)); + std::cout << "K (method 1): " << std::endl + << covariance - V.transpose() * V << std::endl + << std::endl; + + // Method 3: Modified R&W method + 
Eigen::VectorXd W_root_inv = inv(W_root); + Eigen::MatrixXd V_dec = mdivide_left_tri(L, + diag_matrix(W_root_inv)); + std::cout << "K (method 2): " << std::endl + << - V_dec.transpose() * V_dec + diag_matrix(square(W_root_inv)) + << std::endl << std::endl; + + // Check calls to rng functions compile + boost::random::mt19937 rng; + Eigen::MatrixXd theta_pred + = laplace_approx_rng(diff_likelihood, covariance_function, + sigma, x_dummy, d0, di0, theta_0, + rng); + + theta_pred + = laplace_approx_poisson_rng(sums, n_samples, covariance_function, + sigma, x_dummy, d0, di0, theta_0, rng); + + theta_pred + = laplace_approx_bernoulli_rng(sums, n_samples, covariance_function, + sigma, x_dummay_mat, d0, di0, theta_0, rng); +} From 0e15cd92b2989fc7f2477edf6e24df88d37a3658 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sun, 29 Mar 2020 13:28:37 -0400 Subject: [PATCH 04/53] Template x argument. --- .../math/laplace/prob/laplace_approx_bernoulli_rng.hpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp b/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp index 53be5084565..251f7941646 100644 --- a/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp +++ b/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp @@ -16,18 +16,18 @@ namespace math { * from the gaussian approximation of p(theta | y, phi), * where the likelihood is a Bernoulli with logit link. */ -template +template inline Eigen::VectorXd // CHECK -- right return type laplace_approx_bernoulli_rng (const std::vector& y, const std::vector& n_samples, const K& covariance_function, - const Eigen::Matrix& phi, - // const std::vector& x, - const Eigen::MatrixXd& x, + const Eigen::Matrix& phi, + const T_x x, const std::vector& delta, const std::vector& delta_int, - const Eigen::Matrix& theta_0, + const Eigen::Matrix& theta_0, RNG& rng, std::ostream* msgs = nullptr, double tolerance = 1e-6, From f05427825b9eea73db1f0a5ad76ec5256943ac53 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 17 Jul 2020 15:56:07 -0400 Subject: [PATCH 05/53] update name laplace_marginal_poisson_log --- .../math/laplace/laplace_marginal_poisson.hpp | 6 ++-- .../laplace/laplace_marginal_poisson_test.cpp | 34 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/stan/math/laplace/laplace_marginal_poisson.hpp b/stan/math/laplace/laplace_marginal_poisson.hpp index 081d9efcc44..0683aa43068 100644 --- a/stan/math/laplace/laplace_marginal_poisson.hpp +++ b/stan/math/laplace/laplace_marginal_poisson.hpp @@ -36,7 +36,7 @@ namespace math { * breaks and returns an error. 
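 *
 * (Roughly, the returned value approximates the log marginal density
 *    log p(y | phi) = log \int p(y | theta) Normal(theta | 0, Sigma(phi)) dtheta
 * for a Poisson likelihood with a log link, by replacing the integrand with
 * its Gaussian approximation at the mode; the computation itself is delegated
 * to laplace_marginal_density.)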
*/ template - T1 laplace_marginal_poisson + T1 laplace_marginal_poisson_log (const std::vector& y, const std::vector& n_samples, const K& covariance_function, @@ -53,9 +53,9 @@ namespace math { covariance_function, phi, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); } - + template - T1 laplace_marginal_poisson + T1 laplace_marginal_poisson_log (const std::vector& y, const std::vector& n_samples, const Eigen::VectorXd& ye, diff --git a/test/unit/math/laplace/laplace_marginal_poisson_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_test.cpp index 21d7c6094bf..412eb3c654c 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_test.cpp @@ -67,7 +67,7 @@ TEST(laplace, likelihood_differentiation2) { } TEST(laplace, poisson_lgm_dim2) { - using stan::math::laplace_marginal_poisson; + using stan::math::laplace_marginal_poisson_log; using stan::math::var; using stan::math::to_vector; using stan::math::value_of; @@ -96,21 +96,21 @@ TEST(laplace, poisson_lgm_dim2) { std::vector sums = {1, 0}; squared_kernel_functor K; - var target = laplace_marginal_poisson(sums, n_samples, K, phi, x, delta, - delta_int, theta_0); + var target = laplace_marginal_poisson_log(sums, n_samples, K, phi, x, delta, + delta_int, theta_0); // Test with exposure argument - // Eigen::VectorXd exposure(2); - // exposure << 1, 1; - // var target = laplace_marginal_poisson(theta_0, phi, x, n_samples, sums, - // exposure); + Eigen::VectorXd ye(2); + ye << 1, 1; + target = laplace_marginal_poisson_log(sums, n_samples, ye, K, phi, x, delta, + delta_int, theta_0); // How to test this? The best way would be to generate a few // benchmarks using gpstuff. VEC g; AVEC parm_vec = createAVEC(phi(0), phi(1)); target.grad(parm_vec, g); -/* + // finite diff test double diff = 1e-7; Eigen::VectorXd phi_dbl = value_of(phi); @@ -121,14 +121,14 @@ TEST(laplace, poisson_lgm_dim2) { phi_2l(1) -= diff; phi_2u(1) += diff; - double target_1u = laplace_marginal_poisson(sums, n_samples, phi_1u, x, - delta, delta_int, theta_0), - target_1l = laplace_marginal_poisson(sums, n_samples, phi_1l, x, - delta, delta_int, theta_0), - target_2u = laplace_marginal_poisson(sums, n_samples, phi_2u, x, - delta, delta_int, theta_0), - target_2l = laplace_marginal_poisson(sums, n_samples, phi_2l, x, - delta, delta_int, theta_0); + double target_1u = laplace_marginal_poisson_log(sums, n_samples, K, phi_1u, x, + delta, delta_int, theta_0), + target_1l = laplace_marginal_poisson_log(sums, n_samples, K, phi_1l, x, + delta, delta_int, theta_0), + target_2u = laplace_marginal_poisson_log(sums, n_samples, K, phi_2u, x, + delta, delta_int, theta_0), + target_2l = laplace_marginal_poisson_log(sums, n_samples, K, phi_2l, x, + delta, delta_int, theta_0); VEC g_finite(dim_phi); g_finite[0] = (target_1u - target_1l) / (2 * diff); @@ -136,5 +136,5 @@ TEST(laplace, poisson_lgm_dim2) { double tol = 1.1e-4; EXPECT_NEAR(g_finite[0], g[0], tol); - EXPECT_NEAR(g_finite[1], g[1], tol); */ + EXPECT_NEAR(g_finite[1], g[1], tol); } From d90a5ed85f9b82d8a3f338fd817c461aea9d7ba7 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 17 Jul 2020 16:08:25 -0400 Subject: [PATCH 06/53] update name of laplace_rng. 
--- .../laplace/prob/laplace_approx_poisson_rng.hpp | 8 ++++---- stan/math/laplace/prob/laplace_approx_rng.hpp | 2 +- .../laplace/laplace_approx_poisson_rng_test.cpp | 14 +++++++------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp b/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp index 093e6062be2..6cfe1e52eca 100644 --- a/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp +++ b/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp @@ -18,7 +18,7 @@ namespace math { */ template inline Eigen::VectorXd // CHECK -- right return type - laplace_approx_poisson_rng + laplace_poisson_log_rng (const std::vector& y, const std::vector& n_samples, const K& covariance_function, @@ -32,7 +32,7 @@ inline Eigen::VectorXd // CHECK -- right return type double tolerance = 1e-6, long int max_num_steps = 100) { return - laplace_approx_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), + laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), covariance_function, phi, x, delta, delta_int, theta_0, rng, msgs, tolerance, max_num_steps); } @@ -42,7 +42,7 @@ inline Eigen::VectorXd // CHECK -- right return type */ template inline Eigen::VectorXd // CHECK -- right return type - laplace_approx_poisson_rng + laplace_poisson_log_rng (const std::vector& y, const std::vector& n_samples, const Eigen::VectorXd& exposure, @@ -57,7 +57,7 @@ inline Eigen::VectorXd // CHECK -- right return type double tolerance = 1e-6, long int max_num_steps = 100) { return - laplace_approx_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), + laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), log(exposure)), covariance_function, phi, x, delta, delta_int, theta_0, rng, msgs, tolerance, max_num_steps); diff --git a/stan/math/laplace/prob/laplace_approx_rng.hpp b/stan/math/laplace/prob/laplace_approx_rng.hpp index bf60b35eea9..79450ddaad8 100644 --- a/stan/math/laplace/prob/laplace_approx_rng.hpp +++ b/stan/math/laplace/prob/laplace_approx_rng.hpp @@ -20,7 +20,7 @@ namespace math { template inline Eigen::VectorXd // CHECK -- right return type -laplace_approx_rng +laplace_rng (const D& diff_likelihood, const K& covariance_function, const Eigen::Matrix& phi, diff --git a/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp b/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp index 91b556451cc..4d60a709407 100644 --- a/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp +++ b/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -49,7 +50,8 @@ TEST(laplace, basic_rng) { using stan::math::diff_poisson_log; using stan::math::to_vector; using stan::math::diag_matrix; - using stan::math::laplace_approx_rng; + using stan::math::laplace_rng; + using stan::math::laplace_poisson_log_rng; using stan::math::value_of; using stan::math::mdivide_left_tri; using stan::math::diag_pre_multiply; @@ -127,11 +129,9 @@ TEST(laplace, basic_rng) { // Call to rng function boost::random::mt19937 rng; Eigen::MatrixXd theta_pred - = laplace_approx_rng(diff_likelihood, covariance_function, - sigma, x_dummy, d0, di0, theta_0, - rng); + = laplace_rng(diff_likelihood, covariance_function, + sigma, x_dummy, d0, di0, theta_0, rng); - // = laplace_approx_rng(theta_0, sigma, x_dummy, - // diff_likelihood, covariance_function, - // rng); + theta_pred = laplace_poisson_log_rng(sums, n_samples, covariance_function, + sigma, x_dummy, d0, di0, theta_0, rng); } From 
a30f162823357dbf06a648992696f4978b4b6314 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 17 Jul 2020 16:23:53 -0400 Subject: [PATCH 07/53] rename laplace bernoulli functions. --- .../laplace/laplace_marginal_bernoulli.hpp | 9 +++++---- .../prob/laplace_approx_bernoulli_rng.hpp | 8 ++++---- .../laplace_approx_bernoulli_rng_test.cpp | 17 +++++++++-------- .../laplace_marginal_bernoulli_test.cpp | 19 ++++++++++--------- 4 files changed, 28 insertions(+), 25 deletions(-) diff --git a/stan/math/laplace/laplace_marginal_bernoulli.hpp b/stan/math/laplace/laplace_marginal_bernoulli.hpp index 1c606be68c0..d16b2724091 100644 --- a/stan/math/laplace/laplace_marginal_bernoulli.hpp +++ b/stan/math/laplace/laplace_marginal_bernoulli.hpp @@ -1,4 +1,4 @@ -#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP + #ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP #define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP #include @@ -32,8 +32,9 @@ namespace math { * @param[in] max_num_steps maximum number of steps before the Newton solver * breaks and returns an error. */ + // TODO: deprecate the below function. No default functor. template - T1 laplace_marginal_bernoulli + T1 laplace_marginal_bernoulli_logit (const std::vector& y, const std::vector& n_samples, // const K& covariance function, @@ -54,7 +55,7 @@ namespace math { // Add signature that takes in a Kernel functor specified by the user. template - T1 laplace_marginal_bernoulli + T1 laplace_marginal_bernoulli_logit (const std::vector& y, const std::vector& n_samples, const K& covariance_function, @@ -75,7 +76,7 @@ namespace math { // Add signature that takes x as a matrix instead of a vector. template - T1 laplace_marginal_bernoulli + T1 laplace_marginal_bernoulli_logit (const std::vector& y, const std::vector& n_samples, const K& covariance_function, diff --git a/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp b/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp index 251f7941646..14b81638acc 100644 --- a/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp +++ b/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp @@ -19,7 +19,7 @@ namespace math { template inline Eigen::VectorXd // CHECK -- right return type - laplace_approx_bernoulli_rng + laplace_bernoulli_logit_rng (const std::vector& y, const std::vector& n_samples, const K& covariance_function, @@ -33,9 +33,9 @@ inline Eigen::VectorXd // CHECK -- right return type double tolerance = 1e-6, long int max_num_steps = 100) { return - laplace_approx_rng(diff_logistic_log(to_vector(n_samples), to_vector(y)), - covariance_function, phi, x, delta, delta_int, theta_0, - rng, msgs, tolerance, max_num_steps); + laplace_rng(diff_logistic_log(to_vector(n_samples), to_vector(y)), + covariance_function, phi, x, delta, delta_int, theta_0, + rng, msgs, tolerance, max_num_steps); } } // namespace math diff --git a/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp b/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp index bb5c7a14638..0e6532ed3e1 100644 --- a/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp +++ b/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp @@ -47,9 +47,9 @@ struct diagonal_kernel_functor { TEST(laplace, basic_rng) { // make sure the right covariance function is computed // and compare results. 
- using stan::math::laplace_approx_rng; - using stan::math::laplace_approx_poisson_rng; - using stan::math::laplace_approx_bernoulli_rng; + using stan::math::laplace_rng; + using stan::math::laplace_poisson_log_rng; + using stan::math::laplace_bernoulli_logit_rng; using stan::math::diff_poisson_log; using stan::math::algebra_solver; @@ -132,15 +132,16 @@ TEST(laplace, basic_rng) { // Check calls to rng functions compile boost::random::mt19937 rng; Eigen::MatrixXd theta_pred - = laplace_approx_rng(diff_likelihood, covariance_function, + = laplace_rng(diff_likelihood, covariance_function, sigma, x_dummy, d0, di0, theta_0, rng); theta_pred - = laplace_approx_poisson_rng(sums, n_samples, covariance_function, - sigma, x_dummy, d0, di0, theta_0, rng); + = laplace_bernoulli_logit_rng(sums, n_samples, covariance_function, + sigma, x_dummay_mat, d0, di0, theta_0, rng); + // Bonus: make the distribution with a poisson rng also runs. theta_pred - = laplace_approx_bernoulli_rng(sums, n_samples, covariance_function, - sigma, x_dummay_mat, d0, di0, theta_0, rng); + = laplace_poisson_log_rng(sums, n_samples, covariance_function, + sigma, x_dummy, d0, di0, theta_0, rng); } diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp index 952ff3c05ac..a6caee7ed43 100755 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp @@ -1,8 +1,9 @@ #include #include #include +#include -#include +// #include #include #include @@ -166,21 +167,21 @@ TEST(laplace, logistic_lgm_dim500) { // TO DO -- get total time from GPStuff and do more comparisons. // CASE 3: use wrapper function and compare result. - using stan::math::laplace_marginal_bernoulli; + using stan::math::laplace_marginal_bernoulli_logit; using stan::math::value_of; double marginal_density_v2 - = laplace_marginal_bernoulli(y, n_samples, - phi, x, delta, delta_int, - theta_0, 0, 1e-3, 100); + = laplace_marginal_bernoulli_logit(y, n_samples, + phi, x, delta, delta_int, + theta_0, 0, 1e-3, 100); EXPECT_FLOAT_EQ(marginal_density, marginal_density_v2); marginal_density_v2 - = laplace_marginal_bernoulli(y, n_samples, - sqr_exp_kernel_functor(), - phi, x, delta, delta_int, - theta_0, 0, 1e-3, 100); + = laplace_marginal_bernoulli_logit(y, n_samples, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, 0, 1e-3, 100); EXPECT_FLOAT_EQ(marginal_density, marginal_density_v2); } From ac4de599dd45c20f604ac7a2406de2f5ec5e5ed8 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 17 Jul 2020 16:41:11 -0400 Subject: [PATCH 08/53] Update file names and header includes. 
--- stan/math/laplace/laplace.hpp | 8 ++++---- ...bernoulli.hpp => laplace_marginal_bernoulli_logit.hpp} | 0 ...ginal_poisson.hpp => laplace_marginal_poisson_log.hpp} | 0 ..._bernoulli_rng.hpp => laplace_bernoulli_logit_rng.hpp} | 2 +- ...approx_poisson_rng.hpp => laplace_poisson_log_rng.hpp} | 2 +- .../prob/{laplace_approx_rng.hpp => laplace_rng.hpp} | 0 ..._rng_test.cpp => laplace_bernoulli_logit_rng_test.cpp} | 0 ...test.cpp => laplace_marginal_bernoulli_logit_test.cpp} | 2 +- ...son_test.cpp => laplace_marginal_poisson_log_test.cpp} | 2 +- ...sson_rng_test.cpp => laplace_poisson_log_rng_test.cpp} | 4 ++-- 10 files changed, 10 insertions(+), 10 deletions(-) rename stan/math/laplace/{laplace_marginal_bernoulli.hpp => laplace_marginal_bernoulli_logit.hpp} (100%) rename stan/math/laplace/{laplace_marginal_poisson.hpp => laplace_marginal_poisson_log.hpp} (100%) rename stan/math/laplace/prob/{laplace_approx_bernoulli_rng.hpp => laplace_bernoulli_logit_rng.hpp} (95%) rename stan/math/laplace/prob/{laplace_approx_poisson_rng.hpp => laplace_poisson_log_rng.hpp} (97%) rename stan/math/laplace/prob/{laplace_approx_rng.hpp => laplace_rng.hpp} (100%) rename test/unit/math/laplace/{laplace_approx_bernoulli_rng_test.cpp => laplace_bernoulli_logit_rng_test.cpp} (100%) rename test/unit/math/laplace/{laplace_marginal_bernoulli_test.cpp => laplace_marginal_bernoulli_logit_test.cpp} (99%) rename test/unit/math/laplace/{laplace_marginal_poisson_test.cpp => laplace_marginal_poisson_log_test.cpp} (98%) rename test/unit/math/laplace/{laplace_approx_poisson_rng_test.cpp => laplace_poisson_log_rng_test.cpp} (97%) diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp index 86a2693b7d3..54a5acf468c 100644 --- a/stan/math/laplace/laplace.hpp +++ b/stan/math/laplace/laplace.hpp @@ -1,9 +1,9 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_HPP #define STAN_MATH_LAPLACE_LAPLACE_HPP -#include -#include -#include -#include +#include +#include +#include +#include #endif diff --git a/stan/math/laplace/laplace_marginal_bernoulli.hpp b/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp similarity index 100% rename from stan/math/laplace/laplace_marginal_bernoulli.hpp rename to stan/math/laplace/laplace_marginal_bernoulli_logit.hpp diff --git a/stan/math/laplace/laplace_marginal_poisson.hpp b/stan/math/laplace/laplace_marginal_poisson_log.hpp similarity index 100% rename from stan/math/laplace/laplace_marginal_poisson.hpp rename to stan/math/laplace/laplace_marginal_poisson_log.hpp diff --git a/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp similarity index 95% rename from stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp rename to stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp index 14b81638acc..7e3b8e31f65 100644 --- a/stan/math/laplace/prob/laplace_approx_bernoulli_rng.hpp +++ b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp @@ -1,7 +1,7 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_BERNOULLI_RNG_HPP #define STAN_MATH_LAPLACE_LAPLACE_APPROX_BERNOULLI_RNG_HPP -#include +#include namespace stan { namespace math { diff --git a/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp similarity index 97% rename from stan/math/laplace/prob/laplace_approx_poisson_rng.hpp rename to stan/math/laplace/prob/laplace_poisson_log_rng.hpp index 6cfe1e52eca..26b00340ffb 100644 --- a/stan/math/laplace/prob/laplace_approx_poisson_rng.hpp +++ 
b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp @@ -1,7 +1,7 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_POISSON_RNG_HPP #define STAN_MATH_LAPLACE_LAPLACE_APPROX_POISSON_RNG_HPP -#include +#include namespace stan { namespace math { diff --git a/stan/math/laplace/prob/laplace_approx_rng.hpp b/stan/math/laplace/prob/laplace_rng.hpp similarity index 100% rename from stan/math/laplace/prob/laplace_approx_rng.hpp rename to stan/math/laplace/prob/laplace_rng.hpp diff --git a/test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp b/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp similarity index 100% rename from test/unit/math/laplace/laplace_approx_bernoulli_rng_test.cpp rename to test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp similarity index 99% rename from test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp rename to test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp index a6caee7ed43..2ad5ed991e9 100755 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include // #include diff --git a/test/unit/math/laplace/laplace_marginal_poisson_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp similarity index 98% rename from test/unit/math/laplace/laplace_marginal_poisson_test.cpp rename to test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index 412eb3c654c..7435047856f 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp similarity index 97% rename from test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp rename to test/unit/math/laplace/laplace_poisson_log_rng_test.cpp index 4d60a709407..9f8fe810207 100644 --- a/test/unit/math/laplace/laplace_approx_poisson_rng_test.cpp +++ b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp @@ -1,6 +1,6 @@ #include -#include -#include +#include +#include #include #include From f66d58235f65785cc915d7f5c2ffbb13f1f95dc6 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sat, 25 Jul 2020 10:51:22 -0400 Subject: [PATCH 09/53] Update signature for laplace_marginal_poisson_log. --- stan/math/laplace/laplace_marginal_poisson_log.hpp | 4 ++-- test/unit/math/laplace/disease_map_test.cpp | 1 + .../laplace/laplace_marginal_poisson_log_test.cpp | 14 +++++++------- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/stan/math/laplace/laplace_marginal_poisson_log.hpp b/stan/math/laplace/laplace_marginal_poisson_log.hpp index 0683aa43068..31aeb485b39 100644 --- a/stan/math/laplace/laplace_marginal_poisson_log.hpp +++ b/stan/math/laplace/laplace_marginal_poisson_log.hpp @@ -36,7 +36,7 @@ namespace math { * breaks and returns an error. 
*/ template - T1 laplace_marginal_poisson_log + T1 laplace_marginal_poisson_log_lpmf (const std::vector& y, const std::vector& n_samples, const K& covariance_function, @@ -55,7 +55,7 @@ namespace math { } template - T1 laplace_marginal_poisson_log + T1 laplace_marginal_poisson_log_lpmf (const std::vector& y, const std::vector& n_samples, const Eigen::VectorXd& ye, diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 259c1f4917c..8e2968c65b9 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -15,6 +15,7 @@ #include #include +// TODO(charlesm93): update using new function signatures. TEST(laplace, disease_map_dim_911) { // Based on (Vanhatalo, Pietilainen and Vethari, 2010). See diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index 7435047856f..b4f20157d0a 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -67,7 +67,7 @@ TEST(laplace, likelihood_differentiation2) { } TEST(laplace, poisson_lgm_dim2) { - using stan::math::laplace_marginal_poisson_log; + using stan::math::laplace_marginal_poisson_log_lpmf; using stan::math::var; using stan::math::to_vector; using stan::math::value_of; @@ -96,13 +96,13 @@ TEST(laplace, poisson_lgm_dim2) { std::vector sums = {1, 0}; squared_kernel_functor K; - var target = laplace_marginal_poisson_log(sums, n_samples, K, phi, x, delta, + var target = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi, x, delta, delta_int, theta_0); // Test with exposure argument Eigen::VectorXd ye(2); ye << 1, 1; - target = laplace_marginal_poisson_log(sums, n_samples, ye, K, phi, x, delta, + target = laplace_marginal_poisson_log_lpmf(sums, n_samples, ye, K, phi, x, delta, delta_int, theta_0); // How to test this? The best way would be to generate a few @@ -121,13 +121,13 @@ TEST(laplace, poisson_lgm_dim2) { phi_2l(1) -= diff; phi_2u(1) += diff; - double target_1u = laplace_marginal_poisson_log(sums, n_samples, K, phi_1u, x, + double target_1u = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_1u, x, delta, delta_int, theta_0), - target_1l = laplace_marginal_poisson_log(sums, n_samples, K, phi_1l, x, + target_1l = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_1l, x, delta, delta_int, theta_0), - target_2u = laplace_marginal_poisson_log(sums, n_samples, K, phi_2u, x, + target_2u = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_2u, x, delta, delta_int, theta_0), - target_2l = laplace_marginal_poisson_log(sums, n_samples, K, phi_2l, x, + target_2l = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_2l, x, delta, delta_int, theta_0); VEC g_finite(dim_phi); From dc00d6f64aa85943df888386005e26adb4b59d4a Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sat, 25 Jul 2020 11:51:07 -0400 Subject: [PATCH 10/53] update signature of laplace_bernoulli_logit. 
--- stan/math/laplace/laplace_marginal_bernoulli_logit.hpp | 6 +++--- .../math/laplace/laplace_marginal_bernoulli_logit_test.cpp | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp b/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp index d16b2724091..e1cf79af72e 100644 --- a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp @@ -34,7 +34,7 @@ namespace math { */ // TODO: deprecate the below function. No default functor. template - T1 laplace_marginal_bernoulli_logit + T1 laplace_marginal_bernoulli_logit_lpmf (const std::vector& y, const std::vector& n_samples, // const K& covariance function, @@ -55,7 +55,7 @@ namespace math { // Add signature that takes in a Kernel functor specified by the user. template - T1 laplace_marginal_bernoulli_logit + T1 laplace_marginal_bernoulli_logit_lpmf (const std::vector& y, const std::vector& n_samples, const K& covariance_function, @@ -76,7 +76,7 @@ namespace math { // Add signature that takes x as a matrix instead of a vector. template - T1 laplace_marginal_bernoulli_logit + T1 laplace_marginal_bernoulli_logit_lpmf (const std::vector& y, const std::vector& n_samples, const K& covariance_function, diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp index 2ad5ed991e9..95451b6e6b7 100755 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp @@ -167,18 +167,18 @@ TEST(laplace, logistic_lgm_dim500) { // TO DO -- get total time from GPStuff and do more comparisons. // CASE 3: use wrapper function and compare result. - using stan::math::laplace_marginal_bernoulli_logit; + using stan::math::laplace_marginal_bernoulli_logit_lpmf; using stan::math::value_of; double marginal_density_v2 - = laplace_marginal_bernoulli_logit(y, n_samples, + = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, phi, x, delta, delta_int, theta_0, 0, 1e-3, 100); EXPECT_FLOAT_EQ(marginal_density, marginal_density_v2); marginal_density_v2 - = laplace_marginal_bernoulli_logit(y, n_samples, + = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0, 0, 1e-3, 100); From 2c73a619099fdf5deabf8a579ffc46984dace34e Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 13 Jan 2021 15:47:53 -0500 Subject: [PATCH 11/53] update reference for differentiation. --- stan/math/laplace/laplace_likelihood.hpp | 9 --------- stan/math/laplace/laplace_marginal.hpp | 17 ++++++----------- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index 89d48e61e6b..578df129a46 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -9,15 +9,6 @@ namespace math { // TO DO: create a parent structure, with each likelihood // function acting as a child structure. -/** - * Create an Eigen vector whose elements are all ones. 
- */ -// Eigen::VectorXd init_one(int n) { -// Eigen::VectorXd ones(n); -// for (int i = 0; i < n; i++) ones(i) = 1; -// return ones; -// } - /** * A structure to compute the log density, first, second, * and third-order derivatives for a log poisson likelihood diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 4eb48a31419..f2786de0080 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -16,17 +16,12 @@ #include // CHECK -- do we need this? // Reference for calculations of marginal and its gradients: -// Rasmussen and Williams, -// "Gaussian Processes for Machine Learning", -// Algorithms 3.1 and 5.1. -// The MIT Press, 2006. -// & -// Margossian, -// "The Search for simulation algorithms in pathological spaces" -// Algorithms 3 and 5 -// Thesis proposal, 2020 -// Note 1: where I didn't conflict with my own notation, I used their notation, -// which significantly helps when debuging the code. +// Charles C Margossian, Aki Vehtari, Daniel Simpson and Raj Agrawal +// "Hamiltonian Monte Carlo using an adjoint-differentiated +// Laplace approximation: Bayesian inference for latent Gaussian +// models and beyond." NeurIPS 2020 +// https://arxiv.org/abs/2004.12550 + namespace stan { namespace math { From dab8b7392bab26d9cc7f21fb5c7cf29c5cc5d1a7 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Thu, 14 Jan 2021 10:01:30 -0500 Subject: [PATCH 12/53] log likelihood for student t. --- stan/math/laplace/laplace_likelihood.hpp | 44 ++++++++++ stan/math/laplace/laplace_marginal.hpp | 1 - .../laplace_marginal_student_t_test.cpp | 85 +++++++++++++++++++ 3 files changed, 129 insertions(+), 1 deletion(-) create mode 100755 test/unit/math/laplace/laplace_marginal_student_t_test.cpp diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index 578df129a46..6c6aa7695f9 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -166,6 +166,48 @@ struct diff_logistic_log { } }; +struct diff_student_t { + /* Observations. */ + Eigen::VectorXd y_; + /* Latent parameter index for each observation. */ + std::vector y_index_; + // QUESTION - Save eta here too? + + diff_student_t(const Eigen::VectorXd& y, + const std::vector& y_index) + : y_(y), y_index_(y_index) { } + + /** + * Returns the log density. + */ + template + return_type_t + log_likelihood (const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + T_nu nu = eta(0); + T_nu sigma = eta(1); + T_nu sigma_squared = sigma * sigma; + + int n = theta.size(); + + // CHECK -- probably don't need normalizing constant. + return_type_t + log_constant = n * (lgamma((nu + 1) / 2) - lgamma(nu / 2) + - LOG_SQRT_PI - 0.5 * log(nu) - log(sigma)); + + T_theta log_kernel = 0; + + for (int i = 0; i < n; i++) { + T_theta distance = y_(i) - theta(y_index_[i]); + log_kernel += log(1 + distance * distance / (nu * sigma_squared)); + } + + return log_constant - 0.5 * (nu + 1) * log_kernel; + } +}; + + // TO DO: delete this structure. // To experiment with the prototype, provide a built-in covariance // function. 
In the final version, the user will pass the covariance @@ -188,6 +230,8 @@ struct sqr_exp_kernel_functor { } }; + + } // namespace math } // namespace stan diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index f2786de0080..60b17039b67 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -413,7 +413,6 @@ namespace math { const K& covariance_function, const Eigen::Matrix& phi, const Tx& x, - // const std::vector& x, const std::vector& delta, const std::vector& delta_int, const Eigen::Matrix& theta_0, diff --git a/test/unit/math/laplace/laplace_marginal_student_t_test.cpp b/test/unit/math/laplace/laplace_marginal_student_t_test.cpp new file mode 100755 index 00000000000..78f2383c212 --- /dev/null +++ b/test/unit/math/laplace/laplace_marginal_student_t_test.cpp @@ -0,0 +1,85 @@ +#include +#include +#include + +#include +#include +#include +#include +#include + +TEST(laplace, likelihood_differentiation) { + using stan::math::diff_student_t; + using stan::math::var; + + double test_tolerance = 2e-4; + + Eigen::VectorXd theta(2); + theta << 0, 0; // -2.45809, -3.6127; + Eigen::VectorXd eta(2); + eta << 1.2, 1; // nu, sigma + + Eigen::VectorXd y(2); + y << -2.655953, -4.2044; + std::vector y_index(2); + y_index[0] = 0; + y_index[1] = 1; + + // Eigen::Matrix theta_v = theta; + diff_student_t diff_functor(y, y_index); + + double log_density = diff_functor.log_likelihood(theta, eta); + + // benchmark against R + EXPECT_NEAR(-7.375673, log_density, test_tolerance); + + + + + // diff_logistic_log diff_functor(n_samples, y); + // double log_density = diff_functor.log_likelihood(theta); + // Eigen::VectorXd gradient, hessian; + // diff_functor.diff(theta, gradient, hessian); + // Eigen::VectorXd third_tensor = diff_functor.third_diff(theta); + // + // EXPECT_NEAR(-2.566843, log_density, test_tolerance); + + // finite diff calculations for first-order derivatives + // double diff = 1e-12; + // Eigen::VectorXd theta_1u = theta; + // Eigen::VectorXd theta_1l = theta; + // Eigen::VectorXd theta_2u = theta; + // Eigen::VectorXd theta_2l = theta; + // theta_1u(0) = theta(0) + diff; + // theta_1l(0) = theta(0) - diff; + // theta_2u(1) = theta(1) + diff; + // theta_2l(1) = theta(1) - diff; + // double diff_1 = (diff_functor.log_likelihood(theta_1u) + // - diff_functor.log_likelihood(theta_1l)) / (2 * diff); + // double diff_2 = (diff_functor.log_likelihood(theta_2u) + // - diff_functor.log_likelihood(theta_2l)) / (2 * diff); + + // EXPECT_NEAR(diff_1, gradient(0), test_tolerance); + // EXPECT_NEAR(diff_2, gradient(1), test_tolerance); + // + // // finite diff calculation for second-order derivatives + // Eigen::VectorXd gradient_1u, gradient_1l, hessian_1u, hessian_1l, + // gradient_2u, gradient_2l, hessian_2u, hessian_2l; + // diff_functor.diff(theta_1u, gradient_1u, hessian_1u); + // diff_functor.diff(theta_1l, gradient_1l, hessian_1l); + // diff_functor.diff(theta_2u, gradient_2u, hessian_2u); + // diff_functor.diff(theta_2l, gradient_2l, hessian_2l); + // + // double diff_grad_1 = (gradient_1u(0) - gradient_1l(0)) / (2 * diff); + // double diff_grad_2 = (gradient_2u(1) - gradient_2l(1)) / (2 * diff); + // + // EXPECT_NEAR(diff_grad_1, hessian(0), test_tolerance); + // EXPECT_NEAR(diff_grad_2, hessian(1), test_tolerance); + // + // // finite diff calculation for third-order derivatives + // double diff_hess_1 = (hessian_1u(0) - hessian_1l(0)) / (2 * diff); + // double diff_hess_2 = (hessian_2u(1) - hessian_2l(1)) / (2 
* diff); + // + // EXPECT_NEAR(diff_hess_1, third_tensor(0), test_tolerance); + // EXPECT_NEAR(diff_hess_2, third_tensor(1), test_tolerance); +} From 8795fed4b14c6a0327c7b747d5ade99c627dedcc Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Thu, 14 Jan 2021 16:08:28 -0500 Subject: [PATCH 13/53] logp for negative binomial. --- stan/math/laplace/laplace_likelihood.hpp | 62 +++++++++++++++++++++--- 1 file changed, 55 insertions(+), 7 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index 6c6aa7695f9..528272ab53d 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -2,6 +2,7 @@ #define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP #include +#include namespace stan { namespace math { @@ -16,6 +17,8 @@ namespace math { * This structure can be passed to the the laplace_marginal function. * Uses sufficient statistics for the data. */ + // FIX ME -- cannot use the sufficient statistic to compute log density in + // because of log factorial term. struct diff_poisson_log { /* The number of samples in each group. */ Eigen::VectorXd n_samples_; @@ -166,6 +169,51 @@ struct diff_logistic_log { } }; +struct diff_neg_binomial_2_log { + /* Observed counts */ + Eigen::VectorXd y_; + /* Latent parameter index for each observation. */ + std::vector y_index_; + /* The number of samples in each group. */ + Eigen::VectorXd n_samples_; + /* The sum of cours in each group. */ + Eigen::VectorXd sums_; + /* Number of latent Gaussian variables. */ + int n_theta_; + + diff_neg_binomial_2_log(const Eigen::VectorXd& y, + const std::vector& y_index, + int n_theta) + : y_(y), y_index_(y_index), n_theta_(n_theta) { + sums_ = Eigen::VectorXd::Zero(n_theta); + n_samples_ = Eigen::VectorXd::Zero(n_theta); + + for (int i = 0; i < n_theta; i++) { + n_samples_(y_index[i]) += 1; + sums_(y_index[i]) += 1; + } + } + + template + return_type_t + log_likelihood (const Eigen::Matrix& theta, + const T_eta& eta) { + return_type_t logp = 0; + for (size_t i = 0; i < y_.size(); i++) { + logp += binomial_coefficient_log(y_(i) + eta - 1, y_(i)); + } + // CHECK -- is it better to vectorize this loop? + Eigen::Matrix exp_theta = exp(theta); + for (int i = 0; i < n_theta_; i++) { + return_type_t + log_theta_plus_exp_theta = log(exp_theta(i) + eta); + logp += y_(i) * (theta(i) - log_theta_plus_exp_theta) + + n_samples_(i) * eta * (log(eta) - log_theta_plus_exp_theta); + } + return logp; + } +}; + struct diff_student_t { /* Observations. */ Eigen::VectorXd y_; @@ -180,19 +228,19 @@ struct diff_student_t { /** * Returns the log density. */ - template - return_type_t + template + return_type_t log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta) + const Eigen::Matrix& eta) const { - T_nu nu = eta(0); - T_nu sigma = eta(1); - T_nu sigma_squared = sigma * sigma; + T_eta nu = eta(0); + T_eta sigma = eta(1); + T_eta sigma_squared = sigma * sigma; int n = theta.size(); // CHECK -- probably don't need normalizing constant. - return_type_t + return_type_t log_constant = n * (lgamma((nu + 1) / 2) - lgamma(nu / 2) - LOG_SQRT_PI - 0.5 * log(nu) - log(sigma)); From 61ee5b4bd5a1d9a46b0ad2c9e9e4b021dcfd5c9d Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 15 Jan 2021 17:27:24 -0500 Subject: [PATCH 14/53] Features for neg binomial likelihood. 
--- stan/math/laplace/laplace_likelihood.hpp | 61 ++++++++++++-- ...place_marginal_neg_binomial_2_log_test.cpp | 83 +++++++++++++++++++ 2 files changed, 138 insertions(+), 6 deletions(-) create mode 100755 test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index 528272ab53d..d9af430ffe5 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -190,28 +190,77 @@ struct diff_neg_binomial_2_log { for (int i = 0; i < n_theta; i++) { n_samples_(y_index[i]) += 1; - sums_(y_index[i]) += 1; + sums_(y_index[i]) += y[i]; } } template return_type_t log_likelihood (const Eigen::Matrix& theta, - const T_eta& eta) { + const Eigen::Matrix& eta) { + T_eta eta_scalar = eta(0); return_type_t logp = 0; for (size_t i = 0; i < y_.size(); i++) { - logp += binomial_coefficient_log(y_(i) + eta - 1, y_(i)); + logp += binomial_coefficient_log(y_(i) + eta_scalar - 1, y_(i)); } // CHECK -- is it better to vectorize this loop? Eigen::Matrix exp_theta = exp(theta); for (int i = 0; i < n_theta_; i++) { return_type_t - log_theta_plus_exp_theta = log(exp_theta(i) + eta); - logp += y_(i) * (theta(i) - log_theta_plus_exp_theta) - + n_samples_(i) * eta * (log(eta) - log_theta_plus_exp_theta); + log_eta_plus_exp_theta = log(eta_scalar + exp_theta(i)); + logp += sums_(i) * (theta(i) - log_eta_plus_exp_theta) + + n_samples_(i) * eta_scalar + * (log(eta_scalar) - log_eta_plus_exp_theta); } return logp; } + + template + void diff (const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + Eigen::Matrix, + Eigen::Dynamic, 1>& gradient, + Eigen::Matrix, + Eigen::Dynamic, 1>& hessian) const { + typedef return_type_t scalar; + Eigen::VectorXd one = rep_vector(1, theta.size()); + T_eta eta_scalar = eta(0); + Eigen::Matrix + sums_plus_n_eta = sums_ + eta_scalar * n_samples_; + Eigen::Matrix exp_neg_theta = exp(-theta); + + Eigen::Matrix + one_plus_exp = one + eta_scalar * exp_neg_theta; + gradient = sums_ - sums_plus_n_eta. + cwiseProduct(elt_divide(one, one_plus_exp)); + + hessian = - eta_scalar * sums_plus_n_eta. + cwiseProduct(elt_divide(exp_neg_theta, square(one_plus_exp))); + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + third_diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) { + typedef return_type_t scalar; + Eigen::Matrix exp_theta = exp(theta); + T_eta eta_scalar = eta(0); + Eigen::Matrix + eta_vec = rep_vector(eta_scalar, theta.size()); + Eigen::Matrix + eta_plus_exp_theta = eta_vec + exp_theta; + + return - ((sums_ + eta_scalar * n_samples_) * eta_scalar). + cwiseProduct(exp_theta.cwiseProduct( + elt_divide(eta_vec - exp_theta, + square(eta_plus_exp_theta).cwiseProduct(eta_plus_exp_theta)))); + + // return (((sums_ + eta_scalar * n_samples_) * eta_scalar). + // cwiseProduct(one - 4 * eta_scalar * exp_neg_theta)). 
+ // cwiseProduct(elt_divide(exp_neg_theta, + // square(one + eta_scalar * exp_neg_theta))); + } + }; struct diff_student_t { diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp new file mode 100755 index 00000000000..6bf8b7f8eb9 --- /dev/null +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -0,0 +1,83 @@ +#include +#include +#include + +#include +#include +#include +#include +#include + +TEST(laplace, likelihood_differentiation) { + using stan::math::diff_neg_binomial_2_log; + using stan::math::var; + + Eigen::VectorXd theta(2); + theta << 1, 1; + int n_theta = theta.size(); + Eigen::VectorXd eta(1); + eta << 1.2; + + Eigen::VectorXd y(2); + y << 0, 1; + std::vector y_index(2); + y_index[0] = 0; + y_index[1] = 1; + + // Eigen::Matrix theta_v = theta; + diff_neg_binomial_2_log diff_functor(y, y_index, n_theta); + + double log_density = diff_functor.log_likelihood(theta, eta); + + // benchmark against R + EXPECT_FLOAT_EQ(-3.023328, log_density); + + Eigen::VectorXd gradient, hessian; + diff_functor.diff(theta, eta, gradient, hessian); + + Eigen::VectorXd third_diff = diff_functor.third_diff(theta, eta); + + // Benchmark against finite diff + double epsilon = 1e-6; + Eigen::VectorXd theta_l0 = theta, theta_u0 = theta, + theta_l1 = theta, theta_u1 = theta; + theta_u0(0) += epsilon; + theta_l0(0) -= epsilon; + theta_u1(1) += epsilon; + theta_l1(1) -= epsilon; + + Eigen::VectorXd finite_gradient(2); + finite_gradient(0) = + (diff_functor.log_likelihood(theta_u0, eta) + - diff_functor.log_likelihood(theta_l0, eta)) / (2 * epsilon); + + finite_gradient(1) = + (diff_functor.log_likelihood(theta_u1, eta) + - diff_functor.log_likelihood(theta_l1, eta)) / (2 * epsilon); + + Eigen::VectorXd gradient_l0, gradient_u0, gradient_l1, gradient_u1; + Eigen::VectorXd hessian_l0, hessian_u0, hessian_l1, hessian_u1; + Eigen::VectorXd hessian_dummy; + diff_functor.diff(theta_l0, eta, gradient_l0, hessian_l0); + diff_functor.diff(theta_u0, eta, gradient_u0, hessian_u0); + diff_functor.diff(theta_l1, eta, gradient_l1, hessian_l1); + diff_functor.diff(theta_u1, eta, gradient_u1, hessian_u1); + + Eigen::VectorXd finite_hessian(2); + finite_hessian(0) = (gradient_u0 - gradient_l0)(0) / (2 * epsilon); + finite_hessian(1) = (gradient_u1 - gradient_l1)(1) / (2 * epsilon); + + Eigen::VectorXd finite_third_diff(2); + finite_third_diff(0) = (hessian_u0 - hessian_l0)(0) / (2 * epsilon); + finite_third_diff(1) = (hessian_u1 - hessian_l1)(1) / (2 * epsilon); + + // std::cout << third_diff << std::endl; + // std::cout << finite_third_diff << std::endl; + + EXPECT_FLOAT_EQ(finite_gradient(0), gradient(0)); + EXPECT_FLOAT_EQ(finite_gradient(1), gradient(1)); + EXPECT_FLOAT_EQ(finite_hessian(0), hessian(0)); + EXPECT_FLOAT_EQ(finite_hessian(1), hessian(1)); + EXPECT_FLOAT_EQ(finite_third_diff(0), third_diff(0)); + EXPECT_FLOAT_EQ(finite_third_diff(1), third_diff(1)); +} From 8dee7348f8b71cd96ad85911f1c7795ac9b0cfb2 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 26 Jan 2021 19:39:24 -0500 Subject: [PATCH 15/53] Finish analytical likelihood diff for neg binomial. 
--- stan/math/laplace/laplace_likelihood.hpp | 63 ++++++++++++++++--- ...place_marginal_neg_binomial_2_log_test.cpp | 40 +++++++++++- 2 files changed, 94 insertions(+), 9 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index d9af430ffe5..4a62310255f 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -1,4 +1,4 @@ -#ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP + #ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP #define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP #include @@ -231,8 +231,7 @@ struct diff_neg_binomial_2_log { Eigen::Matrix one_plus_exp = one + eta_scalar * exp_neg_theta; - gradient = sums_ - sums_plus_n_eta. - cwiseProduct(elt_divide(one, one_plus_exp)); + gradient = sums_ - elt_divide(sums_plus_n_eta, one_plus_exp); hessian = - eta_scalar * sums_plus_n_eta. cwiseProduct(elt_divide(exp_neg_theta, square(one_plus_exp))); @@ -254,13 +253,63 @@ struct diff_neg_binomial_2_log { cwiseProduct(exp_theta.cwiseProduct( elt_divide(eta_vec - exp_theta, square(eta_plus_exp_theta).cwiseProduct(eta_plus_exp_theta)))); + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) { + typedef return_type_t scalar; + T_eta eta_scalar = eta(0); + Eigen::Matrix + y_plus_eta = y_ + rep_vector(eta_scalar, y_.size()); + Eigen::Matrix exp_theta = exp(theta); + Eigen::Matrix + exp_theta_plus_eta = exp_theta + rep_vector(eta_scalar, theta.size()); + + T_eta y_plus_eta_digamma_sum = 0; + for (int i = 0; i < y_.size(); i++) + y_plus_eta_digamma_sum += digamma(y_plus_eta(i)); + + Eigen::Matrix gradient_eta(1); + gradient_eta(0) = + y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) + - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) + + sum(n_samples_ * log(eta_scalar) + - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) + + n_samples_); + return gradient_eta; + } - // return (((sums_ + eta_scalar * n_samples_) * eta_scalar). - // cwiseProduct(one - 4 * eta_scalar * exp_neg_theta)). 
- // cwiseProduct(elt_divide(exp_neg_theta, - // square(one + eta_scalar * exp_neg_theta))); + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) { + T_eta eta_scalar = eta(0); + Eigen::Matrix exp_neg_theta = exp(-theta); + + return - elt_divide(n_samples_ - sums_.cwiseProduct(exp_neg_theta), + square(eta_scalar * exp_neg_theta + rep_vector(1, theta.size()))); + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff2_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::Matrix& W_root) { + T_eta eta_scalar = eta(0); + Eigen::Matrix exp_neg_theta = exp(-theta); + Eigen::Matrix one_plus_eta_exp + = rep_vector(1, theta.size()) + eta_scalar * exp_neg_theta; + + return 0.5 * (W_root.cwiseInverse()).cwiseProduct( + elt_divide(exp_neg_theta.cwiseProduct( + - eta_scalar * exp_neg_theta.cwiseProduct(sums_) + + sums_ + 2 * eta_scalar * n_samples_), + square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp))); } + }; struct diff_student_t { diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index 6bf8b7f8eb9..ebcdd9f7674 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -71,8 +71,6 @@ TEST(laplace, likelihood_differentiation) { finite_third_diff(0) = (hessian_u0 - hessian_l0)(0) / (2 * epsilon); finite_third_diff(1) = (hessian_u1 - hessian_l1)(1) / (2 * epsilon); - // std::cout << third_diff << std::endl; - // std::cout << finite_third_diff << std::endl; EXPECT_FLOAT_EQ(finite_gradient(0), gradient(0)); EXPECT_FLOAT_EQ(finite_gradient(1), gradient(1)); @@ -80,4 +78,42 @@ TEST(laplace, likelihood_differentiation) { EXPECT_FLOAT_EQ(finite_hessian(1), hessian(1)); EXPECT_FLOAT_EQ(finite_third_diff(0), third_diff(0)); EXPECT_FLOAT_EQ(finite_third_diff(1), third_diff(1)); + + // derivatives wrt eta + Eigen::VectorXd diff_eta = diff_functor.diff_eta(theta, eta); + + Eigen::VectorXd eta_l(1), eta_u(1); + eta_l(0) = eta(0) - epsilon; + eta_u(0) = eta(0) + epsilon; + double finite_gradient_eta = + (diff_functor.log_likelihood(theta, eta_u) + - diff_functor.log_likelihood(theta, eta_l)) / (2 * epsilon); + + EXPECT_FLOAT_EQ(finite_gradient_eta, diff_eta(0)); + + Eigen::VectorXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta); + + Eigen::VectorXd gradient_theta_l, + gradient_theta_u, + hessian_theta_u, + hessian_theta_l; + + diff_functor.diff(theta, eta_l, gradient_theta_l, hessian_theta_l); + diff_functor.diff(theta, eta_u, gradient_theta_u, hessian_theta_u); + Eigen::VectorXd finite_gradient_theta_eta + = (gradient_theta_u - gradient_theta_l) / (2 * epsilon); + + EXPECT_FLOAT_EQ(finite_gradient_theta_eta(0), diff_theta_eta(0)); + EXPECT_FLOAT_EQ(finite_gradient_theta_eta(1), diff_theta_eta(1)); + + Eigen::VectorXd W_root = (-hessian).cwiseSqrt(); + Eigen::VectorXd diff2_theta_eta + = diff_functor.diff2_theta_eta(theta, eta, W_root); + + Eigen::VectorXd finite_hessian_theta_eta + = ((-hessian_theta_u).cwiseSqrt() - (-hessian_theta_l).cwiseSqrt()) + / (2 * epsilon); + + EXPECT_FLOAT_EQ(finite_hessian_theta_eta(0), diff2_theta_eta(0)); + EXPECT_FLOAT_EQ(finite_hessian_theta_eta(1), diff2_theta_eta(1)); } From fcf33be33d009fd651105b8cfefda808a20ed289 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 27 Jan 2021 12:43:25 -0500 Subject: [PATCH 16/53] Prototype differentiation wrt likelihood 
hyperparameters. --- stan/math/laplace/laplace_likelihood.hpp | 38 +++--- stan/math/laplace/laplace_marginal.hpp | 112 ++++++++---------- ...place_marginal_neg_binomial_2_log_test.cpp | 48 +++++++- .../laplace_marginal_poisson_log_test.cpp | 5 +- 4 files changed, 119 insertions(+), 84 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index 4a62310255f..bc34ea60d2f 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -197,7 +197,7 @@ struct diff_neg_binomial_2_log { template return_type_t log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta) { + const Eigen::Matrix& eta) const { T_eta eta_scalar = eta(0); return_type_t logp = 0; for (size_t i = 0; i < y_.size(); i++) { @@ -271,42 +271,52 @@ struct diff_neg_binomial_2_log { for (int i = 0; i < y_.size(); i++) y_plus_eta_digamma_sum += digamma(y_plus_eta(i)); - Eigen::Matrix gradient_eta(1); - gradient_eta(0) = - y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) - - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) - + sum(n_samples_ * log(eta_scalar) - - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) - + n_samples_); - return gradient_eta; + Eigen::Matrix gradient_eta(1); + gradient_eta(0) = + y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) + - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) + + sum(n_samples_ * log(eta_scalar) + - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) + + n_samples_); + return gradient_eta; } template - Eigen::Matrix, Eigen::Dynamic, 1> + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) { + typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); - - return - elt_divide(n_samples_ - sums_.cwiseProduct(exp_neg_theta), + Eigen::Matrix + diff_matrix(theta.size(), 1); + diff_matrix.col(0) + = - elt_divide(n_samples_ - sums_.cwiseProduct(exp_neg_theta), square(eta_scalar * exp_neg_theta + rep_vector(1, theta.size()))); + return diff_matrix; } template - Eigen::Matrix, Eigen::Dynamic, 1> + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta, const Eigen::Matrix& W_root) { + typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); Eigen::Matrix one_plus_eta_exp = rep_vector(1, theta.size()) + eta_scalar * exp_neg_theta; - return 0.5 * (W_root.cwiseInverse()).cwiseProduct( + Eigen::Matrix + diff_matrix(theta.size(), 1); + + diff_matrix.col(0) = 0.5 * (W_root.cwiseInverse()).cwiseProduct( elt_divide(exp_neg_theta.cwiseProduct( - eta_scalar * exp_neg_theta.cwiseProduct(sums_) + sums_ + 2 * eta_scalar * n_samples_), square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp))); + + return diff_matrix; } diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 60b17039b67..3d08a2ad67a 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -26,8 +26,8 @@ namespace stan { namespace math { /** - * For a latent Gaussian model with global parameters phi, latent - * variables theta, and observations y, this function computes + * For a latent Gaussian model with hyperparameters phi and eta, + * latent variables theta, and observations y, this function computes * an approximation of the log marginal density, p(y | phi). 
* This is done by marginalizing out theta, using a Laplace * approxmation. The latter is obtained by finding the mode, @@ -52,7 +52,8 @@ namespace math { * an array of vectors. * @param[in] D structure to compute and differentiate the log likelihood. * @param[in] K structure to compute the covariance function. - * @param[in] phi the global parameter (input for the covariance function). + * @param[in] phi hyperparameter (input for the covariance function). + * @param[in] eta hyperparameter (input for likelihood). * @param[in] x fixed spatial data (input for the covariance function). * @param[in] delta additional fixed real data (input for covariance * function). @@ -76,7 +77,7 @@ namespace math { laplace_marginal_density (const D& diff_likelihood, const K& covariance_function, const Eigen::VectorXd& phi, - // const std::vector& x, + const Eigen::VectorXd& eta, const Tx& x, const std::vector& delta, const std::vector& delta_int, @@ -93,9 +94,8 @@ namespace math { using Eigen::MatrixXd; using Eigen::VectorXd; - int group_size = theta_0.size(); // CHECK -- do we ever need this? + int group_size = theta_0.size(); covariance = covariance_function(phi, x, delta, delta_int, msgs); - // CHECK -- should we compute the derivatives here too? theta = theta_0; double objective_old = - 1e+10; // CHECK -- what value to use? double objective_new; @@ -110,7 +110,7 @@ namespace math { // Compute variable a. VectorXd hessian; - diff_likelihood.diff(theta, l_grad, hessian); + diff_likelihood.diff(theta, eta, l_grad, hessian); VectorXd W = - hessian; W_root = sqrt(W); { @@ -129,7 +129,7 @@ namespace math { // Check for convergence. if (i != 0) objective_old = objective_new; objective_new = -0.5 * a.dot(theta) - + diff_likelihood.log_likelihood(theta); + + diff_likelihood.log_likelihood(theta, eta); double objective_diff = abs(objective_new - objective_old); if (objective_diff < tolerance) break; } @@ -150,7 +150,7 @@ namespace math { * threshold under which change is deemed small enough) and * maximum number of steps. * - * Wrapper for when the global parameter is passed as a double. + * Wrapper for when the hyperparameters passed as a double. * * @tparam T type of the initial guess. * @tparam D structure type for the likelihood object. @@ -171,13 +171,14 @@ namespace math { * @param[in] max_num_steps maximum number of steps for the Newton solver. * @return the log maginal density, p(y | phi). */ + // TODO: Operands and partials version of this. template double laplace_marginal_density (const D& diff_likelihood, const K& covariance_function, const Eigen::VectorXd& phi, + const Eigen::VectorXd& eta, const Tx& x, - // const std::vector& x, const std::vector& delta, const std::vector& delta_int, const Eigen::Matrix& theta_0, @@ -187,54 +188,13 @@ namespace math { Eigen::VectorXd theta, W_root, a, l_grad; Eigen::MatrixXd L, covariance; return laplace_marginal_density(diff_likelihood, covariance_function, - phi, x, delta, delta_int, + phi, eta, x, delta, delta_int, covariance, theta, W_root, L, a, l_grad, value_of(theta_0), msgs, tolerance, max_num_steps); } - // TO DO -- remove this code from final implementation. - /** - * A structure to compute sensitivities of the covariance - * function using forward mode autodiff. The functor is formatted - * so that it can be passed to Jacobian(). This requires one input - * vector and one output vector. - * - * TO DO: make this structure no templated. See comment by @SteveBronder. 
- * TO DO: remove this structure for final code: new differentiation - * algorithm does not require it. - */ - // template - // struct covariance_sensitivities { - // /* input data for the covariance function. */ - // std::vector x_; - // /* additional fixed real variable */ - // std::vector delta_; - // /* additional fixed integer variable */ - // std::vector delta_int_; - // /* structure to compute the covariance function. */ - // K covariance_function_; - // /* ostream for printing statements inside covariance function */ - // std::ostream* msgs_; - // - // covariance_sensitivities (const std::vector& x, - // const std::vector& delta, - // const std::vector& delta_int, - // const K& covariance_function, - // std::ostream* msgs) : - // // TO DO -- make covariance function the first argument - // x_(x), delta_(delta), delta_int_(delta_int), - // covariance_function_(covariance_function), msgs_(msgs) { } - // - // template - // Eigen::Matrix - // operator() (const Eigen::Matrix& phi) const { - // return to_vector(covariance_function_(phi, x_, delta_, - // delta_int_, msgs_)); - // } - // }; - /** * The vari class for the laplace marginal density. * The method is adapted from algorithm 5.1 in Rasmussen & Williams, @@ -249,23 +209,29 @@ namespace math { * instead of multiple large matrices. */ struct laplace_marginal_density_vari : public vari { - /* dimension of the global parameters. */ + /* dimension of hyperparameters. */ int phi_size_; - /* global parameters. */ + /* hyperparameters for covariance K. */ vari** phi_; + /* dimension of hyperparameters for likelihood. */ + int eta_size_; + /* hyperparameters for likelihood. */ + vari** eta_; /* the marginal density of the observation, conditional on the * globl parameters. */ vari** marginal_density_; /* An object to store the sensitivities of phi. */ Eigen::VectorXd phi_adj_; + /* An object to store the sensitivities of eta. */ + Eigen::VectorXd eta_adj_; template laplace_marginal_density_vari (const D& diff_likelihood, const K& covariance_function, const Eigen::Matrix& phi, + const Eigen::Matrix& eta, const Tx& x, - // const std::vector& x, const std::vector& delta, const std::vector& delta_int, double marginal_density, @@ -280,23 +246,25 @@ namespace math { phi_size_(phi.size()), phi_(ChainableStack::instance_->memalloc_.alloc_array( phi.size())), + eta_size_(eta.size()), + eta_(ChainableStack::instance_->memalloc_.alloc_array( + eta.size())), marginal_density_( ChainableStack::instance_->memalloc_.alloc_array(1)) { using Eigen::Matrix; using Eigen::Dynamic; + using Eigen::MatrixXd; + using Eigen::VectorXd; int theta_size = theta.size(); for (int i = 0; i < phi_size_; i++) phi_[i] = phi(i).vi_; + for (int i = 0; i < eta_size_; i++) eta_[i] = eta(i).vi_; // CHECK -- is there a cleaner way of doing this? marginal_density_[0] = this; marginal_density_[0] = new vari(marginal_density, false); - // compute derivatives of covariance matrix with respect to phi. - // EXPERIMENT: reverse-mode variation - // auto start = std::chrono::system_clock::now(); - Eigen::MatrixXd R; { Eigen::MatrixXd W_root_diag = W_root.asDiagonal(); @@ -310,10 +278,11 @@ namespace math { C = mdivide_left_tri(L, diag_pre_multiply(W_root, covariance)); + Eigen::VectorXd eta_dbl = value_of(eta); // CHECK -- should there be a minus sign here? 
Eigen::VectorXd s2 = 0.5 * (covariance.diagonal() - (C.transpose() * C).diagonal()) - .cwiseProduct(diff_likelihood.third_diff(theta)); + .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); phi_adj_ = Eigen::VectorXd(phi_size_); start_nested(); @@ -327,14 +296,30 @@ namespace math { set_zero_all_adjoints_nested(); grad(Z.vi_); - for (int j = 0; j < phi_size_; j++) - phi_adj_[j] = phi_v(j).adj(); + for (int j = 0; j < phi_size_; j++) phi_adj_[j] = phi_v(j).adj(); + } catch (const std::exception& e) { recover_memory_nested(); throw; } recover_memory_nested(); + if (eta_size_ != 0) { + VectorXd diff_eta = diff_likelihood.diff_eta(theta, eta_dbl); + MatrixXd diff_theta_eta = diff_likelihood.diff_theta_eta(theta, eta_dbl); + MatrixXd diff2_theta_eta + = diff_likelihood.diff2_theta_eta(theta, eta_dbl); + for (int l = 0; l < eta_size_; l++) { + VectorXd b = diff_theta_eta.col(l); + // CHECK -- can we use the fact the covariance matrix is symmetric? + VectorXd s3 = b - covariance * (R * b); + + eta_adj_(l) = diff_eta(l) - (W_root.cwiseInverse().asDiagonal() + * (R * (covariance * diff2_theta_eta.col(l)))).trace() + - s2.dot(s3); + } + } + // auto end = std::chrono::system_clock::now(); // std::chrono::duration time = end - ; // std::cout << "diffentiation time: " << time.count() << std::endl; @@ -372,6 +357,9 @@ namespace math { void chain() { for (int j = 0; j < phi_size_; j++) phi_[j]->adj_ += marginal_density_[0]->adj_ * phi_adj_[j]; + + for (int l = 0; l < eta_size_; l++) + eta_[l]->adj_ += marginal_density_[0]->adj_ * eta_adj_[l]; } }; diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index ebcdd9f7674..b93e81d7b35 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -91,7 +92,7 @@ TEST(laplace, likelihood_differentiation) { EXPECT_FLOAT_EQ(finite_gradient_eta, diff_eta(0)); - Eigen::VectorXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta); + Eigen::MatrixXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta); Eigen::VectorXd gradient_theta_l, gradient_theta_u, @@ -103,17 +104,52 @@ TEST(laplace, likelihood_differentiation) { Eigen::VectorXd finite_gradient_theta_eta = (gradient_theta_u - gradient_theta_l) / (2 * epsilon); - EXPECT_FLOAT_EQ(finite_gradient_theta_eta(0), diff_theta_eta(0)); - EXPECT_FLOAT_EQ(finite_gradient_theta_eta(1), diff_theta_eta(1)); + EXPECT_FLOAT_EQ(finite_gradient_theta_eta(0), diff_theta_eta(0, 0)); + EXPECT_FLOAT_EQ(finite_gradient_theta_eta(1), diff_theta_eta(1, 0)); Eigen::VectorXd W_root = (-hessian).cwiseSqrt(); - Eigen::VectorXd diff2_theta_eta + Eigen::MatrixXd diff2_theta_eta = diff_functor.diff2_theta_eta(theta, eta, W_root); Eigen::VectorXd finite_hessian_theta_eta = ((-hessian_theta_u).cwiseSqrt() - (-hessian_theta_l).cwiseSqrt()) / (2 * epsilon); - EXPECT_FLOAT_EQ(finite_hessian_theta_eta(0), diff2_theta_eta(0)); - EXPECT_FLOAT_EQ(finite_hessian_theta_eta(1), diff2_theta_eta(1)); + EXPECT_FLOAT_EQ(finite_hessian_theta_eta(0), diff2_theta_eta(0, 0)); + EXPECT_FLOAT_EQ(finite_hessian_theta_eta(1), diff2_theta_eta(1, 0)); +} + +TEST(laplace, neg_binomial_2_log_dbl) { + using stan::math::to_vector; + using stan::math::diff_neg_binomial_2_log; + using stan::math::sqr_exp_kernel_functor; + using stan::math::laplace_marginal_density; + + int dim_phi = 2, dim_eta = 1, dim_theta 
= 2; + Eigen::VectorXd phi(dim_phi), eta(dim_eta), theta_0(dim_theta); + phi << 1.6, 0.45; + eta << 1; + theta_0 << 0, 0; + + std::vector x(dim_theta); + Eigen::VectorXd x_0(2), x_1(2); + x_0 << 0.05100797, 0.16086164; + x_1 << -0.59823393, 0.98701425; + x[0] = x_0; + x[1] = x_1; + + std::vector delta; + std::vector delta_int; + std::vector y_index = {1, 1}; + Eigen::VectorXd y = to_vector({1, 0}); + + diff_neg_binomial_2_log diff_functor(y, y_index, dim_theta); + stan::math::sqr_exp_kernel_functor K; + + double log_p = laplace_marginal_density(diff_functor, K, phi, eta, x, delta, + delta_int, theta_0); + + // TODO: add test. + + } diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index b4f20157d0a..fceddce4ccd 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -96,8 +96,9 @@ TEST(laplace, poisson_lgm_dim2) { std::vector sums = {1, 0}; squared_kernel_functor K; - var target = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi, x, delta, - delta_int, theta_0); + var target + = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi, x, delta, + delta_int, theta_0); // Test with exposure argument Eigen::VectorXd ye(2); From 098df42c0a5c7ad58c5900437f4a16b45f14261b Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 27 Jan 2021 17:26:10 -0500 Subject: [PATCH 17/53] progress towards marginal diff. --- stan/math/laplace/laplace_likelihood.hpp | 17 +++-- stan/math/laplace/laplace_marginal.hpp | 52 +++++++++----- ...place_marginal_neg_binomial_2_log_test.cpp | 68 +++++++++++++++++-- 3 files changed, 105 insertions(+), 32 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index bc34ea60d2f..efd43a24730 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -240,7 +240,7 @@ struct diff_neg_binomial_2_log { template Eigen::Matrix, Eigen::Dynamic, 1> third_diff(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) { + const Eigen::Matrix& eta) const { typedef return_type_t scalar; Eigen::Matrix exp_theta = exp(theta); T_eta eta_scalar = eta(0); @@ -258,7 +258,7 @@ struct diff_neg_binomial_2_log { template Eigen::Matrix, Eigen::Dynamic, 1> diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) { + const Eigen::Matrix& eta) const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix @@ -284,7 +284,7 @@ struct diff_neg_binomial_2_log { template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) { + const Eigen::Matrix& eta) const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); @@ -296,11 +296,14 @@ struct diff_neg_binomial_2_log { return diff_matrix; } + // TODO: Address special case where we have an empty group (induces zero + // elements in W). 
template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta, - const Eigen::Matrix& W_root) { + const Eigen::Matrix& W_root) + const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); @@ -310,11 +313,11 @@ struct diff_neg_binomial_2_log { Eigen::Matrix diff_matrix(theta.size(), 1); - diff_matrix.col(0) = 0.5 * (W_root.cwiseInverse()).cwiseProduct( - elt_divide(exp_neg_theta.cwiseProduct( + diff_matrix.col(0) = // 0.5 * (W_root.cwiseInverse()).cwiseProduct( + - elt_divide(exp_neg_theta.cwiseProduct( - eta_scalar * exp_neg_theta.cwiseProduct(sums_) + sums_ + 2 * eta_scalar * n_samples_), - square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp))); + square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp)); // ); return diff_matrix; } diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 3d08a2ad67a..c9389ab6400 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -304,21 +304,35 @@ namespace math { } recover_memory_nested(); - if (eta_size_ != 0) { - VectorXd diff_eta = diff_likelihood.diff_eta(theta, eta_dbl); - MatrixXd diff_theta_eta = diff_likelihood.diff_theta_eta(theta, eta_dbl); - MatrixXd diff2_theta_eta - = diff_likelihood.diff2_theta_eta(theta, eta_dbl); - for (int l = 0; l < eta_size_; l++) { - VectorXd b = diff_theta_eta.col(l); - // CHECK -- can we use the fact the covariance matrix is symmetric? - VectorXd s3 = b - covariance * (R * b); - - eta_adj_(l) = diff_eta(l) - (W_root.cwiseInverse().asDiagonal() - * (R * (covariance * diff2_theta_eta.col(l)))).trace() - - s2.dot(s3); - } + eta_adj_ = Eigen::VectorXd(eta_size_); + if (eta_size_ != 0) { + VectorXd diff_eta = diff_likelihood.diff_eta(theta, eta_dbl); + MatrixXd diff_theta_eta = diff_likelihood.diff_theta_eta(theta, eta_dbl); + MatrixXd diff2_theta_eta + = diff_likelihood.diff2_theta_eta(theta, eta_dbl, W_root); + + for (int l = 0; l < eta_size_; l++) { + VectorXd b = covariance * diff_theta_eta.col(l); + // CHECK -- can we use the fact the covariance matrix is symmetric? + VectorXd s3 = b - covariance * (R * b); + + std::cout << diff_eta(l) << std::endl + << - 0.5 * (L.transpose().triangularView() + .solve(L.triangularView() + .solve(- covariance * diff2_theta_eta.col(l)))).trace() << std::endl + << - s2.dot(s3) << std::endl; + + eta_adj_(l) = diff_eta(l) + - 0.5 * (L.transpose().triangularView() + .solve(L.triangularView() + .solve(- covariance * diff2_theta_eta.col(l)))).trace() + // - (mdivide_left_tri(L.transpose(), + // C * diff2_theta_eta.col(l))).trace() + // - (W_root.cwiseInverse().asDiagonal() + // * (R * (covariance * diff2_theta_eta.col(l)))).trace() + - s2.dot(s3); } + } // auto end = std::chrono::system_clock::now(); // std::chrono::duration time = end - ; @@ -395,11 +409,13 @@ namespace math { * @param[in] max_num_steps maximum number of steps for the Newton solver. * @return the log maginal density, p(y | phi). 
*/ - template + template T1 laplace_marginal_density (const D& diff_likelihood, const K& covariance_function, const Eigen::Matrix& phi, + const Eigen::Matrix& eta, const Tx& x, const std::vector& delta, const std::vector& delta_int, @@ -418,8 +434,8 @@ namespace math { marginal_density_dbl = laplace_marginal_density(diff_likelihood, covariance_function, - value_of(phi), x, delta, delta_int, - covariance, + value_of(phi), value_of(eta), + x, delta, delta_int, covariance, theta, W_root, L, a, l_grad, value_of(theta_0), msgs, @@ -437,7 +453,7 @@ namespace math { laplace_marginal_density_vari* vi0 = new laplace_marginal_density_vari(diff_likelihood, covariance_function, - phi, x, delta, delta_int, + phi, eta, x, delta, delta_int, marginal_density_dbl, covariance, theta, W_root, L, a, l_grad, diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index b93e81d7b35..8623de52e07 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -112,8 +112,7 @@ TEST(laplace, likelihood_differentiation) { = diff_functor.diff2_theta_eta(theta, eta, W_root); Eigen::VectorXd finite_hessian_theta_eta - = ((-hessian_theta_u).cwiseSqrt() - (-hessian_theta_l).cwiseSqrt()) - / (2 * epsilon); + = (hessian_theta_u - hessian_theta_l) / (2 * epsilon); EXPECT_FLOAT_EQ(finite_hessian_theta_eta(0), diff2_theta_eta(0, 0)); EXPECT_FLOAT_EQ(finite_hessian_theta_eta(1), diff2_theta_eta(1, 0)); @@ -124,6 +123,8 @@ TEST(laplace, neg_binomial_2_log_dbl) { using stan::math::diff_neg_binomial_2_log; using stan::math::sqr_exp_kernel_functor; using stan::math::laplace_marginal_density; + using stan::math::var; + using stan::math::value_of; int dim_phi = 2, dim_eta = 1, dim_theta = 2; Eigen::VectorXd phi(dim_phi), eta(dim_eta), theta_0(dim_theta); @@ -140,8 +141,8 @@ TEST(laplace, neg_binomial_2_log_dbl) { std::vector delta; std::vector delta_int; - std::vector y_index = {1, 1}; - Eigen::VectorXd y = to_vector({1, 0}); + std::vector y_index = {0, 1}; + Eigen::VectorXd y = to_vector({1, 6}); diff_neg_binomial_2_log diff_functor(y, y_index, dim_theta); stan::math::sqr_exp_kernel_functor K; @@ -149,7 +150,60 @@ TEST(laplace, neg_binomial_2_log_dbl) { double log_p = laplace_marginal_density(diff_functor, K, phi, eta, x, delta, delta_int, theta_0); - // TODO: add test. 
- - + Eigen::Matrix phi_v = phi, eta_v = eta; + + var target + = laplace_marginal_density(diff_functor, K, phi_v, eta_v, x, delta, + delta_int, theta_0); + + VEC g; + AVEC parm_vec = createAVEC(phi_v(0), phi_v(1), eta_v(0)); + target.grad(parm_vec, g); + + for (size_t i = 0; i < g.size(); i++) std::cout << g[i] << " "; + std::cout << std::endl; + + // finite diff test + double diff = 1e-10; + Eigen::VectorXd phi_dbl = value_of(phi), eta_dbl = value_of(eta); + Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl, + phi_2l = phi_dbl, phi_2u = phi_dbl, eta_l = eta_dbl, eta_u = eta_dbl; + phi_1l(0) -= diff; + phi_1u(0) += diff; + phi_2l(1) -= diff; + phi_2u(1) += diff; + eta_l(0) -= diff; + eta_u(0) += diff; + + double target_phi_1u = laplace_marginal_density(diff_functor, K, phi_1u, + eta_dbl, x, delta, + delta_int, theta_0), + target_phi_1l = laplace_marginal_density(diff_functor, K, phi_1l, + eta_dbl, x, delta, + delta_int, theta_0), + target_phi_2u = laplace_marginal_density(diff_functor, K, phi_2u, + eta_dbl, x, delta, + delta_int, theta_0), + target_phi_2l = laplace_marginal_density(diff_functor, K, phi_2l, + eta_dbl, x, delta, + delta_int, theta_0), + target_eta_u = laplace_marginal_density(diff_functor, K, phi_dbl, + eta_u, x, delta, + delta_int, theta_0), + target_eta_l = laplace_marginal_density(diff_functor, K, phi_1u, + eta_l, x, delta, + delta_int, theta_0); + + VEC g_finite(dim_phi + dim_eta); + g_finite[0] = (target_phi_1u - target_phi_1l) / (2 * diff); + g_finite[1] = (target_phi_2u - target_phi_2l) / (2 * diff); + g_finite[2] = (target_eta_u - target_eta_l) / (2 * diff); + + for (int i = 0; i < 3; i++) std::cout << g_finite[i] << " "; + std::cout << std::endl; + + // double tol = 1e-4; + // EXPECT_NEAR(g_finite[0], g[0], tol); + // EXPECT_NEAR(g_finite[1], g[1], tol); + // EXPECT_NEAR(g_finite[2], g[2], tol); } From 4bf51ee7c474f42298dff4d940dc8ce2dddaa6db Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 29 Jan 2021 19:48:00 -0500 Subject: [PATCH 18/53] more unit tests. --- stan/math/laplace/laplace_marginal.hpp | 31 ++-- ...place_marginal_neg_binomial_2_log_test.cpp | 135 +++++++++++++++++- 2 files changed, 152 insertions(+), 14 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index c9389ab6400..eb1ca6d97f0 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -314,23 +314,28 @@ namespace math { for (int l = 0; l < eta_size_; l++) { VectorXd b = covariance * diff_theta_eta.col(l); // CHECK -- can we use the fact the covariance matrix is symmetric? 
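+          // The gradient with respect to eta_l combines three terms: the
+          // explicit derivative of the log likelihood (diff_eta), the
+          // derivative of the -0.5 * log|B| term, and the implicit dependence
+          // through the mode theta*, which enters via s2.dot(s3) below.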
- VectorXd s3 = b - covariance * (R * b); + VectorXd s3(2); // = b - covariance * (R * b); + s3 << -0.00336244, 0.252416; - std::cout << diff_eta(l) << std::endl - << - 0.5 * (L.transpose().triangularView() - .solve(L.triangularView() - .solve(- covariance * diff2_theta_eta.col(l)))).trace() << std::endl - << - s2.dot(s3) << std::endl; + std::cout << "s3: " << (b - covariance * (R * b)).transpose() << std::endl; + std::cout << "s2: " << s2.transpose() << std::endl; + std::cout << "diff_theta_eta: " << diff_eta(l) << std::endl; + + std::cout << "t1: " << diff_eta(l) << std::endl + << "t2: " << 0.5 * (L.transpose().triangularView() + .solve(L.triangularView() + .solve(W_root.asDiagonal() * covariance * elt_divide( + diff2_theta_eta.col(l), W_root).asDiagonal() + ))).trace() << std::endl + << "t3: " << s2.dot(s3) << std::endl; eta_adj_(l) = diff_eta(l) - - 0.5 * (L.transpose().triangularView() + + 0.5 * (L.transpose().triangularView() .solve(L.triangularView() - .solve(- covariance * diff2_theta_eta.col(l)))).trace() - // - (mdivide_left_tri(L.transpose(), - // C * diff2_theta_eta.col(l))).trace() - // - (W_root.cwiseInverse().asDiagonal() - // * (R * (covariance * diff2_theta_eta.col(l)))).trace() - - s2.dot(s3); + .solve(W_root.asDiagonal() * covariance * elt_divide( + diff2_theta_eta.col(l), W_root).asDiagonal() + ))).trace() + + s2.dot(s3); } } diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index 8623de52e07..9f8089a4405 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -118,6 +118,21 @@ TEST(laplace, likelihood_differentiation) { EXPECT_FLOAT_EQ(finite_hessian_theta_eta(1), diff2_theta_eta(1, 0)); } +// unit tests for derivatives of B +template +Eigen::MatrixXd compute_B(const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta, + const Eigen::MatrixXd& covariance, + T diff_functor) { + int group_size = theta.size(); + Eigen::VectorXd l_grad, hessian; + diff_functor.diff(theta, eta, l_grad, hessian); + Eigen::VectorXd W_root = (- hessian).cwiseSqrt(); + + return Eigen::MatrixXd::Identity(group_size, group_size) + + stan::math::quad_form_diag(covariance, W_root); +} + TEST(laplace, neg_binomial_2_log_dbl) { using stan::math::to_vector; using stan::math::diff_neg_binomial_2_log; @@ -160,11 +175,12 @@ TEST(laplace, neg_binomial_2_log_dbl) { AVEC parm_vec = createAVEC(phi_v(0), phi_v(1), eta_v(0)); target.grad(parm_vec, g); + std::cout << "autodiff: "; for (size_t i = 0; i < g.size(); i++) std::cout << g[i] << " "; std::cout << std::endl; // finite diff test - double diff = 1e-10; + double diff = 1e-7; Eigen::VectorXd phi_dbl = value_of(phi), eta_dbl = value_of(eta); Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl, phi_2l = phi_dbl, phi_2u = phi_dbl, eta_l = eta_dbl, eta_u = eta_dbl; @@ -199,9 +215,126 @@ TEST(laplace, neg_binomial_2_log_dbl) { g_finite[1] = (target_phi_2u - target_phi_2l) / (2 * diff); g_finite[2] = (target_eta_u - target_eta_l) / (2 * diff); + std::cout << "Finite: "; for (int i = 0; i < 3; i++) std::cout << g_finite[i] << " "; std::cout << std::endl; + // Save relevant variables for more detailed unit tests. 
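+  // The overload of laplace_marginal_density used below also returns, through
+  // its output arguments, the prior covariance, the mode theta, W_root (the
+  // square root of the negated Hessian), the Cholesky factor L, the vector a,
+  // and the likelihood gradient, so each piece of the eta-gradient can be
+  // inspected separately.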
+ Eigen::MatrixXd covariance, L; + Eigen::VectorXd theta, a, theta_u, theta_l, W_root, l_grad, hessian; + target = laplace_marginal_density(diff_functor, K, phi, eta_dbl, x, + delta, delta_int, covariance, theta, W_root, L, a, l_grad, theta_0); + + Eigen::MatrixXd B = compute_B(theta, eta_dbl, covariance, diff_functor), + B_l = compute_B(theta, eta_l, covariance, diff_functor), + B_u = compute_B(theta, eta_u, covariance, diff_functor); + + std::cout << std::endl << "B finite diff tests" << std::endl; + std::cout << "log|B|: " << log(B.determinant()) << std::endl; + std::cout << "finite diff: " + << (log(B_u.determinant()) - log(B_l.determinant())) / (2 * diff) + << std::endl; + + diff_functor.diff(theta, eta_dbl, l_grad, hessian); + W_root = (-hessian).cwiseSqrt(); + + Eigen::VectorXd hessian_l, hessian_u; + diff_functor.diff(theta, eta_l, l_grad, hessian_l); + diff_functor.diff(theta, eta_u, l_grad, hessian_u); + + // L = stan::math::cholesky_decompose(B); + + // std::cout << "candiate 1: " << + // (B.inverse() * (-covariance * diff_functor.diff2_theta_eta(theta_0, eta_dbl, W_root) + // )).trace() << std::endl; + + Eigen::VectorXd W_finite_diff + = (hessian_u - hessian_l) / (2 * diff); + + Eigen::VectorXd W_root_finite_diff + = ((-hessian_u).cwiseSqrt() - (-hessian_l).cwiseSqrt()) / (2 * diff); + + Eigen::VectorXd W_root_diff = - 0.5 * stan::math::elt_divide( + diff_functor.diff2_theta_eta(theta, eta_dbl, W_root), W_root); + + std::cout << "candiate 2: " << + 2 * (B.inverse() * (W_root.asDiagonal() * covariance * W_root_diff.asDiagonal()) + ).trace() << std::endl; + + double diff_log_B = + - (L.transpose().triangularView() + .solve(L.triangularView() + .solve(W_root.asDiagonal() * covariance + * stan::math::elt_divide(diff_functor. + diff2_theta_eta(theta, eta_dbl, W_root), W_root).asDiagonal()))).trace(); + + std::cout << "candiate 3: " << diff_log_B << std::endl; + std::cout << "full log B term: " << - 0.5 * diff_log_B << std::endl; + + std::cout << std::endl << "W_root: " << W_root.transpose() << std::endl; + + std::cout << "W_root finite diff: " << W_root_finite_diff.transpose() << std::endl; + std::cout << "W_root diff: " << W_root_diff.transpose() << std::endl; + + std::cout << std::endl << "diff2 finite: " << W_finite_diff.transpose() << std::endl; + std::cout << std::endl << "diff: " << + diff_functor.diff2_theta_eta(theta, eta_dbl, W_root).transpose() << std::endl; + + std::cout << std::endl << "Differentiation of theta star." << std::endl; + + + Eigen::VectorXd b = covariance * diff_functor.diff_theta_eta(theta, eta_dbl); + Eigen::VectorXd s3 = (Eigen::MatrixXd::Identity(theta.size(), theta.size()) + + covariance * stan::math::square(W_root).asDiagonal()).inverse() * b; + + Eigen::VectorXd theta_star = theta; + + target = laplace_marginal_density(diff_functor, K, phi, eta_u, x, + delta, delta_int, covariance, theta_u, W_root, L, a, l_grad, theta_0); + target = laplace_marginal_density(diff_functor, K, phi, eta_l, x, + delta, delta_int, covariance, theta_l, W_root, L, a, l_grad, theta_0); + + std::cout << "theta: " << theta.transpose() << std::endl; + std::cout << "theta finite diff: " + << ((theta_u - theta_l) / (2 * diff)).transpose() + << std::endl; + + std::cout << "theta analytical diff: " << s3.transpose() << std::endl; + + std::cout << std::endl << "Computation of s2: " << std::endl; + + // Reset the variables to their unperturbed states. 
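+  // s2 is benchmarked below by finite-differencing -0.5 * log|B(theta)| in
+  // each component of theta at the mode: this isolates the explicit
+  // dependence of the log-determinant term on theta.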
+ target = laplace_marginal_density(diff_functor, K, phi, eta_dbl, x, + delta, delta_int, covariance, theta, W_root, L, a, l_grad, theta_0); + + Eigen::VectorXd theta_1l = theta_star, theta_1u = theta_star, + theta_2l = theta_star, theta_2u = theta_star; + theta_1l(0) -= diff; + theta_1u(0) += diff; + theta_2l(1) -= diff; + theta_2u(1) += diff; + + B = compute_B(theta_star, eta, covariance, diff_functor); + + double + log_B_1l = log(compute_B(theta_1l, eta_dbl, covariance, diff_functor).determinant()), + log_B_1u = log(compute_B(theta_1u, eta_dbl, covariance, diff_functor).determinant()), + log_B_2l = log(compute_B(theta_2l, eta_dbl, covariance, diff_functor).determinant()), + log_B_2u = log(compute_B(theta_2u, eta_dbl, covariance, diff_functor).determinant()); + + Eigen::VectorXd s2_finite(2); + s2_finite(0) = - 0.5 * (log_B_1u - log_B_1l) / (2 * diff); + s2_finite(1) = - 0.5 * (log_B_2u - log_B_2l) / (2 * diff); + + std::cout << "s2_finite: " << s2_finite.transpose() << std::endl; + + + std::cout << "log diff finite: " << + (diff_functor.log_likelihood(theta_star, eta_u) + - diff_functor.log_likelihood(theta_star, eta_l)) / (2 * diff) + << std::endl; + + // double tol = 1e-4; // EXPECT_NEAR(g_finite[0], g[0], tol); // EXPECT_NEAR(g_finite[1], g[1], tol); From 952858890499bec94ef9d423cdd386658912f0a0 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sat, 30 Jan 2021 16:11:34 -0500 Subject: [PATCH 19/53] Fix finite diff benchmark. --- .../math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index 9f8089a4405..c9d257eb801 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -206,7 +206,7 @@ TEST(laplace, neg_binomial_2_log_dbl) { target_eta_u = laplace_marginal_density(diff_functor, K, phi_dbl, eta_u, x, delta, delta_int, theta_0), - target_eta_l = laplace_marginal_density(diff_functor, K, phi_1u, + target_eta_l = laplace_marginal_density(diff_functor, K, phi_dbl, eta_l, x, delta, delta_int, theta_0); From 8bb9c78ac0b847937f2ed3156e35857f57fbb089 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sat, 30 Jan 2021 18:31:09 -0500 Subject: [PATCH 20/53] Create wrapper for neg binomial likelihood. --- stan/math/laplace/laplace_marginal.hpp | 29 ++-- .../laplace/laplace_marginal_poisson_log.hpp | 12 +- ...place_marginal_neg_binomial_2_log_test.cpp | 142 ++---------------- 3 files changed, 26 insertions(+), 157 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index eb1ca6d97f0..b9cccb1b8fa 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -311,30 +311,21 @@ namespace math { MatrixXd diff2_theta_eta = diff_likelihood.diff2_theta_eta(theta, eta_dbl, W_root); + VectorXd W_root_inv = W_root.cwiseInverse(); + for (int l = 0; l < eta_size_; l++) { VectorXd b = covariance * diff_theta_eta.col(l); // CHECK -- can we use the fact the covariance matrix is symmetric? 
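+          // Here b = K * d(grad log p)/d eta_l, and s3 = b - K * (R * b),
+          // which the unit tests check numerically equals (I + K W)^{-1} * b,
+          // i.e. the derivative of the mode theta* with respect to eta_l.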
- VectorXd s3(2); // = b - covariance * (R * b); - s3 << -0.00336244, 0.252416; - - std::cout << "s3: " << (b - covariance * (R * b)).transpose() << std::endl; - std::cout << "s2: " << s2.transpose() << std::endl; - std::cout << "diff_theta_eta: " << diff_eta(l) << std::endl; - - std::cout << "t1: " << diff_eta(l) << std::endl - << "t2: " << 0.5 * (L.transpose().triangularView() - .solve(L.triangularView() - .solve(W_root.asDiagonal() * covariance * elt_divide( - diff2_theta_eta.col(l), W_root).asDiagonal() - ))).trace() << std::endl - << "t3: " << s2.dot(s3) << std::endl; + VectorXd s3 = b - covariance * (R * b); eta_adj_(l) = diff_eta(l) - + 0.5 * (L.transpose().triangularView() - .solve(L.triangularView() - .solve(W_root.asDiagonal() * covariance * elt_divide( - diff2_theta_eta.col(l), W_root).asDiagonal() - ))).trace() + + 0.5 * (W_root_inv.asDiagonal() * R * (covariance * + elt_divide(diff2_theta_eta.col(l), W_root).asDiagonal())).trace() + // + 0.5 * (L.transpose().triangularView() + // .solve(L.triangularView() + // .solve(W_root.asDiagonal() * covariance * elt_divide( + // diff2_theta_eta.col(l), W_root).asDiagonal() + // ))).trace() + s2.dot(s3); } } diff --git a/stan/math/laplace/laplace_marginal_poisson_log.hpp b/stan/math/laplace/laplace_marginal_poisson_log.hpp index 31aeb485b39..805f3d33630 100644 --- a/stan/math/laplace/laplace_marginal_poisson_log.hpp +++ b/stan/math/laplace/laplace_marginal_poisson_log.hpp @@ -1,17 +1,11 @@ -#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_HPP -#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_HPP +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_LOG_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_LOG_HPP #include #include namespace stan { namespace math { - // EXPERIMENTAL - // Use the squared exponential kernel, for the time defined - // in the laplace_likelihood folder. - // In the final version, the user will provide the covariance - // function. - /** * Wrapper function around the laplace_marginal function for * a log poisson likelihood. Returns the marginal density @@ -24,7 +18,7 @@ namespace math { * @param[in] y total counts per group. Second sufficient statistics. * @param[in] n_samples number of samples per group. First sufficient * statistics. - * NOTE: here we would have the covariance functor + * @param[in] covariance a function which returns the prior covariance. * @param[in] phi model parameters for the covariance functor. * @param[in] x data for the covariance functor. * @param[in] delta additional real data for the covariance functor. 
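 * All of these arguments are forwarded to laplace_marginal_density, together
 * with a diff_poisson_log likelihood built from the per-group sufficient
 * statistics y and n_samples.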
diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index c9d257eb801..d0df2a19692 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -138,6 +139,7 @@ TEST(laplace, neg_binomial_2_log_dbl) { using stan::math::diff_neg_binomial_2_log; using stan::math::sqr_exp_kernel_functor; using stan::math::laplace_marginal_density; + using stan::math::laplace_marginal_neg_binomial_2_log_lpmf; using stan::math::var; using stan::math::value_of; @@ -157,7 +159,8 @@ TEST(laplace, neg_binomial_2_log_dbl) { std::vector delta; std::vector delta_int; std::vector y_index = {0, 1}; - Eigen::VectorXd y = to_vector({1, 6}); + std::vector y_obs = {1, 6}; + Eigen::VectorXd y = to_vector(y_obs); diff_neg_binomial_2_log diff_functor(y, y_index, dim_theta); stan::math::sqr_exp_kernel_functor K; @@ -175,11 +178,7 @@ TEST(laplace, neg_binomial_2_log_dbl) { AVEC parm_vec = createAVEC(phi_v(0), phi_v(1), eta_v(0)); target.grad(parm_vec, g); - std::cout << "autodiff: "; - for (size_t i = 0; i < g.size(); i++) std::cout << g[i] << " "; - std::cout << std::endl; - - // finite diff test + // finite diff benchmark double diff = 1e-7; Eigen::VectorXd phi_dbl = value_of(phi), eta_dbl = value_of(eta); Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl, @@ -215,128 +214,13 @@ TEST(laplace, neg_binomial_2_log_dbl) { g_finite[1] = (target_phi_2u - target_phi_2l) / (2 * diff); g_finite[2] = (target_eta_u - target_eta_l) / (2 * diff); - std::cout << "Finite: "; - for (int i = 0; i < 3; i++) std::cout << g_finite[i] << " "; - std::cout << std::endl; - - // Save relevant variables for more detailed unit tests. 
- Eigen::MatrixXd covariance, L; - Eigen::VectorXd theta, a, theta_u, theta_l, W_root, l_grad, hessian; - target = laplace_marginal_density(diff_functor, K, phi, eta_dbl, x, - delta, delta_int, covariance, theta, W_root, L, a, l_grad, theta_0); - - Eigen::MatrixXd B = compute_B(theta, eta_dbl, covariance, diff_functor), - B_l = compute_B(theta, eta_l, covariance, diff_functor), - B_u = compute_B(theta, eta_u, covariance, diff_functor); - - std::cout << std::endl << "B finite diff tests" << std::endl; - std::cout << "log|B|: " << log(B.determinant()) << std::endl; - std::cout << "finite diff: " - << (log(B_u.determinant()) - log(B_l.determinant())) / (2 * diff) - << std::endl; - - diff_functor.diff(theta, eta_dbl, l_grad, hessian); - W_root = (-hessian).cwiseSqrt(); - - Eigen::VectorXd hessian_l, hessian_u; - diff_functor.diff(theta, eta_l, l_grad, hessian_l); - diff_functor.diff(theta, eta_u, l_grad, hessian_u); - - // L = stan::math::cholesky_decompose(B); - - // std::cout << "candiate 1: " << - // (B.inverse() * (-covariance * diff_functor.diff2_theta_eta(theta_0, eta_dbl, W_root) - // )).trace() << std::endl; - - Eigen::VectorXd W_finite_diff - = (hessian_u - hessian_l) / (2 * diff); - - Eigen::VectorXd W_root_finite_diff - = ((-hessian_u).cwiseSqrt() - (-hessian_l).cwiseSqrt()) / (2 * diff); - - Eigen::VectorXd W_root_diff = - 0.5 * stan::math::elt_divide( - diff_functor.diff2_theta_eta(theta, eta_dbl, W_root), W_root); - - std::cout << "candiate 2: " << - 2 * (B.inverse() * (W_root.asDiagonal() * covariance * W_root_diff.asDiagonal()) - ).trace() << std::endl; - - double diff_log_B = - - (L.transpose().triangularView() - .solve(L.triangularView() - .solve(W_root.asDiagonal() * covariance - * stan::math::elt_divide(diff_functor. - diff2_theta_eta(theta, eta_dbl, W_root), W_root).asDiagonal()))).trace(); - - std::cout << "candiate 3: " << diff_log_B << std::endl; - std::cout << "full log B term: " << - 0.5 * diff_log_B << std::endl; - - std::cout << std::endl << "W_root: " << W_root.transpose() << std::endl; - - std::cout << "W_root finite diff: " << W_root_finite_diff.transpose() << std::endl; - std::cout << "W_root diff: " << W_root_diff.transpose() << std::endl; - - std::cout << std::endl << "diff2 finite: " << W_finite_diff.transpose() << std::endl; - std::cout << std::endl << "diff: " << - diff_functor.diff2_theta_eta(theta, eta_dbl, W_root).transpose() << std::endl; - - std::cout << std::endl << "Differentiation of theta star." << std::endl; - - - Eigen::VectorXd b = covariance * diff_functor.diff_theta_eta(theta, eta_dbl); - Eigen::VectorXd s3 = (Eigen::MatrixXd::Identity(theta.size(), theta.size()) - + covariance * stan::math::square(W_root).asDiagonal()).inverse() * b; - - Eigen::VectorXd theta_star = theta; - - target = laplace_marginal_density(diff_functor, K, phi, eta_u, x, - delta, delta_int, covariance, theta_u, W_root, L, a, l_grad, theta_0); - target = laplace_marginal_density(diff_functor, K, phi, eta_l, x, - delta, delta_int, covariance, theta_l, W_root, L, a, l_grad, theta_0); - - std::cout << "theta: " << theta.transpose() << std::endl; - std::cout << "theta finite diff: " - << ((theta_u - theta_l) / (2 * diff)).transpose() - << std::endl; - - std::cout << "theta analytical diff: " << s3.transpose() << std::endl; - - std::cout << std::endl << "Computation of s2: " << std::endl; - - // Reset the variables to their unperturbed states. 
- target = laplace_marginal_density(diff_functor, K, phi, eta_dbl, x, - delta, delta_int, covariance, theta, W_root, L, a, l_grad, theta_0); - - Eigen::VectorXd theta_1l = theta_star, theta_1u = theta_star, - theta_2l = theta_star, theta_2u = theta_star; - theta_1l(0) -= diff; - theta_1u(0) += diff; - theta_2l(1) -= diff; - theta_2u(1) += diff; - - B = compute_B(theta_star, eta, covariance, diff_functor); - - double - log_B_1l = log(compute_B(theta_1l, eta_dbl, covariance, diff_functor).determinant()), - log_B_1u = log(compute_B(theta_1u, eta_dbl, covariance, diff_functor).determinant()), - log_B_2l = log(compute_B(theta_2l, eta_dbl, covariance, diff_functor).determinant()), - log_B_2u = log(compute_B(theta_2u, eta_dbl, covariance, diff_functor).determinant()); - - Eigen::VectorXd s2_finite(2); - s2_finite(0) = - 0.5 * (log_B_1u - log_B_1l) / (2 * diff); - s2_finite(1) = - 0.5 * (log_B_2u - log_B_2l) / (2 * diff); - - std::cout << "s2_finite: " << s2_finite.transpose() << std::endl; - - - std::cout << "log diff finite: " << - (diff_functor.log_likelihood(theta_star, eta_u) - - diff_functor.log_likelihood(theta_star, eta_l)) / (2 * diff) - << std::endl; - + double tol = 4e-6; + EXPECT_NEAR(g_finite[0], g[0], tol); + EXPECT_NEAR(g_finite[1], g[1], tol); + EXPECT_NEAR(g_finite[2], g[2], tol); - // double tol = 1e-4; - // EXPECT_NEAR(g_finite[0], g[0], tol); - // EXPECT_NEAR(g_finite[1], g[1], tol); - // EXPECT_NEAR(g_finite[2], g[2], tol); + // Check wrapper. + EXPECT_EQ(target, + laplace_marginal_neg_binomial_2_log_lpmf(y_obs, y_index, K, phi, eta, x, + delta, delta_int, theta_0)); } From 4936428e463d453e00c4d32db8b0dc60382bff13 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sat, 30 Jan 2021 19:04:33 -0500 Subject: [PATCH 21/53] update poisson_log likelihood. --- stan/math/laplace/laplace_likelihood.hpp | 63 ++++++++++++++----- stan/math/laplace/laplace_marginal.hpp | 8 +-- .../laplace/laplace_marginal_poisson_log.hpp | 8 ++- .../laplace_marginal_poisson_log_test.cpp | 14 +++-- 4 files changed, 67 insertions(+), 26 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index efd43a24730..6313d01ddcc 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -42,15 +42,17 @@ struct diff_poisson_log { * Return the log density. * @tparam T type of the log poisson parameter. * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional parameters (use for other likelihoods). * @return the log density. */ - template - T log_likelihood (const Eigen::Matrix& theta) + template + T1 log_likelihood (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { double factorial_term = 0; for (int i = 0; i < sums_.size(); i++) factorial_term += lgamma(sums_(i) + 1); - Eigen::Matrix shifted_mean = theta + log_exposure_; + Eigen::Matrix shifted_mean = theta + log_exposure_; return - factorial_term + (shifted_mean).dot(sums_) - n_samples_.dot(exp(shifted_mean)); @@ -64,14 +66,16 @@ struct diff_poisson_log { * approximation, and to avoid redundant computation. * @tparam T type of the log poisson parameter. * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional parameters (use for other likelihoods). * @param[in, out] gradient * @param[in, out] hessian diagonal, so stored in a vector. 
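 * For this likelihood the gradient is sums_ minus the common term
 * n_samples_ .* exp(theta + log_exposure_), and the diagonal of the Hessian
 * is the negative of that same common term.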
*/ - template - void diff (const Eigen::Matrix& theta, - Eigen::Matrix& gradient, - Eigen::Matrix& hessian) const { - Eigen::Matrix + template + void diff (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + Eigen::Matrix& hessian) const { + Eigen::Matrix common_term = n_samples_.cwiseProduct(exp(theta + log_exposure_)); gradient = sums_ - common_term; @@ -83,14 +87,46 @@ struct diff_poisson_log { * the object is stored in a vector. * @tparam T type of the log poisson parameter. * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional parameters (use for other likelihoods). * @return A vector containing the non-zero elements of the third * derivative tensor. */ - template - Eigen::Matrix - third_diff(const Eigen::Matrix& theta) const { + template + Eigen::Matrix + third_diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { return -n_samples_.cwiseProduct(exp(theta + log_exposure_)); } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff2_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } }; /** @@ -301,8 +337,7 @@ struct diff_neg_binomial_2_log { template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - const Eigen::Matrix& W_root) + const Eigen::Matrix& eta) const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); @@ -313,7 +348,7 @@ struct diff_neg_binomial_2_log { Eigen::Matrix diff_matrix(theta.size(), 1); - diff_matrix.col(0) = // 0.5 * (W_root.cwiseInverse()).cwiseProduct( + diff_matrix.col(0) = - elt_divide(exp_neg_theta.cwiseProduct( - eta_scalar * exp_neg_theta.cwiseProduct(sums_) + sums_ + 2 * eta_scalar * n_samples_), diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index b9cccb1b8fa..7eec02b957a 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -225,12 +225,12 @@ namespace math { /* An object to store the sensitivities of eta. 
*/ Eigen::VectorXd eta_adj_; - template + template laplace_marginal_density_vari (const D& diff_likelihood, const K& covariance_function, - const Eigen::Matrix& phi, - const Eigen::Matrix& eta, + const Eigen::Matrix& phi, + const Eigen::Matrix& eta, const Tx& x, const std::vector& delta, const std::vector& delta_int, @@ -309,7 +309,7 @@ namespace math { VectorXd diff_eta = diff_likelihood.diff_eta(theta, eta_dbl); MatrixXd diff_theta_eta = diff_likelihood.diff_theta_eta(theta, eta_dbl); MatrixXd diff2_theta_eta - = diff_likelihood.diff2_theta_eta(theta, eta_dbl, W_root); + = diff_likelihood.diff2_theta_eta(theta, eta_dbl); VectorXd W_root_inv = W_root.cwiseInverse(); diff --git a/stan/math/laplace/laplace_marginal_poisson_log.hpp b/stan/math/laplace/laplace_marginal_poisson_log.hpp index 805f3d33630..d9cc4b7879a 100644 --- a/stan/math/laplace/laplace_marginal_poisson_log.hpp +++ b/stan/math/laplace/laplace_marginal_poisson_log.hpp @@ -42,9 +42,11 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. + Eigen::Matrix eta_dummy(0); return laplace_marginal_density( diff_poisson_log(to_vector(n_samples), to_vector(y)), - covariance_function, phi, x, delta, delta_int, + covariance_function, phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); } @@ -62,9 +64,11 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. + Eigen::Matrix eta_dummy(0); return laplace_marginal_density( diff_poisson_log(to_vector(n_samples), to_vector(y), log(ye)), - covariance_function, phi, x, delta, delta_int, + covariance_function, phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); } diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index fceddce4ccd..deefad421cf 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -19,13 +19,14 @@ TEST(laplace, likelihood_differentiation) { theta << 1, 1; std::vector n_samples = {1, 1}; std::vector sums = {1, 0}; + Eigen::VectorXd eta_dummy; diff_poisson_log diff_functor(to_vector(n_samples), to_vector(sums)); - double log_density = diff_functor.log_likelihood(theta); + double log_density = diff_functor.log_likelihood(theta, eta_dummy); Eigen::VectorXd gradient, hessian; - diff_functor.diff(theta, gradient, hessian); - Eigen::VectorXd third_tensor = diff_functor.third_diff(theta); + diff_functor.diff(theta, eta_dummy, gradient, hessian); + Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); EXPECT_FLOAT_EQ(-4.436564, log_density); EXPECT_FLOAT_EQ(-1.718282, gradient(0)); @@ -46,15 +47,16 @@ TEST(laplace, likelihood_differentiation2) { std::vector n_samples = {1, 1}; std::vector sums = {1, 0}; std::vector log_exposure = {log(0.5), log(2)}; + Eigen::VectorXd eta_dummy; diff_poisson_log diff_functor(to_vector(n_samples), to_vector(sums), to_vector(log_exposure)); - double log_density = diff_functor.log_likelihood(theta); + double log_density = diff_functor.log_likelihood(theta, eta_dummy); Eigen::VectorXd gradient, hessian; - diff_functor.diff(theta, gradient, hessian); - Eigen::VectorXd third_tensor = diff_functor.third_diff(theta); + diff_functor.diff(theta, eta_dummy, gradient, hessian); + 
Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); EXPECT_FLOAT_EQ(-6.488852, log_density); EXPECT_FLOAT_EQ(-0.3591409, gradient(0)); From 12b6e37cd229132608f034af2f4cc44da70546f4 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Sat, 30 Jan 2021 19:17:17 -0500 Subject: [PATCH 22/53] update bernoulli. --- stan/math/laplace/laplace_likelihood.hpp | 54 +++++++++++++++---- .../laplace_marginal_bernoulli_logit.hpp | 12 +++-- .../laplace_marginal_bernoulli_logit_test.cpp | 31 ++++++----- 3 files changed, 71 insertions(+), 26 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index 6313d01ddcc..6188d4c496d 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -152,8 +152,9 @@ struct diff_logistic_log { * @param[in] theta log poisson parameters for each group. * @return the log density. */ - template - T log_likelihood (const Eigen::Matrix& theta) + template + T1 log_likelihood (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { Eigen::VectorXd one = rep_vector(1, theta.size()); return sum(theta.cwiseProduct(sums_) @@ -171,11 +172,12 @@ struct diff_logistic_log { * @param[in, out] gradient * @param[in, out] hessian diagonal, so stored in a vector. */ - template - void diff (const Eigen::Matrix& theta, - Eigen::Matrix& gradient, - Eigen::Matrix& hessian) const { - Eigen::Matrix exp_theta = exp(theta); + template + void diff (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + Eigen::Matrix& hessian) const { + Eigen::Matrix exp_theta = exp(theta); Eigen::VectorXd one = rep_vector(1, theta.size()); gradient = sums_ - n_samples_.cwiseProduct(inv_logit(theta)); @@ -189,12 +191,14 @@ struct diff_logistic_log { * the object is stored in a vector. * @tparam T type of the log poisson parameter. * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional likelihood parameters (used for other lk) * @return A vector containing the non-zero elements of the third * derivative tensor. */ - template - Eigen::Matrix - third_diff(const Eigen::Matrix& theta) const { + template + Eigen::Matrix + third_diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { Eigen::VectorXd exp_theta = exp(theta); Eigen::VectorXd one = rep_vector(1, theta.size()); Eigen::VectorXd nominator = exp_theta.cwiseProduct(exp_theta - one); @@ -203,6 +207,36 @@ struct diff_logistic_log { return n_samples_.cwiseProduct(elt_divide(nominator, denominator)); } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff2_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" 
<< std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } }; struct diff_neg_binomial_2_log { diff --git a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp b/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp index e1cf79af72e..2de0c70b253 100644 --- a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp @@ -46,10 +46,12 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. + Eigen::Matrix eta_dummy(0); return laplace_marginal_density( diff_logistic_log(to_vector(n_samples), to_vector(y)), sqr_exp_kernel_functor(), - phi, x, delta, delta_int, + phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); } @@ -67,10 +69,12 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. + Eigen::Matrix eta_dummy(0); return laplace_marginal_density( diff_logistic_log(to_vector(n_samples), to_vector(y)), covariance_function, - phi, x, delta, delta_int, + phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); } @@ -88,10 +92,12 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. + Eigen::Matrix eta_dummy(0); return laplace_marginal_density( diff_logistic_log(to_vector(n_samples), to_vector(y)), covariance_function, - phi, x, delta, delta_int, + phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); } diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp index 95451b6e6b7..3dbfb1ac924 100755 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp @@ -24,12 +24,13 @@ TEST(laplace, likelihood_differentiation) { y << 1, 0; n_samples << 1, 1; Eigen::Matrix theta_v = theta; + Eigen::VectorXd eta_dummy; diff_logistic_log diff_functor(n_samples, y); - double log_density = diff_functor.log_likelihood(theta); + double log_density = diff_functor.log_likelihood(theta, eta_dummy); Eigen::VectorXd gradient, hessian; - diff_functor.diff(theta, gradient, hessian); - Eigen::VectorXd third_tensor = diff_functor.third_diff(theta); + diff_functor.diff(theta, eta_dummy, gradient, hessian); + Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); EXPECT_NEAR(-2.566843, log_density, test_tolerance); @@ -43,10 +44,12 @@ TEST(laplace, likelihood_differentiation) { theta_1l(0) = theta(0) - diff; theta_2u(1) = theta(1) + diff; theta_2l(1) = theta(1) - diff; - double diff_1 = (diff_functor.log_likelihood(theta_1u) - - diff_functor.log_likelihood(theta_1l)) / (2 * diff); - double diff_2 = (diff_functor.log_likelihood(theta_2u) - - diff_functor.log_likelihood(theta_2l)) / (2 * diff); + double diff_1 = (diff_functor.log_likelihood(theta_1u, eta_dummy) + - diff_functor.log_likelihood(theta_1l, eta_dummy)) + / (2 * diff); + double diff_2 = (diff_functor.log_likelihood(theta_2u, eta_dummy) + - diff_functor.log_likelihood(theta_2l, eta_dummy)) + / (2 * diff); EXPECT_NEAR(diff_1, gradient(0), test_tolerance); EXPECT_NEAR(diff_2, gradient(1), test_tolerance); @@ -54,10 +57,10 @@ TEST(laplace, 
likelihood_differentiation) { // finite diff calculation for second-order derivatives Eigen::VectorXd gradient_1u, gradient_1l, hessian_1u, hessian_1l, gradient_2u, gradient_2l, hessian_2u, hessian_2l; - diff_functor.diff(theta_1u, gradient_1u, hessian_1u); - diff_functor.diff(theta_1l, gradient_1l, hessian_1l); - diff_functor.diff(theta_2u, gradient_2u, hessian_2u); - diff_functor.diff(theta_2l, gradient_2l, hessian_2l); + diff_functor.diff(theta_1u, eta_dummy, gradient_1u, hessian_1u); + diff_functor.diff(theta_1l, eta_dummy, gradient_1l, hessian_1l); + diff_functor.diff(theta_2u, eta_dummy, gradient_2u, hessian_2u); + diff_functor.diff(theta_2l, eta_dummy, gradient_2l, hessian_2l); double diff_grad_1 = (gradient_1u(0) - gradient_1l(0)) / (2 * diff); double diff_grad_2 = (gradient_2u(1) - gradient_2l(1)) / (2 * diff); @@ -110,6 +113,7 @@ TEST(laplace, logistic_lgm_dim500) { // CASE 1: phi is passed as a double. Eigen::VectorXd phi(2); phi << 1.6, 1; // standard deviation, length scale + Eigen::VectorXd eta_dummy; auto start_optimization = std::chrono::system_clock::now(); @@ -117,7 +121,7 @@ TEST(laplace, logistic_lgm_dim500) { = laplace_marginal_density( diff_logistic_log(to_vector(n_samples), to_vector(y)), sqr_exp_kernel_functor(), - phi, x, delta, delta_int, + phi, eta_dummy, x, delta, delta_int, covariance, theta_laplace, W_root, L, a, l_grad, theta_0, 0, 1e-3, 100); @@ -136,13 +140,14 @@ TEST(laplace, logistic_lgm_dim500) { // CASE 2: phi is passed as a var Eigen::Matrix phi_v2 = phi; + Eigen::Matrix eta_dummy_v; start_optimization = std::chrono::system_clock::now(); var marginal_density_v = laplace_marginal_density( diff_logistic_log(to_vector(n_samples), to_vector(y)), sqr_exp_kernel_functor(), - phi_v2, x, delta, delta_int, + phi_v2, eta_dummy_v, x, delta, delta_int, theta_0, 0, 1e-3, 100); VEC g2; From 393587721fa9a12ddc968f90c050caec690f754d Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 19 Feb 2021 16:06:34 -0500 Subject: [PATCH 23/53] Steps torwards higher-order autodiff. --- .../laplace_marginal_neg_binomial_2.hpp | 55 ++++++ stan/math/laplace/third_diff_directional.hpp | 53 ++++++ .../math/laplace/higher_order_diff_test.cpp | 161 ++++++++++++++++++ ...place_marginal_neg_binomial_2_log_test.cpp | 14 +- 4 files changed, 281 insertions(+), 2 deletions(-) create mode 100644 stan/math/laplace/laplace_marginal_neg_binomial_2.hpp create mode 100644 stan/math/laplace/third_diff_directional.hpp create mode 100755 test/unit/math/laplace/higher_order_diff_test.cpp diff --git a/stan/math/laplace/laplace_marginal_neg_binomial_2.hpp b/stan/math/laplace/laplace_marginal_neg_binomial_2.hpp new file mode 100644 index 00000000000..f2e9c722b25 --- /dev/null +++ b/stan/math/laplace/laplace_marginal_neg_binomial_2.hpp @@ -0,0 +1,55 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_NEG_BINOMIAL_2_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_NEG_BINOMIAL_2_HPP + +#include +#include + +namespace stan { +namespace math { + /** + * Wrapper function around the laplace_marginal function for + * a negative binomial likelihood. Uses the 2nd parameterization. + * Returns the marginal density p(y | phi) by marginalizing + * out the latent gaussian variable, with a Laplace approximation. + * See the laplace_marginal function for more details. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @param[in] y observations. + * @param[in] y_index group to which each observation belongs. 
Each group + * is parameterized by one element of theta. + * @param[in] covariance a function which returns the prior covariance. + * @param[in] phi model parameters for the covariance functor. + * @param[in] eta non-marginalized model parameters for the likelihood. + * @param[in] x data for the covariance functor. + * @param[in] delta additional real data for the covariance functor. + * @param[in] delta_int additional integer data for covariance functor. + * @param[in] theta_0 the initial guess for the Laplace approximation. + * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. + */ + template + T1 laplace_marginal_neg_binomial_2_log_lpmf + (const std::vector& y, + const std::vector& y_index, + const K& covariance_function, + const Eigen::Matrix& phi, + const Eigen::Matrix& eta, + const std::vector& x, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100) { + return laplace_marginal_density( + diff_neg_binomial_2_log(to_vector(y), y_index, theta_0.size()), + covariance_function, phi, eta, x, delta, delta_int, + theta_0, msgs, tolerance, max_num_steps); + } +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/third_diff_directional.hpp b/stan/math/laplace/third_diff_directional.hpp new file mode 100644 index 00000000000..18e16d6e2f0 --- /dev/null +++ b/stan/math/laplace/third_diff_directional.hpp @@ -0,0 +1,53 @@ +#ifndef STAN_MATH_LAPLACE_THIRD_DIFF_DIRECTIONAL_HPP +#define STAN_MATH_LAPLACE_THIRD_DIFF_DIRECTIONAL_HPP + +// TODO: refine include. +#include + +namespace stan { +namespace math { + + /** + * Return the third-order directional derivative of a function + * which maps to a scalar. The derivative is taken with respect + * to do two directions: v and w. + */ + template + void third_diff_directional( + const F& f, const Eigen::VectorXd& x, double& fx, + Eigen::VectorXd& third_diff, + Eigen::VectorXd& v, + Eigen::VectorXd& w) { + using Eigen::Matrix; + using Eigen::Dynamic; + nested_rev_autodiff nested; + + int x_size = x.size(); + Matrix x_var = x; + Matrix, Dynamic, 1> x_fvar(x_size); + for (int i = 0; i < x_size; ++i) { + x_fvar(i) = fvar(x_var(i), v(i)); + } + fvar fx_fvar = f(x_fvar); + + Matrix>, -1, 1> x_ffvar(x_size); + for (int i = 0; i < x_size; ++i) { + x_ffvar(i) = fvar>(x_fvar(i), w(i)); + } + fvar> fx_ffvar = f(x_ffvar); + + grad(fx_ffvar.d_.d_.vi_); + + third_diff.resize(x_size); + for (int i = 0; i < x_size; ++i) { + third_diff(i) = x_var(i).adj(); + } + } + +} // namespace math +} // namespace stan + + +// TODO: figure out which files to include. + +#endif diff --git a/test/unit/math/laplace/higher_order_diff_test.cpp b/test/unit/math/laplace/higher_order_diff_test.cpp new file mode 100755 index 00000000000..c7ba8ceaa53 --- /dev/null +++ b/test/unit/math/laplace/higher_order_diff_test.cpp @@ -0,0 +1,161 @@ +#include +#include +#include +#include +#include +#include +// #include + +#include +#include +#include +#include +#include + +// This is what a function define in Stan would return. 
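+// That is, a functor exposing
+//   operator()(theta, eta, delta, delta_int, pstream)
+// and returning the scalar log likelihood; here it simply forwards to
+// diff_neg_binomial_2_log so that the same density can be pushed through
+// nested forward- and reverse-mode autodiff.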
+struct neg_bin_log_likelihood { + template + stan::return_type_t + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + std::ostream* pstream) const { + stan::math::diff_neg_binomial_2_log + diff_functor(delta, delta_int, theta.size()); + + return diff_functor.log_likelihood(theta, eta); + } +}; + +template +struct f_theta { + Eigen::VectorXd eta_; + Eigen::VectorXd delta_; + std::vector delta_int_; + std::ostream* pstream_; + F f_functor_; + + f_theta(const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + std::ostream* pstream, + F f_functor) : + eta_(eta), delta_(delta), delta_int_(delta_int), f_functor_(f_functor) { } + + template + T operator()(const Eigen::Matrix& theta) const { + return f_functor_(theta, eta_, delta_, delta_int_, pstream_); + } +}; + +TEST(laplace_diff, gradient) { + using stan::math::fvar; + using stan::math::var; + using stan::math::value_of; + using stan::math::hessian_times_vector; + using stan::math::nested_rev_autodiff; + using stan::math::third_diff_directional; + + Eigen::Matrix theta(2); + theta << 1, 1; + Eigen::VectorXd theta_dbl = value_of(theta); + Eigen::VectorXd eta(1); + eta << 1.2; + + Eigen::VectorXd y(2); + y << 1, 1; + std::vector y_index(2); + y_index[0] = 0; + y_index[1] = 1; + + neg_bin_log_likelihood likelihood; + f_theta f(eta, y, y_index, 0, likelihood); + + // var log_density = likelihood(theta, eta, y, y_index, 0); + var log_density = f(theta); + std::cout << log_density.val() << std::endl; + + // autodiff for first derivative + // if (FALSE) { + // VEC g; + // AVEC parm_vec = createAVEC(theta(0), theta(1)); + // log_density.grad(parm_vec, g); + // std::cout << "Gradien: "; + // for (int i = 0; i < theta.size(); i++) std::cout << g[i] << " "; + // std::cout << std::endl; + // } + + // specify initial tangent + Eigen::VectorXd tangent(2); + tangent << 1, 1; + + double fx; + Eigen::VectorXd Hv; + + hessian_times_vector(f, theta_dbl, tangent, fx, Hv); + + std::cout << "value: " << fx << std::endl; + std::cout << "hessian-vector: " << Hv.transpose() << std::endl; + + // Compute third-order derivative + if (TRUE) { + using Eigen::Matrix; + nested_rev_autodiff nested; + + // CHECK -- why would need a for loop for assignment (see + // hessian_times_vector.hpp). 
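+    // The block below promotes theta twice: var -> fvar<var> with the tangent
+    // gives the directional derivative grad(f) . v, and fvar<var> ->
+    // fvar<fvar<var>> with a second copy of the tangent gives
+    // grad(grad(f) . v) . v; calling grad() on that innermost tangent then
+    // yields the third-order directional derivative, read off the adjoints
+    // of theta.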
+ Matrix theta_var = theta_dbl; + + Matrix, -1, 1> theta_fvar(theta_dbl.size()); + for (int i = 0; i < theta_dbl.size(); ++i) { + theta_fvar(i) = fvar(theta_var(i), tangent(i)); + } + fvar fx_fvar = f(theta_fvar); + + std::cout << "fx: " << value_of(fx_fvar.val_) << std::endl; + std::cout << "grad_fx_dot_v: " << value_of(fx_fvar.d_) << std::endl; + + // grad(fx_fvar.d_.vi_); + // for (int i = 0; i < theta_dbl.size(); ++i) + // std::cout << theta_var(i).adj() << " "; + // std::cout << std::endl; + + Matrix>, -1, 1> theta_ffvar(theta_dbl.size()); + for (int i = 0; i < theta_dbl.size(); ++i) { + theta_ffvar(i) = fvar>(theta_fvar(i), tangent(i)); + } + fvar> fx_ffvar = f(theta_ffvar); + var grad2_fx_dot_vv = fx_ffvar.d_.d_; + + std::cout << "fx: " << value_of(fx_ffvar.val_.val_) << std::endl; + std::cout << "grad_grad_fx_dot_v: " + << value_of(fx_ffvar.d_.d_) << std::endl; + + // var fx_var; + // var grad3_f_v; + grad(grad2_fx_dot_vv.vi_); + + Eigen::VectorXd grad3_f_v(theta_dbl.size()); + for (int i = 0; i < theta_dbl.size(); ++i) + grad3_f_v(i) = theta_var(i).adj(); + + std::cout << "grad3 f: " << grad3_f_v.transpose() << std::endl; + + // var fx_var; + // var grad_fx_var_dot_v; + // gradient_dot_vector(f, theta_var, tang, fx_var, grad_fx_var_dot_v); + // fx = fx_var.val(); + // grad(grad_fx_var_dot_v.vi_); + // Hv.resize(theta.size()); + // for (int i = 0; i < theta.size(); ++i) Hv(i) = theta_var(i).adj(); + } + + // Test function + Eigen::VectorXd third_diff; + third_diff_directional(f, theta_dbl, fx, third_diff, tangent, tangent); + + std::cout << "f: " << fx << std::endl; + std::cout << "third diff: " << third_diff.transpose() << std::endl; + +} diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index d0df2a19692..616ec7bedf8 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -73,6 +73,10 @@ TEST(laplace, likelihood_differentiation) { finite_third_diff(0) = (hessian_u0 - hessian_l0)(0) / (2 * epsilon); finite_third_diff(1) = (hessian_u1 - hessian_l1)(1) / (2 * epsilon); + std::cout << "gradient: " << gradient << std::endl; + std::cout << "hessian: " << hessian << std::endl; + std::cout << "third_diff: " << third_diff << std::endl; + EXPECT_FLOAT_EQ(finite_gradient(0), gradient(0)); EXPECT_FLOAT_EQ(finite_gradient(1), gradient(1)); @@ -91,6 +95,8 @@ TEST(laplace, likelihood_differentiation) { (diff_functor.log_likelihood(theta, eta_u) - diff_functor.log_likelihood(theta, eta_l)) / (2 * epsilon); + std::cout << "diff_eta: " << diff_eta.transpose() << std::endl; + EXPECT_FLOAT_EQ(finite_gradient_eta, diff_eta(0)); Eigen::MatrixXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta); @@ -105,16 +111,20 @@ TEST(laplace, likelihood_differentiation) { Eigen::VectorXd finite_gradient_theta_eta = (gradient_theta_u - gradient_theta_l) / (2 * epsilon); + std::cout << "diff_theta_eta: " << diff_theta_eta.transpose() << std::endl; + EXPECT_FLOAT_EQ(finite_gradient_theta_eta(0), diff_theta_eta(0, 0)); EXPECT_FLOAT_EQ(finite_gradient_theta_eta(1), diff_theta_eta(1, 0)); - Eigen::VectorXd W_root = (-hessian).cwiseSqrt(); + // Eigen::VectorXd W_root = (-hessian).cwiseSqrt(); Eigen::MatrixXd diff2_theta_eta - = diff_functor.diff2_theta_eta(theta, eta, W_root); + = diff_functor.diff2_theta_eta(theta, eta); Eigen::VectorXd finite_hessian_theta_eta = (hessian_theta_u - hessian_theta_l) / (2 * 
epsilon); + std::cout << "diff2_theta_eta: " << diff2_theta_eta.transpose() << std::endl; + EXPECT_FLOAT_EQ(finite_hessian_theta_eta(0), diff2_theta_eta(0, 0)); EXPECT_FLOAT_EQ(finite_hessian_theta_eta(1), diff2_theta_eta(1, 0)); } From 8326baaf968771b4e4cd44df68893b99bb46bead Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 23 Feb 2021 15:04:33 -0500 Subject: [PATCH 24/53] prototype likelihood using user-specified likelihood. --- stan/math/laplace/hessian_times_vector.hpp | 45 ++++++++++ stan/math/laplace/laplace_likelihood.hpp | 5 +- stan/math/laplace/third_diff_directional.hpp | 16 ++-- .../math/laplace/higher_order_diff_test.cpp | 83 ++++++++++++++----- 4 files changed, 120 insertions(+), 29 deletions(-) create mode 100644 stan/math/laplace/hessian_times_vector.hpp diff --git a/stan/math/laplace/hessian_times_vector.hpp b/stan/math/laplace/hessian_times_vector.hpp new file mode 100644 index 00000000000..c8d9eda6e1d --- /dev/null +++ b/stan/math/laplace/hessian_times_vector.hpp @@ -0,0 +1,45 @@ +#ifndef STAN_MATH_LAPLACE_HESSIAN_TIMES_VECTOR_HPP +#define STAN_MATH_LAPLACE_HESSIAN_TIMES_VECTOR_HPP + +// TODO: refine include. +#include + +namespace stan { +namespace math { + + /** + * Overload Hessian_times_vector function, under stan/math/mix/functor + * to handle functions which take in arguments eta, delta, delta_int, + * and pstream. + */ + template + void hessian_times_vector(const F& f, + const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + const Eigen::VectorXd& v, + double& fx, + Eigen::VectorXd& Hv, + std::ostream* pstream = 0) { + using Eigen::Matrix; + using Eigen::Dynamic; + + nested_rev_autodiff nested; + + int x_size = x.size(); + Matrix x_var = x; + Matrix, Dynamic, 1> x_fvar(x_size); + for (int i = 0; i < x_size; i++) { + x_fvar(i) = fvar(x_var(i), v(i)); + } + fvar fx_fvar = f(x_fvar, eta, delta, delta_int, pstream); + grad(fx_fvar.d_.vi_); + Hv.resize(x_size); + for (int i = 0; i < x_size; i++) Hv(i) = x_var(i).adj(); +} + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood.hpp index 6188d4c496d..706c94ee42b 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood.hpp @@ -1,9 +1,11 @@ - #ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP +#ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP #define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_HPP #include #include +// THIS FILE WILL BE DEPRECATED SOON. + namespace stan { namespace math { @@ -394,6 +396,7 @@ struct diff_neg_binomial_2_log { }; +// NOTE: the below structure is incomplete... struct diff_student_t { /* Observations. 
*/ Eigen::VectorXd y_; diff --git a/stan/math/laplace/third_diff_directional.hpp b/stan/math/laplace/third_diff_directional.hpp index 18e16d6e2f0..ef0679b63f8 100644 --- a/stan/math/laplace/third_diff_directional.hpp +++ b/stan/math/laplace/third_diff_directional.hpp @@ -14,10 +14,15 @@ namespace math { */ template void third_diff_directional( - const F& f, const Eigen::VectorXd& x, double& fx, + const F& f, const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + double& fx, Eigen::VectorXd& third_diff, Eigen::VectorXd& v, - Eigen::VectorXd& w) { + Eigen::VectorXd& w, + std::ostream* pstream = 0) { using Eigen::Matrix; using Eigen::Dynamic; nested_rev_autodiff nested; @@ -28,13 +33,13 @@ namespace math { for (int i = 0; i < x_size; ++i) { x_fvar(i) = fvar(x_var(i), v(i)); } - fvar fx_fvar = f(x_fvar); + fvar fx_fvar = f(x_fvar, eta, delta, delta_int, pstream); Matrix>, -1, 1> x_ffvar(x_size); for (int i = 0; i < x_size; ++i) { x_ffvar(i) = fvar>(x_fvar(i), w(i)); } - fvar> fx_ffvar = f(x_ffvar); + fvar> fx_ffvar = f(x_ffvar, eta, delta, delta_int, pstream); grad(fx_ffvar.d_.d_.vi_); @@ -47,7 +52,4 @@ namespace math { } // namespace math } // namespace stan - -// TODO: figure out which files to include. - #endif diff --git a/test/unit/math/laplace/higher_order_diff_test.cpp b/test/unit/math/laplace/higher_order_diff_test.cpp index c7ba8ceaa53..dea76558de6 100755 --- a/test/unit/math/laplace/higher_order_diff_test.cpp +++ b/test/unit/math/laplace/higher_order_diff_test.cpp @@ -1,6 +1,8 @@ #include #include +#include #include +#include #include #include #include @@ -36,11 +38,13 @@ struct f_theta { std::ostream* pstream_; F f_functor_; - f_theta(const Eigen::VectorXd& eta, + f_theta() { } // default constructor required for default class. 
+ + f_theta(const F& f_functor, + const Eigen::VectorXd& eta, const Eigen::VectorXd& delta, const std::vector& delta_int, - std::ostream* pstream, - F f_functor) : + std::ostream* pstream) : eta_(eta), delta_(delta), delta_int_(delta_int), f_functor_(f_functor) { } template @@ -49,7 +53,36 @@ struct f_theta { } }; -TEST(laplace_diff, gradient) { +class neg_bin_log_diff_test : public::testing::Test { +protected: + void SetUp() override { + theta.resize(2); + theta << 1, 1; + theta_dbl = value_of(theta); + eta.resize(1); + eta << 1.2; + + y.resize(2); + y << 0, 1; + y_index.resize(2); + y_index[0] = 0; + y_index[1] = 1; + + f_theta f_(likelihood, eta, y, y_index, 0); + f = f_; + } + + Eigen::Matrix theta; + Eigen::VectorXd theta_dbl; + Eigen::VectorXd eta; + Eigen::VectorXd y; + std::vector y_index; + neg_bin_log_likelihood likelihood; + f_theta f; +}; + + +TEST_F(neg_bin_log_diff_test, manual_calls) { using stan::math::fvar; using stan::math::var; using stan::math::value_of; @@ -57,21 +90,6 @@ TEST(laplace_diff, gradient) { using stan::math::nested_rev_autodiff; using stan::math::third_diff_directional; - Eigen::Matrix theta(2); - theta << 1, 1; - Eigen::VectorXd theta_dbl = value_of(theta); - Eigen::VectorXd eta(1); - eta << 1.2; - - Eigen::VectorXd y(2); - y << 1, 1; - std::vector y_index(2); - y_index[0] = 0; - y_index[1] = 1; - - neg_bin_log_likelihood likelihood; - f_theta f(eta, y, y_index, 0, likelihood); - // var log_density = likelihood(theta, eta, y, y_index, 0); var log_density = f(theta); std::cout << log_density.val() << std::endl; @@ -151,11 +169,34 @@ TEST(laplace_diff, gradient) { // for (int i = 0; i < theta.size(); ++i) Hv(i) = theta_var(i).adj(); } - // Test function + // Test function for directional Hessian and directional third diff. + Eigen::VectorXd hessian_v; + hessian_times_vector(likelihood, theta_dbl, eta, y, y_index, + tangent, fx, hessian_v, 0); + + std::cout << "hessian_v: " << hessian_v.transpose() << std::endl; + Eigen::VectorXd third_diff; - third_diff_directional(f, theta_dbl, fx, third_diff, tangent, tangent); + third_diff_directional(likelihood, theta_dbl, eta, y, y_index, + fx, third_diff, tangent, tangent, 0); std::cout << "f: " << fx << std::endl; std::cout << "third diff: " << third_diff.transpose() << std::endl; +} + +TEST_F(neg_bin_log_diff_test, diff_likelihood) { + using stan::math::diff_likelihood; + using Eigen::VectorXd; + + diff_likelihood lk(likelihood, y, y_index, 0); + double lpmf = lk.log_likelihood(theta_dbl, eta); + VectorXd gradient, hessian; + lk.diff(theta_dbl, eta, gradient, hessian); + VectorXd third_diff = lk.third_diff(theta_dbl, eta); + + std::cout << "lpmf: " << lpmf << std::endl + << "gradient: " << gradient.transpose() << std::endl + << "hessian: " << hessian.transpose() << std::endl + << "third diff: " << third_diff.transpose() << std::endl; } From 2ab0cb4a4789a28bf286b4b30c070d87e23fd50d Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 23 Feb 2021 18:18:00 -0500 Subject: [PATCH 25/53] add test for autodiffed likelihood. 
--- .../laplace_likelihood_bernoulli_logit.hpp | 121 +++++++++ .../laplace_likelihood_neg_binomial_2_log.hpp | 165 ++++++++++++ .../laplace_likelihood_poisson_log.hpp | 134 ++++++++++ test/unit/math/laplace/disease_map_test.cpp | 24 +- .../math/laplace/higher_order_diff_test.cpp | 23 -- test/unit/math/laplace/laplace_skim_test.cpp | 237 +++++++++++------- 6 files changed, 587 insertions(+), 117 deletions(-) create mode 100644 stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp create mode 100644 stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp create mode 100644 stan/math/laplace/laplace_likelihood_poisson_log.hpp diff --git a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp new file mode 100644 index 00000000000..26adc10a3b2 --- /dev/null +++ b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp @@ -0,0 +1,121 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_BERNOULLI_LOGIT_HPP +#define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_BERNOULLI_LOGIT_HPP + + +namespace stan { +namespace math { + +/** + * A structure to compute the log density, first, second, + * and third-order derivatives for a Bernoulli logistic likelihood + * whith multiple groups. + * This structure can be passed to the the laplace_marginal function. + * Uses sufficient statistics for the data. + */ +struct diff_bernoulli_logit { + /* The number of samples in each group. */ + Eigen::VectorXd n_samples_; + /* The sum of counts in each group. */ + Eigen::VectorXd sums_; + + diff_bernoulli_logit(const Eigen::VectorXd& n_samples, + const Eigen::VectorXd& sums) + : n_samples_(n_samples), sums_(sums) { } + + /** + * Return the log density. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. + * @return the log density. + */ + template + T1 log_likelihood (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) + const { + Eigen::VectorXd one = rep_vector(1, theta.size()); + return sum(theta.cwiseProduct(sums_) + - n_samples_.cwiseProduct(log(one + exp(theta)))); + } + + /** + * Returns the gradient of the log density, and the hessian. + * Since the latter is diagonal, it is stored inside a vector. + * The two objects are computed together, because we always use + * both when solving the Newton iteration of the Laplace + * approximation, and to avoid redundant computation. + * @tparam T type of the Bernoulli logistic parameter. + * @param[in] theta Bernoulli logistic parameters for each group. + * @param[in, out] gradient + * @param[in, out] hessian diagonal, so stored in a vector. + */ + template + void diff (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + Eigen::Matrix& hessian) const { + Eigen::Matrix exp_theta = exp(theta); + Eigen::VectorXd one = rep_vector(1, theta.size()); + + gradient = sums_ - n_samples_.cwiseProduct(inv_logit(theta)); + + hessian = - n_samples_.cwiseProduct(elt_divide(exp_theta, + square(one + exp_theta))); + } + + /** + * Returns the third derivative tensor. Because it is (cubic) diagonal, + * the object is stored in a vector. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional likelihood parameters (used for other lk) + * @return A vector containing the non-zero elements of the third + * derivative tensor. 
+ */ + template + Eigen::Matrix + third_diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { + Eigen::VectorXd exp_theta = exp(theta); + Eigen::VectorXd one = rep_vector(1, theta.size()); + Eigen::VectorXd nominator = exp_theta.cwiseProduct(exp_theta - one); + Eigen::VectorXd denominator = square(one + exp_theta) + .cwiseProduct(one + exp_theta); + + return n_samples_.cwiseProduct(elt_divide(nominator, denominator)); + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff2_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } +}; + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp new file mode 100644 index 00000000000..433282f2dac --- /dev/null +++ b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp @@ -0,0 +1,165 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_NEG_BINOMIAL_2_LOG_HPP +#define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_NEG_BINOMIAL_2_LOG_HPP + +#include + +namespace stan { +namespace math { + +struct diff_neg_binomial_2_log { + /* Observed counts */ + Eigen::VectorXd y_; + /* Latent parameter index for each observation. */ + std::vector y_index_; + /* The number of samples in each group. */ + Eigen::VectorXd n_samples_; + /* The sum of cours in each group. */ + Eigen::VectorXd sums_; + /* Number of latent Gaussian variables. */ + int n_theta_; + + diff_neg_binomial_2_log(const Eigen::VectorXd& y, + const std::vector& y_index, + int n_theta) + : y_(y), y_index_(y_index), n_theta_(n_theta) { + sums_ = Eigen::VectorXd::Zero(n_theta); + n_samples_ = Eigen::VectorXd::Zero(n_theta); + + for (int i = 0; i < n_theta; i++) { + n_samples_(y_index[i]) += 1; + sums_(y_index[i]) += y[i]; + } + } + + template + return_type_t + log_likelihood (const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + T_eta eta_scalar = eta(0); + return_type_t logp = 0; + for (size_t i = 0; i < y_.size(); i++) { + logp += binomial_coefficient_log(y_(i) + eta_scalar - 1, y_(i)); + } + // CHECK -- is it better to vectorize this loop? 
+ Eigen::Matrix exp_theta = exp(theta); + for (int i = 0; i < n_theta_; i++) { + return_type_t + log_eta_plus_exp_theta = log(eta_scalar + exp_theta(i)); + logp += sums_(i) * (theta(i) - log_eta_plus_exp_theta) + + n_samples_(i) * eta_scalar + * (log(eta_scalar) - log_eta_plus_exp_theta); + } + return logp; + } + + template + void diff (const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + Eigen::Matrix, + Eigen::Dynamic, 1>& gradient, + Eigen::Matrix, + Eigen::Dynamic, 1>& hessian) const { + typedef return_type_t scalar; + Eigen::VectorXd one = rep_vector(1, theta.size()); + T_eta eta_scalar = eta(0); + Eigen::Matrix + sums_plus_n_eta = sums_ + eta_scalar * n_samples_; + Eigen::Matrix exp_neg_theta = exp(-theta); + + Eigen::Matrix + one_plus_exp = one + eta_scalar * exp_neg_theta; + gradient = sums_ - elt_divide(sums_plus_n_eta, one_plus_exp); + + hessian = - eta_scalar * sums_plus_n_eta. + cwiseProduct(elt_divide(exp_neg_theta, square(one_plus_exp))); + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + third_diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + typedef return_type_t scalar; + Eigen::Matrix exp_theta = exp(theta); + T_eta eta_scalar = eta(0); + Eigen::Matrix + eta_vec = rep_vector(eta_scalar, theta.size()); + Eigen::Matrix + eta_plus_exp_theta = eta_vec + exp_theta; + + return - ((sums_ + eta_scalar * n_samples_) * eta_scalar). + cwiseProduct(exp_theta.cwiseProduct( + elt_divide(eta_vec - exp_theta, + square(eta_plus_exp_theta).cwiseProduct(eta_plus_exp_theta)))); + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + typedef return_type_t scalar; + T_eta eta_scalar = eta(0); + Eigen::Matrix + y_plus_eta = y_ + rep_vector(eta_scalar, y_.size()); + Eigen::Matrix exp_theta = exp(theta); + Eigen::Matrix + exp_theta_plus_eta = exp_theta + rep_vector(eta_scalar, theta.size()); + + T_eta y_plus_eta_digamma_sum = 0; + for (int i = 0; i < y_.size(); i++) + y_plus_eta_digamma_sum += digamma(y_plus_eta(i)); + + Eigen::Matrix gradient_eta(1); + gradient_eta(0) = + y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) + - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) + + sum(n_samples_ * log(eta_scalar) + - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) + + n_samples_); + return gradient_eta; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + typedef return_type_t scalar; + T_eta eta_scalar = eta(0); + Eigen::Matrix exp_neg_theta = exp(-theta); + Eigen::Matrix + diff_matrix(theta.size(), 1); + diff_matrix.col(0) + = - elt_divide(n_samples_ - sums_.cwiseProduct(exp_neg_theta), + square(eta_scalar * exp_neg_theta + rep_vector(1, theta.size()))); + return diff_matrix; + } + + // TODO: Address special case where we have an empty group (induces zero + // elements in W). 
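  // [Editorial note, not part of the patch] For reference, the grouped
  // log_likelihood above is the standard NB2 pmf with mean mu_i = exp(theta_g)
  // for observation i in group g and dispersion eta:
  //   log p(y_i) = lchoose(y_i + eta - 1, y_i)
  //                + y_i * (theta_g - log(exp(theta_g) + eta))
  //                + eta * (log(eta) - log(exp(theta_g) + eta)),
  // so after summing within each group only sums_g = sum_i y_i and the group
  // count n_g are needed, which is what the constructor precomputes.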
+ template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff2_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + typedef return_type_t scalar; + T_eta eta_scalar = eta(0); + Eigen::Matrix exp_neg_theta = exp(-theta); + Eigen::Matrix one_plus_eta_exp + = rep_vector(1, theta.size()) + eta_scalar * exp_neg_theta; + + Eigen::Matrix + diff_matrix(theta.size(), 1); + + diff_matrix.col(0) = + - elt_divide(exp_neg_theta.cwiseProduct( + - eta_scalar * exp_neg_theta.cwiseProduct(sums_) + + sums_ + 2 * eta_scalar * n_samples_), + square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp)); // ); + + return diff_matrix; + } +}; + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_likelihood_poisson_log.hpp b/stan/math/laplace/laplace_likelihood_poisson_log.hpp new file mode 100644 index 00000000000..7d8ce653a48 --- /dev/null +++ b/stan/math/laplace/laplace_likelihood_poisson_log.hpp @@ -0,0 +1,134 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_POISSON_LOG_HPP +#define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_POISSON_LOG_HPP + +#include + +namespace stan { +namespace math { + +// TO DO: create a parent structure, with each likelihood +// function acting as a child structure. + +/** + * A structure to compute the log density, first, second, + * and third-order derivatives for a log poisson likelihood + * whith multiple groups. + * This structure can be passed to the the laplace_marginal function. + * Uses sufficient statistics for the data. + */ + // FIX ME -- cannot use the sufficient statistic to compute log density in + // because of log factorial term. +struct diff_poisson_log { + /* The number of samples in each group. */ + Eigen::VectorXd n_samples_; + /* The sum of counts in each group. */ + Eigen::VectorXd sums_; + /* exposure, i.e. off-set term for the latent variable. */ + Eigen::VectorXd log_exposure_; + + diff_poisson_log(const Eigen::VectorXd& n_samples, + const Eigen::VectorXd& sums) + : n_samples_(n_samples), sums_(sums) { + log_exposure_ = Eigen::VectorXd::Zero(sums.size()); + } + + diff_poisson_log(const Eigen::VectorXd& n_samples, + const Eigen::VectorXd& sums, + const Eigen::VectorXd& log_exposure) + : n_samples_(n_samples), sums_(sums), log_exposure_(log_exposure) { } + + /** + * Return the log density. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional parameters (use for other likelihoods). + * @return the log density. + */ + template + T1 log_likelihood (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) + const { + double factorial_term = 0; + for (int i = 0; i < sums_.size(); i++) + factorial_term += lgamma(sums_(i) + 1); + Eigen::Matrix shifted_mean = theta + log_exposure_; + + return - factorial_term + + (shifted_mean).dot(sums_) - n_samples_.dot(exp(shifted_mean)); + } + + /** + * Returns the gradient of the log density, and the hessian. + * Since the latter is diagonal, it is stored inside a vector. + * The two objects are computed together, because we always use + * both when solving the Newton iteration of the Laplace + * approximation, and to avoid redundant computation. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional parameters (use for other likelihoods). + * @param[in, out] gradient + * @param[in, out] hessian diagonal, so stored in a vector. 
+ */ + template + void diff (const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + Eigen::Matrix& hessian) const { + Eigen::Matrix + common_term = n_samples_.cwiseProduct(exp(theta + log_exposure_)); + + gradient = sums_ - common_term; + hessian = - common_term; + } + + /** + * Returns the third derivative tensor. Because it is ("cubic") diagonal, + * the object is stored in a vector. + * @tparam T type of the log poisson parameter. + * @param[in] theta log poisson parameters for each group. + * @param[in] eta_dummy additional parameters (use for other likelihoods). + * @return A vector containing the non-zero elements of the third + * derivative tensor. + */ + template + Eigen::Matrix + third_diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { + return -n_samples_.cwiseProduct(exp(theta + log_exposure_)); + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff2_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } +}; + +} // namespace math +} // namespace stan + +#endif diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 8e2968c65b9..bbe5702d6fe 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -21,7 +21,7 @@ TEST(laplace, disease_map_dim_911) { // Based on (Vanhatalo, Pietilainen and Vethari, 2010). 
See // https://research.cs.aalto.fi/pml/software/gpstuff/demo_spatial1.shtml using stan::math::var; - using stan::math::laplace_marginal_poisson; + using stan::math::laplace_marginal_poisson_log_lpmf; using stan::math::sqr_exp_kernel_functor; int dim_theta = 911; @@ -61,7 +61,7 @@ TEST(laplace, disease_map_dim_911) { auto start = std::chrono::system_clock::now(); var marginal_density - = laplace_marginal_poisson(y, n_samples, ye, sqr_exp_kernel_functor(), + = laplace_marginal_poisson_log_lpmf(y, n_samples, ye, sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0); auto end = std::chrono::system_clock::now(); @@ -84,9 +84,12 @@ TEST(laplace, disease_map_dim_911) { //////////////////////////////////////////////////////////////////////// // Let's now generate a sample theta from the estimated posterior + /* using stan::math::diff_poisson_log; using stan::math::to_vector; using stan::math::sqr_exp_kernel_functor; + using stan::math::laplace_rng; + using stan::math::laplace_poisson_log_rng; diff_poisson_log diff_likelihood(to_vector(n_samples), to_vector(y), @@ -94,10 +97,10 @@ TEST(laplace, disease_map_dim_911) { boost::random::mt19937 rng; start = std::chrono::system_clock::now(); Eigen::VectorXd - theta_pred = laplace_approx_rng(diff_likelihood, - sqr_exp_kernel_functor(), - phi, x, delta, delta_int, - theta_0, rng); + theta_pred = laplace_rng(diff_likelihood, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, rng); end = std::chrono::system_clock::now(); elapsed_time = end - start; @@ -110,14 +113,15 @@ TEST(laplace, disease_map_dim_911) { // total time: 0.404114 start = std::chrono::system_clock::now(); - theta_pred = laplace_approx_poisson_rng(y, n_samples, ye, - sqr_exp_kernel_functor(), - phi, x, delta, delta_int, - theta_0, rng); + theta_pred = laplace_poisson_log_rng(y, n_samples, ye, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, rng); end = std::chrono::system_clock::now(); elapsed_time = end - start; std::cout << "LAPLACE_APPROX_POISSON_RNG" << std::endl << "total time: " << elapsed_time.count() << std::endl << std::endl; + */ } diff --git a/test/unit/math/laplace/higher_order_diff_test.cpp b/test/unit/math/laplace/higher_order_diff_test.cpp index dea76558de6..9d13814a343 100755 --- a/test/unit/math/laplace/higher_order_diff_test.cpp +++ b/test/unit/math/laplace/higher_order_diff_test.cpp @@ -94,16 +94,6 @@ TEST_F(neg_bin_log_diff_test, manual_calls) { var log_density = f(theta); std::cout << log_density.val() << std::endl; - // autodiff for first derivative - // if (FALSE) { - // VEC g; - // AVEC parm_vec = createAVEC(theta(0), theta(1)); - // log_density.grad(parm_vec, g); - // std::cout << "Gradien: "; - // for (int i = 0; i < theta.size(); i++) std::cout << g[i] << " "; - // std::cout << std::endl; - // } - // specify initial tangent Eigen::VectorXd tangent(2); tangent << 1, 1; @@ -134,11 +124,6 @@ TEST_F(neg_bin_log_diff_test, manual_calls) { std::cout << "fx: " << value_of(fx_fvar.val_) << std::endl; std::cout << "grad_fx_dot_v: " << value_of(fx_fvar.d_) << std::endl; - // grad(fx_fvar.d_.vi_); - // for (int i = 0; i < theta_dbl.size(); ++i) - // std::cout << theta_var(i).adj() << " "; - // std::cout << std::endl; - Matrix>, -1, 1> theta_ffvar(theta_dbl.size()); for (int i = 0; i < theta_dbl.size(); ++i) { theta_ffvar(i) = fvar>(theta_fvar(i), tangent(i)); @@ -159,14 +144,6 @@ TEST_F(neg_bin_log_diff_test, manual_calls) { grad3_f_v(i) = theta_var(i).adj(); std::cout << "grad3 f: " << grad3_f_v.transpose() << std::endl; - - // var 
fx_var; - // var grad_fx_var_dot_v; - // gradient_dot_vector(f, theta_var, tang, fx_var, grad_fx_var_dot_v); - // fx = fx_var.val(); - // grad(grad_fx_var_dot_v.vi_); - // Hv.resize(theta.size()); - // for (int i = 0; i < theta.size(); ++i) Hv(i) = theta_var(i).adj(); } // Test function for directional Hessian and directional third diff. diff --git a/test/unit/math/laplace/laplace_skim_test.cpp b/test/unit/math/laplace/laplace_skim_test.cpp index 654ea88ba27..34e158ee5a5 100755 --- a/test/unit/math/laplace/laplace_skim_test.cpp +++ b/test/unit/math/laplace/laplace_skim_test.cpp @@ -1,6 +1,8 @@ #include #include -#include +#include +#include +#include #include #include @@ -11,7 +13,6 @@ #include #include - struct K_functor { template Eigen::Matrix @@ -119,118 +120,186 @@ struct K_functor2 { } }; +class laplace_skim_test : public::testing::Test { +protected: + void SetUp() override { + using stan::math::square; + using stan::math::var; + using stan::math::square; + using stan::math::elt_divide; + using stan::math::add; -TEST(laplace, skm) { - using stan::math::diff_logistic_log; - using stan::math::var; - using stan::math::square; - using stan::math::elt_divide; - using stan::math::add; - using Eigen::MatrixXd; - using Eigen::VectorXd; - - typedef Eigen::Matrix Vector_v; - typedef Eigen::Matrix Matrix_v; - - // DATA AND TRANSFORMED DATA BLOCK - int N = 100; - int M = 200; // options: 2, 50, 100, 150, 200 - - std::string data_directory = "test/unit/math/laplace/skim_data/" + - std::to_string(M) + "_" + std::to_string(N) + "/"; - MatrixXd X(N, M); - std::vector y(N); - VectorXd lambda(M); - - read_in_data(M, N, data_directory, X, y, lambda); - - // std::cout << X << std::endl; - // std::cout << lambda.transpose() << std::endl; - // for (int i = 0; i < N; i++) std::cout << y[i] << " "; - // std::cout << std::endl; - - double alpha_base = 0, psi = 1, m0 = 1, // options: m0 = 2 - slab_scale = 3, - slab_scale2 = slab_scale * slab_scale, - slab_df = 25, + N = 100; + M = 200; // options: 2, 50, 100, 150, 200 + // TODO: add to GitHub directory simulation for each configuration. 
+ // std::string data_directory = "test/unit/math/laplace/skim_data/" + + // std::to_string(M) + "_" + std::to_string(N) + "/"; + std::string data_directory = "test/unit/math/laplace/skim_data/"; + + X.resize(N, M); + y.resize(N); + lambda.resize(M); + + read_in_data(M, N, data_directory, X, y, lambda); + + if (FALSE){ + std::cout << X << std::endl << "-----" << std::endl; + std::cout << lambda.transpose() << std::endl << "------" << std::endl; + std::cout << y[0] << " " << y[1] << " " << std::endl + << "------" << std::endl; + } + + alpha_base = 0; + psi = 1; + m0 = 1; + slab_scale = 3; + slab_scale2 = slab_scale * slab_scale; half_slab_df = 0.5 * slab_df; - VectorXd mu = VectorXd::Zero(N); - std::vector delta(1); - delta[0] = psi; - std::vector delta_int(2); - delta_int[0] = N; - delta_int[1] = M; + mu = Eigen::VectorXd::Zero(N); + delta.resize(1); + delta[0] = psi; + delta_int.resize(2); + delta_int[0] = N; + delta_int[1] = M; - std::vector n_samples(N, 1); - VectorXd theta_0 = VectorXd::Zero(N); + std::vector n_samples_(N, 1); + n_samples = n_samples_; - MatrixXd X2 = square(X); + theta_0 = Eigen::VectorXd::Zero(N); - std::vector x_tot(2 * N); - for (int n = 0; n < N; n++) x_tot[n] = X.block(n, 0, 1, M).transpose(); - for (int n = 0; n < N; n++) x_tot[N + n] = X2.block(n, 0, 1, M).transpose(); + X2 = square(X); + x_tot_m.resize(2 * N, M); + x_tot_m.block(0, 0, N, M) = X; + x_tot_m.block(N, 0, N, M) = X2; - Eigen::MatrixXd x_tot_m(2 * N, M); - x_tot_m.block(0, 0, N, M) = X; - x_tot_m.block(N, 0, N, M) = X2; - - // PARAMETERS BLOCK - // lambda term is defined above - var c2_tilde = 1.112843, - tau_tilde = 7.615908, - sigma = 1.708423, + // parameters block + c2_tilde = 1.112843; + tau_tilde = 7.615908; + sigma = 1.708423; eta_base = 0.9910583; - // TRANSFORMED PARAMETERS BLOCK - var phi = (m0 / (M - m0)) * (sigma / sqrt(N)) * tau_tilde, - c2 = slab_scale2 * c2_tilde, - eta = square(phi) / c2 * eta_base, + phi = (m0 / (M - m0)) * (sigma / sqrt(N)) * tau_tilde; + c2 = slab_scale2 * c2_tilde; + eta = square(phi) / c2 * eta_base; alpha = square(phi) / c2 * alpha_base; - Vector_v lambda_tilde = - c2 * elt_divide(square(lambda), + lambda_tilde = c2 * elt_divide(square(lambda), add(c2, multiply(square(phi), square(lambda)))); - Vector_v parm(M + 4); - parm.head(M) = lambda_tilde; - parm(M) = eta; - parm(M + 1) = alpha; - parm(M + 2) = phi; - parm(M + 3) = sigma; + parm.resize(M + 4); + parm.head(M) = lambda_tilde; + parm(M) = eta; + parm(M + 1) = alpha; + parm(M + 2) = phi; + parm(M + 3) = sigma; + + // std::cout << "parm: " << parm << std::endl; + } + + int N; + int M; + Eigen::MatrixXd X; + std::vector y; + Eigen::VectorXd lambda; + double alpha_base, psi, m0, slab_scale, slab_scale2, slab_df, half_slab_df; + Eigen::VectorXd mu; + std::vector delta; + std::vector delta_int; + std::vector n_samples; + Eigen::VectorXd theta_0; + Eigen::MatrixXd X2; + Eigen::MatrixXd x_tot_m; + + stan::math::var c2_tilde, tau_tilde, sigma, eta_base, + phi, c2, eta, alpha; + Eigen::Matrix lambda_tilde; + Eigen::Matrix parm; +}; + - // K_functor K; - // for (int i = 0; i < parm.size(); i++) std::cout << parm(i) << " "; - // std::cout << std::endl; - // std::cout << "x_tot" << std::endl; - // for (size_t i = 0; i < x_tot.size(); i++) std::cout << x_tot[i].transpose() << std::endl; - // std::cout << std::endl << std::endl; - // for (size_t i = 0; i < delta.size(); i++) std::cout << delta[i] << std::endl; - // for (size_t i = 0; i < delta_int.size(); i++) std::cout << delta_int[i] << std::endl; 
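// [Editorial note, not part of the patch] The transformed parameters assembled
// in SetUp() above appear to follow the regularized-horseshoe construction
// (Piironen and Vehtari, 2017) underlying the SKIM model:
//   phi          = m0 / (M - m0) * sigma / sqrt(N) * tau_tilde
//   c2           = slab_scale^2 * c2_tilde
//   lambda_tilde = c2 * lambda^2 ./ (c2 + phi^2 * lambda^2)
// which caps the contribution of any single input near the slab scale
// instead of letting it grow with lambda.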
+TEST_F(laplace_skim_test, lk_analytical) { + using stan::math::var; + using stan::math::laplace_marginal_bernoulli_logit_lpmf; - // std::cout << K(parm, x_tot, delta, delta_int, 0) << std::endl; auto start = std::chrono::system_clock::now(); - // var marginal_density = laplace_marginal_bernoulli(y, n_samples, K_functor(), - // parm, x_tot, delta, delta_int, theta_0); + var marginal_density + = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, K_functor2(), + parm, x_tot_m, delta, delta_int, + theta_0); + + auto end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_time = end - start; + + VEC g; + AVEC parm_vec(M + 4); + for (int m = 0; m < M + 4; m++) parm_vec[m] = parm(m); + marginal_density.grad(parm_vec, g); + + // std::cout << parm << std::endl; + + // Expected density: - 10.9795 + std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl + << "M: " << M << std::endl + << "density: " << marginal_density << std::endl + << "autodiff grad: "; + for (size_t i = 0; i < 10; i++) std::cout << g[i] << " "; + std::cout << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; +} + +struct bernoulli_logit_likelihood { + template + stan::return_type_t + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::VectorXd& sums, // sums + const std::vector& n_samples, // n_samples + std::ostream* pstream) const { + using stan::math::to_vector; + stan::math::diff_bernoulli_logit + diff_functor(to_vector(n_samples), sums); + + return diff_functor.log_likelihood(theta, eta); + } +}; + + +TEST_F(laplace_skim_test, lk_autodiff) { + using stan::math::var; + using stan::math::laplace_marginal_density; + using stan::math::diff_likelihood; + using stan::math::to_vector; + using stan::math::value_of; + + bernoulli_logit_likelihood f; + diff_likelihood + diff_functor(f, to_vector(y), n_samples); + + auto start = std::chrono::system_clock::now(); - var marginal_density = laplace_marginal_bernoulli(y, n_samples, K_functor2(), - parm, x_tot_m, delta, delta_int, theta_0); + Eigen::Matrix eta_dummy; + var marginal_density + = laplace_marginal_density(diff_functor, K_functor2(), parm, eta_dummy, + x_tot_m, delta, delta_int, theta_0); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; VEC g; - AVEC parm_vec(M); - for (int m = 0; m < M; m++) parm_vec[m] = parm(m); + AVEC parm_vec(M + 4); + for (int m = 0; m < M + 4; m++) parm_vec[m] = parm(m); marginal_density.grad(parm_vec, g); + // Expected density: - 10.9795 std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl << "M: " << M << std::endl << "density: " << marginal_density << std::endl << "autodiff grad: "; - // for (size_t i = 0; i < parm.size(); i++) std::cout << g[i] << " "; + for (size_t i = 0; i < 10; i++) std::cout << g[i] << " "; std::cout << std::endl << "total time: " << elapsed_time.count() << std::endl << std::endl; From 7831316f1f41c2c62a8faa3ef6f16760b2a4b3c7 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Thu, 25 Feb 2021 16:05:37 -0500 Subject: [PATCH 26/53] block diag hessian computation. 
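[Editor's note, not part of the patch series] The "block diag hessian computation" this commit introduces rests on a probing-vector identity that can be checked without any autodiff: if H is block diagonal with m x m blocks, the m vectors that carry ones every m-th entry recover every block from only m matrix-vector products. The sketch below uses plain Eigen, a known 4 x 4 matrix, and the hypothetical names H_true and reconstruct_block_diag; only the index arithmetic mirrors hessian_block_diag, where each product is supplied by a hessian_times_vector call instead.

#include <Eigen/Dense>
#include <iostream>

// Rebuild a block-diagonal matrix from matrix-vector products only.
Eigen::MatrixXd reconstruct_block_diag(const Eigen::MatrixXd& H_true, int m) {
  int n = H_true.rows();
  int n_blocks = n / m;
  Eigen::MatrixXd H = Eigen::MatrixXd::Zero(n, n);
  for (int i = 0; i < m; ++i) {
    Eigen::VectorXd v = Eigen::VectorXd::Zero(n);
    for (int j = i; j < n; j += m) v(j) = 1;      // probing vector i
    Eigen::VectorXd Hv = H_true * v;              // stands in for one
                                                  // hessian_times_vector call
    for (int j = 0; j < n_blocks; ++j)
      for (int k = 0; k < m; ++k)
        H(k + j * m, i + j * m) = Hv(k + j * m);  // column i of block j
  }
  return H;
}

int main() {
  Eigen::MatrixXd H_true = Eigen::MatrixXd::Zero(4, 4);
  H_true.block(0, 0, 2, 2) << 2, 1, 1, 3;         // block 0
  H_true.block(2, 2, 2, 2) << 4, -1, -1, 5;       // block 1
  std::cout << (reconstruct_block_diag(H_true, 2) - H_true).norm() << std::endl;
  // prints 0: two products recover both 2 x 2 blocks
  return 0;
}
[End of editor's note]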
--- stan/math/laplace/hessian_block_diag.hpp | 49 ++++++ .../laplace/laplace_likelihood_general.hpp | 113 ++++++++++++ .../laplace_likelihood_poisson_log.hpp | 2 +- test/unit/math/laplace/disease_map_test.cpp | 164 +++++++++++++----- .../math/laplace/higher_order_diff_test.cpp | 11 ++ 5 files changed, 298 insertions(+), 41 deletions(-) create mode 100644 stan/math/laplace/hessian_block_diag.hpp create mode 100644 stan/math/laplace/laplace_likelihood_general.hpp diff --git a/stan/math/laplace/hessian_block_diag.hpp b/stan/math/laplace/hessian_block_diag.hpp new file mode 100644 index 00000000000..f8ba0d7e9c2 --- /dev/null +++ b/stan/math/laplace/hessian_block_diag.hpp @@ -0,0 +1,49 @@ +#ifndef STAN_MATH_LAPLACE_HESSIAN_BLOCK_DIAG_HPP +#define STAN_MATH_LAPLACE_HESSIAN_BLOCK_DIAG_HPP + +// TODO: refine include. +#include +#include + +namespace stan { +namespace math { + + /** + * Returns a block diagonal Hessian by computing the relevant directional + * derivatives and storing them in a matrix. + * For m the size of each block, the operations const m calls to + * hessian_times_vector, that is m forward sweeps and m reverse sweeps. + */ + template + void hessian_block_diag(const F& f, + const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + int m, + double& fx, + Eigen::MatrixXd& H, + std::ostream* pstream = 0) { + using Eigen::VectorXd; + using Eigen::MatrixXd; + + int x_size = x.size(); + VectorXd v; + H = MatrixXd::Zero(x_size, x_size); + int n_blocks = x_size / m; + for (int i = 0; i < m; ++i) { + v = VectorXd::Zero(x_size); + for (int j = i; j < x_size; j += m) v(j) = 1; + VectorXd Hv; + hessian_times_vector(f, x, eta, delta, delta_int, v, fx, Hv, pstream); + std::cout << "Hv: " << Hv << std::endl; + for (int j = 0; j < n_blocks; ++j) { + for (int k = 0; k < m; ++k) H(k + j * m, i + j * m) = Hv(k + j * m); + } + } + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_likelihood_general.hpp b/stan/math/laplace/laplace_likelihood_general.hpp new file mode 100644 index 00000000000..2f583c23846 --- /dev/null +++ b/stan/math/laplace/laplace_likelihood_general.hpp @@ -0,0 +1,113 @@ +#ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_GENERAL_HPP +#define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_GENERAL_HPP + +#include +#include + +namespace stan { +namespace math { + +/** + * A structure to compute the log density, first, second, + * and third-order derivatives for a likelihoood specified by the user. + */ +template +struct diff_likelihood { + /* Likelihood function. */ + F f_; + /* Real variables passed to the likelihood. */ + Eigen::VectorXd delta_; + /* Integer variables passed to the likelihood. */ + std::vector delta_int_; + /* stream to return print statements when function is called. */ + std::ostream* pstream_; + + diff_likelihood(const F& f, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + std::ostream* pstream = 0) + : f_(f), delta_(delta), delta_int_(delta_int), pstream_(pstream) { } + + template + T1 log_likelihood(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + return f_(theta, eta, delta_, delta_int_, pstream_); + } + + void diff (const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta, + Eigen::VectorXd& gradient, + Eigen::VectorXd& hessian) const { + using Eigen::Matrix; + using Eigen::Dynamic; + + int theta_size = theta.size(); + // CHECK -- do we need this scope? 
+ { + nested_rev_autodiff nested; + Matrix theta_var = theta; + var f_var = f_(theta_var, eta, delta_, delta_int_, pstream_); + grad(f_var.vi_); + gradient.resize(theta_size); + for (int i = 0; i < theta_size; i++) gradient(i) = theta_var(i).adj(); + } + + Eigen::VectorXd v(theta_size); + for (int i = 0; i < theta_size; i++) v(i) = 1; + double f_theta; + hessian_times_vector(f_, theta, eta, delta_, delta_int_, + v, f_theta, hessian, pstream_); + } + + Eigen::VectorXd third_diff(const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta) const { + + int theta_size = theta.size(); + Eigen::VectorXd v(theta_size); + for (int i = 0; i < theta_size; i++) v(i) = 1; + double f_theta; + Eigen::VectorXd third_diff_tensor; + + third_diff_directional(f_, theta, eta, delta_, delta_int_, + f_theta, third_diff_tensor, + v, v, pstream_); + + return third_diff_tensor; + } + + template + Eigen::Matrix, Eigen::Dynamic, 1> + diff_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } + + template + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + diff2_theta_eta(const Eigen::Matrix& theta, + const Eigen::Matrix& eta) + const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::Matrix, Eigen::Dynamic, + Eigen::Dynamic> void_matrix; + return void_matrix; + } +}; + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/laplace_likelihood_poisson_log.hpp b/stan/math/laplace/laplace_likelihood_poisson_log.hpp index 7d8ce653a48..f158d42b7ef 100644 --- a/stan/math/laplace/laplace_likelihood_poisson_log.hpp +++ b/stan/math/laplace/laplace_likelihood_poisson_log.hpp @@ -46,7 +46,7 @@ struct diff_poisson_log { */ template T1 log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) + const Eigen::Matrix& eta_dummy) const { double factorial_term = 0; for (int i = 0; i < sums_.size(); i++) diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index bbe5702d6fe..cb82f4a363b 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -1,7 +1,7 @@ #include #include -// #include -// #include +#include +// #include #include #include @@ -16,53 +16,75 @@ #include // TODO(charlesm93): update using new function signatures. 
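// [Editorial note, not part of the patch] For orientation, the fixture below
// encodes the disease-mapping example from the GPstuff demo cited in the test:
// counts y_i with known expected counts ye_i at 911 two-dimensional
// coordinates, a latent field theta with a squared-exponential GP prior whose
// hyperparameters are collected in phi (magnitude and length scale), and
//   y_i   ~ Poisson(ye_i * exp(theta_i)),
//   theta ~ MultiNormal(0, K(phi)),
// which is why the Poisson likelihood passed to the Laplace routines carries
// log(ye) as an offset.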
+class laplace_disease_map_test : public::testing::Test { +protected: + void SetUp() override { + dim_theta = 911; + n_observations = 911; + data_directory = "test/unit/math/laplace/aki_disease_data/"; + x1.resize(dim_theta); + x2.resize(dim_theta); + y.resize(n_observations); + ye.resize(n_observations); + read_in_data(dim_theta, n_observations, data_directory, x1, x2, y, ye); + + if (FALSE) { + // look at some of the data + std::cout << "x_1: " << x1[0] << " " << x2[0] << std::endl + << "x_2: " << x1[1] << " " << x2[1] << std::endl + << "y_1: " << y[0] << " y_2: " << y[1] << std::endl + << "ye_1: " << ye[0] << " ye_2: " << ye[1] << std::endl; + } + + dim_x = 2; + x.resize(dim_theta); + for (int i = 0; i < dim_theta; i++) { + Eigen::VectorXd coordinate(dim_x); + coordinate << x1[i], x2[i]; + x[i] = coordinate; + } + + // one observation per group + n_samples.resize(dim_theta); + for (int i = 0; i < dim_theta; i++) n_samples[i] = 1; + + theta_0 = Eigen::VectorXd::Zero(dim_theta); + dim_phi = 2; + phi.resize(dim_phi); + phi << 0.3162278, 200; // variance, length scale + } + + int dim_theta; + int n_observations; + std::string data_directory; + std::vector x1, x2; + std::vector y; + Eigen::VectorXd ye; + int dim_x; + std::vector x; + std::vector n_samples; + std::vector delta; + std::vector delta_int; + + Eigen::VectorXd theta_0; + int dim_phi; + Eigen::Matrix phi; +}; -TEST(laplace, disease_map_dim_911) { + +TEST_F(laplace_disease_map_test, lk_analytical) { // Based on (Vanhatalo, Pietilainen and Vethari, 2010). See // https://research.cs.aalto.fi/pml/software/gpstuff/demo_spatial1.shtml using stan::math::var; using stan::math::laplace_marginal_poisson_log_lpmf; using stan::math::sqr_exp_kernel_functor; - int dim_theta = 911; - int n_observations = 911; - std::string data_directory = "test/unit/math/laplace/aki_disease_data/"; - std::vector x1(dim_theta), x2(dim_theta); - std::vector y(n_observations); - Eigen::VectorXd ye(n_observations); - read_in_data(dim_theta, n_observations, data_directory, x1, x2, y, ye); - - // look at some of the data - std::cout << "x_1: " << x1[0] << " " << x2[0] << std::endl - << "x_2: " << x1[1] << " " << x2[1] << std::endl - << "y_1: " << y[0] << " y_2: " << y[1] << std::endl - << "ye_1: " << ye[0] << " ye_2: " << ye[1] << std::endl; - - int dim_x = 2; - std::vector x(dim_theta); - for (int i = 0; i < dim_theta; i++) { - Eigen::VectorXd coordinate(dim_x); - coordinate << x1[i], x2[i]; - x[i] = coordinate; - } - - // one observation per group - std::vector n_samples(dim_theta); - for (int i = 0; i < dim_theta; i++) n_samples[i] = 1; - - std::vector delta; - std::vector delta_int; - - Eigen::VectorXd theta_0 = Eigen::VectorXd::Zero(dim_theta); - int dim_phi = 2; - Eigen::Matrix phi(dim_phi); - phi << 0.3162278, 200; // variance, length scale - auto start = std::chrono::system_clock::now(); var marginal_density - = laplace_marginal_poisson_log_lpmf(y, n_samples, ye, sqr_exp_kernel_functor(), - phi, x, delta, delta_int, theta_0); + = laplace_marginal_poisson_log_lpmf(y, n_samples, ye, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, theta_0); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; @@ -80,8 +102,9 @@ TEST(laplace, disease_map_dim_911) { // Expected result // density: -2866.88 // autodiff grad: 266.501 -0.425901 - // total time: 0.627501 + // total time: 0.414122 (on new computer), 0.627501 (on old computer) + // TODO(charlesm93): update signatures for rng functions. 
//////////////////////////////////////////////////////////////////////// // Let's now generate a sample theta from the estimated posterior /* @@ -125,3 +148,64 @@ TEST(laplace, disease_map_dim_911) { << std::endl; */ } + +struct poisson_log_likelihood { + template + stan::return_type_t + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::VectorXd& delta, + const std::vector& n_samples, + std::ostream* pstream) const { + using stan::math::to_vector; + using stan::math::log; + int n = 911; + Eigen::VectorXd y = delta.head(n); + Eigen::VectorXd ye = delta.tail(n); + // Eigen::VectorXd log_ye = ye.log(); + + stan::math::diff_poisson_log + diff_functor(to_vector(n_samples), y, log(ye)); + + return diff_functor.log_likelihood(theta, eta); + } +}; + +TEST_F(laplace_disease_map_test, lk_autodiff) { + using stan::math::var; + using stan::math::laplace_marginal_density; + using stan::math::diff_likelihood; + using stan::math::sqr_exp_kernel_functor; + + Eigen::VectorXd delta_lk(2 * n_observations); + for (int i = 0; i < n_observations; i++) delta_lk(i) = y[i]; + for (int i = 0; i < n_observations; i++) delta_lk(n_observations + i) = ye(i); + + poisson_log_likelihood f; + diff_likelihood + diff_functor(f, delta_lk, n_samples); + + auto start = std::chrono::system_clock::now(); + + Eigen::Matrix eta_dummy; + var marginal_density + = laplace_marginal_density(diff_functor, + sqr_exp_kernel_functor(), phi, eta_dummy, + x, delta, delta_int, theta_0); + + auto end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_time = end - start; + + VEC g; + AVEC parm_vec = createAVEC(phi(0), phi(1)); + marginal_density.grad(parm_vec, g); + + std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl + << "density: " << value_of(marginal_density) << std::endl + << "autodiff grad: " << g[0] << " " << g[1] << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; + // Should return consistent evaluation of density and gradient as + // previous iteration. + // Expected run time: 0.39 s +} diff --git a/test/unit/math/laplace/higher_order_diff_test.cpp b/test/unit/math/laplace/higher_order_diff_test.cpp index 9d13814a343..5068ad83fbf 100755 --- a/test/unit/math/laplace/higher_order_diff_test.cpp +++ b/test/unit/math/laplace/higher_order_diff_test.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -175,5 +176,15 @@ TEST_F(neg_bin_log_diff_test, diff_likelihood) { << "gradient: " << gradient.transpose() << std::endl << "hessian: " << hessian.transpose() << std::endl << "third diff: " << third_diff.transpose() << std::endl; +} + +TEST_F(neg_bin_log_diff_test, diff_block_diagonal) { + using stan::math::hessian_block_diag; + + Eigen::MatrixXd H; + double fx; + int m = 1; // size of block (1 for diagonal Hessian) + hessian_block_diag(likelihood, theta_dbl, eta, y, y_index, m, fx, H); + std::cout << "Hessian: " << std::endl << H << std::endl; } From 153cb8c5ada7cab5316f9afe66253d0f37c3bbea Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Mon, 15 Mar 2021 14:37:49 -0400 Subject: [PATCH 27/53] autodiff for non-diag hessian and eta. 
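[Editor's note, not part of the patch series] A compact summary of the update this patch generalizes. Writing W for the negative Hessian of the log likelihood (now block diagonal rather than diagonal) and K for the prior covariance, each Newton step in laplace_marginal_density computes

  b = W * theta + grad_log_lik
  a = b - W * (I + K * W)^{-1} * (K * b)
  theta_new = K * a

and the approximate marginal carries the correction -(1/2) * log det(I + K * W), which this patch obtains from a PartialPivLU of B = I + K * W when the blocks are larger than 1, instead of the Cholesky factor of I + W^{1/2} K W^{1/2} used in the diagonal case. The dense-Eigen sketch below is illustrative only (hypothetical names, no convergence control), not the library implementation.

#include <Eigen/Dense>
#include <cmath>

// One Newton step of the Laplace approximation with a general (block-diagonal)
// negative Hessian W of the log likelihood; K is the prior covariance.
Eigen::VectorXd newton_step(const Eigen::MatrixXd& K, const Eigen::MatrixXd& W,
                            const Eigen::VectorXd& theta,
                            const Eigen::VectorXd& grad_log_lik,
                            double& half_log_det_B) {
  int n = theta.size();
  Eigen::MatrixXd B = Eigen::MatrixXd::Identity(n, n) + K * W;
  Eigen::PartialPivLU<Eigen::MatrixXd> LU(B);
  half_log_det_B = 0.5 * std::log(LU.determinant());
  Eigen::VectorXd b = W * theta + grad_log_lik;
  Eigen::VectorXd a = b - W * LU.solve(K * b);
  return K * a;  // next theta; iterate to convergence in the real routine
}
[End of editor's note]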
--- stan/math/laplace/block_matrix_sqrt.hpp | 55 ++++ stan/math/laplace/hessian_block_diag.hpp | 52 +++- stan/math/laplace/laplace.hpp | 6 +- ....hpp => laplace_likelihood_deprecated.hpp} | 25 -- .../laplace/laplace_likelihood_general.hpp | 79 ++++- .../laplace_likelihood_poisson_log.hpp | 29 +- stan/math/laplace/laplace_marginal.hpp | 291 +++++++++++++----- .../laplace/laplace_marginal_poisson_log.hpp | 2 +- stan/math/laplace/partial_diff_theta.hpp | 95 ++++++ stan/math/laplace/third_diff_directional.hpp | 2 +- test/unit/math/laplace/disease_map_test.cpp | 91 +++++- .../laplace_marginal_poisson_log_test.cpp | 15 +- test/unit/math/laplace/laplace_utility.hpp | 22 ++ test/unit/math/laplace/motorcycle_gp_test.cpp | 191 ++++++++++++ test/unit/math/laplace/sparse_matrix_test.cpp | 141 +++++++++ 15 files changed, 954 insertions(+), 142 deletions(-) create mode 100644 stan/math/laplace/block_matrix_sqrt.hpp rename stan/math/laplace/{laplace_likelihood.hpp => laplace_likelihood_deprecated.hpp} (95%) create mode 100644 stan/math/laplace/partial_diff_theta.hpp create mode 100755 test/unit/math/laplace/motorcycle_gp_test.cpp create mode 100755 test/unit/math/laplace/sparse_matrix_test.cpp diff --git a/stan/math/laplace/block_matrix_sqrt.hpp b/stan/math/laplace/block_matrix_sqrt.hpp new file mode 100644 index 00000000000..5c6878de4d0 --- /dev/null +++ b/stan/math/laplace/block_matrix_sqrt.hpp @@ -0,0 +1,55 @@ +#ifndef STAN_MATH_LAPLACE_BLOCK_MATRIX_SQRT_HPP +#define STAN_MATH_LAPLACE_BLOCK_MATRIX_SQRT_HPP + +#include + +#include +#include +#include + +namespace stan { +namespace math { + +/** + * Return the matrix square-root for a block diagonal matrix. + */ + Eigen::SparseMatrix + block_matrix_sqrt(Eigen::SparseMatrix W, + int block_size) { + int n_block = W.cols() / block_size; + Eigen::MatrixXd local_block(block_size, block_size); + Eigen::MatrixXd local_block_sqrt(block_size, block_size); + Eigen::SparseMatrix W_root(W.rows(), W.cols()); + W_root.reserve(Eigen::VectorXi::Constant(W_root.cols(), block_size)); + + // No block operation available for sparse matrices, so we have to loop. + // See https://eigen.tuxfamily.org/dox/group__TutorialSparse.html#title7 + for (int i = 0; i < n_block; i++) { + std::cout << "block number: " << i << std::endl; + for (int j = 0; j < block_size; j++) { + for (int k = 0; k < block_size; k++) { + local_block(j, k) = W.coeffRef(i * block_size + j, i * block_size + k); + } + } + + local_block_sqrt = local_block.sqrt(); + // local_block_sqrt = cholesky_decompose(local_block); + + for (int j = 0; j < block_size; j++) { + for (int k = 0; k < block_size; k++) { + W_root.insert(i * block_size + j, i * block_size + k) + = local_block_sqrt(j, k); + } + } + } + + W_root.makeCompressed(); + + return W_root; +} + + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/hessian_block_diag.hpp b/stan/math/laplace/hessian_block_diag.hpp index f8ba0d7e9c2..6fbadaad7ca 100644 --- a/stan/math/laplace/hessian_block_diag.hpp +++ b/stan/math/laplace/hessian_block_diag.hpp @@ -4,6 +4,7 @@ // TODO: refine include. 
#include #include +#include namespace stan { namespace math { @@ -20,7 +21,7 @@ namespace math { const Eigen::VectorXd& eta, const Eigen::VectorXd& delta, const std::vector& delta_int, - int m, + int hessian_block_size, double& fx, Eigen::MatrixXd& H, std::ostream* pstream = 0) { @@ -30,15 +31,54 @@ namespace math { int x_size = x.size(); VectorXd v; H = MatrixXd::Zero(x_size, x_size); - int n_blocks = x_size / m; - for (int i = 0; i < m; ++i) { + int n_blocks = x_size / hessian_block_size; + for (int i = 0; i < hessian_block_size; ++i) { v = VectorXd::Zero(x_size); - for (int j = i; j < x_size; j += m) v(j) = 1; + for (int j = i; j < x_size; j += hessian_block_size) v(j) = 1; VectorXd Hv; hessian_times_vector(f, x, eta, delta, delta_int, v, fx, Hv, pstream); - std::cout << "Hv: " << Hv << std::endl; for (int j = 0; j < n_blocks; ++j) { - for (int k = 0; k < m; ++k) H(k + j * m, i + j * m) = Hv(k + j * m); + for (int k = 0; k < hessian_block_size; ++k) + H(k + j * hessian_block_size, i + j * hessian_block_size) + = Hv(k + j * hessian_block_size); + } + } + } + + /** + * Overload for case where hessian is stored as a sparse matrix. + */ + template + void hessian_block_diag(const F& f, + const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + int hessian_block_size, + double& fx, + Eigen::SparseMatrix& H, + // Eigen::MatrixXd& H, + std::ostream* pstream = 0) { + using Eigen::VectorXd; + using Eigen::MatrixXd; + + int x_size = x.size(); + VectorXd v; + // H = MatrixXd::Zero(x_size, x_size); + H.resize(x_size, x_size); + // H.reserve(Eigen::VectorXi::Constant(x_size, hessian_block_size)); + + int n_blocks = x_size / hessian_block_size; + for (int i = 0; i < hessian_block_size; ++i) { + v = VectorXd::Zero(x_size); + for (int j = i; j < x_size; j += hessian_block_size) v(j) = 1; + VectorXd Hv; + hessian_times_vector(f, x, eta, delta, delta_int, v, fx, Hv, pstream); + for (int j = 0; j < n_blocks; ++j) { + for (int k = 0; k < hessian_block_size; ++k) { + H.insert(k + j * hessian_block_size, i + j * hessian_block_size) + = Hv(k + j * hessian_block_size); + } } } } diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp index 54a5acf468c..a2798fe9971 100644 --- a/stan/math/laplace/laplace.hpp +++ b/stan/math/laplace/laplace.hpp @@ -1,9 +1,9 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_HPP #define STAN_MATH_LAPLACE_LAPLACE_HPP -#include +// #include #include -#include -#include +// #include +// #include #endif diff --git a/stan/math/laplace/laplace_likelihood.hpp b/stan/math/laplace/laplace_likelihood_deprecated.hpp similarity index 95% rename from stan/math/laplace/laplace_likelihood.hpp rename to stan/math/laplace/laplace_likelihood_deprecated.hpp index 706c94ee42b..9f096cffa55 100644 --- a/stan/math/laplace/laplace_likelihood.hpp +++ b/stan/math/laplace/laplace_likelihood_deprecated.hpp @@ -438,31 +438,6 @@ struct diff_student_t { } }; - -// TO DO: delete this structure. -// To experiment with the prototype, provide a built-in covariance -// function. In the final version, the user will pass the covariance -// function. 
-struct sqr_exp_kernel_functor { - template - Eigen::Matrix - operator() (const Eigen::Matrix& phi, - const T2& x, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* msgs = nullptr) const { - double jitter = 1e-8; - Eigen::Matrix - kernel = stan::math::gp_exp_quad_cov(x, phi(0), phi(1)); - for (int i = 0; i < kernel.cols(); i++) - kernel(i, i) += jitter; - - return kernel; - } -}; - - - } // namespace math } // namespace stan diff --git a/stan/math/laplace/laplace_likelihood_general.hpp b/stan/math/laplace/laplace_likelihood_general.hpp index 2f583c23846..7ebd891484f 100644 --- a/stan/math/laplace/laplace_likelihood_general.hpp +++ b/stan/math/laplace/laplace_likelihood_general.hpp @@ -1,8 +1,12 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_GENERAL_HPP #define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_GENERAL_HPP -#include +// #include +#include #include +#include + +#include namespace stan { namespace math { @@ -38,26 +42,43 @@ struct diff_likelihood { void diff (const Eigen::VectorXd& theta, const Eigen::VectorXd& eta, Eigen::VectorXd& gradient, - Eigen::VectorXd& hessian) const { + Eigen::SparseMatrix& hessian_theta, + int hessian_block_size = 1) const { using Eigen::Matrix; using Eigen::Dynamic; int theta_size = theta.size(); + int eta_size = eta.size(); // CHECK -- do we need this scope? { nested_rev_autodiff nested; Matrix theta_var = theta; - var f_var = f_(theta_var, eta, delta_, delta_int_, pstream_); + Matrix eta_var = eta; + + var f_var = f_(theta_var, eta_var, delta_, delta_int_, pstream_); grad(f_var.vi_); - gradient.resize(theta_size); + gradient.resize(theta_size + eta_size); for (int i = 0; i < theta_size; i++) gradient(i) = theta_var(i).adj(); + for (int i = 0; i < eta_size; i++) + gradient(theta_size + i) = eta_var(i).adj(); } - Eigen::VectorXd v(theta_size); - for (int i = 0; i < theta_size; i++) v(i) = 1; + hessian_theta.resize(theta_size, theta_size); double f_theta; - hessian_times_vector(f_, theta, eta, delta_, delta_int_, - v, f_theta, hessian, pstream_); + if (hessian_block_size == 1) { + Eigen::VectorXd v(theta_size); + for (int i = 0; i < theta_size; i++) v(i) = 1; + Eigen::VectorXd hessian_v; + hessian_times_vector(f_, theta, eta, delta_, delta_int_, + v, f_theta, hessian_v, pstream_); + hessian_theta.reserve(Eigen::VectorXi::Constant(theta_size, 1)); + for (int i = 0; i < theta_size; i++) + hessian_theta.insert(i, i) = hessian_v(i); + } else { + hessian_block_diag(f_, theta, eta, delta_, delta_int_, + hessian_block_size, + f_theta, hessian_theta, pstream_); + } } Eigen::VectorXd third_diff(const Eigen::VectorXd& theta, @@ -76,11 +97,47 @@ struct diff_likelihood { return third_diff_tensor; } + Eigen::VectorXd compute_s2(const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta, + const Eigen::MatrixXd& A, + int hessian_block_size) const { + return partial_diff_theta(f_, theta, eta, delta_, delta_int_, A, + hessian_block_size, pstream_); + } + + Eigen::VectorXd diff_eta_implicit(const Eigen::VectorXd& v, + const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta) const { + using Eigen::Matrix; + using Eigen::Dynamic; + using Eigen::VectorXd; + + nested_rev_autodiff nested; + int eta_size = eta.size(); + Matrix eta_var = eta; + + // CHECK -- can we avoid declaring theta as fvar? + // We currently compute derivatives wrt eta, which is not needed. 
+ int theta_size = theta.size(); + Matrix theta_var = theta; + Matrix, Dynamic, 1> theta_fvar(theta_size); + for (int i = 0; i < theta_size; i++) + theta_fvar(i) = fvar(theta_var(i), v(i)); + + fvar f_fvar = f_(theta_fvar, eta_var, delta_, delta_int_, pstream_); + grad(f_fvar.d_.vi_); + + VectorXd diff_eta(eta_size); + for (int i = 0; i < eta_size; i++) diff_eta(i) = eta_var(i).adj(); + return diff_eta; + } + + template Eigen::Matrix, Eigen::Dynamic, 1> diff_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; return void_matrix; } @@ -89,7 +146,7 @@ struct diff_likelihood { Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> void_matrix; return void_matrix; @@ -100,7 +157,7 @@ struct diff_likelihood { diff2_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> void_matrix; return void_matrix; diff --git a/stan/math/laplace/laplace_likelihood_poisson_log.hpp b/stan/math/laplace/laplace_likelihood_poisson_log.hpp index f158d42b7ef..e0931670a02 100644 --- a/stan/math/laplace/laplace_likelihood_poisson_log.hpp +++ b/stan/math/laplace/laplace_likelihood_poisson_log.hpp @@ -2,6 +2,7 @@ #define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_POISSON_LOG_HPP #include +#include namespace stan { namespace math { @@ -73,12 +74,20 @@ struct diff_poisson_log { void diff (const Eigen::Matrix& theta, const Eigen::Matrix& eta_dummy, Eigen::Matrix& gradient, - Eigen::Matrix& hessian) const { + // Eigen::Matrix& hessian, + Eigen::SparseMatrix& hessian, + int hessian_block_size = 1) + const { + int theta_size = theta.size(); Eigen::Matrix common_term = n_samples_.cwiseProduct(exp(theta + log_exposure_)); gradient = sums_ - common_term; - hessian = - common_term; + hessian.resize(theta_size, theta_size); + hessian.reserve(Eigen::VectorXi::Constant(theta_size, hessian_block_size)); + // hessian.col(0) = - common_term; + for (int i = 0; i < theta_size; i++) + hessian.insert(i, i) = - common_term(i); } /** @@ -97,11 +106,21 @@ struct diff_poisson_log { return -n_samples_.cwiseProduct(exp(theta + log_exposure_)); } + Eigen::VectorXd compute_s2(const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta, + const Eigen::MatrixXd& L, + const Eigen::MatrixXd& covariance, + int hessian_block_size) const { + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; + Eigen::VectorXd void_vector; + return void_vector; + } + template Eigen::Matrix, Eigen::Dynamic, 1> diff_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; return void_matrix; } @@ -110,7 +129,7 @@ struct diff_poisson_log { Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" 
<< std::endl; + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> void_matrix; return void_matrix; @@ -121,7 +140,7 @@ struct diff_poisson_log { diff2_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> void_matrix; return void_matrix; diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 7eec02b957a..c2c04fed4af 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -8,19 +8,20 @@ #include #include #include -#include +// #include #include +#include + +#include +#include +#include #include #include // CHECK -- do we need this? #include // CHECK -- do we need this? // Reference for calculations of marginal and its gradients: -// Charles C Margossian, Aki Vehtari, Daniel Simpson and Raj Agrawal -// "Hamiltonian Monte Carlo using an adjoint-differentiated -// Laplace approximation: Bayesian inference for latent Gaussian -// models and beyond." NeurIPS 2020 -// https://arxiv.org/abs/2004.12550 +// Margossian et al, 2020, https://arxiv.org/abs/2004.12550 namespace stan { @@ -83,22 +84,28 @@ namespace math { const std::vector& delta_int, Eigen::MatrixXd& covariance, Eigen::VectorXd& theta, - Eigen::VectorXd& W_root, + Eigen::SparseMatrix& W_r, + // Eigen::MatrixXd& W_root, Eigen::MatrixXd& L, Eigen::VectorXd& a, Eigen::VectorXd& l_grad, + Eigen::PartialPivLU& LU, const Eigen::VectorXd& theta_0, std::ostream* msgs = nullptr, double tolerance = 1e-6, - long int max_num_steps = 100) { + long int max_num_steps = 100, + int hessian_block_size = 1) { using Eigen::MatrixXd; using Eigen::VectorXd; + using Eigen::SparseMatrix; - int group_size = theta_0.size(); + int theta_size = theta_0.size(); covariance = covariance_function(phi, x, delta, delta_int, msgs); theta = theta_0; double objective_old = - 1e+10; // CHECK -- what value to use? double objective_new; + double B_log_determinant; + for (int i = 0; i <= max_num_steps; i++) { if (i == max_num_steps) { @@ -109,19 +116,47 @@ namespace math { } // Compute variable a. - VectorXd hessian; - diff_likelihood.diff(theta, eta, l_grad, hessian); - VectorXd W = - hessian; - W_root = sqrt(W); + SparseMatrix hessian; // VectorXd hessian; + diff_likelihood.diff(theta, eta, l_grad, hessian, hessian_block_size); + SparseMatrix W = - hessian; // VectorXd W = - hessian; + + VectorXd b; { - MatrixXd B = MatrixXd::Identity(group_size, group_size) - + quad_form_diag(covariance, W_root); - L = cholesky_decompose(B); + MatrixXd B; + if (hessian_block_size == 1) { // W_root = sqrt(W); + W_r = W.cwiseSqrt(); + B = MatrixXd::Identity(theta_size, theta_size) + + quad_form_diag(covariance, W_r.diagonal()); + + L = cholesky_decompose(B); + B_log_determinant = 2 * sum(L.diagonal().array().log()); + } else { + // TODO -- version which uses W_root? + + W_r = W; + B = MatrixXd::Identity(theta_size, theta_size) + covariance * W; + LU = Eigen::PartialPivLU(B); + + // TODO: compute log determinant directly. 
+ B_log_determinant = log(LU.determinant()); + } + + if (hessian_block_size == 1) { + b = W.diagonal().cwiseProduct(theta) + l_grad.head(theta_size); + a = b - W_r + * mdivide_left_tri(transpose(L), + mdivide_left_tri(L, + diag_pre_multiply(W_r.diagonal(), multiply(covariance, b)))); + } else { + b = W * theta + l_grad.head(theta_size); + a = b - W * LU.solve(covariance * b); + + // a = b - W_root + // * mdivide_left_tri(transpose(L), + // mdivide_left_tri(L, + // W_root * (covariance * b))); + } } - VectorXd b = W.cwiseProduct(theta) + l_grad; - a = b - W_root.asDiagonal() * mdivide_left_tri(transpose(L), - mdivide_left_tri(L, - diag_pre_multiply(W_root, multiply(covariance, b)))); // Simple Newton step theta = covariance * a; @@ -134,7 +169,7 @@ namespace math { if (objective_diff < tolerance) break; } - return objective_new - sum(L.diagonal().array().log()); + return objective_new - 0.5 * B_log_determinant; } /** @@ -184,15 +219,21 @@ namespace math { const Eigen::Matrix& theta_0, std::ostream* msgs = nullptr, double tolerance = 1e-6, - long int max_num_steps = 100) { - Eigen::VectorXd theta, W_root, a, l_grad; + long int max_num_steps = 100, + int hessian_block_size = 1) { + // Eigen::VectorXd theta, W_root, a, l_grad; + // Eigen::MatrixXd L, covariance; + Eigen::VectorXd theta, a, l_grad; Eigen::MatrixXd L, covariance; + Eigen::SparseMatrix W_r; + Eigen::PartialPivLU LU; return laplace_marginal_density(diff_likelihood, covariance_function, phi, eta, x, delta, delta_int, covariance, - theta, W_root, L, a, l_grad, + theta, W_r, L, a, l_grad, LU, value_of(theta_0), msgs, - tolerance, max_num_steps); + tolerance, max_num_steps, + hessian_block_size); } /** @@ -237,11 +278,14 @@ namespace math { double marginal_density, const Eigen::MatrixXd& covariance, const Eigen::VectorXd& theta, - const Eigen::VectorXd& W_root, + // const Eigen::MatrixXd& W_root, + const Eigen::SparseMatrix& W_r, const Eigen::MatrixXd& L, const Eigen::VectorXd& a, const Eigen::VectorXd& l_grad, - std::ostream* msgs = nullptr) + const Eigen::PartialPivLU LU, + std::ostream* msgs = nullptr, + int hessian_block_size = 1) : vari(marginal_density), phi_size_(phi.size()), phi_(ChainableStack::instance_->memalloc_.alloc_array( @@ -255,6 +299,7 @@ namespace math { using Eigen::Dynamic; using Eigen::MatrixXd; using Eigen::VectorXd; + using Eigen::SparseMatrix; int theta_size = theta.size(); for (int i = 0; i < phi_size_; i++) phi_[i] = phi(i).vi_; @@ -265,24 +310,79 @@ namespace math { marginal_density_[0] = new vari(marginal_density, false); // auto start = std::chrono::system_clock::now(); - Eigen::MatrixXd R; - { - Eigen::MatrixXd W_root_diag = W_root.asDiagonal(); - R = W_root_diag * - L.transpose().triangularView() - .solve(L.triangularView() - .solve(W_root_diag)); + MatrixXd R; + if (hessian_block_size == 1){ + MatrixXd W_root_diag = W_r; + R = W_r * L.transpose().triangularView() + .solve(L.triangularView() + .solve(W_root_diag)); + } else { + R = W_r - W_r * LU.solve(covariance * W_r); } - Eigen::MatrixXd - C = mdivide_left_tri(L, - diag_pre_multiply(W_root, covariance)); + // Eigen::MatrixXd R; + // { + // Eigen::MatrixXd W_root_diag; + // if (hessian_block_size == 1) { + // W_root_diag = W_root.col(0).asDiagonal(); + // } else { + // W_root_diag = W_root; + // } + // R = W_root_diag * + // L.transpose().triangularView() + // .solve(L.triangularView() + // .solve(W_root_diag)); + // } Eigen::VectorXd eta_dbl = value_of(eta); - // CHECK -- should there be a minus sign here? 
- Eigen::VectorXd s2 = 0.5 * (covariance.diagonal() - - (C.transpose() * C).diagonal()) - .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); + Eigen::VectorXd partial_parm; + Eigen::VectorXd s2; + + if (hessian_block_size == 1) { + Eigen::MatrixXd + C = mdivide_left_tri(L, W_r * covariance); + s2 = 0.5 * (covariance.diagonal() + - (C.transpose() * C).diagonal()) + .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); + } else { + // Eigen::MatrixXd A = covariance - C.transpose() * C; + Eigen::MatrixXd A = covariance + - covariance * W_r * LU.solve(covariance); + partial_parm + = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); + s2 = partial_parm.head(theta_size); + } + + // std::cout << "s2: " << s2.transpose() << std::endl; + + // TEST -- finite diff benchmark for s2 + // double eps = 1e-7; + // Eigen::VectorXd theta_u0 = theta, theta_l0 = theta; + // theta_u0(11) += eps; + // theta_l0(11) -= eps; + // int group_size = theta.size(); + // + // Eigen::VectorXd l_grad_store; + // SparseMatrix hessian_store; // VectorXd hessian; + // diff_likelihood.diff(theta_u0, value_of(eta), l_grad_store, + // hessian_store, hessian_block_size); + // SparseMatrix W_u0 = - hessian_store; + // diff_likelihood.diff(theta_l0, value_of(eta), l_grad_store, + // hessian_store, hessian_block_size); + // SparseMatrix W_l0 = - hessian_store; + // + // Eigen::MatrixXd B_u0 = Eigen::MatrixXd::Identity(group_size, group_size) + // + covariance * W_u0; + // Eigen::MatrixXd B_l0 = Eigen::MatrixXd::Identity(group_size, group_size) + // + covariance * W_l0; + // std::cout << "s2_finite_diff: " + // << -0.5 * (log(B_u0.determinant()) - log(B_l0.determinant())) + // / (2 * eps) << std::endl; + + // // CHECK -- should there be a minus sign here? + // Eigen::VectorXd s2 = 0.5 * (covariance.diagonal() + // - (C.transpose() * C).diagonal()) + // .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); phi_adj_ = Eigen::VectorXd(phi_size_); start_nested(); @@ -291,6 +391,7 @@ namespace math { Matrix phi_v = value_of(phi); Matrix K_var = covariance_function(phi_v, x, delta, delta_int, msgs); + Eigen::VectorXd l_grad_theta = l_grad.head(theta_size); var Z = laplace_pseudo_target(K_var, a, R, l_grad, s2); set_zero_all_adjoints_nested(); @@ -298,12 +399,38 @@ namespace math { for (int j = 0; j < phi_size_; j++) phi_adj_[j] = phi_v(j).adj(); - } catch (const std::exception& e) { - recover_memory_nested(); - throw; - } - recover_memory_nested(); + // finite diff benchmark + // double eps = 1e-7; + // Eigen::VectorXd phi_u0 = value_of(phi), phi_l0 = value_of(phi); + // phi_u0(1) += eps; + // phi_l0(1) -= eps; + // Eigen::MatrixXd K_u0 = + // covariance_function(phi_u0, x, delta, delta_int, msgs), + // K_l0 = covariance_function(phi_l0, x, delta, delta_int, msgs); + // double Z_u0 = laplace_pseudo_target(K_u0, a, R, l_grad, s2), + // Z_l0 = laplace_pseudo_target(K_l0, a, R, l_grad, s2); + // std::cout << "finite diff Z: " << (Z_u0 - Z_l0) / (2 * eps) + // << std::endl; + + } catch (const std::exception& e) { + recover_memory_nested(); + throw; + } + recover_memory_nested(); + eta_adj_ = Eigen::VectorXd(eta_size_); + if (eta_size_ != 0) { // TODO: instead, check if eta contains var. + VectorXd diff_eta = l_grad.tail(eta_size_); + + Eigen::VectorXd v = (Eigen::MatrixXd::Identity(theta_size, theta_size) + - R) * (covariance * s2); + + eta_adj_ = l_grad.tail(eta_size_) + partial_parm.tail(eta_size_) + + diff_likelihood.diff_eta_implicit(v, theta, eta_dbl); + } + +// TODO: reimplement eta case. 
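The commented-out benchmark above checks s2 against -0.5 * d/dtheta_j log det(I + K * W(theta)). A self-contained version of that check is sketched below; it assumes, for concreteness, a Poisson log likelihood so that W(theta) = diag(exp(theta)), uses made-up sizes and values, and is not part of the patch.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// -0.5 * log det(I + K * diag(exp(theta))): the quantity whose gradient in
// theta the benchmark compares against s2.
double neg_half_log_det_B(const Eigen::MatrixXd& K,
                          const Eigen::VectorXd& theta) {
  int n = theta.size();
  Eigen::MatrixXd B = Eigen::MatrixXd::Identity(n, n)
      + K * theta.array().exp().matrix().asDiagonal();
  return -0.5
      * B.partialPivLu().matrixLU().diagonal().array().abs().log().sum();
}

int main() {
  int n = 4;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd K = A * A.transpose() + Eigen::MatrixXd::Identity(n, n);
  Eigen::VectorXd theta = Eigen::VectorXd::Random(n);
  Eigen::VectorXd exp_theta = theta.array().exp().matrix();

  // Analytic: s2_j = -0.5 * [(I + K W)^{-1} K]_{jj} * exp(theta_j),
  // since dW/dtheta_j = exp(theta_j) e_j e_j^T for this likelihood.
  Eigen::MatrixXd IplusKW
      = Eigen::MatrixXd::Identity(n, n) + K * exp_theta.asDiagonal();
  Eigen::MatrixXd M = IplusKW.partialPivLu().solve(K);
  Eigen::VectorXd s2 = -0.5 * M.diagonal().cwiseProduct(exp_theta);

  // Central finite differences, as in the commented-out benchmark.
  double eps = 1e-6;
  for (int j = 0; j < n; ++j) {
    Eigen::VectorXd u = theta, l = theta;
    u(j) += eps;
    l(j) -= eps;
    double fd
        = (neg_half_log_det_B(K, u) - neg_half_log_det_B(K, l)) / (2 * eps);
    std::cout << "j = " << j << ": analytic " << s2(j)
              << ", finite diff " << fd << "\n";
  }
}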
+/* eta_adj_ = Eigen::VectorXd(eta_size_); if (eta_size_ != 0) { VectorXd diff_eta = diff_likelihood.diff_eta(theta, eta_dbl); @@ -321,14 +448,14 @@ namespace math { eta_adj_(l) = diff_eta(l) + 0.5 * (W_root_inv.asDiagonal() * R * (covariance * elt_divide(diff2_theta_eta.col(l), W_root).asDiagonal())).trace() + + s2.dot(s3); // + 0.5 * (L.transpose().triangularView() // .solve(L.triangularView() // .solve(W_root.asDiagonal() * covariance * elt_divide( // diff2_theta_eta.col(l), W_root).asDiagonal() // ))).trace() - + s2.dot(s3); } - } + } */ // auto end = std::chrono::system_clock::now(); // std::chrono::duration time = end - ; @@ -338,30 +465,34 @@ namespace math { // and then following R&W's scheme. /* = std::chrono::system_clock::now(); - covariance_sensitivities f(x, delta, delta_int, - covariance_function, msgs); - Eigen::MatrixXd diff_cov; - { - Eigen::VectorXd covariance_vector; - jacobian_fwd(f, value_of(phi), covariance_vector, diff_cov); - // covariance = to_matrix(covariance_vector, theta_size, theta_size); - } - - phi_adj_ = Eigen::VectorXd(phi_size_); - - for (int j = 0; j < phi_size_; j++) { - Eigen::VectorXd j_col = diff_cov.col(j); - C = to_matrix(j_col, theta_size, theta_size); - double s1 = 0.5 * quad_form(C, a) - 0.5 * sum((R * C).diagonal()); - Eigen::VectorXd b = C * l_grad; - Eigen::VectorXd s3 = b - covariance * (R * b); - // std::cout << "old Z: " << s1 + s2.dot(s3) << std::endl; - phi_adj_[j] = s1 + s2.dot(s3); - } - end = std::chrono::system_clock::now(); - time = end - ; - std::cout << "Former diff: " << time.count() << std::endl; */ + // covariance_sensitivities f(x, delta, delta_int, + // covariance_function, msgs); + // Eigen::MatrixXd diff_cov; + // { + // Eigen::VectorXd covariance_vector; + // jacobian_fwd(f, value_of(phi), covariance_vector, diff_cov); + // // covariance = to_matrix(covariance_vector, theta_size, theta_size); + // } + // + // phi_adj_ = Eigen::VectorXd(phi_size_); + // + // std::cout << "phi_adj: "; + // for (int j = 0; j < phi_size_; j++) { + // Eigen::VectorXd j_col = diff_cov.col(j); + // Eigen::MatrixXd C = to_matrix(j_col, theta_size, theta_size); + // double s1 = 0.5 * quad_form(C, a) - 0.5 * sum((R * C).diagonal()); + // Eigen::VectorXd b = C * l_grad; + // Eigen::VectorXd s3 = b - covariance * (R * b); + // // std::cout << "old Z: " << s1 + s2.dot(s3) << std::endl; + // phi_adj_[j] = s1 + s2.dot(s3); + // std::cout << phi_adj_[j] << " "; + // } + // std::cout << std::endl; + + // end = std::chrono::system_clock::now(); + // time = end - ; + // std::cout << "Former diff: " << time.count() << std::endl; } void chain() { @@ -418,11 +549,16 @@ namespace math { const Eigen::Matrix& theta_0, std::ostream* msgs = nullptr, double tolerance = 1e-6, - long int max_num_steps = 100) { - Eigen::VectorXd theta, W_root, a, l_grad; + long int max_num_steps = 100, + int hessian_block_size = 1) { + // Eigen::VectorXd theta, W_root, a, l_grad; + Eigen::VectorXd theta, a, l_grad; + // Eigen::MatrixXd W_root; + Eigen::SparseMatrix W_root; Eigen::MatrixXd L; double marginal_density_dbl; Eigen::MatrixXd covariance; + Eigen::PartialPivLU LU; // TEST // auto start = std::chrono::system_clock::now(); @@ -432,10 +568,11 @@ namespace math { covariance_function, value_of(phi), value_of(eta), x, delta, delta_int, covariance, - theta, W_root, L, a, l_grad, + theta, W_root, L, a, l_grad, LU, value_of(theta_0), msgs, - tolerance, max_num_steps); + tolerance, max_num_steps, + hessian_block_size); // TEST // auto end = std::chrono::system_clock::now(); @@ -452,8 
+589,8 @@ namespace math { phi, eta, x, delta, delta_int, marginal_density_dbl, covariance, - theta, W_root, L, a, l_grad, - msgs); + theta, W_root, L, a, l_grad, LU, + msgs, hessian_block_size); var marginal_density = var(vi0->marginal_density_[0]); diff --git a/stan/math/laplace/laplace_marginal_poisson_log.hpp b/stan/math/laplace/laplace_marginal_poisson_log.hpp index d9cc4b7879a..e3800519806 100644 --- a/stan/math/laplace/laplace_marginal_poisson_log.hpp +++ b/stan/math/laplace/laplace_marginal_poisson_log.hpp @@ -2,7 +2,7 @@ #define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_LOG_HPP #include -#include +#include namespace stan { namespace math { diff --git a/stan/math/laplace/partial_diff_theta.hpp b/stan/math/laplace/partial_diff_theta.hpp new file mode 100644 index 00000000000..5d8f3b9c291 --- /dev/null +++ b/stan/math/laplace/partial_diff_theta.hpp @@ -0,0 +1,95 @@ +#ifndef STAN_MATH_LAPLACE_PARTIAL_DIFF_THETA_HPP +#define STAN_MATH_LAPLACE_PARTIAL_DIFF_THETA_HPP + +// TODO: refine include. +#include +#include + +namespace stan { +namespace math { + /** + * Returns the partial derivative of the approximate marginal + * distribution with respect to theta and eta. + * The derivative with respect to theta is denoted s2 in + * laplace_marginal.hpp. + */ + // TODO: rename function, since we also differentiate wrt eta. + // TODO: address case where eta / theta are doubles and we don't + // want full derivatives. + template + Eigen::VectorXd partial_diff_theta(const F& f, + const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + const Eigen::MatrixXd& A, + int hessian_block_size, + std::ostream* pstream = 0) { + using Eigen::VectorXd; + using Eigen::Matrix; + using Eigen::MatrixXd; + using Eigen::Dynamic; + + nested_rev_autodiff nested; + int theta_size = theta.size(); + int eta_size = eta.size(); + int parm_size = theta_size + eta_size; + // Matrix parm_var(parm_size); + // for (int i = 0; i < theta_size; i++) parm_var(i) = theta(i); + // for (int i = 0; i < eta_size; i++) parm_var(i + theta_size) = eta(i); + Matrix theta_var = theta; + Matrix eta_var = eta; + int n_blocks = theta_size / hessian_block_size; + + fvar> target_ffvar = 0; + + for (int i = 0; i < hessian_block_size; ++i) { + VectorXd v = VectorXd::Zero(theta_size); + for (int j = i; j < theta_size; j += hessian_block_size) v(j) = 1; + + Matrix, Dynamic, 1> theta_fvar(theta_size); + for (int j = 0; j < theta_size; ++j) + theta_fvar(j) = fvar(theta_var(j), v(j)); + + Matrix, Dynamic, 1> eta_fvar(eta_size); + for (int j = 0; j < eta_size; ++j) eta_fvar(j) = fvar(eta_var(j), 0); + + fvar f_fvar = f(theta_fvar, eta_fvar, delta, delta_int, pstream); + + VectorXd w(theta_size); + for (int j = 0; j < n_blocks; ++j) { + for (int k = 0; k < hessian_block_size; ++k) { + w(k + j * hessian_block_size) = A(k + j * hessian_block_size, + i + j * hessian_block_size); + } + } + + Matrix>, Dynamic, 1> theta_ffvar(theta_size); + for (int j = 0; j < theta_size; ++j) + theta_ffvar(j) = fvar>(theta_fvar(j), w(j)); + + Matrix>, Dynamic, 1> eta_ffvar(eta_size); + for (int j = 0; j < eta_size; ++j) + eta_ffvar(j) = fvar>(eta_fvar(j), 0); + + target_ffvar += + f(theta_ffvar, eta_ffvar, delta, delta_int, pstream); + } + grad(target_ffvar.d_.d_.vi_); + + VectorXd parm_adj(parm_size); + for (int i = 0; i < theta_size; ++i) parm_adj(i) = theta_var(i).adj(); + for (int i = 0; i < eta_size; ++i) + parm_adj(theta_size + i) = eta_var(i).adj(); + + return 0.5 * parm_adj; + + // VectorXd 
theta_adj(theta_size); + // for (int i = 0; i < theta_size; ++i) theta_adj(i) = theta_var(i).adj(); + // return 0.5 * theta_adj; + } + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/third_diff_directional.hpp b/stan/math/laplace/third_diff_directional.hpp index ef0679b63f8..3674b1863d9 100644 --- a/stan/math/laplace/third_diff_directional.hpp +++ b/stan/math/laplace/third_diff_directional.hpp @@ -35,7 +35,7 @@ namespace math { } fvar fx_fvar = f(x_fvar, eta, delta, delta_int, pstream); - Matrix>, -1, 1> x_ffvar(x_size); + Matrix>, Dynamic, 1> x_ffvar(x_size); for (int i = 0; i < x_size; ++i) { x_ffvar(i) = fvar>(x_fvar(i), w(i)); } diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index cb82f4a363b..9f568f35528 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -73,12 +73,14 @@ class laplace_disease_map_test : public::testing::Test { TEST_F(laplace_disease_map_test, lk_analytical) { + // Based on (Vanhatalo, Pietilainen and Vethari, 2010). See // https://research.cs.aalto.fi/pml/software/gpstuff/demo_spatial1.shtml using stan::math::var; using stan::math::laplace_marginal_poisson_log_lpmf; - using stan::math::sqr_exp_kernel_functor; + +/* auto start = std::chrono::system_clock::now(); var marginal_density @@ -107,7 +109,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { // TODO(charlesm93): update signatures for rng functions. //////////////////////////////////////////////////////////////////////// // Let's now generate a sample theta from the estimated posterior - /* + using stan::math::diff_poisson_log; using stan::math::to_vector; using stan::math::sqr_exp_kernel_functor; @@ -175,7 +177,6 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { using stan::math::var; using stan::math::laplace_marginal_density; using stan::math::diff_likelihood; - using stan::math::sqr_exp_kernel_functor; Eigen::VectorXd delta_lk(2 * n_observations); for (int i = 0; i < n_observations; i++) delta_lk(i) = y[i]; @@ -186,16 +187,34 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { diff_functor(f, delta_lk, n_samples); auto start = std::chrono::system_clock::now(); - Eigen::Matrix eta_dummy; - var marginal_density + int hessian_block_size = 1; + double marginal_density_dbl = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), phi, eta_dummy, - x, delta, delta_int, theta_0); + sqr_exp_kernel_functor(), + value_of(phi), value_of(eta_dummy), + x, delta, delta_int, theta_0, + 0, 1e-6, 100, hessian_block_size); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; + std::cout << "LAPLACE MARGINAL (dbl)" << std::endl + << "hessian block size: " << hessian_block_size << std::endl + << "density: " << marginal_density_dbl << std::endl + << "total time: " << elapsed_time.count() << std::endl; + + start = std::chrono::system_clock::now(); + + var marginal_density + = laplace_marginal_density(diff_functor, + sqr_exp_kernel_functor(), phi, eta_dummy, + x, delta, delta_int, theta_0, + 0, 1e-6, 100, hessian_block_size); + + end = std::chrono::system_clock::now(); + elapsed_time = end - start; + VEC g; AVEC parm_vec = createAVEC(phi(0), phi(1)); marginal_density.grad(parm_vec, g); @@ -209,3 +228,61 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { // previous iteration. 
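The seeding loop in partial_diff_theta above works because the likelihood Hessian is block diagonal: one sweep per column-within-block, so hessian_block_size sweeps in total, touches every block with no cross-talk between blocks. The standalone illustration below replaces the nested forward-mode sweeps with plain Hessian-vector products against an explicit block-diagonal matrix; block size, dimensions, and values are made up.

#include <Eigen/Dense>
#include <iostream>

int main() {
  const int block = 2;              // plays the role of hessian_block_size
  const int n_blocks = 3;
  const int n = block * n_blocks;

  // A block-diagonal H, standing in for the likelihood Hessian.
  Eigen::MatrixXd H = Eigen::MatrixXd::Zero(n, n);
  for (int j = 0; j < n_blocks; ++j) {
    Eigen::MatrixXd blk = Eigen::MatrixXd::Random(block, block);
    H.block(j * block, j * block, block, block) = blk + blk.transpose();
  }

  // Recover H from only `block` products H * v, with v seeded at every
  // block-th entry, the same pattern as the fvar seeds above.
  Eigen::MatrixXd H_recovered = Eigen::MatrixXd::Zero(n, n);
  for (int i = 0; i < block; ++i) {
    Eigen::VectorXd v = Eigen::VectorXd::Zero(n);
    for (int j = i; j < n; j += block) v(j) = 1;
    Eigen::VectorXd Hv = H * v;     // one directional sweep
    // Scatter: row k of block j goes to column i of block j.
    for (int j = 0; j < n_blocks; ++j)
      for (int k = 0; k < block; ++k)
        H_recovered(j * block + k, j * block + i) = Hv(j * block + k);
  }

  std::cout << "max abs error: "
            << (H - H_recovered).cwiseAbs().maxCoeff() << std::endl;  // ~0
}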
// Expected run time: 0.39 s } + +TEST_F(laplace_disease_map_test, finite_diff_benchmark) { + /////////////////////////////////////////////////////////////////// + // finite_diff benchmark + using stan::math::var; + using stan::math::laplace_marginal_density; + using stan::math::diff_likelihood; + + Eigen::VectorXd delta_lk(2 * n_observations); + for (int i = 0; i < n_observations; i++) delta_lk(i) = y[i]; + for (int i = 0; i < n_observations; i++) delta_lk(n_observations + i) = ye(i); + + poisson_log_likelihood f; + diff_likelihood + diff_functor(f, delta_lk, n_samples); + Eigen::Matrix eta_dummy; + + Eigen::VectorXd phi_dbl = value_of(phi); + Eigen::VectorXd phi_u0 = phi_dbl, phi_u1 = phi_dbl, + phi_l0 = phi_dbl, phi_l1 = phi_dbl; + double eps = 1e-7; + + int hessian_block_size = 1; + + phi_u0(0) += eps; + phi_u1(1) += eps; + phi_l0(0) -= eps; + phi_l1(1) -= eps; + + double target_u0 = laplace_marginal_density(diff_functor, + sqr_exp_kernel_functor(), + phi_u0, value_of(eta_dummy), + x, delta, delta_int, theta_0, + 0, 1e-6, 100, hessian_block_size), + + target_u1 = laplace_marginal_density(diff_functor, + sqr_exp_kernel_functor(), + phi_u1, value_of(eta_dummy), + x, delta, delta_int, theta_0, + 0, 1e-6, 100, hessian_block_size), + + target_l0 = laplace_marginal_density(diff_functor, + sqr_exp_kernel_functor(), + phi_l0, value_of(eta_dummy), + x, delta, delta_int, theta_0, + 0, 1e-6, 100, hessian_block_size), + + target_l1 = laplace_marginal_density(diff_functor, + sqr_exp_kernel_functor(), + phi_l1, value_of(eta_dummy), + x, delta, delta_int, theta_0, + 0, 1e-6, 100, hessian_block_size); + + std::cout << "Finite_diff benchmark: " << std::endl + << "grad: " << (target_u0 - target_l0) / (2 * eps) + << " " << (target_u1 - target_l1) / (2 * eps) + << std::endl; +} diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index deefad421cf..1a2a8010336 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -24,19 +24,21 @@ TEST(laplace, likelihood_differentiation) { diff_poisson_log diff_functor(to_vector(n_samples), to_vector(sums)); double log_density = diff_functor.log_likelihood(theta, eta_dummy); - Eigen::VectorXd gradient, hessian; + Eigen::VectorXd gradient; + Eigen::MatrixXd hessian; diff_functor.diff(theta, eta_dummy, gradient, hessian); Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); EXPECT_FLOAT_EQ(-4.436564, log_density); EXPECT_FLOAT_EQ(-1.718282, gradient(0)); EXPECT_FLOAT_EQ(-2.718282, gradient(1)); - EXPECT_FLOAT_EQ(-2.718282, hessian(0)); - EXPECT_FLOAT_EQ(-2.718282, hessian(1)); + EXPECT_FLOAT_EQ(-2.718282, hessian(0, 0)); + EXPECT_FLOAT_EQ(-2.718282, hessian(1, 0)); EXPECT_FLOAT_EQ(-2.718282, third_tensor(0)); EXPECT_FLOAT_EQ(-2.718282, third_tensor(1)); } + TEST(laplace, likelihood_differentiation2) { // Test exposure argument using stan::math::diff_poisson_log; @@ -54,15 +56,16 @@ TEST(laplace, likelihood_differentiation2) { to_vector(log_exposure)); double log_density = diff_functor.log_likelihood(theta, eta_dummy); - Eigen::VectorXd gradient, hessian; + Eigen::VectorXd gradient; + Eigen::MatrixXd hessian; diff_functor.diff(theta, eta_dummy, gradient, hessian); Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); EXPECT_FLOAT_EQ(-6.488852, log_density); EXPECT_FLOAT_EQ(-0.3591409, gradient(0)); EXPECT_FLOAT_EQ(-5.4365637, gradient(1)); - 
EXPECT_FLOAT_EQ(-1.359141, hessian(0)); - EXPECT_FLOAT_EQ(-5.436564, hessian(1)); + EXPECT_FLOAT_EQ(-1.359141, hessian(0, 0)); + EXPECT_FLOAT_EQ(-5.436564, hessian(1, 0)); EXPECT_FLOAT_EQ(-1.359141, third_tensor(0)); EXPECT_FLOAT_EQ(-5.436564, third_tensor(1)); diff --git a/test/unit/math/laplace/laplace_utility.hpp b/test/unit/math/laplace/laplace_utility.hpp index 90c3539a5f9..559b7f92a39 100644 --- a/test/unit/math/laplace/laplace_utility.hpp +++ b/test/unit/math/laplace/laplace_utility.hpp @@ -73,6 +73,28 @@ struct squared_kernel_functor { } }; +// TO DO: delete this structure. +// To experiment with the prototype, provide a built-in covariance +// function. In the final version, the user will pass the covariance +// function. +struct sqr_exp_kernel_functor { + template + Eigen::Matrix + operator() (const Eigen::Matrix& phi, + const T2& x, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* msgs = nullptr) const { + double jitter = 1e-8; + Eigen::Matrix + kernel = stan::math::gp_exp_quad_cov(x, phi(0), phi(1)); + for (int i = 0; i < kernel.cols(); i++) + kernel(i, i) += jitter; + + return kernel; + } +}; + // Naive implementation of the functor (a smarter implementation // precomputes the covariance matrix). struct inla_functor { diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp new file mode 100755 index 00000000000..1e22eafe8e6 --- /dev/null +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -0,0 +1,191 @@ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +// Example from +// https://avehtari.github.io/casestudies/Motorcycle/motorcycle.html + +struct covariance_motorcycle_functor { + template + Eigen::Matrix + operator() (const Eigen::Matrix& phi, + const T2& x, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* msgs = nullptr) const { + using Eigen::Matrix; + using stan::math::gp_exp_quad_cov; + + T1 length_scale_f = phi(0); + T1 length_scale_g = phi(1); + T1 sigma_f = phi(2); + T1 sigma_g = phi(3); + int n_obs = delta_int[0]; + + double jitter = 1e-8; + Matrix kernel_f = gp_exp_quad_cov(x, sigma_f, length_scale_f); + Matrix kernel_g = gp_exp_quad_cov(x, sigma_g, length_scale_g); + + Matrix kernel_all + = Eigen::MatrixXd::Zero(2 * n_obs, 2 * n_obs); + for (int i = 0; i < n_obs; i++) { + for (int j = 0; j <= i; j++) { + kernel_all(2 * i, 2 * j) = kernel_f(i, j); + kernel_all(2 * i + 1, 2 * j + 1) = kernel_g(i, j); + if (i != j) { + kernel_all(2 * j, 2 * i) = kernel_all(2 * i, 2 * j); + kernel_all(2 * j + 1, 2 * i + 1) = kernel_all(2 * i + 1, 2 * j + 1); + } + } + } + return kernel_all; + } +}; + +struct normal_likelihood { + template + stan::return_type_t + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::VectorXd& y, + const std::vector& delta_int, + std::ostream* pstream) const { + int n_obs = delta_int[0]; + Eigen::Matrix mu(n_obs); + Eigen::Matrix sigma(n_obs); + for (int i = 0; i < n_obs; i++) { + mu(i) = theta(2 * i); + sigma(i) = exp(0.5 * theta(2 * i + 1)); + } + + return stan::math::normal_lpdf(y, mu, sigma); + } +}; + +class laplace_motorcyle_gp_test : public::testing::Test { +protected: + void SetUp() override { + using stan::math::value_of; + using stan::math::gp_exp_quad_cov; + + n_obs = 6; + Eigen::VectorXd x_vec(n_obs); + x_vec << 2.4, 2.6, 3.2, 3.6, 4.0, 6.2; + x.resize(n_obs); + for (int i = 0; i < n_obs; i++) x[i] = x_vec(i); + y.resize(n_obs); + // y << 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0; + y << 0.0, -1.3, -2.7, 0.0, -2.7, -2.7; + + length_scale_f = 0.3; + length_scale_g = 0.5; + sigma_f = 0.25; + sigma_g = 0.25; + + phi.resize(4); + phi << length_scale_f, length_scale_g, sigma_f, sigma_g; + + delta_int.resize(1); + delta_int[0] = n_obs; + + theta0 = Eigen::VectorXd::Zero(2 * n_obs); + // theta0 << -10, 0, -10, 0, -10, 0, -10, + // 0, -10, 0, -10, 0; + + Eigen::MatrixXd + K_plus_I = gp_exp_quad_cov(x, value_of(sigma_f), value_of(length_scale_f)) + + Eigen::MatrixXd::Identity(n_obs, n_obs); + + Eigen::VectorXd mu_hat + = K_plus_I.colPivHouseholderQr().solve(y); + + for (int i = 0; i < n_obs; i++) { + theta0(2 * i) = mu_hat(i); + theta0(2 * i + 1) = 0; + } + } + + int n_obs; + std::vector x; + Eigen::VectorXd y; + + stan::math::var length_scale_f; + stan::math::var length_scale_g; + stan::math::var sigma_f; + stan::math::var sigma_g; + Eigen::Matrix phi; + std::vector delta_int; + std::vector delta_dummy; + Eigen::VectorXd theta0; + Eigen::VectorXd eta_dummy_dbl; + Eigen::Matrix eta_dummy; +}; + +TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { + using stan::math::var; + using stan::math::value_of; + using stan::math::laplace_marginal_density; + using stan::math::diff_likelihood; + + normal_likelihood f; + + // std::cout << f(theta0, eta_dummy_dbl, y, delta_int, 0) << std::endl; + + diff_likelihood diff_functor(f, y, delta_int); + + int hessian_block_size = 2; + double marginal_density_dbl + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + value_of(phi), eta_dummy_dbl, + x, delta_dummy, delta_int, theta0, + 0, 1e-8, 100, hessian_block_size); + + std::cout << "density: " << marginal_density_dbl << std::endl; + + var marginal_density + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + phi, eta_dummy, + x, delta_dummy, delta_int, theta0, + 0, 1e-8, 100, hessian_block_size); + + VEC g; + AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3)); + marginal_density.grad(parm_vec, g); + std::cout << "grad: " << g[0] << " " << g[1] << " " << g[2] << " " << g[3] + << std::endl; + + // FINITE DIFF benchmark + double eps = 1e-7; + Eigen::VectorXd phi_dbl = value_of(phi); + Eigen::VectorXd phi_u0 = phi_dbl, phi_l0 = phi_dbl; + phi_u0(3) += eps; + phi_l0(3) -= eps; + + double target_u0 + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + phi_u0, eta_dummy_dbl, + x, delta_dummy, delta_int, theta0, + 0, 1e-6, 100, hessian_block_size); + + + double target_l0 + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + phi_l0, eta_dummy_dbl, + x, delta_dummy, delta_int, theta0, + 0, 1e-6, 100, hessian_block_size); + + std::cout << "g[0]: " << (target_u0 - target_l0) / (2 * eps) << std::endl; +} diff --git a/test/unit/math/laplace/sparse_matrix_test.cpp b/test/unit/math/laplace/sparse_matrix_test.cpp new file mode 100755 index 00000000000..bf815516064 --- /dev/null +++ b/test/unit/math/laplace/sparse_matrix_test.cpp @@ -0,0 +1,141 @@ +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + + +TEST(sparse_matrix, eigen_example) { + typedef Eigen::Triplet trp; + using Eigen::SparseMatrix; + using Eigen::VectorXi; + using Eigen::MatrixXd; + + int m = 2; // size of each block + + std::vector triplet_list(8); + triplet_list[0] = trp(0, 0, 4); + triplet_list[1] = trp(0, 1, 3); + triplet_list[2] = trp(1, 0, 3); + triplet_list[3] = trp(1, 1, 6); + triplet_list[4] = trp(2, 2, 4); + triplet_list[5] = trp(2, 3, 2); + triplet_list[6] = trp(3, 2, 7); 
+ triplet_list[7] = trp(3, 3, 8); + + SparseMatrix A(4, 4); + A.setFromTriplets(triplet_list.begin(), triplet_list.end()); + + std::cout << "A: " << A << std::endl; + + // Alternatively, can construct a matrix without using triplets. + SparseMatrix B(4, 4); + B.reserve(VectorXi::Constant(B.cols(), 2)); + B.insert(0, 0) = 1; + B.insert(0, 1) = 3; + B.insert(1, 0) = 5; + B.insert(1, 1) = 6; + B.insert(2, 2) = 4; + B.insert(2, 3) = 2; + B.insert(3, 2) = 7; + B.insert(3, 3) = 8; + B.makeCompressed(); + + std::cout << "B: " << B << std::endl; + + // If storage order mathces, we can sparse matrices. + std::cout << "A + B: " << A + B << std::endl; + + std::cout << "A * B: " << A * B << std::endl; + + MatrixXd C(4, 4); + C << 1, 3, 4, 5, + 3, 4, 4, 1, + 8, 1, 0, 12, + 3, 4, 5, 1; + + std::cout << "A * C: " << A * C << std::endl; + + SparseMatrix sqrt_A = stan::math::block_matrix_sqrt(A, 2); + + std::cout << "sqrt(A): " << sqrt_A << std::endl; + + std::cout << "Check we recover A: " << sqrt_A * sqrt_A << std::endl; + + + SparseMatrix D(4, 4); + + std::cout << "sqrt(D): " << stan::math::block_matrix_sqrt(D, 2) << std::endl; + + MatrixXd E = MatrixXd::Zero(4, 4); + std::cout << "sqrt(E): " << E.sqrt() << std::endl; +} + +TEST(LU_decomposition, eigen_example) { + using namespace Eigen; + // typedef Matrix Matrix5x3; + typedef Matrix Matrix5x5; + using std::cout; + using std::endl; + + Matrix5x5 m = Matrix5x5::Random(); + cout << "Here is the matrix m:" << endl << m << endl; + Eigen::FullPivLU lu(m); + cout << "Here is, up to permutations, its LU decomposition matrix:" + << endl << lu.matrixLU() << endl; + cout << "Here is the L part:" << endl; + // Matrix5x5 l = Matrix5x5::Identity(); + // l.block<5,3>(0,0).triangularView() = lu.matrixLU(); + Eigen::MatrixXd l(5, 5); + // l.block<5, 3>(0, 0).triangularView() = lu.matrixLU(); + l.triangularView() = lu.matrixLU(); + cout << l << endl; + cout << "Here is the U part:" << endl; + Matrix5x5 u = lu.matrixLU().triangularView(); + cout << u << endl; + // cout << "Here are the two triangular matrices we would store: " << endl; + // cout << lu.permutationP().inverse() * l << endl; + // cout << u * lu.permutationQ().inverse() << endl; + cout << "Let us now reconstruct the original matrix m:" << endl; + cout << lu.permutationP().inverse() * l * u * lu.permutationQ().inverse() + << endl; + + Eigen::MatrixXd L(5, 5); + L.triangularView() = lu.permutationP().inverse() * l; + std::cout << "L: " << L << std::endl; + + + Eigen::MatrixXd U(5, 5); + U = u * lu.permutationQ().inverse(); + std::cout << "U: " << U << std::endl; + + cout << "Determinant through decomposition: " + << l.determinant() * u.determinant() << std::endl + << "Determinant through direct comp: " << m.determinant() << std::endl; +} + +TEST(LU_decomposition, eigen_example_2) { + using Eigen::MatrixXd; + + MatrixXd A = MatrixXd::Random(3, 3); + MatrixXd B = MatrixXd::Random(3, 2); + std::cout << "Here is matrix A:" << std::endl << A << std::endl; + std::cout << "Here us matrix B:" << std::endl << B << std::endl; + + Eigen::PartialPivLU LU; + LU = Eigen::PartialPivLU(A); + std::cout << "LU determinant: " << LU.determinant() << std::endl; + std::cout << "A determinant: " << A.determinant() << std::endl; + + std::cout << "A.solve(B): " << std::endl + << LU.solve(B) << std::endl; + + std::cout << "Check solution: " << std::endl + << A * LU.solve(B) << std::endl; +} From e95375baab45a7cad233d638712ac5f7440cfcbd Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 16 Mar 2021 15:05:47 -0400 Subject: 
[PATCH 28/53] prototype eta differentiation. --- stan/math/laplace/laplace_marginal.hpp | 285 ++++++------------ test/unit/math/laplace/motorcycle_gp_test.cpp | 109 ++++++- 2 files changed, 189 insertions(+), 205 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index c2c04fed4af..e201046b015 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -85,7 +85,6 @@ namespace math { Eigen::MatrixXd& covariance, Eigen::VectorXd& theta, Eigen::SparseMatrix& W_r, - // Eigen::MatrixXd& W_root, Eigen::MatrixXd& L, Eigen::VectorXd& a, Eigen::VectorXd& l_grad, @@ -94,7 +93,8 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, - int hessian_block_size = 1) { + int hessian_block_size = 0, + int compute_W_root = 1) { using Eigen::MatrixXd; using Eigen::VectorXd; using Eigen::SparseMatrix; @@ -106,6 +106,14 @@ namespace math { double objective_new; double B_log_determinant; + if (hessian_block_size == 0 && compute_W_root == 0) { + std::ostringstream message; + message << "laplace_marginal_density: if treating the Hessian as diagonal" + << " we assume its matrix square-root can be computed." + << " If you don't want to compute the matrix square-root," + << " set hessian_block_size to 1."; + throw boost::math::evaluation_error(message.str()); + } for (int i = 0; i <= max_num_steps; i++) { if (i == max_num_steps) { @@ -115,46 +123,50 @@ namespace math { throw boost::math::evaluation_error(message.str()); } - // Compute variable a. - SparseMatrix hessian; // VectorXd hessian; - diff_likelihood.diff(theta, eta, l_grad, hessian, hessian_block_size); - SparseMatrix W = - hessian; // VectorXd W = - hessian; + SparseMatrix W; + diff_likelihood.diff(theta, eta, l_grad, W, hessian_block_size); + W = - W; VectorXd b; { MatrixXd B; - if (hessian_block_size == 1) { // W_root = sqrt(W); - W_r = W.cwiseSqrt(); - B = MatrixXd::Identity(theta_size, theta_size) - + quad_form_diag(covariance, W_r.diagonal()); + if (compute_W_root) { + if (hessian_block_size == 0) { + W_r = W.cwiseSqrt(); + B = MatrixXd::Identity(theta_size, theta_size) + + quad_form_diag(covariance, W_r.diagonal()); + } else { + W_r = block_matrix_sqrt(W, hessian_block_size); + B = MatrixXd::Identity(theta_size, theta_size) + + W_r * (covariance * W_r); + } L = cholesky_decompose(B); B_log_determinant = 2 * sum(L.diagonal().array().log()); - } else { - // TODO -- version which uses W_root? - W_r = W; - B = MatrixXd::Identity(theta_size, theta_size) + covariance * W; - LU = Eigen::PartialPivLU(B); + if (hessian_block_size == 0) { + b = W.diagonal().cwiseProduct(theta) + l_grad.head(theta_size); + a = b - W_r + * mdivide_left_tri(transpose(L), + mdivide_left_tri(L, + diag_pre_multiply(W_r.diagonal(), multiply(covariance, b)))); + } else { + b = W * theta + l_grad.head(theta_size); + a = b - W_r + * mdivide_left_tri(transpose(L), + mdivide_left_tri(L, + W_r * (covariance * b))); + } + } else { + W_r = W; + B = MatrixXd::Identity(theta_size, theta_size) + covariance * W; + LU = Eigen::PartialPivLU(B); - // TODO: compute log determinant directly. - B_log_determinant = log(LU.determinant()); - } + // TODO: compute log determinant directly. 
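Both branches above compute the same Newton update, theta_new = (K^{-1} + W)^{-1} b: the compute_W_root branch through a Cholesky factorization of the symmetrized B = I + W^{1/2} K W^{1/2}, the other through an LU factorization of the unsymmetric B = I + K W (needed when no usable square root of W is available). The log determinant remark after the earlier version of this TODO applies here as well. A quick standalone check of the equivalence, assuming a positive diagonal W and made-up inputs (not patch code):

#include <Eigen/Dense>
#include <iostream>

int main() {
  const int n = 5;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd K = A * A.transpose() + Eigen::MatrixXd::Identity(n, n);
  Eigen::VectorXd W = Eigen::VectorXd::Random(n).cwiseAbs();  // diag of W > 0
  Eigen::VectorXd b = Eigen::VectorXd::Random(n);
  Eigen::VectorXd Kb = K * b;

  // Branch 1: Cholesky of B = I + W^{1/2} K W^{1/2}.
  Eigen::VectorXd W_root = W.cwiseSqrt();
  Eigen::MatrixXd B1 = Eigen::MatrixXd::Identity(n, n)
      + W_root.asDiagonal() * K * W_root.asDiagonal();
  Eigen::LLT<Eigen::MatrixXd> llt(B1);
  Eigen::VectorXd inner = llt.solve(W_root.cwiseProduct(Kb));
  Eigen::VectorXd a1 = b - W_root.cwiseProduct(inner);

  // Branch 2: LU of B = I + K W.
  Eigen::MatrixXd B2 = Eigen::MatrixXd::Identity(n, n) + K * W.asDiagonal();
  Eigen::PartialPivLU<Eigen::MatrixXd> lu(B2);
  Eigen::VectorXd lu_solve = lu.solve(Kb);
  Eigen::VectorXd a2 = b - W.cwiseProduct(lu_solve);

  // Both give theta_new = K * a = (K^{-1} + W)^{-1} b.
  Eigen::MatrixXd W_dense = Eigen::MatrixXd::Zero(n, n);
  W_dense.diagonal() = W;
  Eigen::MatrixXd precision = K.inverse() + W_dense;
  Eigen::VectorXd direct = precision.ldlt().solve(b);

  std::cout << "||K a1 - K a2||   = " << (K * a1 - K * a2).norm() << "\n"
            << "||K a1 - direct|| = " << (K * a1 - direct).norm() << std::endl;
}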
+ B_log_determinant = log(LU.determinant()); - if (hessian_block_size == 1) { - b = W.diagonal().cwiseProduct(theta) + l_grad.head(theta_size); - a = b - W_r - * mdivide_left_tri(transpose(L), - mdivide_left_tri(L, - diag_pre_multiply(W_r.diagonal(), multiply(covariance, b)))); - } else { b = W * theta + l_grad.head(theta_size); a = b - W * LU.solve(covariance * b); - - // a = b - W_root - // * mdivide_left_tri(transpose(L), - // mdivide_left_tri(L, - // W_root * (covariance * b))); } } @@ -220,9 +232,8 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, - int hessian_block_size = 1) { - // Eigen::VectorXd theta, W_root, a, l_grad; - // Eigen::MatrixXd L, covariance; + int hessian_block_size = 0, + int compute_W_root = 1) { Eigen::VectorXd theta, a, l_grad; Eigen::MatrixXd L, covariance; Eigen::SparseMatrix W_r; @@ -233,7 +244,8 @@ namespace math { theta, W_r, L, a, l_grad, LU, value_of(theta_0), msgs, tolerance, max_num_steps, - hessian_block_size); + hessian_block_size, + compute_W_root); } /** @@ -285,7 +297,8 @@ namespace math { const Eigen::VectorXd& l_grad, const Eigen::PartialPivLU LU, std::ostream* msgs = nullptr, - int hessian_block_size = 1) + int hessian_block_size = 0, + int compute_W_root = 1) : vari(marginal_density), phi_size_(phi.size()), phi_(ChainableStack::instance_->memalloc_.alloc_array( @@ -309,109 +322,55 @@ namespace math { marginal_density_[0] = this; marginal_density_[0] = new vari(marginal_density, false); - // auto start = std::chrono::system_clock::now(); MatrixXd R; - if (hessian_block_size == 1){ + Eigen::MatrixXd LU_solve_covariance; + Eigen::VectorXd eta_dbl = value_of(eta); + Eigen::VectorXd partial_parm; + Eigen::VectorXd s2; + + if (compute_W_root == 1) { MatrixXd W_root_diag = W_r; R = W_r * L.transpose().triangularView() .solve(L.triangularView() .solve(W_root_diag)); - } else { - R = W_r - W_r * LU.solve(covariance * W_r); - } - - // Eigen::MatrixXd R; - // { - // Eigen::MatrixXd W_root_diag; - // if (hessian_block_size == 1) { - // W_root_diag = W_root.col(0).asDiagonal(); - // } else { - // W_root_diag = W_root; - // } - // R = W_root_diag * - // L.transpose().triangularView() - // .solve(L.triangularView() - // .solve(W_root_diag)); - // } - - Eigen::VectorXd eta_dbl = value_of(eta); - Eigen::VectorXd partial_parm; - Eigen::VectorXd s2; - if (hessian_block_size == 1) { - Eigen::MatrixXd - C = mdivide_left_tri(L, W_r * covariance); - s2 = 0.5 * (covariance.diagonal() - - (C.transpose() * C).diagonal()) - .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); + Eigen::MatrixXd C = mdivide_left_tri(L, W_r * covariance); + if (hessian_block_size == 0 && eta_size_ == 0) { + s2 = 0.5 * (covariance.diagonal() + - (C.transpose() * C).diagonal()) + .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); + } else { + int block_size = (hessian_block_size == 0) ? 
hessian_block_size + 1 + : hessian_block_size; + Eigen::MatrixXd A = covariance - C.transpose() * C; + partial_parm + = diff_likelihood.compute_s2(theta, eta_dbl, A, block_size); + s2 = partial_parm.head(theta_size); + } } else { - // Eigen::MatrixXd A = covariance - C.transpose() * C; - Eigen::MatrixXd A = covariance - - covariance * W_r * LU.solve(covariance); + LU_solve_covariance = LU.solve(covariance); + R = W_r - W_r * LU_solve_covariance * W_r; + + Eigen::MatrixXd A = covariance - covariance * W_r * LU_solve_covariance; partial_parm = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); s2 = partial_parm.head(theta_size); } - // std::cout << "s2: " << s2.transpose() << std::endl; - - // TEST -- finite diff benchmark for s2 - // double eps = 1e-7; - // Eigen::VectorXd theta_u0 = theta, theta_l0 = theta; - // theta_u0(11) += eps; - // theta_l0(11) -= eps; - // int group_size = theta.size(); - // - // Eigen::VectorXd l_grad_store; - // SparseMatrix hessian_store; // VectorXd hessian; - // diff_likelihood.diff(theta_u0, value_of(eta), l_grad_store, - // hessian_store, hessian_block_size); - // SparseMatrix W_u0 = - hessian_store; - // diff_likelihood.diff(theta_l0, value_of(eta), l_grad_store, - // hessian_store, hessian_block_size); - // SparseMatrix W_l0 = - hessian_store; - // - // Eigen::MatrixXd B_u0 = Eigen::MatrixXd::Identity(group_size, group_size) - // + covariance * W_u0; - // Eigen::MatrixXd B_l0 = Eigen::MatrixXd::Identity(group_size, group_size) - // + covariance * W_l0; - // std::cout << "s2_finite_diff: " - // << -0.5 * (log(B_u0.determinant()) - log(B_l0.determinant())) - // / (2 * eps) << std::endl; - - // // CHECK -- should there be a minus sign here? - // Eigen::VectorXd s2 = 0.5 * (covariance.diagonal() - // - (C.transpose() * C).diagonal()) - // .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); - phi_adj_ = Eigen::VectorXd(phi_size_); start_nested(); try { - // = std::chrono::system_clock::now(); Matrix phi_v = value_of(phi); Matrix K_var = covariance_function(phi_v, x, delta, delta_int, msgs); Eigen::VectorXd l_grad_theta = l_grad.head(theta_size); - var Z = laplace_pseudo_target(K_var, a, R, l_grad, s2); + var Z = laplace_pseudo_target(K_var, a, R, l_grad_theta, s2); set_zero_all_adjoints_nested(); grad(Z.vi_); for (int j = 0; j < phi_size_; j++) phi_adj_[j] = phi_v(j).adj(); - // finite diff benchmark - // double eps = 1e-7; - // Eigen::VectorXd phi_u0 = value_of(phi), phi_l0 = value_of(phi); - // phi_u0(1) += eps; - // phi_l0(1) -= eps; - // Eigen::MatrixXd K_u0 = - // covariance_function(phi_u0, x, delta, delta_int, msgs), - // K_l0 = covariance_function(phi_l0, x, delta, delta_int, msgs); - // double Z_u0 = laplace_pseudo_target(K_u0, a, R, l_grad, s2), - // Z_l0 = laplace_pseudo_target(K_l0, a, R, l_grad, s2); - // std::cout << "finite diff Z: " << (Z_u0 - Z_l0) / (2 * eps) - // << std::endl; - } catch (const std::exception& e) { recover_memory_nested(); throw; @@ -422,78 +381,21 @@ namespace math { if (eta_size_ != 0) { // TODO: instead, check if eta contains var. VectorXd diff_eta = l_grad.tail(eta_size_); - Eigen::VectorXd v = (Eigen::MatrixXd::Identity(theta_size, theta_size) - - R) * (covariance * s2); + Eigen::VectorXd v; + if (compute_W_root == 1) { + Eigen::MatrixXd W = W_r * W_r; // NOTE: store W from Newton step? + v = covariance * s2 - covariance * W + * L.transpose().triangularView() + . 
solve(L.triangularView() + .solve(covariance * (covariance * s2))); + } else { + v = LU_solve_covariance * s2; + } eta_adj_ = l_grad.tail(eta_size_) + partial_parm.tail(eta_size_) + diff_likelihood.diff_eta_implicit(v, theta, eta_dbl); } - -// TODO: reimplement eta case. -/* - eta_adj_ = Eigen::VectorXd(eta_size_); - if (eta_size_ != 0) { - VectorXd diff_eta = diff_likelihood.diff_eta(theta, eta_dbl); - MatrixXd diff_theta_eta = diff_likelihood.diff_theta_eta(theta, eta_dbl); - MatrixXd diff2_theta_eta - = diff_likelihood.diff2_theta_eta(theta, eta_dbl); - - VectorXd W_root_inv = W_root.cwiseInverse(); - - for (int l = 0; l < eta_size_; l++) { - VectorXd b = covariance * diff_theta_eta.col(l); - // CHECK -- can we use the fact the covariance matrix is symmetric? - VectorXd s3 = b - covariance * (R * b); - - eta_adj_(l) = diff_eta(l) - + 0.5 * (W_root_inv.asDiagonal() * R * (covariance * - elt_divide(diff2_theta_eta.col(l), W_root).asDiagonal())).trace() - + s2.dot(s3); - // + 0.5 * (L.transpose().triangularView() - // .solve(L.triangularView() - // .solve(W_root.asDiagonal() * covariance * elt_divide( - // diff2_theta_eta.col(l), W_root).asDiagonal() - // ))).trace() - } - } */ - - // auto end = std::chrono::system_clock::now(); - // std::chrono::duration time = end - ; - // std::cout << "diffentiation time: " << time.count() << std::endl; - - // Implementation with fwd mode computation of C, - // and then following R&W's scheme. - /* - = std::chrono::system_clock::now(); - */ - // covariance_sensitivities f(x, delta, delta_int, - // covariance_function, msgs); - // Eigen::MatrixXd diff_cov; - // { - // Eigen::VectorXd covariance_vector; - // jacobian_fwd(f, value_of(phi), covariance_vector, diff_cov); - // // covariance = to_matrix(covariance_vector, theta_size, theta_size); - // } - // - // phi_adj_ = Eigen::VectorXd(phi_size_); - // - // std::cout << "phi_adj: "; - // for (int j = 0; j < phi_size_; j++) { - // Eigen::VectorXd j_col = diff_cov.col(j); - // Eigen::MatrixXd C = to_matrix(j_col, theta_size, theta_size); - // double s1 = 0.5 * quad_form(C, a) - 0.5 * sum((R * C).diagonal()); - // Eigen::VectorXd b = C * l_grad; - // Eigen::VectorXd s3 = b - covariance * (R * b); - // // std::cout << "old Z: " << s1 + s2.dot(s3) << std::endl; - // phi_adj_[j] = s1 + s2.dot(s3); - // std::cout << phi_adj_[j] << " "; - // } - // std::cout << std::endl; - - // end = std::chrono::system_clock::now(); - // time = end - ; - // std::cout << "Former diff: " << time.count() << std::endl; - } + } void chain() { for (int j = 0; j < phi_size_; j++) @@ -550,18 +452,15 @@ namespace math { std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, - int hessian_block_size = 1) { - // Eigen::VectorXd theta, W_root, a, l_grad; + int hessian_block_size = 0, + int compute_W_root = 1) { Eigen::VectorXd theta, a, l_grad; - // Eigen::MatrixXd W_root; Eigen::SparseMatrix W_root; Eigen::MatrixXd L; double marginal_density_dbl; Eigen::MatrixXd covariance; Eigen::PartialPivLU LU; - // TEST - // auto start = std::chrono::system_clock::now(); marginal_density_dbl = laplace_marginal_density(diff_likelihood, @@ -572,15 +471,8 @@ namespace math { value_of(theta_0), msgs, tolerance, max_num_steps, - hessian_block_size); - - // TEST - // auto end = std::chrono::system_clock::now(); - // std::chrono::duration elapsed_time = end - start; - // std::cout << "Evaluation time: " << elapsed_time.count() << std::endl; - - // TEST - // start = std::chrono::system_clock::now(); + hessian_block_size, + 
compute_W_root); // construct vari laplace_marginal_density_vari* vi0 @@ -590,7 +482,8 @@ namespace math { marginal_density_dbl, covariance, theta, W_root, L, a, l_grad, LU, - msgs, hessian_block_size); + msgs, hessian_block_size, + compute_W_root); var marginal_density = var(vi0->marginal_density_[0]); diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index 1e22eafe8e6..d07aabcb6c2 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -71,6 +71,30 @@ struct normal_likelihood { } }; +// include a global variance (passed through eta) +struct normal_likelihood2 { + template + stan::return_type_t + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::VectorXd& y, + const std::vector& delta_int, + std::ostream* pstream) const { + using stan::math::multiply; + int n_obs = delta_int[0]; + Eigen::Matrix mu(n_obs); + Eigen::Matrix sigma(n_obs); + for (int i = 0; i < n_obs; i++) { + mu(i) = theta(2 * i); + sigma(i) = exp(0.5 * theta(2 * i + 1)); + } + + T_eta sigma_global = eta(0); + + return stan::math::normal_lpdf(y, mu, multiply(sigma_global, sigma)); + } +}; + class laplace_motorcyle_gp_test : public::testing::Test { protected: void SetUp() override { @@ -112,6 +136,8 @@ class laplace_motorcyle_gp_test : public::testing::Test { theta0(2 * i) = mu_hat(i); theta0(2 * i + 1) = 0; } + + compute_W_root = 0; } int n_obs; @@ -128,6 +154,7 @@ class laplace_motorcyle_gp_test : public::testing::Test { Eigen::VectorXd theta0; Eigen::VectorXd eta_dummy_dbl; Eigen::Matrix eta_dummy; + int compute_W_root; }; TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { @@ -137,9 +164,6 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { using stan::math::diff_likelihood; normal_likelihood f; - - // std::cout << f(theta0, eta_dummy_dbl, y, delta_int, 0) << std::endl; - diff_likelihood diff_functor(f, y, delta_int); int hessian_block_size = 2; @@ -148,7 +172,8 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { covariance_motorcycle_functor(), value_of(phi), eta_dummy_dbl, x, delta_dummy, delta_int, theta0, - 0, 1e-8, 100, hessian_block_size); + 0, 1e-8, 100, hessian_block_size, + compute_W_root); std::cout << "density: " << marginal_density_dbl << std::endl; @@ -157,7 +182,8 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { covariance_motorcycle_functor(), phi, eta_dummy, x, delta_dummy, delta_int, theta0, - 0, 1e-8, 100, hessian_block_size); + 0, 1e-8, 100, hessian_block_size, + compute_W_root); VEC g; AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3)); @@ -166,18 +192,20 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { << std::endl; // FINITE DIFF benchmark + // TODO: benchmark against all the inputs. 
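In the motorcycle test the latent vector interleaves the mean process and the log-noise process, so each observation y_i only touches theta(2i) and theta(2i + 1) and the likelihood Hessian is block diagonal with 2 x 2 blocks; that is why hessian_block_size is 2 above. Below is a standalone check of one such block for the plain likelihood y ~ Normal(theta_f, exp(0.5 * theta_g)) (no global scale eta), analytic second derivatives against central finite differences; the numbers are made up and this is not patch code.

#include <cmath>
#include <iostream>

// log N(y | theta_f, sigma = exp(0.5 * theta_g)), dropping the 2*pi constant.
double loglik(double y, double theta_f, double theta_g) {
  double r = y - theta_f;
  return -0.5 * theta_g - 0.5 * r * r * std::exp(-theta_g);
}

int main() {
  double y = -1.3, tf = 0.2, tg = -0.4;
  double r = y - tf, e = std::exp(-tg);

  // Analytic 2 x 2 Hessian block for this one observation.
  double h_ff = -e;                // d^2 l / d theta_f^2
  double h_fg = -r * e;            // d^2 l / d theta_f d theta_g
  double h_gg = -0.5 * r * r * e;  // d^2 l / d theta_g^2

  // Central second differences as a check.
  double eps = 1e-4, l0 = loglik(y, tf, tg);
  double fd_ff = (loglik(y, tf + eps, tg) - 2 * l0 + loglik(y, tf - eps, tg))
                 / (eps * eps);
  double fd_gg = (loglik(y, tf, tg + eps) - 2 * l0 + loglik(y, tf, tg - eps))
                 / (eps * eps);
  double fd_fg = (loglik(y, tf + eps, tg + eps) - loglik(y, tf + eps, tg - eps)
                  - loglik(y, tf - eps, tg + eps)
                  + loglik(y, tf - eps, tg - eps))
                 / (4 * eps * eps);

  std::cout << "h_ff: " << h_ff << " vs " << fd_ff << "\n"
            << "h_fg: " << h_fg << " vs " << fd_fg << "\n"
            << "h_gg: " << h_gg << " vs " << fd_gg << std::endl;
}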
double eps = 1e-7; Eigen::VectorXd phi_dbl = value_of(phi); Eigen::VectorXd phi_u0 = phi_dbl, phi_l0 = phi_dbl; - phi_u0(3) += eps; - phi_l0(3) -= eps; + phi_u0(0) += eps; + phi_l0(0) -= eps; double target_u0 = laplace_marginal_density(diff_functor, covariance_motorcycle_functor(), phi_u0, eta_dummy_dbl, x, delta_dummy, delta_int, theta0, - 0, 1e-6, 100, hessian_block_size); + 0, 1e-6, 100, hessian_block_size, + compute_W_root); double target_l0 @@ -185,7 +213,70 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { covariance_motorcycle_functor(), phi_l0, eta_dummy_dbl, x, delta_dummy, delta_int, theta0, - 0, 1e-6, 100, hessian_block_size); + 0, 1e-6, 100, hessian_block_size, + compute_W_root); std::cout << "g[0]: " << (target_u0 - target_l0) / (2 * eps) << std::endl; } + +TEST_F(laplace_motorcyle_gp_test, lk_autodiff_eta) { + using stan::math::var; + using stan::math::value_of; + using stan::math::laplace_marginal_density; + using stan::math::diff_likelihood; + + normal_likelihood2 f; + diff_likelihood diff_functor(f, y, delta_int); + Eigen::Matrix eta(1); + eta(0) = 1; + int hessian_block_size = 2; + double marginal_density_dbl + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + value_of(phi), value_of(eta), + x, delta_dummy, delta_int, theta0, + 0, 1e-8, 100, hessian_block_size, + compute_W_root); + + std::cout << "density: " << marginal_density_dbl << std::endl; + + var marginal_density + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + phi, eta, + x, delta_dummy, delta_int, theta0, + 0, 1e-8, 100, hessian_block_size, + compute_W_root); + + VEC g; + AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3), eta(0)); + marginal_density.grad(parm_vec, g); + std::cout << "grad: " + << g[0] << " " << g[1] << " " << g[2] << " " << g[3] << " " << g[4] + << std::endl; + + // finite diff benchmark + double eps = 1e-7; + Eigen::VectorXd eta_dbl = value_of(eta); + Eigen::VectorXd eta_u = eta_dbl, eta_l = eta_dbl; + eta_u(0) += eps; + eta_l(0) -= eps; + + double target_u + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + value_of(phi), eta_u, + x, delta_dummy, delta_int, theta0, + 0, 1e-8, 100, hessian_block_size, + compute_W_root); + + double target_l + = laplace_marginal_density(diff_functor, + covariance_motorcycle_functor(), + value_of(phi), eta_l, + x, delta_dummy, delta_int, theta0, + 0, 1e-8, 100, hessian_block_size, + compute_W_root); + + std::cout << "gf[4]: " << (target_u - target_l) / (2 * eps) << std::endl; +} From d66684ca4d1ea2ac0562da36a8d00a6c83f0c825 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 23 Mar 2021 14:20:49 -0400 Subject: [PATCH 29/53] update rng functions for new interface. --- stan/math/laplace/block_matrix_sqrt.hpp | 1 - .../laplace_likelihood_bernoulli_logit.hpp | 29 ++--- .../laplace_likelihood_poisson_log.hpp | 39 ++---- stan/math/laplace/laplace_marginal.hpp | 10 +- .../laplace/prob/laplace_poisson_log_rng.hpp | 47 ++++--- stan/math/laplace/prob/laplace_rng.hpp | 74 +++++++---- test/unit/math/laplace/disease_map_test.cpp | 116 ++++++++++-------- 7 files changed, 171 insertions(+), 145 deletions(-) diff --git a/stan/math/laplace/block_matrix_sqrt.hpp b/stan/math/laplace/block_matrix_sqrt.hpp index 5c6878de4d0..116acca3e50 100644 --- a/stan/math/laplace/block_matrix_sqrt.hpp +++ b/stan/math/laplace/block_matrix_sqrt.hpp @@ -25,7 +25,6 @@ namespace math { // No block operation available for sparse matrices, so we have to loop. 
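For reference, a standalone sketch of the looping idea in block_matrix_sqrt: pull each diagonal block out of the sparse W, take a dense square root, and scatter it back. The patch takes the square root of each dense block with Eigen's matrix functions; the sketch below substitutes an eigendecomposition-based root (SelfAdjointEigenSolver::operatorSqrt), which is equivalent only for symmetric positive semi-definite blocks, the case in which the W_root branch is used. The function name block_sqrt and the 4 x 4 example (a symmetric variant of the one in the sparse matrix test) are made up for illustration.

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>
#include <vector>

// Block-wise square root of a block-diagonal SPD sparse matrix.
Eigen::SparseMatrix<double> block_sqrt(const Eigen::SparseMatrix<double>& W,
                                       int block_size) {
  int n = W.rows();
  int n_block = n / block_size;
  Eigen::SparseMatrix<double> W_root(n, n);
  W_root.reserve(Eigen::VectorXi::Constant(n, block_size));
  Eigen::MatrixXd local(block_size, block_size);
  for (int i = 0; i < n_block; ++i) {
    for (int j = 0; j < block_size; ++j)
      for (int k = 0; k < block_size; ++k)
        local(j, k) = W.coeff(i * block_size + j, i * block_size + k);
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(local);
    Eigen::MatrixXd local_root = es.operatorSqrt();  // assumes SPD block
    for (int j = 0; j < block_size; ++j)
      for (int k = 0; k < block_size; ++k)
        W_root.insert(i * block_size + j, i * block_size + k)
            = local_root(j, k);
  }
  W_root.makeCompressed();
  return W_root;
}

int main() {
  typedef Eigen::Triplet<double> trp;
  std::vector<trp> t{trp(0, 0, 4), trp(0, 1, 3), trp(1, 0, 3), trp(1, 1, 6),
                     trp(2, 2, 4), trp(2, 3, 2), trp(3, 2, 2), trp(3, 3, 8)};
  Eigen::SparseMatrix<double> W(4, 4);
  W.setFromTriplets(t.begin(), t.end());

  Eigen::SparseMatrix<double> W_root = block_sqrt(W, 2);
  Eigen::SparseMatrix<double> check = W_root * W_root;
  std::cout << "W:\n" << W.toDense()
            << "\nW_root * W_root:\n" << check.toDense() << std::endl;
}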
// See https://eigen.tuxfamily.org/dox/group__TutorialSparse.html#title7 for (int i = 0; i < n_block; i++) { - std::cout << "block number: " << i << std::endl; for (int j = 0; j < block_size; j++) { for (int k = 0; k < block_size; k++) { local_block(j, k) = W.coeffRef(i * block_size + j, i * block_size + k); diff --git a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp index 26adc10a3b2..4f713d42eda 100644 --- a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp @@ -84,33 +84,20 @@ struct diff_bernoulli_logit { return n_samples_.cwiseProduct(elt_divide(nominator, denominator)); } - template - Eigen::Matrix, Eigen::Dynamic, 1> - diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::VectorXd compute_s2(const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta, + const Eigen::MatrixXd& A, + int hessian_block_size) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; return void_matrix; } - template - Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> - diff_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::VectorXd diff_eta_implicit(const Eigen::VectorXd& v, + const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; - return void_matrix; - } - - template - Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> - diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { - std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; return void_matrix; } }; diff --git a/stan/math/laplace/laplace_likelihood_poisson_log.hpp b/stan/math/laplace/laplace_likelihood_poisson_log.hpp index e0931670a02..9a3497db6c6 100644 --- a/stan/math/laplace/laplace_likelihood_poisson_log.hpp +++ b/stan/math/laplace/laplace_likelihood_poisson_log.hpp @@ -108,41 +108,18 @@ struct diff_poisson_log { Eigen::VectorXd compute_s2(const Eigen::VectorXd& theta, const Eigen::VectorXd& eta, - const Eigen::MatrixXd& L, - const Eigen::MatrixXd& covariance, + const Eigen::MatrixXd& A, int hessian_block_size) const { - std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; - Eigen::VectorXd void_vector; - return void_vector; - } - - template - Eigen::Matrix, Eigen::Dynamic, 1> - diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; - return void_matrix; - } - - template - Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> - diff_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { - std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::MatrixXd void_matrix; return void_matrix; } - template - Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> - diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { - std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" 
<< std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::VectorXd diff_eta_implicit(const Eigen::VectorXd& v, + const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta) const { + std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; + Eigen::MatrixXd void_matrix; return void_matrix; } }; diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index e201046b015..afed55f32e3 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -8,7 +8,6 @@ #include #include #include -// #include #include #include @@ -115,6 +114,9 @@ namespace math { throw boost::math::evaluation_error(message.str()); } + int block_size = (hessian_block_size == 0) ? hessian_block_size + 1 + : hessian_block_size; + for (int i = 0; i <= max_num_steps; i++) { if (i == max_num_steps) { std::ostringstream message; @@ -124,7 +126,7 @@ namespace math { } SparseMatrix W; - diff_likelihood.diff(theta, eta, l_grad, W, hessian_block_size); + diff_likelihood.diff(theta, eta, l_grad, W, block_size); W = - W; VectorXd b; @@ -136,7 +138,7 @@ namespace math { B = MatrixXd::Identity(theta_size, theta_size) + quad_form_diag(covariance, W_r.diagonal()); } else { - W_r = block_matrix_sqrt(W, hessian_block_size); + W_r = block_matrix_sqrt(W, block_size); B = MatrixXd::Identity(theta_size, theta_size) + W_r * (covariance * W_r); } @@ -314,6 +316,8 @@ namespace math { using Eigen::VectorXd; using Eigen::SparseMatrix; +std::cout << "marker a" << std::endl; + int theta_size = theta.size(); for (int i = 0; i < phi_size_; i++) phi_[i] = phi(i).vi_; for (int i = 0; i < eta_size_; i++) eta_[i] = eta(i).vi_; diff --git a/stan/math/laplace/prob/laplace_poisson_log_rng.hpp b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp index 26b00340ffb..66a23f5dddc 100644 --- a/stan/math/laplace/prob/laplace_poisson_log_rng.hpp +++ b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp @@ -16,31 +16,39 @@ namespace math { * from the gaussian approximation of p(theta | y, phi) * where the likelihood is a Poisson with a log link. */ -template -inline Eigen::VectorXd // CHECK -- right return type +template +inline Eigen::VectorXd laplace_poisson_log_rng (const std::vector& y, const std::vector& n_samples, const K& covariance_function, const Eigen::Matrix& phi, - const std::vector& x, + const T2& x, + const T3& x_pred, const std::vector& delta, const std::vector& delta_int, const Eigen::Matrix& theta_0, RNG& rng, std::ostream* msgs = nullptr, double tolerance = 1e-6, - long int max_num_steps = 100) { - return - laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), - covariance_function, phi, x, delta, delta_int, theta_0, - rng, msgs, tolerance, max_num_steps); + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { + Eigen::VectorXd eta_dummy; + return + laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), + covariance_function, phi, eta_dummy, + x, x_pred, delta, delta_int, theta_0, + rng, msgs, tolerance, max_num_steps, + hessian_block_size, compute_W_root); } /** * Overload for case where user passes exposure. 
*/ -template +template inline Eigen::VectorXd // CHECK -- right return type laplace_poisson_log_rng (const std::vector& y, @@ -48,21 +56,26 @@ inline Eigen::VectorXd // CHECK -- right return type const Eigen::VectorXd& exposure, const K& covariance_function, const Eigen::Matrix& phi, - const std::vector& x, + const T2& x, + const T3& x_pred, const std::vector& delta, const std::vector& delta_int, const Eigen::Matrix& theta_0, RNG& rng, std::ostream* msgs = nullptr, double tolerance = 1e-6, - long int max_num_steps = 100) { - return - laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), - log(exposure)), - covariance_function, phi, x, delta, delta_int, theta_0, - rng, msgs, tolerance, max_num_steps); + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { + Eigen::VectorXd eta_dummy; + return + laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), + log(exposure)), + covariance_function, phi, eta_dummy, x, x_pred, delta, + delta_int, theta_0, + rng, msgs, tolerance, max_num_steps, + hessian_block_size, compute_W_root); } - } // namespace math } // namespace stan diff --git a/stan/math/laplace/prob/laplace_rng.hpp b/stan/math/laplace/prob/laplace_rng.hpp index 79450ddaad8..61af3916926 100644 --- a/stan/math/laplace/prob/laplace_rng.hpp +++ b/stan/math/laplace/prob/laplace_rng.hpp @@ -5,58 +5,84 @@ #include #include +#include +#include + namespace stan { namespace math { /** * In a latent gaussian model, * - * theta ~ Normal(theta | 0, Sigma(phi)) - * y ~ pi(y | theta) + * theta ~ Normal(theta | 0, Sigma(phi, x)) + * y ~ pi(y | theta, eta) * - * return a multivariate normal random variate sampled - * from the gaussian approximation of p(theta | y, phi). + * returns a multivariate normal random variate sampled + * from the gaussian approximation of p(theta_pred | y, phi, x_pred). + * Note that while the data is observed at x, the new samples + * are drawn for covariates x_pred. + * To sample the "original" theta's, set x_pred = x. 
*/ -template inline Eigen::VectorXd // CHECK -- right return type laplace_rng (const D& diff_likelihood, const K& covariance_function, const Eigen::Matrix& phi, + const Eigen::Matrix& eta, const T_x& x, - // const std::vector& x, + const T_x_pred& x_pred, const std::vector& delta, const std::vector& delta_int, const Eigen::Matrix& theta_0, RNG& rng, std::ostream* msgs = nullptr, double tolerance = 1e-6, - long int max_num_steps = 100) { - Eigen::VectorXd theta; - Eigen::VectorXd W_root; - Eigen::MatrixXd L; + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { + using Eigen::VectorXd; + using Eigen::MatrixXd; + + VectorXd phi_dbl = value_of(phi); + VectorXd eta_dbl = value_of(eta); + Eigen::SparseMatrix W_r; + MatrixXd L; + Eigen::PartialPivLU LU; + VectorXd l_grad; + MatrixXd covariance; { - Eigen::MatrixXd covariance; - Eigen::VectorXd a; - Eigen::VectorXd l_grad; + VectorXd theta; + VectorXd a; double marginal_density = laplace_marginal_density(diff_likelihood, covariance_function, - value_of(phi), x, delta, delta_int, - covariance, theta, W_root, L, a, l_grad, - value_of(theta_0), msgs, - tolerance, max_num_steps); + phi_dbl, eta_dbl, + x, delta, delta_int, + covariance, theta, W_r, L, a, l_grad, + LU, value_of(theta_0), msgs, + tolerance, max_num_steps, + hessian_block_size, compute_W_root); } // Modified R&W method - Eigen::VectorXd W_root_inv = inv(W_root); - Eigen::MatrixXd V_dec = mdivide_left_tri(L, - diag_matrix(W_root_inv)); + MatrixXd covariance_pred = covariance_function(phi_dbl, x_pred, + delta, delta_int, msgs); + VectorXd pred_mean = covariance_pred * l_grad; + + Eigen::MatrixXd Sigma; + if (compute_W_root) { + Eigen::MatrixXd V_dec = mdivide_left_tri(L, + W_r * covariance_pred); + Sigma = covariance_pred - V_dec.transpose() * V_dec; + } else { + Sigma = covariance_pred + - covariance_pred * (W_r - W_r * LU.solve(covariance * W_r)) + * covariance_pred; + } - return multi_normal_rng( - theta, - diag_matrix(square(W_root_inv)) - V_dec.transpose() * V_dec, - rng); + return multi_normal_rng(pred_mean, Sigma, rng); } } // namespace math diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 9f568f35528..500c4f6a161 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -1,7 +1,9 @@ #include #include #include -// #include +#include +#include +#include #include #include @@ -15,6 +17,28 @@ #include #include +struct poisson_log_likelihood { + template + stan::return_type_t + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::VectorXd& delta, + const std::vector& n_samples, + std::ostream* pstream) const { + using stan::math::to_vector; + using stan::math::log; + int n = 911; + Eigen::VectorXd y = delta.head(n); + Eigen::VectorXd ye = delta.tail(n); + // Eigen::VectorXd log_ye = ye.log(); + + stan::math::diff_poisson_log + diff_functor(to_vector(n_samples), y, log(ye)); + + return diff_functor.log_likelihood(theta, eta); + } +}; + // TODO(charlesm93): update using new function signatures. 
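The draw assembled in laplace_rng above uses the usual Laplace-approximation predictive algebra: mean = K_pred * grad log p(y | theta_hat) and Sigma = K_pred - K_pred (K + W^{-1})^{-1} K_pred, with (K + W^{-1})^{-1} written as W^{1/2} B^{-1} W^{1/2}. The standalone sketch below does this for the x_pred == x case with a diagonal W and made-up inputs, using a plain std::mt19937 in place of Stan's RNG machinery; it is illustration only, not patch code.

#include <Eigen/Dense>
#include <iostream>
#include <random>

int main() {
  const int n = 3;
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(n, n);
  Eigen::MatrixXd K = A * A.transpose() + Eigen::MatrixXd::Identity(n, n);

  // Stand-ins for the quantities returned by the Newton solver.
  Eigen::VectorXd grad(n), W(n);
  grad << 0.3, -0.1, 0.4;  // gradient of the log likelihood at the mode
  W << 1.2, 0.8, 2.0;      // minus the (diagonal) Hessian at the mode

  Eigen::VectorXd W_root = W.cwiseSqrt();
  Eigen::MatrixXd B = Eigen::MatrixXd::Identity(n, n)
      + W_root.asDiagonal() * K * W_root.asDiagonal();
  Eigen::LLT<Eigen::MatrixXd> llt_B(B);

  Eigen::VectorXd mean = K * grad;  // equals theta_hat when x_pred == x
  Eigen::MatrixXd WK = W_root.asDiagonal() * K;
  Eigen::MatrixXd Sigma = K - WK.transpose() * llt_B.solve(WK);

  // One draw: mean + chol(Sigma) * z, z ~ N(0, I); jitter for safety.
  Eigen::LLT<Eigen::MatrixXd> llt_S(
      Sigma + 1e-10 * Eigen::MatrixXd::Identity(n, n));
  Eigen::MatrixXd L_S = llt_S.matrixL();
  std::mt19937 rng(1954);
  std::normal_distribution<double> std_normal(0.0, 1.0);
  Eigen::VectorXd z(n);
  for (int i = 0; i < n; ++i) z(i) = std_normal(rng);
  Eigen::VectorXd draw = mean + L_S * z;

  std::cout << "approximate posterior mean: " << mean.transpose() << "\n"
            << "one draw:                   " << draw.transpose() << std::endl;
}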
class laplace_disease_map_test : public::testing::Test { protected: @@ -52,6 +76,11 @@ class laplace_disease_map_test : public::testing::Test { dim_phi = 2; phi.resize(dim_phi); phi << 0.3162278, 200; // variance, length scale + + delta_lk.resize(2 * n_observations); + for (int i = 0; i < n_observations; i++) delta_lk(i) = y[i]; + for (int i = 0; i < n_observations; i++) + delta_lk(n_observations + i) = ye(i); } int dim_theta; @@ -69,6 +98,10 @@ class laplace_disease_map_test : public::testing::Test { Eigen::VectorXd theta_0; int dim_phi; Eigen::Matrix phi; + Eigen::Matrix eta_dummy; + + Eigen::VectorXd delta_lk; + poisson_log_likelihood f; }; @@ -79,8 +112,6 @@ TEST_F(laplace_disease_map_test, lk_analytical) { using stan::math::var; using stan::math::laplace_marginal_poisson_log_lpmf; - -/* auto start = std::chrono::system_clock::now(); var marginal_density @@ -112,7 +143,6 @@ TEST_F(laplace_disease_map_test, lk_analytical) { using stan::math::diff_poisson_log; using stan::math::to_vector; - using stan::math::sqr_exp_kernel_functor; using stan::math::laplace_rng; using stan::math::laplace_poisson_log_rng; @@ -124,7 +154,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { Eigen::VectorXd theta_pred = laplace_rng(diff_likelihood, sqr_exp_kernel_functor(), - phi, x, delta, delta_int, + phi, eta_dummy, x, x, delta, delta_int, theta_0, rng); end = std::chrono::system_clock::now(); @@ -135,12 +165,12 @@ TEST_F(laplace_disease_map_test, lk_analytical) { << std::endl; // Expected result - // total time: 0.404114 + // total time: 0.404114 (or 0.328 on new computer) start = std::chrono::system_clock::now(); theta_pred = laplace_poisson_log_rng(y, n_samples, ye, sqr_exp_kernel_functor(), - phi, x, delta, delta_int, + phi, x, x, delta, delta_int, theta_0, rng); end = std::chrono::system_clock::now(); elapsed_time = end - start; @@ -148,53 +178,25 @@ TEST_F(laplace_disease_map_test, lk_analytical) { std::cout << "LAPLACE_APPROX_POISSON_RNG" << std::endl << "total time: " << elapsed_time.count() << std::endl << std::endl; - */ } -struct poisson_log_likelihood { - template - stan::return_type_t - operator()(const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - const Eigen::VectorXd& delta, - const std::vector& n_samples, - std::ostream* pstream) const { - using stan::math::to_vector; - using stan::math::log; - int n = 911; - Eigen::VectorXd y = delta.head(n); - Eigen::VectorXd ye = delta.tail(n); - // Eigen::VectorXd log_ye = ye.log(); - - stan::math::diff_poisson_log - diff_functor(to_vector(n_samples), y, log(ye)); - - return diff_functor.log_likelihood(theta, eta); - } -}; - TEST_F(laplace_disease_map_test, lk_autodiff) { using stan::math::var; using stan::math::laplace_marginal_density; using stan::math::diff_likelihood; - Eigen::VectorXd delta_lk(2 * n_observations); - for (int i = 0; i < n_observations; i++) delta_lk(i) = y[i]; - for (int i = 0; i < n_observations; i++) delta_lk(n_observations + i) = ye(i); - - poisson_log_likelihood f; - diff_likelihood - diff_functor(f, delta_lk, n_samples); + diff_likelihood diff_functor(f, delta_lk, n_samples); auto start = std::chrono::system_clock::now(); - Eigen::Matrix eta_dummy; - int hessian_block_size = 1; + int hessian_block_size = 0; // 0, 1, 911 + int compute_W_root = 1; double marginal_density_dbl = laplace_marginal_density(diff_functor, sqr_exp_kernel_functor(), value_of(phi), value_of(eta_dummy), x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size); + 0, 1e-6, 100, hessian_block_size, + compute_W_root); auto end = 
std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; @@ -236,14 +238,7 @@ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { using stan::math::laplace_marginal_density; using stan::math::diff_likelihood; - Eigen::VectorXd delta_lk(2 * n_observations); - for (int i = 0; i < n_observations; i++) delta_lk(i) = y[i]; - for (int i = 0; i < n_observations; i++) delta_lk(n_observations + i) = ye(i); - - poisson_log_likelihood f; - diff_likelihood - diff_functor(f, delta_lk, n_samples); - Eigen::Matrix eta_dummy; + diff_likelihood diff_functor(f, delta_lk, n_samples); Eigen::VectorXd phi_dbl = value_of(phi); Eigen::VectorXd phi_u0 = phi_dbl, phi_u1 = phi_dbl, @@ -286,3 +281,28 @@ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { << " " << (target_u1 - target_l1) / (2 * eps) << std::endl; } + +TEST_F(laplace_disease_map_test, rng_autodiff) { + using stan::math::var; + using stan::math::laplace_rng; + using stan::math::diff_likelihood; + + diff_likelihood diff_functor(f, delta_lk, n_samples); + + boost::random::mt19937 rng; + int hessian_block_size = 0; + int compute_W_root = 1; + + auto start = std::chrono::system_clock::now(); + Eigen::VectorXd + theta_pred = laplace_rng(diff_functor, + sqr_exp_kernel_functor(), + phi, eta_dummy, + x, x, delta, delta_int, theta_0, rng, + 0, 1e-6, 100, hessian_block_size, compute_W_root); + auto end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_time = end - start; + std::cout << "LAPLACE_APPROX_RNG" << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; +} From 1fe30d1e7721f3b51137f84de4065fdbeff9ac01 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 23 Mar 2021 14:54:37 -0400 Subject: [PATCH 30/53] update bernoulli analytical lk. --- .../laplace_likelihood_bernoulli_logit.hpp | 20 +++++++++---- .../laplace_marginal_bernoulli_logit.hpp | 30 ++----------------- test/unit/math/laplace/laplace_skim_test.cpp | 1 - 3 files changed, 17 insertions(+), 34 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp index 4f713d42eda..32783f3d4f0 100644 --- a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp @@ -52,14 +52,22 @@ struct diff_bernoulli_logit { void diff (const Eigen::Matrix& theta, const Eigen::Matrix& eta_dummy, Eigen::Matrix& gradient, - Eigen::Matrix& hessian) const { + Eigen::SparseMatrix& hessian, + // Eigen::Matrix& hessian, + int block_size_dummy) const { Eigen::Matrix exp_theta = exp(theta); - Eigen::VectorXd one = rep_vector(1, theta.size()); + int theta_size = theta.size(); + Eigen::VectorXd one = rep_vector(1, theta_size); gradient = sums_ - n_samples_.cwiseProduct(inv_logit(theta)); - hessian = - n_samples_.cwiseProduct(elt_divide(exp_theta, - square(one + exp_theta))); + Eigen::Matrix + hessian_diagonal = - n_samples_.cwiseProduct(elt_divide(exp_theta, + square(one + exp_theta))); + hessian.resize(theta_size, theta_size); + hessian.reserve(Eigen::VectorXi::Constant(theta_size, 1)); + for (int i = 0; i < theta_size; i++) + hessian.insert(i, i) = hessian_diagonal(i); } /** @@ -89,7 +97,7 @@ struct diff_bernoulli_logit { const Eigen::MatrixXd& A, int hessian_block_size) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" 
<< std::endl; - Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; + Eigen::MatrixXd void_matrix; return void_matrix; } @@ -97,7 +105,7 @@ struct diff_bernoulli_logit { const Eigen::VectorXd& theta, const Eigen::VectorXd& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; + Eigen::MatrixXd void_matrix; return void_matrix; } }; diff --git a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp b/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp index 2de0c70b253..8b7edacc0e9 100644 --- a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp @@ -2,7 +2,7 @@ #define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP #include -#include +#include namespace stan { namespace math { @@ -32,30 +32,6 @@ namespace math { * @param[in] max_num_steps maximum number of steps before the Newton solver * breaks and returns an error. */ - // TODO: deprecate the below function. No default functor. - template - T1 laplace_marginal_bernoulli_logit_lpmf - (const std::vector& y, - const std::vector& n_samples, - // const K& covariance function, - const Eigen::Matrix& phi, - const std::vector& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100) { - // TODO: change this to a VectorXd once we have operands & partials. - Eigen::Matrix eta_dummy(0); - return laplace_marginal_density( - diff_logistic_log(to_vector(n_samples), to_vector(y)), - sqr_exp_kernel_functor(), - phi, eta_dummy, x, delta, delta_int, - theta_0, msgs, tolerance, max_num_steps); - } - - // Add signature that takes in a Kernel functor specified by the user. template T1 laplace_marginal_bernoulli_logit_lpmf (const std::vector& y, @@ -72,7 +48,7 @@ namespace math { // TODO: change this to a VectorXd once we have operands & partials. Eigen::Matrix eta_dummy(0); return laplace_marginal_density( - diff_logistic_log(to_vector(n_samples), to_vector(y)), + diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), covariance_function, phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); @@ -95,7 +71,7 @@ namespace math { // TODO: change this to a VectorXd once we have operands & partials. Eigen::Matrix eta_dummy(0); return laplace_marginal_density( - diff_logistic_log(to_vector(n_samples), to_vector(y)), + diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), covariance_function, phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, max_num_steps); diff --git a/test/unit/math/laplace/laplace_skim_test.cpp b/test/unit/math/laplace/laplace_skim_test.cpp index 34e158ee5a5..586f0208699 100755 --- a/test/unit/math/laplace/laplace_skim_test.cpp +++ b/test/unit/math/laplace/laplace_skim_test.cpp @@ -1,5 +1,4 @@ #include -#include #include #include #include From f8c6ac09ae4de8c8fb61867053f60481f6e46a58 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 24 Mar 2021 19:03:33 -0400 Subject: [PATCH 31/53] clean up skim test. 
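For readers checking the Bernoulli-logit update in diff_bernoulli_logit::diff above, here is a minimal standalone sketch of the gradient and the diagonal (block size 1) sparse Hessian it fills. This is illustrative, Eigen-only code, not part of the patch; the function name is hypothetical.

#include <Eigen/Dense>
#include <Eigen/Sparse>

// log p(y | theta) for the Bernoulli-logit likelihood with counts:
//   sum_i [ sums_i * theta_i - n_i * log(1 + exp(theta_i)) ]
// gradient_i = sums_i - n_i * inv_logit(theta_i)
// hessian    = diag( -n_i * exp(theta_i) / (1 + exp(theta_i))^2 )
inline void bernoulli_logit_diff_sketch(const Eigen::VectorXd& sums,
                                        const Eigen::VectorXd& n_samples,
                                        const Eigen::VectorXd& theta,
                                        Eigen::VectorXd& gradient,
                                        Eigen::SparseMatrix<double>& hessian) {
  const int n = theta.size();
  const Eigen::ArrayXd exp_theta = theta.array().exp();
  gradient = sums
      - (n_samples.array() * exp_theta / (1.0 + exp_theta)).matrix();
  const Eigen::ArrayXd diag
      = -n_samples.array() * exp_theta / (1.0 + exp_theta).square();
  hessian.resize(n, n);
  hessian.reserve(Eigen::VectorXi::Constant(n, 1));  // one non-zero per column
  for (int i = 0; i < n; ++i)
    hessian.insert(i, i) = diag(i);
}

Storing the Hessian as a sparse diagonal is what lets the Newton solver exploit the hessian_block_size argument introduced elsewhere in this series.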
--- stan/math/laplace/laplace_marginal.hpp | 15 ++++++++------- test/unit/math/laplace/laplace_skim_test.cpp | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index afed55f32e3..d62effe4811 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -316,8 +316,6 @@ namespace math { using Eigen::VectorXd; using Eigen::SparseMatrix; -std::cout << "marker a" << std::endl; - int theta_size = theta.size(); for (int i = 0; i < phi_size_; i++) phi_[i] = phi(i).vi_; for (int i = 0; i < eta_size_; i++) eta_[i] = eta(i).vi_; @@ -351,11 +349,12 @@ std::cout << "marker a" << std::endl; = diff_likelihood.compute_s2(theta, eta_dbl, A, block_size); s2 = partial_parm.head(theta_size); } - } else { + } else { // we have not computed W_root. LU_solve_covariance = LU.solve(covariance); R = W_r - W_r * LU_solve_covariance * W_r; Eigen::MatrixXd A = covariance - covariance * W_r * LU_solve_covariance; + // Eigen::MatrixXd A = covariance - covariance * R * covariance; partial_parm = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); s2 = partial_parm.head(theta_size); @@ -388,10 +387,12 @@ std::cout << "marker a" << std::endl; Eigen::VectorXd v; if (compute_W_root == 1) { Eigen::MatrixXd W = W_r * W_r; // NOTE: store W from Newton step? - v = covariance * s2 - covariance * W - * L.transpose().triangularView() - . solve(L.triangularView() - .solve(covariance * (covariance * s2))); + v = covariance * s2 + - covariance * R * covariance * s2; + // - covariance * W + // * L.transpose().triangularView() + // . solve(L.triangularView() + // .solve(covariance * (covariance * s2))); } else { v = LU_solve_covariance * s2; } diff --git a/test/unit/math/laplace/laplace_skim_test.cpp b/test/unit/math/laplace/laplace_skim_test.cpp index 586f0208699..d79e7e039ba 100755 --- a/test/unit/math/laplace/laplace_skim_test.cpp +++ b/test/unit/math/laplace/laplace_skim_test.cpp @@ -129,7 +129,7 @@ class laplace_skim_test : public::testing::Test { using stan::math::add; N = 100; - M = 200; // options: 2, 50, 100, 150, 200 + M = 2; // options: 2, 50, 100, 150, 200 // TODO: add to GitHub directory simulation for each configuration. // std::string data_directory = "test/unit/math/laplace/skim_data/" + // std::to_string(M) + "_" + std::to_string(N) + "/"; From d4b7c2139f2951e570d5448703f84f81e8ffc824 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 30 Mar 2021 10:17:33 -0400 Subject: [PATCH 32/53] Extend gp motorcycle test. --- test/unit/math/laplace/laplace_utility.hpp | 31 +++++++++++++++++++ .../unit/math/laplace/motorcycle_gp/x_vec.csv | 1 + .../unit/math/laplace/motorcycle_gp/y_vec.csv | 1 + test/unit/math/laplace/motorcycle_gp_test.cpp | 30 ++++++++++++------ 4 files changed, 54 insertions(+), 9 deletions(-) create mode 100644 test/unit/math/laplace/motorcycle_gp/x_vec.csv create mode 100644 test/unit/math/laplace/motorcycle_gp/y_vec.csv diff --git a/test/unit/math/laplace/laplace_utility.hpp b/test/unit/math/laplace/laplace_utility.hpp index 559b7f92a39..4b02ae15d26 100644 --- a/test/unit/math/laplace/laplace_utility.hpp +++ b/test/unit/math/laplace/laplace_utility.hpp @@ -303,3 +303,34 @@ void read_in_data(int dim_theta, lambda[m] = buffer; } } + +// TODO: write a more general data reader, rather than overload. +// Overload function to read in gp motorcycle data. 
+void read_data(int dim_observations, + std::string data_directory, + std::vector& x, + Eigen::VectorXd& y) { + std::ifstream input_data; + std::string file_y = data_directory + "y_vec.csv"; + std::string file_x = data_directory + "x_vec.csv"; + + // std::cout << "file_y: " << file_y << std::cout; + // printf(file_y.c_str()); + + input_data.open(file_y); + double buffer = 0.0; + y.resize(dim_observations); + for (int i = 0; i < dim_observations; ++i) { + input_data >> buffer; + y(i) = buffer; + } + input_data.close(); + + input_data.open(file_x); + buffer = 0.0; + x.resize(dim_observations); + for (int i = 0; i < dim_observations; ++i) { + input_data >> buffer; + x[i] = buffer; + } +} diff --git a/test/unit/math/laplace/motorcycle_gp/x_vec.csv b/test/unit/math/laplace/motorcycle_gp/x_vec.csv new file mode 100644 index 00000000000..6ac59cf0d7e --- /dev/null +++ b/test/unit/math/laplace/motorcycle_gp/x_vec.csv @@ -0,0 +1 @@ +2.4 2.6 3.2 3.6 4 6.2 6.6 6.8 7.8 8.2 8.8 8.8 9.6 10 10.2 10.6 11 11.4 13.2 13.6 13.8 14.6 14.6 14.6 14.6 14.6 14.6 14.8 15.4 15.4 15.4 15.4 15.6 15.6 15.8 15.8 16 16 16.2 16.2 16.2 16.4 16.4 16.6 16.8 16.8 16.8 17.6 17.6 17.6 17.6 17.8 17.8 18.6 18.6 19.2 19.4 19.4 19.6 20.2 20.4 21.2 21.4 21.8 22 23.2 23.4 24 24.2 24.2 24.6 25 25 25.4 25.4 25.6 26 26.2 26.2 26.4 27 27.2 27.2 27.2 27.6 28.2 28.4 28.4 28.6 29.4 30.2 31 31.2 32 32 32.8 33.4 33.8 34.4 34.8 35.2 35.2 35.4 35.6 35.6 36.2 36.2 38 38 39.2 39.4 40 40.4 41.6 41.6 42.4 42.8 42.8 43 44 44.4 45 46.6 47.8 47.8 48.8 50.6 52 53.2 55 55 55.4 57.6 diff --git a/test/unit/math/laplace/motorcycle_gp/y_vec.csv b/test/unit/math/laplace/motorcycle_gp/y_vec.csv new file mode 100644 index 00000000000..01a07398aca --- /dev/null +++ b/test/unit/math/laplace/motorcycle_gp/y_vec.csv @@ -0,0 +1 @@ +0 -1.3 -2.7 0 -2.7 -2.7 -2.7 -1.3 -2.7 -2.7 -1.3 -2.7 -2.7 -2.7 -5.4 -2.7 -5.4 0 -2.7 -2.7 0 -13.3 -5.4 -5.4 -9.3 -16 -22.8 -2.7 -22.8 -32.1 -53.5 -54.9 -40.2 -21.5 -21.5 -50.8 -42.9 -26.8 -21.5 -50.8 -61.7 -5.4 -80.4 -59 -71 -91.1 -77.7 -37.5 -85.6 -123.1 -101.9 -99.1 -104.4 -112.5 -50.8 -123.1 -85.6 -72.3 -127.2 -123.1 -117.9 -134 -101.9 -108.4 -123.1 -123.1 -128.5 -112.5 -95.1 -81.8 -53.5 -64.4 -57.6 -72.3 -44.3 -26.8 -5.4 -107.1 -21.5 -65.6 -16 -45.6 -24.2 9.5 4 12 -21.5 37.5 46.9 -17.4 36.2 75 8.1 54.9 48.2 46.9 16 45.6 1.3 75 -16 -54.9 69.6 34.8 32.1 -37.5 22.8 46.9 10.7 5.4 -1.3 -21.5 -13.3 30.8 -10.7 29.4 0 -10.7 14.7 -1.3 0 10.7 10.7 -26.8 -14.7 -13.3 0 10.7 -14.7 -2.7 10.7 -2.7 10.7 diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index d07aabcb6c2..ed7c5293e94 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -101,14 +101,25 @@ class laplace_motorcyle_gp_test : public::testing::Test { using stan::math::value_of; using stan::math::gp_exp_quad_cov; - n_obs = 6; - Eigen::VectorXd x_vec(n_obs); - x_vec << 2.4, 2.6, 3.2, 3.6, 4.0, 6.2; - x.resize(n_obs); - for (int i = 0; i < n_obs; i++) x[i] = x_vec(i); - y.resize(n_obs); - // y << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0; - y << 0.0, -1.3, -2.7, 0.0, -2.7, -2.7; + if (FALSE) { + n_obs = 6; + Eigen::VectorXd x_vec(n_obs); + x_vec << 2.4, 2.6, 3.2, 3.6, 4.0, 6.2; + x.resize(n_obs); + for (int i = 0; i < n_obs; i++) x[i] = x_vec(i); + y.resize(n_obs); + y << 0.0, -1.3, -2.7, 0.0, -2.7, -2.7; + } + + if (TRUE) { + n_obs = 133; + read_data(n_obs, "test/unit/math/laplace/motorcycle_gp/", + x, y); + std::cout << "x: "; + for (int i = 0; i < 5; i++) std::cout << x[i] << " "; + 
std::cout << " ..." << std::endl; + std::cout << "y: " << y.transpose().head(5) << " ..." << std::endl; + } length_scale_f = 0.3; length_scale_g = 0.5; @@ -132,8 +143,9 @@ class laplace_motorcyle_gp_test : public::testing::Test { Eigen::VectorXd mu_hat = K_plus_I.colPivHouseholderQr().solve(y); + // Remark: finds optimal point with or without informed initial guess. for (int i = 0; i < n_obs; i++) { - theta0(2 * i) = mu_hat(i); + theta0(2 * i) = 0; // mu_hat(i); theta0(2 * i + 1) = 0; } From f7831d121dd217d12d30bac8d0a768156a7f177c Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Tue, 30 Mar 2021 18:15:39 -0400 Subject: [PATCH 33/53] wrapper for general laplace_marginal_pdf --- stan/math/laplace/laplace.hpp | 6 +-- ...laplace_marginal_bernoulli_logit_lpmf.hpp} | 4 +- ... => laplace_marginal_poisson_log_lpmf.hpp} | 4 +- test/unit/math/laplace/disease_map_test.cpp | 2 +- .../laplace_marginal_bernoulli_logit_test.cpp | 2 +- .../laplace_marginal_poisson_log_test.cpp | 2 +- test/unit/math/laplace/laplace_skim_test.cpp | 2 +- test/unit/math/laplace/motorcycle_gp_test.cpp | 37 +++++++++++++++++-- 8 files changed, 44 insertions(+), 15 deletions(-) rename stan/math/laplace/{laplace_marginal_bernoulli_logit.hpp => laplace_marginal_bernoulli_logit_lpmf.hpp} (96%) rename stan/math/laplace/{laplace_marginal_poisson_log.hpp => laplace_marginal_poisson_log_lpmf.hpp} (96%) diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp index a2798fe9971..962cb65d476 100644 --- a/stan/math/laplace/laplace.hpp +++ b/stan/math/laplace/laplace.hpp @@ -1,9 +1,9 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_HPP #define STAN_MATH_LAPLACE_LAPLACE_HPP -// #include -#include -// #include +#include +#include +#include // #include #endif diff --git a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp b/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp similarity index 96% rename from stan/math/laplace/laplace_marginal_bernoulli_logit.hpp rename to stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp index 8b7edacc0e9..899c651c758 100644 --- a/stan/math/laplace/laplace_marginal_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp @@ -1,5 +1,5 @@ - #ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP -#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_HPP + #ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_LPMF_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_LPMF_HPP #include #include diff --git a/stan/math/laplace/laplace_marginal_poisson_log.hpp b/stan/math/laplace/laplace_marginal_poisson_log_lpmf.hpp similarity index 96% rename from stan/math/laplace/laplace_marginal_poisson_log.hpp rename to stan/math/laplace/laplace_marginal_poisson_log_lpmf.hpp index e3800519806..87a5162b2b2 100644 --- a/stan/math/laplace/laplace_marginal_poisson_log.hpp +++ b/stan/math/laplace/laplace_marginal_poisson_log_lpmf.hpp @@ -1,5 +1,5 @@ -#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_LOG_HPP -#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_LOG_HPP +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_LOG_LPMF_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_POISSON_LOG_LPMF_HPP #include #include diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 500c4f6a161..4de359cec46 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include #include diff --git 
a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp index 3dbfb1ac924..747c92faa4c 100755 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include // #include diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index 1a2a8010336..91f4737a6cd 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/test/unit/math/laplace/laplace_skim_test.cpp b/test/unit/math/laplace/laplace_skim_test.cpp index d79e7e039ba..0e01181c49c 100755 --- a/test/unit/math/laplace/laplace_skim_test.cpp +++ b/test/unit/math/laplace/laplace_skim_test.cpp @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include #include diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index ed7c5293e94..06f99d018e3 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -115,10 +116,10 @@ class laplace_motorcyle_gp_test : public::testing::Test { n_obs = 133; read_data(n_obs, "test/unit/math/laplace/motorcycle_gp/", x, y); - std::cout << "x: "; - for (int i = 0; i < 5; i++) std::cout << x[i] << " "; - std::cout << " ..." << std::endl; - std::cout << "y: " << y.transpose().head(5) << " ..." << std::endl; + // std::cout << "x: "; + // for (int i = 0; i < 5; i++) std::cout << x[i] << " "; + // std::cout << " ..." << std::endl; + // std::cout << "y: " << y.transpose().head(5) << " ..." << std::endl; } length_scale_f = 0.3; @@ -211,6 +212,7 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { phi_u0(0) += eps; phi_l0(0) -= eps; + // TODO: test all the gradients in code (rather than doing it manually) double target_u0 = laplace_marginal_density(diff_functor, covariance_motorcycle_functor(), @@ -268,6 +270,7 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff_eta) { << std::endl; // finite diff benchmark + // TODO: test all the gradients in code (rather than doing it manually) double eps = 1e-7; Eigen::VectorXd eta_dbl = value_of(eta); Eigen::VectorXd eta_u = eta_dbl, eta_l = eta_dbl; @@ -292,3 +295,29 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff_eta) { std::cout << "gf[4]: " << (target_u - target_l) / (2 * eps) << std::endl; } + +TEST_F(laplace_motorcyle_gp_test, wrapper_function) { + using stan::math::var; + using stan::math::laplace_marginal_lpdf; + + // TODO: move this to the class test. 
+ Eigen::Matrix eta(1); + eta(0) = 1; + int hessian_block_size = 2; + + var marginal_density + = laplace_marginal_lpdf(y, normal_likelihood2(), eta, delta_int, + covariance_motorcycle_functor(), phi, + x, delta_dummy, delta_int, theta0, + 0, 0, 1e-8, 100, hessian_block_size, + compute_W_root); + + std::cout << "density: " << marginal_density << std::endl; + + VEC g; + AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3), eta(0)); + marginal_density.grad(parm_vec, g); + std::cout << "grad: " + << g[0] << " " << g[1] << " " << g[2] << " " << g[3] << " " << g[4] + << std::endl; +} From 23b20ae01b20c6972236f9e5cffc9297753f0ce0 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 31 Mar 2021 17:13:28 -0400 Subject: [PATCH 34/53] lpdf and lpmf wrapper for general laplace approximation. --- stan/math/laplace/laplace.hpp | 1 + stan/math/laplace/laplace_marginal_lpdf.hpp | 113 ++++++++++++++++++++ test/unit/math/laplace/disease_map_test.cpp | 25 ++++- 3 files changed, 138 insertions(+), 1 deletion(-) create mode 100644 stan/math/laplace/laplace_marginal_lpdf.hpp diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp index 962cb65d476..44667927f0f 100644 --- a/stan/math/laplace/laplace.hpp +++ b/stan/math/laplace/laplace.hpp @@ -4,6 +4,7 @@ #include #include #include +#include // #include #endif diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp new file mode 100644 index 00000000000..cf128107814 --- /dev/null +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -0,0 +1,113 @@ + #ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_LPDF_HPP +#define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_LPDF_HPP + +#include +#include + +namespace stan { +namespace math { + /** + * Wrapper function around the laplace_marginal function. + * Returns the marginal density p(y | phi) by marginalizing out + * the latent gaussian variable, with a Laplace approximation. + * See the laplace_marginal function for more details. + * The data y is assumed to be real. + * The function is "overloaded" below for the int y and lpmf case. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @tparam T2 The type of the auxiliary parameter, eta. + * @tparam K The function which returns the prior covariance matrix. + * @tparam F The function which returns the log likelihood. + * @param[in] y fixed real data to be passed to the log likelihood. + * @param[in] L_f a function which returns the log likelihood. + * @param[in] eta non-marginalized parameters for the log likelihood. + * @param[in] delta_int_f integer data to be passed to the log likelihood. + * @param[in] K_f a function which returns the prior + * covariance for the marginalized out latent Gaussian. + * @param[in] phi model parameters for the covariance function. + * @param[in] x data for the covariance function. + * @param[in] delta additional real data for the covariance matrix. + * @param[in] delta_int_k additional int data for the covariance matrix. + * @param[in] theta_0 initial guess for the Newton solver which returns + * the Laplace approximation. + * @param[in] msgs_f message stream for the log likelihood function. + * @param[in] msgs_k message stream for the covariance function. + * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. 
+ * @param[in] hessian_block_size the size of the block for a block-diagonal + * Hessian of the log likelihood. If 0, the Hessian is stored + * inside a vector. If the Hessian is dense, this should be the + * size of the Hessian. + * @param[in] compute_W_root if 1, the Newton solver computes the root of W, + * the negative Hessian of the log likelihood, which leads to + * efficient computation. Else, a more general but slower solver + * is used. + */ + template + stan::return_type_t laplace_marginal_lpdf + (const Eigen::VectorXd& y, + const L& L_f, + const Eigen::Matrix& eta, + const std::vector& delta_int_f, + const K& K_f, + const Eigen::Matrix& phi, + const Tx& x, + const std::vector& delta, + const std::vector& delta_int_k, + const Eigen::Matrix& theta_0, + std::ostream* msgs_f = nullptr, + std::ostream* msgs_k = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { + + return laplace_marginal_density( + diff_likelihood(L_f, y, delta_int_f, msgs_f), + K_f, phi, eta, x, delta, delta_int_k, + theta_0, msgs_k, tolerance, max_num_steps, + hessian_block_size, compute_W_root); + } + + /** + * Overloaded function for lpmf case. The first argument + * is now a std::vector of interger and an Eigen::VectorXd + * of double is passed as data. + */ + template + stan::return_type_t laplace_marginal_lpmf + (const std::vector& y, + const L& L_f, + const Eigen::Matrix& eta, + const Eigen::VectorXd& delta_f, + const K& K_f, + const Eigen::Matrix& phi, + const Tx& x, + const std::vector& delta, + const std::vector& delta_int_k, + const Eigen::Matrix& theta_0, + std::ostream* msgs_f = nullptr, + std::ostream* msgs_k = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { + + return laplace_marginal_lpdf(delta_f, L_f, eta, y, + K_f, phi, x, delta, delta_int_k, + theta_0, msgs_f, msgs_k, + tolerance, + max_num_steps, + hessian_block_size, + compute_W_root); + } + +} // namespace math +} // namespace stan + +#endif diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 4de359cec46..ecde00b785f 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include #include @@ -306,3 +306,26 @@ TEST_F(laplace_disease_map_test, rng_autodiff) { << "total time: " << elapsed_time.count() << std::endl << std::endl; } + +TEST_F(laplace_disease_map_test, lpmf_wrapper) { + using stan::math::var; + using stan::math::laplace_marginal_lpmf; + + int hessian_block_size = 0; + int compute_W_root = 1; + + var marginal_density + = laplace_marginal_lpmf(n_samples, poisson_log_likelihood(), + eta_dummy, delta_lk, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, theta_0); + + VEC g; + AVEC parm_vec = createAVEC(phi(0), phi(1)); + marginal_density.grad(parm_vec, g); + + std::cout << "LAPLACE MARGINAL LPMF AND VARI CLASS" << std::endl + << "density: " << value_of(marginal_density) << std::endl + << "autodiff grad: " << g[0] << " " << g[1] << std::endl + << std::endl; +} From 3a9df9e131482e335e2ceba8ca1c1d135259ad1b Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Thu, 1 Apr 2021 10:49:57 -0400 Subject: [PATCH 35/53] update rng functions. 
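The lpmf wrapper above expects a user-supplied log-likelihood functor with the same shape as poisson_log_likelihood in the disease-map test: (theta, eta, real data, integer data, message stream). As a hedged sketch (the struct name and the Poisson kernel are illustrative only; the wrapper does not mandate this particular likelihood), a compatible functor would look like:

#include <stan/math.hpp>
#include <vector>
#include <ostream>

// Sketch of a user-supplied log likelihood for laplace_marginal_lpmf.
// The integer observations y arrive through the integer-data slot; the
// real-data vector is unused in this example.
struct poisson_log_likelihood_sketch {
  template <typename T_theta, typename T_eta>
  stan::return_type_t<T_theta, T_eta> operator()(
      const Eigen::Matrix<T_theta, Eigen::Dynamic, 1>& theta,
      const Eigen::Matrix<T_eta, Eigen::Dynamic, 1>& /* eta */,
      const Eigen::VectorXd& /* delta_L */,
      const std::vector<int>& y,
      std::ostream* /* pstream */) const {
    using stan::math::exp;
    using std::exp;
    stan::return_type_t<T_theta, T_eta> lp = 0;
    for (int i = 0; i < theta.size(); ++i)
      lp += y[i] * theta(i) - exp(theta(i));  // Poisson log kernel, log link
    return lp;
  }
};

// Hypothetical call, mirroring the lpmf_wrapper test above:
//   var lp = laplace_marginal_lpmf(y, poisson_log_likelihood_sketch(), eta,
//                                  delta_L, K_f, phi, x, delta_K, delta_int_K,
//                                  theta_0);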
--- stan/math/laplace/laplace.hpp | 5 +- .../laplace_marginal_bernoulli_logit_lpmf.hpp | 2 +- stan/math/laplace/laplace_marginal_lpdf.hpp | 32 +++---- stan/math/laplace/prob/laplace_base_rng.hpp | 91 ++++++++++++++++++ .../prob/laplace_bernoulli_logit_rng.hpp | 8 +- .../laplace/prob/laplace_poisson_log_rng.hpp | 24 ++--- stan/math/laplace/prob/laplace_rng.hpp | 95 ++++++------------- test/unit/math/laplace/disease_map_test.cpp | 46 ++++++--- test/unit/math/laplace/gp_cycle_data.r | 12 +++ 9 files changed, 200 insertions(+), 115 deletions(-) create mode 100644 stan/math/laplace/prob/laplace_base_rng.hpp create mode 100644 test/unit/math/laplace/gp_cycle_data.r diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp index 44667927f0f..61ca5c2e262 100644 --- a/stan/math/laplace/laplace.hpp +++ b/stan/math/laplace/laplace.hpp @@ -3,8 +3,9 @@ #include #include -#include #include -// #include +#include +#include +#include #endif diff --git a/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp b/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp index 899c651c758..01392cbc4cc 100644 --- a/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp +++ b/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp @@ -1,4 +1,4 @@ - #ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_LPMF_HPP +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_LPMF_HPP #define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_BERNOULLI_LPMF_HPP #include diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp index cf128107814..d9f9aba8a1b 100644 --- a/stan/math/laplace/laplace_marginal_lpdf.hpp +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -52,24 +52,24 @@ namespace math { (const Eigen::VectorXd& y, const L& L_f, const Eigen::Matrix& eta, - const std::vector& delta_int_f, + const std::vector& delta_int_L, const K& K_f, const Eigen::Matrix& phi, const Tx& x, - const std::vector& delta, - const std::vector& delta_int_k, + const std::vector& delta_K, + const std::vector& delta_int_K, const Eigen::Matrix& theta_0, - std::ostream* msgs_f = nullptr, - std::ostream* msgs_k = nullptr, + std::ostream* msgs_L = nullptr, + std::ostream* msgs_K = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, int compute_W_root = 1) { return laplace_marginal_density( - diff_likelihood(L_f, y, delta_int_f, msgs_f), - K_f, phi, eta, x, delta, delta_int_k, - theta_0, msgs_k, tolerance, max_num_steps, + diff_likelihood(L_f, y, delta_int_L, msgs_L), + K_f, phi, eta, x, delta_K, delta_int_K, + theta_0, msgs_K, tolerance, max_num_steps, hessian_block_size, compute_W_root); } @@ -84,23 +84,23 @@ namespace math { (const std::vector& y, const L& L_f, const Eigen::Matrix& eta, - const Eigen::VectorXd& delta_f, + const Eigen::VectorXd& delta_L, const K& K_f, const Eigen::Matrix& phi, const Tx& x, - const std::vector& delta, - const std::vector& delta_int_k, + const std::vector& delta_K, + const std::vector& delta_int_K, const Eigen::Matrix& theta_0, - std::ostream* msgs_f = nullptr, - std::ostream* msgs_k = nullptr, + std::ostream* msgs_L = nullptr, + std::ostream* msgs_K = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, int compute_W_root = 1) { - return laplace_marginal_lpdf(delta_f, L_f, eta, y, - K_f, phi, x, delta, delta_int_k, - theta_0, msgs_f, msgs_k, + return laplace_marginal_lpdf(delta_L, L_f, eta, y, + K_f, phi, x, delta_K, delta_int_K, + theta_0, msgs_L, msgs_K, tolerance, 
max_num_steps, hessian_block_size, diff --git a/stan/math/laplace/prob/laplace_base_rng.hpp b/stan/math/laplace/prob/laplace_base_rng.hpp new file mode 100644 index 00000000000..dcd3575e9e4 --- /dev/null +++ b/stan/math/laplace/prob/laplace_base_rng.hpp @@ -0,0 +1,91 @@ +#ifndef STAN_MATH_LAPLACE_PROB_LAPLACE_BASE_RNG_HPP +#define STAN_MATH_LAPLACE_PROB_LAPLACE_BASE_RNG_HPP + +#include +#include +#include + +#include +#include + +namespace stan { +namespace math { + +/** + * In a latent gaussian model, + * + * theta ~ Normal(theta | 0, Sigma(phi, x)) + * y ~ pi(y | theta, eta) + * + * returns a multivariate normal random variate sampled + * from the gaussian approximation of p(theta_pred | y, phi, x_pred). + * Note that while the data is observed at x, the new samples + * are drawn for covariates x_pred. + * To sample the "original" theta's, set x_pred = x. + */ +template +inline Eigen::VectorXd // CHECK -- right return type +laplace_base_rng + (const D& diff_likelihood, + const K& covariance_function, + const Eigen::Matrix& phi, + const Eigen::Matrix& eta, + const T_x& x, + const T_x_pred& x_pred, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + RNG& rng, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { + using Eigen::VectorXd; + using Eigen::MatrixXd; + + VectorXd phi_dbl = value_of(phi); + VectorXd eta_dbl = value_of(eta); + Eigen::SparseMatrix W_r; + MatrixXd L; + Eigen::PartialPivLU LU; + VectorXd l_grad; + MatrixXd covariance; + { + VectorXd theta; + VectorXd a; + double marginal_density + = laplace_marginal_density(diff_likelihood, covariance_function, + phi_dbl, eta_dbl, + x, delta, delta_int, + covariance, theta, W_r, L, a, l_grad, + LU, value_of(theta_0), msgs, + tolerance, max_num_steps, + hessian_block_size, compute_W_root); + } + + // Modified R&W method + MatrixXd covariance_pred = covariance_function(phi_dbl, x_pred, + delta, delta_int, msgs); + VectorXd pred_mean = covariance_pred * l_grad; + + Eigen::MatrixXd Sigma; + if (compute_W_root) { + Eigen::MatrixXd V_dec = mdivide_left_tri(L, + W_r * covariance_pred); + Sigma = covariance_pred - V_dec.transpose() * V_dec; + } else { + Sigma = covariance_pred + - covariance_pred * (W_r - W_r * LU.solve(covariance * W_r)) + * covariance_pred; + } + + return multi_normal_rng(pred_mean, Sigma, rng); +} + +} // namespace math +} // namespace stan + +#endif diff --git a/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp index 7e3b8e31f65..4a2cafe1c72 100644 --- a/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp +++ b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp @@ -1,7 +1,7 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_BERNOULLI_RNG_HPP #define STAN_MATH_LAPLACE_LAPLACE_APPROX_BERNOULLI_RNG_HPP -#include +#include namespace stan { namespace math { @@ -33,9 +33,9 @@ inline Eigen::VectorXd // CHECK -- right return type double tolerance = 1e-6, long int max_num_steps = 100) { return - laplace_rng(diff_logistic_log(to_vector(n_samples), to_vector(y)), - covariance_function, phi, x, delta, delta_int, theta_0, - rng, msgs, tolerance, max_num_steps); + laplace_base_rng(diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), + covariance_function, phi, x, x, delta, delta_int, theta_0, + rng, msgs, tolerance, max_num_steps); } } // namespace math diff --git a/stan/math/laplace/prob/laplace_poisson_log_rng.hpp 
b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp index 66a23f5dddc..ae1a64d630f 100644 --- a/stan/math/laplace/prob/laplace_poisson_log_rng.hpp +++ b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp @@ -1,7 +1,7 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_APPROX_POISSON_RNG_HPP #define STAN_MATH_LAPLACE_LAPLACE_APPROX_POISSON_RNG_HPP -#include +#include namespace stan { namespace math { @@ -16,7 +16,7 @@ namespace math { * from the gaussian approximation of p(theta | y, phi) * where the likelihood is a Poisson with a log link. */ -template inline Eigen::VectorXd laplace_poisson_log_rng @@ -25,7 +25,7 @@ inline Eigen::VectorXd const K& covariance_function, const Eigen::Matrix& phi, const T2& x, - const T3& x_pred, + // const T3& x_pred, const std::vector& delta, const std::vector& delta_int, const Eigen::Matrix& theta_0, @@ -37,17 +37,17 @@ inline Eigen::VectorXd int compute_W_root = 1) { Eigen::VectorXd eta_dummy; return - laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), - covariance_function, phi, eta_dummy, - x, x_pred, delta, delta_int, theta_0, - rng, msgs, tolerance, max_num_steps, - hessian_block_size, compute_W_root); + laplace_base_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), + covariance_function, phi, eta_dummy, + x, x, delta, delta_int, theta_0, + rng, msgs, tolerance, max_num_steps, + hessian_block_size, compute_W_root); } /** * Overload for case where user passes exposure. */ -template inline Eigen::VectorXd // CHECK -- right return type laplace_poisson_log_rng @@ -57,7 +57,7 @@ inline Eigen::VectorXd // CHECK -- right return type const K& covariance_function, const Eigen::Matrix& phi, const T2& x, - const T3& x_pred, + // const T3& x_pred, const std::vector& delta, const std::vector& delta_int, const Eigen::Matrix& theta_0, @@ -69,9 +69,9 @@ inline Eigen::VectorXd // CHECK -- right return type int compute_W_root = 1) { Eigen::VectorXd eta_dummy; return - laplace_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), + laplace_base_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), log(exposure)), - covariance_function, phi, eta_dummy, x, x_pred, delta, + covariance_function, phi, eta_dummy, x, x, delta, delta_int, theta_0, rng, msgs, tolerance, max_num_steps, hessian_block_size, compute_W_root); diff --git a/stan/math/laplace/prob/laplace_rng.hpp b/stan/math/laplace/prob/laplace_rng.hpp index 61af3916926..2e4305160a2 100644 --- a/stan/math/laplace/prob/laplace_rng.hpp +++ b/stan/math/laplace/prob/laplace_rng.hpp @@ -1,12 +1,7 @@ -#ifndef STAN_MATH_LAPLACE_PROB_LAPLACE_APPROX_RNG_HPP -#define STAN_MATH_LAPLACE_PROB_LAPLACE_APPROX_RNG_HPP +#ifndef STAN_MATH_LAPLACE_LAPLACE_RNG_HPP +#define STAN_MATH_LAPLACE_LAPLACE_RNG_HPP -#include -#include -#include - -#include -#include +#include namespace stan { namespace math { @@ -14,77 +9,43 @@ namespace math { /** * In a latent gaussian model, * - * theta ~ Normal(theta | 0, Sigma(phi, x)) - * y ~ pi(y | theta, eta) + * theta ~ Normal(theta | 0, Sigma(phi)) + * y ~ pi(y | theta) * - * returns a multivariate normal random variate sampled - * from the gaussian approximation of p(theta_pred | y, phi, x_pred). - * Note that while the data is observed at x, the new samples - * are drawn for covariates x_pred. - * To sample the "original" theta's, set x_pred = x. + * return a multivariate normal random variate sampled + * from the gaussian approximation of p(theta | y, phi) + * where the log likelihood is given by L_f. 
*/ -template -inline Eigen::VectorXd // CHECK -- right return type -laplace_rng - (const D& diff_likelihood, - const K& covariance_function, - const Eigen::Matrix& phi, +template +inline Eigen::VectorXd + laplace_rng + (const L& L_f, const Eigen::Matrix& eta, + const Eigen::VectorXd& delta_L, + const std::vector& delta_int_L, + const K& K_f, + const Eigen::Matrix& phi, const T_x& x, - const T_x_pred& x_pred, - const std::vector& delta, - const std::vector& delta_int, + const std::vector& delta_K, + const std::vector& delta_int_K, const Eigen::Matrix& theta_0, RNG& rng, - std::ostream* msgs = nullptr, + std::ostream* msgs_L = nullptr, + std::ostream* msgs_K = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, int compute_W_root = 1) { - using Eigen::VectorXd; - using Eigen::MatrixXd; - - VectorXd phi_dbl = value_of(phi); - VectorXd eta_dbl = value_of(eta); - Eigen::SparseMatrix W_r; - MatrixXd L; - Eigen::PartialPivLU LU; - VectorXd l_grad; - MatrixXd covariance; - { - VectorXd theta; - VectorXd a; - double marginal_density - = laplace_marginal_density(diff_likelihood, covariance_function, - phi_dbl, eta_dbl, - x, delta, delta_int, - covariance, theta, W_r, L, a, l_grad, - LU, value_of(theta_0), msgs, - tolerance, max_num_steps, - hessian_block_size, compute_W_root); + return + laplace_base_rng( + diff_likelihood(L_f, delta_L, delta_int_L, msgs_L), + K_f, phi, eta, + x, x, delta_K, delta_int_K, theta_0, + rng, msgs_K, tolerance, max_num_steps, + hessian_block_size, compute_W_root); } - // Modified R&W method - MatrixXd covariance_pred = covariance_function(phi_dbl, x_pred, - delta, delta_int, msgs); - VectorXd pred_mean = covariance_pred * l_grad; - - Eigen::MatrixXd Sigma; - if (compute_W_root) { - Eigen::MatrixXd V_dec = mdivide_left_tri(L, - W_r * covariance_pred); - Sigma = covariance_pred - V_dec.transpose() * V_dec; - } else { - Sigma = covariance_pred - - covariance_pred * (W_r - W_r * LU.solve(covariance * W_r)) - * covariance_pred; - } - - return multi_normal_rng(pred_mean, Sigma, rng); -} - } // namespace math } // namespace stan diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index ecde00b785f..5967649d3d0 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include @@ -143,7 +143,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { using stan::math::diff_poisson_log; using stan::math::to_vector; - using stan::math::laplace_rng; + using stan::math::laplace_base_rng; using stan::math::laplace_poisson_log_rng; diff_poisson_log diff_likelihood(to_vector(n_samples), @@ -152,10 +152,10 @@ TEST_F(laplace_disease_map_test, lk_analytical) { boost::random::mt19937 rng; start = std::chrono::system_clock::now(); Eigen::VectorXd - theta_pred = laplace_rng(diff_likelihood, - sqr_exp_kernel_functor(), - phi, eta_dummy, x, x, delta, delta_int, - theta_0, rng); + theta_pred = laplace_base_rng(diff_likelihood, + sqr_exp_kernel_functor(), + phi, eta_dummy, x, x, delta, delta_int, + theta_0, rng); end = std::chrono::system_clock::now(); elapsed_time = end - start; @@ -170,7 +170,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { start = std::chrono::system_clock::now(); theta_pred = laplace_poisson_log_rng(y, n_samples, ye, sqr_exp_kernel_functor(), - phi, x, x, delta, delta_int, + phi, x, delta, delta_int, theta_0, rng); end = std::chrono::system_clock::now(); elapsed_time 
= end - start; @@ -284,7 +284,7 @@ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { TEST_F(laplace_disease_map_test, rng_autodiff) { using stan::math::var; - using stan::math::laplace_rng; + using stan::math::laplace_base_rng; using stan::math::diff_likelihood; diff_likelihood diff_functor(f, delta_lk, n_samples); @@ -295,11 +295,12 @@ TEST_F(laplace_disease_map_test, rng_autodiff) { auto start = std::chrono::system_clock::now(); Eigen::VectorXd - theta_pred = laplace_rng(diff_functor, - sqr_exp_kernel_functor(), - phi, eta_dummy, - x, x, delta, delta_int, theta_0, rng, - 0, 1e-6, 100, hessian_block_size, compute_W_root); + theta_pred = laplace_base_rng(diff_functor, + sqr_exp_kernel_functor(), + phi, eta_dummy, + x, x, delta, delta_int, theta_0, rng, + 0, 1e-6, 100, hessian_block_size, + compute_W_root); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; std::cout << "LAPLACE_APPROX_RNG" << std::endl @@ -329,3 +330,22 @@ TEST_F(laplace_disease_map_test, lpmf_wrapper) { << "autodiff grad: " << g[0] << " " << g[1] << std::endl << std::endl; } + +TEST_F(laplace_disease_map_test, rng_wrapper) { + using stan::math::var; + using stan::math::laplace_rng; + + // TODO: put these variables in the test class. + boost::random::mt19937 rng; + int hessian_block_size = 0; + int compute_W_root = 1; + + Eigen::VectorXd + theta_pred = laplace_rng(poisson_log_likelihood(), + eta_dummy, delta_lk, n_samples, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, theta_0, rng); + + // std::cout << "theta_pred: " << theta_pred.transpose().head(5) << std::endl; + +} diff --git a/test/unit/math/laplace/gp_cycle_data.r b/test/unit/math/laplace/gp_cycle_data.r new file mode 100644 index 00000000000..49549f59ccd --- /dev/null +++ b/test/unit/math/laplace/gp_cycle_data.r @@ -0,0 +1,12 @@ + +data(mcycle, package="MASS") +setwd("~/Code/laplace_approximation/math/test/unit/math/laplace") + +write.table(t(mcycle$times), file = "motorcycle_gp/x_vec.csv", + row.names = FALSE, col.names = FALSE, quote = FALSE, sep = " ") + +write.table(t(mcycle$accel), file = "motorcycle_gp/y_vec.csv", + row.names = FALSE, col.names = FALSE, quote = FALSE, sep = " ") + +# write.csv(as.vector(mcycle$times), "motorcycle_gp/x_vec.csv") +# write.csv(mcycle$accel, "motorcyle_gp/y_vec.csv") From 77e343f6e33d8c85f09deb678877a51c1776d615 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Thu, 1 Apr 2021 16:17:54 -0400 Subject: [PATCH 36/53] Update all (relevant) unit tests and make sure they run. 
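The rng unit-test updates in this patch print a "brute force" posterior covariance and compare it against the vectorized Rasmussen & Williams computation. A self-contained sketch of the matrix identity being checked, with toy numbers and plain Eigen (not part of the patch):

#include <Eigen/Dense>
#include <iostream>

// (K^{-1} + W)^{-1} == K - K W^{1/2} (I + W^{1/2} K W^{1/2})^{-1} W^{1/2} K
// for a prior covariance K and the (diagonal) negative Hessian W at the mode.
int main() {
  Eigen::MatrixXd K(2, 2);
  K << 2.0, 0.5,
       0.5, 1.0;                      // toy prior covariance
  Eigen::VectorXd w(2);
  w << 0.7, 1.3;                      // toy diagonal of W
  Eigen::MatrixXd W = w.asDiagonal();
  Eigen::VectorXd w_root = w.cwiseSqrt();
  Eigen::MatrixXd W_root = w_root.asDiagonal();
  Eigen::MatrixXd I = Eigen::MatrixXd::Identity(2, 2);

  Eigen::MatrixXd brute_force = (K.inverse() + W).inverse();
  Eigen::MatrixXd method_1
      = K - K * W_root * (I + W_root * K * W_root).inverse() * W_root * K;

  std::cout << (brute_force - method_1).norm() << std::endl;  // ~0 up to rounding
  return 0;
}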
--- .../laplace_likelihood_bernoulli_logit.hpp | 3 +- .../prob/laplace_bernoulli_logit_rng.hpp | 4 +- .../laplace_bernoulli_logit_rng_test.cpp | 31 ++++++++----- .../laplace_marginal_bernoulli_logit_test.cpp | 44 +++++++++---------- .../laplace_marginal_poisson_log_test.cpp | 12 ++--- .../laplace/laplace_poisson_log_rng_test.cpp | 27 +++++++----- 6 files changed, 66 insertions(+), 55 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp index 32783f3d4f0..477a0c60219 100644 --- a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp @@ -53,8 +53,7 @@ struct diff_bernoulli_logit { const Eigen::Matrix& eta_dummy, Eigen::Matrix& gradient, Eigen::SparseMatrix& hessian, - // Eigen::Matrix& hessian, - int block_size_dummy) const { + int block_size_dummy = 0) const { Eigen::Matrix exp_theta = exp(theta); int theta_size = theta.size(); Eigen::VectorXd one = rep_vector(1, theta_size); diff --git a/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp index 4a2cafe1c72..6abc4d2eb98 100644 --- a/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp +++ b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp @@ -32,9 +32,11 @@ inline Eigen::VectorXd // CHECK -- right return type std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100) { + Eigen::VectorXd eta_dummy; return laplace_base_rng(diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - covariance_function, phi, x, x, delta, delta_int, theta_0, + covariance_function, phi, eta_dummy, + x, x, delta, delta_int, theta_0, rng, msgs, tolerance, max_num_steps); } diff --git a/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp b/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp index 0e6532ed3e1..1f0692c0f32 100644 --- a/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp +++ b/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp @@ -1,6 +1,8 @@ #include #include +#include + #include #include @@ -80,9 +82,10 @@ TEST(laplace, basic_rng) { = algebra_solver(stationary_point(), theta_0, sigma, d0, di0); - Eigen::VectorXd gradient, W; - diff_likelihood.diff(theta_root, gradient, W); - W = -W; + Eigen::VectorXd gradient, eta_dummy; + Eigen::SparseMatrix W_sparse; + diff_likelihood.diff(theta_root, eta_dummy, gradient, W_sparse); + Eigen::MatrixXd W = - W_sparse; diagonal_kernel_functor covariance_function; std::vector x_dummy; Eigen::MatrixXd x_dummay_mat; @@ -90,7 +93,7 @@ TEST(laplace, basic_rng) { std::cout << "K (brute force): " << std::endl - << (K.inverse() + diag_matrix(W)).inverse() + << (K.inverse() + W).inverse() << std::endl << std::endl; // Method 2: Vectorized R&W method @@ -100,20 +103,25 @@ TEST(laplace, basic_rng) { // First find the mode using the custom Newton step Eigen::MatrixXd covariance; Eigen::VectorXd theta; - Eigen::VectorXd W_root; + // Eigen::VectorXd W_root; + Eigen::SparseMatrix W_r; Eigen::MatrixXd L; { Eigen::VectorXd a; Eigen::VectorXd l_grad; + Eigen::PartialPivLU LU_dummy; double marginal_density = laplace_marginal_density(diff_likelihood, covariance_function, - sigma, x_dummy, d0, di0, - covariance, theta, W_root, L, a, l_grad, + sigma, eta_dummy, x_dummy, d0, di0, + covariance, theta, W_r, L, a, l_grad, + LU_dummy, value_of(theta_0), 0, tolerance, max_num_steps); } + Eigen::VectorXd W_root(theta.size()); + for (int i = 0; i < theta.size(); i++) 
W_root(i) = W_r.coeff(i, i); Eigen::MatrixXd V; V = mdivide_left_tri(L, diag_pre_multiply(W_root, covariance)); @@ -132,13 +140,14 @@ TEST(laplace, basic_rng) { // Check calls to rng functions compile boost::random::mt19937 rng; Eigen::MatrixXd theta_pred - = laplace_rng(diff_likelihood, covariance_function, - sigma, x_dummy, d0, di0, theta_0, - rng); + = laplace_base_rng(diff_likelihood, covariance_function, + sigma, eta_dummy, x_dummy, x_dummy, d0, di0, theta_0, + rng); theta_pred = laplace_bernoulli_logit_rng(sums, n_samples, covariance_function, - sigma, x_dummay_mat, d0, di0, theta_0, rng); + sigma, x_dummay_mat, + d0, di0, theta_0, rng); // Bonus: make the distribution with a poisson rng also runs. theta_pred diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp index 747c92faa4c..7cf50d646dc 100755 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp @@ -1,9 +1,8 @@ #include -#include +#include #include #include -// #include #include #include @@ -13,7 +12,7 @@ #include TEST(laplace, likelihood_differentiation) { - using stan::math::diff_logistic_log; + using stan::math::diff_bernoulli_logit; using stan::math::var; double test_tolerance = 2e-4; @@ -26,9 +25,10 @@ TEST(laplace, likelihood_differentiation) { Eigen::Matrix theta_v = theta; Eigen::VectorXd eta_dummy; - diff_logistic_log diff_functor(n_samples, y); + diff_bernoulli_logit diff_functor(n_samples, y); double log_density = diff_functor.log_likelihood(theta, eta_dummy); - Eigen::VectorXd gradient, hessian; + Eigen::VectorXd gradient; + Eigen::SparseMatrix hessian; diff_functor.diff(theta, eta_dummy, gradient, hessian); Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); @@ -55,8 +55,8 @@ TEST(laplace, likelihood_differentiation) { EXPECT_NEAR(diff_2, gradient(1), test_tolerance); // finite diff calculation for second-order derivatives - Eigen::VectorXd gradient_1u, gradient_1l, hessian_1u, hessian_1l, - gradient_2u, gradient_2l, hessian_2u, hessian_2l; + Eigen::VectorXd gradient_1u, gradient_1l, gradient_2u, gradient_2l; + Eigen::SparseMatrix hessian_1u, hessian_1l, hessian_2u, hessian_2l; diff_functor.diff(theta_1u, eta_dummy, gradient_1u, hessian_1u); diff_functor.diff(theta_1l, eta_dummy, gradient_1l, hessian_1l); diff_functor.diff(theta_2u, eta_dummy, gradient_2u, hessian_2u); @@ -65,12 +65,14 @@ TEST(laplace, likelihood_differentiation) { double diff_grad_1 = (gradient_1u(0) - gradient_1l(0)) / (2 * diff); double diff_grad_2 = (gradient_2u(1) - gradient_2l(1)) / (2 * diff); - EXPECT_NEAR(diff_grad_1, hessian(0), test_tolerance); - EXPECT_NEAR(diff_grad_2, hessian(1), test_tolerance); + EXPECT_NEAR(diff_grad_1, hessian.coeff(0, 0), test_tolerance); + EXPECT_NEAR(diff_grad_2, hessian.coeff(1, 1), test_tolerance); // finite diff calculation for third-order derivatives - double diff_hess_1 = (hessian_1u(0) - hessian_1l(0)) / (2 * diff); - double diff_hess_2 = (hessian_2u(1) - hessian_2l(1)) / (2 * diff); + double diff_hess_1 = (hessian_1u.coeff(0, 0) - hessian_1l.coeff(0, 0)) + / (2 * diff); + double diff_hess_2 = (hessian_2u.coeff(1, 1) - hessian_2l.coeff(1, 1)) + / (2 * diff); EXPECT_NEAR(diff_hess_1, third_tensor(0), test_tolerance); EXPECT_NEAR(diff_hess_2, third_tensor(1), test_tolerance); @@ -79,8 +81,7 @@ TEST(laplace, likelihood_differentiation) { TEST(laplace, logistic_lgm_dim500) { using stan::math::var; using 
stan::math::to_vector; - using stan::math::diff_logistic_log; - using stan::math::sqr_exp_kernel_functor; + using stan::math::diff_bernoulli_logit; int dim_theta = 500; int n_observations = 500; @@ -105,7 +106,8 @@ TEST(laplace, logistic_lgm_dim500) { Eigen::VectorXd theta_0 = Eigen::VectorXd::Zero(dim_theta); - Eigen::VectorXd theta_laplace, W_root, a, l_grad; + Eigen::VectorXd theta_laplace, a, l_grad; + Eigen::SparseMatrix W_root; Eigen::MatrixXd L, covariance; std::vector delta; std::vector delta_int; @@ -114,15 +116,17 @@ TEST(laplace, logistic_lgm_dim500) { Eigen::VectorXd phi(2); phi << 1.6, 1; // standard deviation, length scale Eigen::VectorXd eta_dummy; + Eigen::PartialPivLU LU_dummy; auto start_optimization = std::chrono::system_clock::now(); double marginal_density = laplace_marginal_density( - diff_logistic_log(to_vector(n_samples), to_vector(y)), + diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), sqr_exp_kernel_functor(), phi, eta_dummy, x, delta, delta_int, covariance, theta_laplace, W_root, L, a, l_grad, + LU_dummy, theta_0, 0, 1e-3, 100); auto end_optimization = std::chrono::system_clock::now(); @@ -145,7 +149,7 @@ TEST(laplace, logistic_lgm_dim500) { start_optimization = std::chrono::system_clock::now(); var marginal_density_v = laplace_marginal_density( - diff_logistic_log(to_vector(n_samples), to_vector(y)), + diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), sqr_exp_kernel_functor(), phi_v2, eta_dummy_v, x, delta, delta_int, theta_0, 0, 1e-3, 100); @@ -175,14 +179,8 @@ TEST(laplace, logistic_lgm_dim500) { using stan::math::laplace_marginal_bernoulli_logit_lpmf; using stan::math::value_of; - double marginal_density_v2 - = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, - phi, x, delta, delta_int, - theta_0, 0, 1e-3, 100); - EXPECT_FLOAT_EQ(marginal_density, marginal_density_v2); - - marginal_density_v2 + double marginal_density_v2 = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, sqr_exp_kernel_functor(), phi, x, delta, delta_int, diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index 91f4737a6cd..b6e5dbc0f2c 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -25,15 +25,15 @@ TEST(laplace, likelihood_differentiation) { to_vector(sums)); double log_density = diff_functor.log_likelihood(theta, eta_dummy); Eigen::VectorXd gradient; - Eigen::MatrixXd hessian; + Eigen::SparseMatrix hessian; diff_functor.diff(theta, eta_dummy, gradient, hessian); Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); EXPECT_FLOAT_EQ(-4.436564, log_density); EXPECT_FLOAT_EQ(-1.718282, gradient(0)); EXPECT_FLOAT_EQ(-2.718282, gradient(1)); - EXPECT_FLOAT_EQ(-2.718282, hessian(0, 0)); - EXPECT_FLOAT_EQ(-2.718282, hessian(1, 0)); + EXPECT_FLOAT_EQ(-2.718282, hessian.coeff(0, 0)); + EXPECT_FLOAT_EQ(-2.718282, hessian.coeff(1, 1)); EXPECT_FLOAT_EQ(-2.718282, third_tensor(0)); EXPECT_FLOAT_EQ(-2.718282, third_tensor(1)); } @@ -57,15 +57,15 @@ TEST(laplace, likelihood_differentiation2) { double log_density = diff_functor.log_likelihood(theta, eta_dummy); Eigen::VectorXd gradient; - Eigen::MatrixXd hessian; + Eigen::SparseMatrix hessian; diff_functor.diff(theta, eta_dummy, gradient, hessian); Eigen::VectorXd third_tensor = diff_functor.third_diff(theta, eta_dummy); EXPECT_FLOAT_EQ(-6.488852, log_density); EXPECT_FLOAT_EQ(-0.3591409, gradient(0)); EXPECT_FLOAT_EQ(-5.4365637, 
gradient(1)); - EXPECT_FLOAT_EQ(-1.359141, hessian(0, 0)); - EXPECT_FLOAT_EQ(-5.436564, hessian(1, 0)); + EXPECT_FLOAT_EQ(-1.359141, hessian.coeff(0, 0)); + EXPECT_FLOAT_EQ(-5.436564, hessian.coeff(1, 1)); EXPECT_FLOAT_EQ(-1.359141, third_tensor(0)); EXPECT_FLOAT_EQ(-5.436564, third_tensor(1)); diff --git a/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp index 9f8fe810207..d722b420ee1 100644 --- a/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp +++ b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp @@ -77,16 +77,18 @@ TEST(laplace, basic_rng) { = algebra_solver(stationary_point(), theta_0, sigma, d0, di0); - Eigen::VectorXd gradient, W; - diff_likelihood.diff(theta_root, gradient, W); - W = -W; + Eigen::VectorXd gradient; + Eigen::SparseMatrix W_sparse; + Eigen::VectorXd eta_dummy; + diff_likelihood.diff(theta_root, eta_dummy, gradient, W_sparse); + Eigen::MatrixXd W = - W_sparse; diagonal_kernel_functor covariance_function; std::vector x_dummy; Eigen::MatrixXd K = covariance_function(sigma, x_dummy, d0, di0, 0); std::cout << "K (brute force): " << std::endl - << (K.inverse() + diag_matrix(W)).inverse() + << (K.inverse() + W).inverse() << std::endl << std::endl; // Method 2: Vectorized R&W method @@ -96,21 +98,25 @@ TEST(laplace, basic_rng) { // First find the mode using the custom Newton step Eigen::MatrixXd covariance; Eigen::VectorXd theta; - Eigen::VectorXd W_root; + Eigen::SparseMatrix W_r; Eigen::MatrixXd L; { Eigen::VectorXd a; Eigen::VectorXd l_grad; + Eigen::PartialPivLU LU_dummy; double marginal_density = laplace_marginal_density(diff_likelihood, covariance_function, - sigma, x_dummy, d0, di0, - covariance, theta, W_root, L, a, l_grad, + sigma, eta_dummy, x_dummy, d0, di0, + covariance, theta, W_r, L, a, l_grad, + LU_dummy, value_of(theta_0), 0, tolerance, max_num_steps); } Eigen::MatrixXd V; + Eigen::VectorXd W_root(theta.size()); + for (int i = 0; i < theta.size(); i++) W_root(i) = W_r.coeff(i, i); V = mdivide_left_tri(L, diag_pre_multiply(W_root, covariance)); std::cout << "K (method 1): " << std::endl @@ -129,9 +135,6 @@ TEST(laplace, basic_rng) { // Call to rng function boost::random::mt19937 rng; Eigen::MatrixXd theta_pred - = laplace_rng(diff_likelihood, covariance_function, - sigma, x_dummy, d0, di0, theta_0, rng); - - theta_pred = laplace_poisson_log_rng(sums, n_samples, covariance_function, - sigma, x_dummy, d0, di0, theta_0, rng); + = laplace_poisson_log_rng(sums, n_samples, covariance_function, + sigma, x_dummy, d0, di0, theta_0, rng); } From a554cfd784ce9dc6330641418cb56bd6c5e30e3e Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 2 Apr 2021 14:53:11 -0400 Subject: [PATCH 37/53] Add inline keyword for internal functions. --- stan/math/laplace/block_matrix_sqrt.hpp | 2 +- stan/math/laplace/hessian_block_diag.hpp | 4 ++-- stan/math/laplace/hessian_times_vector.hpp | 2 +- stan/math/laplace/laplace_marginal_lpdf.hpp | 2 +- stan/math/laplace/partial_diff_theta.hpp | 2 +- stan/math/laplace/third_diff_directional.hpp | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/stan/math/laplace/block_matrix_sqrt.hpp b/stan/math/laplace/block_matrix_sqrt.hpp index 116acca3e50..f1b310e5247 100644 --- a/stan/math/laplace/block_matrix_sqrt.hpp +++ b/stan/math/laplace/block_matrix_sqrt.hpp @@ -14,7 +14,7 @@ namespace math { * Return the matrix square-root for a block diagonal matrix. 
*/ Eigen::SparseMatrix - block_matrix_sqrt(Eigen::SparseMatrix W, + inline block_matrix_sqrt(Eigen::SparseMatrix W, int block_size) { int n_block = W.cols() / block_size; Eigen::MatrixXd local_block(block_size, block_size); diff --git a/stan/math/laplace/hessian_block_diag.hpp b/stan/math/laplace/hessian_block_diag.hpp index 6fbadaad7ca..93a13ebd7c8 100644 --- a/stan/math/laplace/hessian_block_diag.hpp +++ b/stan/math/laplace/hessian_block_diag.hpp @@ -16,7 +16,7 @@ namespace math { * hessian_times_vector, that is m forward sweeps and m reverse sweeps. */ template - void hessian_block_diag(const F& f, + inline void hessian_block_diag(const F& f, const Eigen::VectorXd& x, const Eigen::VectorXd& eta, const Eigen::VectorXd& delta, @@ -49,7 +49,7 @@ namespace math { * Overload for case where hessian is stored as a sparse matrix. */ template - void hessian_block_diag(const F& f, + inline void hessian_block_diag(const F& f, const Eigen::VectorXd& x, const Eigen::VectorXd& eta, const Eigen::VectorXd& delta, diff --git a/stan/math/laplace/hessian_times_vector.hpp b/stan/math/laplace/hessian_times_vector.hpp index c8d9eda6e1d..134da49d585 100644 --- a/stan/math/laplace/hessian_times_vector.hpp +++ b/stan/math/laplace/hessian_times_vector.hpp @@ -13,7 +13,7 @@ namespace math { * and pstream. */ template - void hessian_times_vector(const F& f, + inline void hessian_times_vector(const F& f, const Eigen::VectorXd& x, const Eigen::VectorXd& eta, const Eigen::VectorXd& delta, diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp index d9f9aba8a1b..c1aa4ff622e 100644 --- a/stan/math/laplace/laplace_marginal_lpdf.hpp +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -1,4 +1,4 @@ - #ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_LPDF_HPP +#ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_LPDF_HPP #define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_LPDF_HPP #include diff --git a/stan/math/laplace/partial_diff_theta.hpp b/stan/math/laplace/partial_diff_theta.hpp index 5d8f3b9c291..d08a72a9db2 100644 --- a/stan/math/laplace/partial_diff_theta.hpp +++ b/stan/math/laplace/partial_diff_theta.hpp @@ -17,7 +17,7 @@ namespace math { // TODO: address case where eta / theta are doubles and we don't // want full derivatives. template - Eigen::VectorXd partial_diff_theta(const F& f, + inline Eigen::VectorXd partial_diff_theta(const F& f, const Eigen::VectorXd& theta, const Eigen::VectorXd& eta, const Eigen::VectorXd& delta, diff --git a/stan/math/laplace/third_diff_directional.hpp b/stan/math/laplace/third_diff_directional.hpp index 3674b1863d9..8c904a8052a 100644 --- a/stan/math/laplace/third_diff_directional.hpp +++ b/stan/math/laplace/third_diff_directional.hpp @@ -13,7 +13,7 @@ namespace math { * to do two directions: v and w. */ template - void third_diff_directional( + inline void third_diff_directional( const F& f, const Eigen::VectorXd& x, const Eigen::VectorXd& eta, const Eigen::VectorXd& delta, From 0d297807d7a3783ec818b6ea3a253532f7e8de2e Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 9 Apr 2021 11:59:24 -0400 Subject: [PATCH 38/53] Temporary signature in agreement with parser. 
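A note on the inline additions in the patch above: block_matrix_sqrt is the one non-template function in the set, and a non-template free function defined in a header included from several translation units needs the inline keyword to satisfy the one-definition rule; without it the linker reports multiple definitions. The templated helpers were already exempt, so for them the keyword only keeps the headers uniform. A minimal illustration, outside the patch and with hypothetical names:

    // helpers.hpp, included by both a.cpp and b.cpp
    inline double half(double x) { return 0.5 * x; }  // links fine
    // double half(double x) { return 0.5 * x; }      // without inline: multiple definition at link time
    template <typename T>
    T twice(const T& x) { return x + x; }             // function templates do not hit this problem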
--- stan/math/laplace/laplace_marginal_lpdf.hpp | 24 +++++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp index c1aa4ff622e..9a88439eee2 100644 --- a/stan/math/laplace/laplace_marginal_lpdf.hpp +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -46,8 +46,8 @@ namespace math { * efficient computation. Else, a more general but slower solver * is used. */ - template + template stan::return_type_t laplace_marginal_lpdf (const Eigen::VectorXd& y, const L& L_f, @@ -59,18 +59,26 @@ namespace math { const std::vector& delta_K, const std::vector& delta_int_K, const Eigen::Matrix& theta_0, - std::ostream* msgs_L = nullptr, - std::ostream* msgs_K = nullptr, + // std::ostream* msgs_L = nullptr, + // std::ostream* msgs_K = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1) { + int compute_W_root = 1, + std::ostream* msgs = nullptr) { + // TEST: provisional signature to agree with parser. return laplace_marginal_density( - diff_likelihood(L_f, y, delta_int_L, msgs_L), + diff_likelihood(L_f, y, delta_int_L, msgs), K_f, phi, eta, x, delta_K, delta_int_K, - theta_0, msgs_K, tolerance, max_num_steps, + theta_0, msgs, tolerance, max_num_steps, hessian_block_size, compute_W_root); + + // return laplace_marginal_density( + // diff_likelihood(L_f, y, delta_int_L, msgs_L), + // K_f, phi, eta, x, delta_K, delta_int_K, + // theta_0, msgs_K, tolerance, max_num_steps, + // hessian_block_size, compute_W_root); } /** @@ -78,6 +86,7 @@ namespace math { * is now a std::vector of interger and an Eigen::VectorXd * of double is passed as data. */ + /* template stan::return_type_t laplace_marginal_lpmf @@ -106,6 +115,7 @@ namespace math { hessian_block_size, compute_W_root); } + */ } // namespace math } // namespace stan From d110e1815fc5caf6a1fc9df38561e47485560071 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Fri, 16 Apr 2021 13:49:20 -0400 Subject: [PATCH 39/53] Fix bugs to run function from Stan. 
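One fix below is easy to miss: the gradient l_grad returned by the Newton solver now stacks the derivatives of the log likelihood with respect to theta first and, when the likelihood has hyperparameters eta, the derivatives with respect to eta at the tail (the adjoint code later reads them back with l_grad.tail(eta_size_)). The predictive mean in laplace_base_rng only involves the theta block,

    mu_pred = K(x_pred, x) * d/dtheta log p(y | theta_hat, eta),

which is why covariance_pred * l_grad becomes covariance_pred * l_grad.head(theta_0.rows()).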
--- stan/math/laplace/laplace_marginal_lpdf.hpp | 35 ++++++++++++++----- stan/math/laplace/prob/laplace_base_rng.hpp | 3 +- stan/math/laplace/prob/laplace_rng.hpp | 11 +++--- test/unit/math/laplace/motorcycle_gp_test.cpp | 31 +++++++++++----- 4 files changed, 56 insertions(+), 24 deletions(-) diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp index 9a88439eee2..1fd0c6bbbfb 100644 --- a/stan/math/laplace/laplace_marginal_lpdf.hpp +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -59,8 +59,33 @@ namespace math { const std::vector& delta_K, const std::vector& delta_int_K, const Eigen::Matrix& theta_0, - // std::ostream* msgs_L = nullptr, - // std::ostream* msgs_K = nullptr, + std::ostream* msgs_L = nullptr, + std::ostream* msgs_K = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { + + return laplace_marginal_density( + diff_likelihood(L_f, y, delta_int_L, msgs_L), + K_f, phi, eta, x, delta_K, delta_int_K, + theta_0, msgs_K, tolerance, max_num_steps, + hessian_block_size, compute_W_root); + } + + template + stan::return_type_t laplace_marginal_lpdf + (const Eigen::VectorXd& y, + const L& L_f, + const Eigen::Matrix& eta, + const std::vector& delta_int_L, + const K& K_f, + const Eigen::Matrix& phi, + const Tx& x, + const std::vector& delta_K, + const std::vector& delta_int_K, + const Eigen::Matrix& theta_0, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, @@ -73,12 +98,6 @@ namespace math { K_f, phi, eta, x, delta_K, delta_int_K, theta_0, msgs, tolerance, max_num_steps, hessian_block_size, compute_W_root); - - // return laplace_marginal_density( - // diff_likelihood(L_f, y, delta_int_L, msgs_L), - // K_f, phi, eta, x, delta_K, delta_int_K, - // theta_0, msgs_K, tolerance, max_num_steps, - // hessian_block_size, compute_W_root); } /** diff --git a/stan/math/laplace/prob/laplace_base_rng.hpp b/stan/math/laplace/prob/laplace_base_rng.hpp index dcd3575e9e4..c3f64749b78 100644 --- a/stan/math/laplace/prob/laplace_base_rng.hpp +++ b/stan/math/laplace/prob/laplace_base_rng.hpp @@ -69,7 +69,8 @@ laplace_base_rng // Modified R&W method MatrixXd covariance_pred = covariance_function(phi_dbl, x_pred, delta, delta_int, msgs); - VectorXd pred_mean = covariance_pred * l_grad; + + VectorXd pred_mean = covariance_pred * l_grad.head(theta_0.rows()); Eigen::MatrixXd Sigma; if (compute_W_root) { diff --git a/stan/math/laplace/prob/laplace_rng.hpp b/stan/math/laplace/prob/laplace_rng.hpp index 2e4305160a2..9aa28b4d593 100644 --- a/stan/math/laplace/prob/laplace_rng.hpp +++ b/stan/math/laplace/prob/laplace_rng.hpp @@ -30,19 +30,18 @@ inline Eigen::VectorXd const std::vector& delta_K, const std::vector& delta_int_K, const Eigen::Matrix& theta_0, - RNG& rng, - std::ostream* msgs_L = nullptr, - std::ostream* msgs_K = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1) { + int compute_W_root = 1, + RNG& rng = boost::random::mt19937(), + std::ostream* msgs = nullptr) { return laplace_base_rng( - diff_likelihood(L_f, delta_L, delta_int_L, msgs_L), + diff_likelihood(L_f, delta_L, delta_int_L, msgs), K_f, phi, eta, x, x, delta_K, delta_int_K, theta_0, - rng, msgs_K, tolerance, max_num_steps, + rng, msgs, tolerance, max_num_steps, hessian_block_size, compute_W_root); } diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index 
c1107691e18..cc81e6ebf2e 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -36,6 +36,12 @@ struct covariance_motorcycle_functor { Matrix kernel_f = gp_exp_quad_cov(x, sigma_f, length_scale_f); Matrix kernel_g = gp_exp_quad_cov(x, sigma_g, length_scale_g); + // std::cout << "kernel_f: " << kernel_f.row(0).head(5) << std::endl; + // std::cout << "kernel_g: " << kernel_g.row(0).head(5) << std::endl; + // std::cout << "x: "; + // for (int i = 0; i < 5; i++) std::cout << x[i] << " "; + // std::cout << std::endl; + Matrix kernel_all = Eigen::MatrixXd::Zero(2 * n_obs, 2 * n_obs); for (int i = 0; i < n_obs; i++) { @@ -117,15 +123,16 @@ class laplace_motorcyle_gp_test : public::testing::Test { read_data(n_obs, "test/unit/math/laplace/motorcycle_gp/", x, y); // std::cout << "x: "; - // for (int i = 0; i < 5; i++) std::cout << x[i] << " "; + // for (int i = 0; i < n_obs; i++) std::cout << x[i] << " "; // std::cout << " ..." << std::endl; - // std::cout << "y: " << y.transpose().head(5) << " ..." << std::endl; + // std::cout << "y: " << y.transpose().head(n_obs) << " ..." << std::endl; } - length_scale_f = 0.3; - length_scale_g = 0.5; - sigma_f = 0.25; - sigma_g = 0.25; + // [0.335852,0.433641,0.335354,0.323559] + length_scale_f = 0.335852; // 0.3; + length_scale_g = 0.433641; // 0.5; + sigma_f = 0.335354; // 0.25; + sigma_g = 0.323559; // 0.25; phi.resize(4); phi << length_scale_f, length_scale_g, sigma_f, sigma_g; @@ -176,6 +183,12 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { using stan::math::laplace_marginal_density; using stan::math::diff_likelihood; + covariance_motorcycle_functor K_f; + Eigen::VectorXd phi_dbl_ = value_of(phi); + Eigen::MatrixXd K_eval + = K_f(phi_dbl_, x, delta_dummy, delta_int, 0); + std::cout << "K_eval: " << K_eval.row(0).head(5) << std::endl; + normal_likelihood f; diff_likelihood diff_functor(f, y, delta_int); @@ -306,11 +319,11 @@ TEST_F(laplace_motorcyle_gp_test, wrapper_function) { int hessian_block_size = 2; var marginal_density - = laplace_marginal_lpdf(y, normal_likelihood2(), eta, delta_int, + = laplace_marginal_lpdf(y, normal_likelihood2(), eta, delta_int, covariance_motorcycle_functor(), phi, x, delta_dummy, delta_int, theta0, - 0, 0, 1e-8, 100, hessian_block_size, - compute_W_root); + 1e-8, 100, hessian_block_size, + compute_W_root, 0); std::cout << "density: " << marginal_density << std::endl; From 096625866d308329ffaf46cd7f2f095fd3b27ce5 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 21 Apr 2021 10:41:36 -0400 Subject: [PATCH 40/53] prototype linesearch step. --- stan/math/laplace/laplace_marginal.hpp | 30 +++++++++++++++++++-- stan/math/laplace/laplace_marginal_lpdf.hpp | 22 +++++++-------- test/unit/math/laplace/disease_map_test.cpp | 12 +++++---- 3 files changed, 44 insertions(+), 20 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 464494c0c24..c3eda385ebe 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -102,8 +102,11 @@ namespace math { covariance = covariance_function(phi, x, delta, delta_int, msgs); theta = theta_0; double objective_old = - 1e+10; // CHECK -- what value to use? 
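For reference, the quantity this loop monitors is the unnormalized log conditional posterior that the Newton iteration maximizes,

    Psi(theta) = log p(y | theta, eta) - 0.5 * theta' * K^{-1} * theta,

and with the parameterization theta = K * a used below it is evaluated as -0.5 * a.dot(theta) + log p(y | theta, eta). Starting objective_old at -1e10 simply guarantees the first Newton step is accepted; the line search prototyped in this patch then compares successive values of this objective.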
+ double objective_inter = - 1e+10; double objective_new; double B_log_determinant; + Eigen::VectorXd a_old; + int j; if (hessian_block_size == 0 && compute_W_root == 0) { std::ostringstream message; @@ -152,7 +155,6 @@ namespace math { * mdivide_left_tri(transpose(L), mdivide_left_tri(L, W_r.diagonal().cwiseProduct(covariance * b))); - // diag_pre_multiply(W_r.diagonal(), multiply(covariance, b)))); } else { b = W * theta + l_grad.head(theta_size); a = b - W_r @@ -176,10 +178,34 @@ namespace math { // Simple Newton step theta = covariance * a; - // Check for convergence. if (i != 0) objective_old = objective_new; objective_new = -0.5 * a.dot(theta) + diff_likelihood.log_likelihood(theta, eta); + + // linesearch method + int do_line_search = 0; + int max_steps_line_search = 10; + if (do_line_search && i != 0) { // CHECK -- no line search at first step? + j = 0; + + // CHECK -- should we use a different convergence criterion? + // while (j <= max_steps_line_search || objective_new < objective_old) { + while (j <= max_steps_line_search || objective_new < objective_inter) { + + a = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? + theta = covariance * a; + + objective_inter = objective_new; + objective_new = - 0.5 * a.dot(theta) + + diff_likelihood.log_likelihood(theta, eta); + + j += 1; + } + } + + a_old = a; + + // Check for convergence. double objective_diff = abs(objective_new - objective_old); if (objective_diff < tolerance) break; } diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp index 1fd0c6bbbfb..72c20451834 100644 --- a/stan/math/laplace/laplace_marginal_lpdf.hpp +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -46,6 +46,7 @@ namespace math { * efficient computation. Else, a more general but slower solver * is used. */ + /* template stan::return_type_t laplace_marginal_lpdf @@ -71,7 +72,7 @@ namespace math { K_f, phi, eta, x, delta_K, delta_int_K, theta_0, msgs_K, tolerance, max_num_steps, hessian_block_size, compute_W_root); - } + } */ template @@ -105,9 +106,8 @@ namespace math { * is now a std::vector of interger and an Eigen::VectorXd * of double is passed as data. 
*/ - /* - template + template stan::return_type_t laplace_marginal_lpmf (const std::vector& y, const L& L_f, @@ -119,23 +119,19 @@ namespace math { const std::vector& delta_K, const std::vector& delta_int_K, const Eigen::Matrix& theta_0, - std::ostream* msgs_L = nullptr, - std::ostream* msgs_K = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1) { + int compute_W_root = 1, + std::ostream* msgs = nullptr) { - return laplace_marginal_lpdf(delta_L, L_f, eta, y, + return laplace_marginal_lpdf(delta_L, L_f, eta, y, K_f, phi, x, delta_K, delta_int_K, - theta_0, msgs_L, msgs_K, - tolerance, + theta_0, tolerance, max_num_steps, hessian_block_size, - compute_W_root); + compute_W_root, msgs); } - */ - } // namespace math } // namespace stan diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 5967649d3d0..19a4d91be51 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -316,10 +316,10 @@ TEST_F(laplace_disease_map_test, lpmf_wrapper) { int compute_W_root = 1; var marginal_density - = laplace_marginal_lpmf(n_samples, poisson_log_likelihood(), - eta_dummy, delta_lk, - sqr_exp_kernel_functor(), - phi, x, delta, delta_int, theta_0); + = laplace_marginal_lpmf(n_samples, poisson_log_likelihood(), + eta_dummy, delta_lk, + sqr_exp_kernel_functor(), + phi, x, delta, delta_int, theta_0); VEC g; AVEC parm_vec = createAVEC(phi(0), phi(1)); @@ -344,7 +344,9 @@ TEST_F(laplace_disease_map_test, rng_wrapper) { theta_pred = laplace_rng(poisson_log_likelihood(), eta_dummy, delta_lk, n_samples, sqr_exp_kernel_functor(), - phi, x, delta, delta_int, theta_0, rng); + phi, x, delta, delta_int, theta_0, + 1e-6, 100, hessian_block_size, + compute_W_root, rng); // std::cout << "theta_pred: " << theta_pred.transpose().head(5) << std::endl; From ab69c98a81c0d0af614825e4657761e990b941e1 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 21 Apr 2021 14:28:06 -0400 Subject: [PATCH 41/53] prototype line search. --- stan/math/laplace/laplace_marginal.hpp | 8 ++++++-- test/unit/math/laplace/motorcycle_gp_test.cpp | 14 +++++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index c3eda385ebe..5fc11b6a893 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -183,14 +183,14 @@ namespace math { + diff_likelihood.log_likelihood(theta, eta); // linesearch method - int do_line_search = 0; + int do_line_search = 1; int max_steps_line_search = 10; if (do_line_search && i != 0) { // CHECK -- no line search at first step? j = 0; // CHECK -- should we use a different convergence criterion? // while (j <= max_steps_line_search || objective_new < objective_old) { - while (j <= max_steps_line_search || objective_new < objective_inter) { + while (j <= max_steps_line_search || objective_new > objective_inter) { a = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? theta = covariance * a; @@ -207,6 +207,10 @@ namespace math { // Check for convergence. 
double objective_diff = abs(objective_new - objective_old); + + if (i % 500 == 0) std::cout << "obj: " << objective_new << std::endl; + + // if (objective_diff < tolerance) std::cout << "iter: " << i << std::endl; if (objective_diff < tolerance) break; } diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index cc81e6ebf2e..878c5fb7270 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -153,7 +153,7 @@ class laplace_motorcyle_gp_test : public::testing::Test { // Remark: finds optimal point with or without informed initial guess. for (int i = 0; i < n_obs; i++) { - theta0(2 * i) = 0; // mu_hat(i); + theta0(2 * i) = mu_hat(i); // 0 theta0(2 * i + 1) = 0; } @@ -198,17 +198,18 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { covariance_motorcycle_functor(), value_of(phi), eta_dummy_dbl, x, delta_dummy, delta_int, theta0, - 0, 1e-8, 100, hessian_block_size, + 0, 1e-2, 20000, hessian_block_size, compute_W_root); std::cout << "density: " << marginal_density_dbl << std::endl; +/* var marginal_density = laplace_marginal_density(diff_functor, covariance_motorcycle_functor(), phi, eta_dummy, x, delta_dummy, delta_int, theta0, - 0, 1e-8, 100, hessian_block_size, + 0, 1e-8, 1000, hessian_block_size, compute_W_root); VEC g; @@ -244,8 +245,10 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { compute_W_root); std::cout << "g[0]: " << (target_u0 - target_l0) / (2 * eps) << std::endl; + */ } +/* TEST_F(laplace_motorcyle_gp_test, lk_autodiff_eta) { using stan::math::var; using stan::math::value_of; @@ -307,8 +310,9 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff_eta) { compute_W_root); std::cout << "gf[4]: " << (target_u - target_l) / (2 * eps) << std::endl; -} +} */ +/* TEST_F(laplace_motorcyle_gp_test, wrapper_function) { using stan::math::var; using stan::math::laplace_marginal_lpdf; @@ -333,4 +337,4 @@ TEST_F(laplace_motorcyle_gp_test, wrapper_function) { std::cout << "grad: " << g[0] << " " << g[1] << " " << g[2] << " " << g[3] << " " << g[4] << std::endl; -} +} */ From 5c4b2e4c0eb95e895382839e54d2d3dc16664ff3 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 21 Apr 2021 15:21:56 -0400 Subject: [PATCH 42/53] Update convergence criterion for linesearch. --- stan/math/laplace/laplace_marginal.hpp | 34 ++++++++++++++------------ 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 5fc11b6a893..110565d445b 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -106,7 +106,8 @@ namespace math { double objective_new; double B_log_determinant; Eigen::VectorXd a_old; - int j; + Eigen::VectorXd a_new; + Eigen::VectorXd theta_new; if (hessian_block_size == 0 && compute_W_root == 0) { std::ostringstream message; @@ -185,21 +186,24 @@ namespace math { // linesearch method int do_line_search = 1; int max_steps_line_search = 10; - if (do_line_search && i != 0) { // CHECK -- no line search at first step? - j = 0; - - // CHECK -- should we use a different convergence criterion? - // while (j <= max_steps_line_search || objective_new < objective_old) { - while (j <= max_steps_line_search || objective_new > objective_inter) { - - a = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? - theta = covariance * a; + if (do_line_search && i != 0) { + // CHECK -- no line search at first step? 
+ // CHECK -- which convergence criterion should we use here? + for (int j = 0; j < max_steps_line_search; j++) { + a_new = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? + theta_new = covariance * a_new; objective_inter = objective_new; - objective_new = - 0.5 * a.dot(theta) - + diff_likelihood.log_likelihood(theta, eta); + objective_new = - 0.5 * a_new.dot(theta_new) + + diff_likelihood.log_likelihood(theta_new, eta); + + // NOTE -- if objective function doesn't increase, break. + bool break_linesearch = (objective_new <= objective_inter); + if (break_linesearch) objective_new = objective_inter; + if (break_linesearch) break; - j += 1; + theta = theta_new; + a = a_new; } } @@ -208,9 +212,9 @@ namespace math { // Check for convergence. double objective_diff = abs(objective_new - objective_old); - if (i % 500 == 0) std::cout << "obj: " << objective_new << std::endl; - + // if (i % 500 == 0) std::cout << "obj: " << objective_new << std::endl; // if (objective_diff < tolerance) std::cout << "iter: " << i << std::endl; + if (objective_diff < tolerance) break; } From 8d455132348b3c9c909f16fb6a2a08c4136c9529 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 21 Apr 2021 15:49:40 -0400 Subject: [PATCH 43/53] simplify the linesearch. --- stan/math/laplace/laplace_marginal.hpp | 29 ++++++++++++++------------ 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 110565d445b..5d47bcba0e7 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -15,6 +15,7 @@ #include #include +#include #include #include // CHECK -- do we need this? #include // CHECK -- do we need this? @@ -180,30 +181,32 @@ namespace math { theta = covariance * a; if (i != 0) objective_old = objective_new; + objective_new = -0.5 * a.dot(theta) + diff_likelihood.log_likelihood(theta, eta); - // linesearch method + // linesearch int do_line_search = 1; int max_steps_line_search = 10; + int j = 0; if (do_line_search && i != 0) { // CHECK -- no line search at first step? // CHECK -- which convergence criterion should we use here? - for (int j = 0; j < max_steps_line_search; j++) { - a_new = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? - theta_new = covariance * a_new; + // CHECK -- what do we do when theta has non-finite elements? + while (j < max_steps_line_search && objective_new < objective_old) { + a = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? + theta = covariance * a; + objective_new = - 0.5 * a.dot(theta) + + diff_likelihood.log_likelihood(theta, eta); - objective_inter = objective_new; - objective_new = - 0.5 * a_new.dot(theta_new) - + diff_likelihood.log_likelihood(theta_new, eta); + j++; // NOTE -- if objective function doesn't increase, break. - bool break_linesearch = (objective_new <= objective_inter); - if (break_linesearch) objective_new = objective_inter; - if (break_linesearch) break; - - theta = theta_new; - a = a_new; + // bool break_linesearch = (objective_new <= objective_inter); + // if (break_linesearch) objective_new = objective_inter; + // if (break_linesearch) break; + // theta = theta_new; + // a = a_new; } } From fcbf075ed2563c101d8b3c16d3b2eec9e59e9e3c Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 21 Apr 2021 15:58:09 -0400 Subject: [PATCH 44/53] linesearch: check for non-finite values. 
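What the last few patches converge on is ordinary backtracking: take the full Newton proposal, and if the objective got worse (or theta is not finite, handled in the patch below) pull the candidate halfway back toward the previously accepted point, up to max_steps_line_search times. Repeated averaging with a_old is exactly a halving of the step size, since starting from the Newton proposal a^(0),

    a^(k) = a_old + (a^(0) - a_old) / 2^k,

so the search tries step lengths 1, 1/2, 1/4, ... along the Newton direction before giving up.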
--- stan/math/laplace/laplace_marginal.hpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 5d47bcba0e7..5bf30cb07a0 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -182,8 +182,10 @@ namespace math { if (i != 0) objective_old = objective_new; + if (std::isfinite(theta.sum())) { objective_new = -0.5 * a.dot(theta) + diff_likelihood.log_likelihood(theta, eta); + } // linesearch int do_line_search = 1; @@ -193,11 +195,15 @@ namespace math { // CHECK -- no line search at first step? // CHECK -- which convergence criterion should we use here? // CHECK -- what do we do when theta has non-finite elements? - while (j < max_steps_line_search && objective_new < objective_old) { + while (j < max_steps_line_search + && (objective_new < objective_old || !std::isfinite(theta.sum()))) { a = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? theta = covariance * a; - objective_new = - 0.5 * a.dot(theta) - + diff_likelihood.log_likelihood(theta, eta); + + if (std::isfinite(theta.sum())) { + objective_new = - 0.5 * a.dot(theta) + + diff_likelihood.log_likelihood(theta, eta); + } j++; From 150f713652e42c6e7df6be5e30c8bf7d35164a0f Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Thu, 22 Apr 2021 12:24:36 -0400 Subject: [PATCH 45/53] prototype Jarnos newton solver. --- stan/math/laplace/laplace_marginal.hpp | 109 +++++++++++------- stan/math/laplace/laplace_marginal_lpdf.hpp | 14 ++- stan/math/laplace/prob/laplace_base_rng.hpp | 4 +- test/unit/math/laplace/disease_map_test.cpp | 20 ++-- test/unit/math/laplace/motorcycle_gp_test.cpp | 21 ++-- 5 files changed, 102 insertions(+), 66 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 5bf30cb07a0..c7dfab6dc93 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -23,6 +23,9 @@ // Reference for calculations of marginal and its gradients: // Margossian et al, 2020, https://arxiv.org/abs/2004.12550 +// TODO -- either use Eigen's .solve() or mdivide_left_tri. +// The code needs to be more consistent. + namespace stan { namespace math { @@ -63,14 +66,22 @@ namespace math { * @param[in, out] covariance the evaluated covariance function for the * latent gaussian variable. * @param[in, out] theta a vector to store the mode. - * @param[in, out] W_root a vector to store the square root of the - * diagonal negative Hessian. + * @param[in, out] W_r a vector to store the square root of the + * negative Hessian or the negative Hessian, depending + * on which solver we use. * @param[in, out] L cholesky decomposition of stabilized inverse covariance. * @param[in, out] a element in the Newton step * @param[in, out] l_grad the log density of the likelihood. * @param[in] theta_0 the initial guess for the mode. * @param[in] tolerance the convergence criterion for the Newton solver. * @param[in] max_num_steps maximum number of steps for the Newton solver. + * @param[in] hessian_block_size the size of the block, where we assume + * the Hessian is block-diagonal. + * @param[in] solver which Newton solver to use: + * (1) method using the root of W. + * (2) method using the root of the covariance. + * (3) method using an LU decomposition. + * * @return the log marginal density, p(y | phi). 
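In terms of the linear algebra, the three solvers compute the same Newton step a = (I + W K)^{-1} (W theta + grad log p(y | theta, eta)), but factor a different matrix B, as the branches below show (writing K_root for cholesky_decompose(K), so K = K_root * K_root'):

    solver 1:  B = I + W^(1/2) K W^(1/2),   L = chol(B)   -- needs a square root of W
    solver 2:  B = I + K_root' W K_root,    L = chol(B)   -- needs only a root of K
    solver 3:  B = I + K W,                 LU decomposition, no root needed

By Sylvester's determinant identity all three B have the same determinant, so the log|B| term of the approximate marginal is identical; the choice only decides which matrix must admit a root, which is presumably why solvers 2 and 3 were added for likelihoods whose negative Hessian W is not positive-definite. The same three routes reappear in the adjoint code further down when forming R = (K + W^{-1})^{-1}.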
*/ template @@ -89,12 +100,15 @@ namespace math { Eigen::VectorXd& a, Eigen::VectorXd& l_grad, Eigen::PartialPivLU& LU, + Eigen::MatrixXd& K_root, const Eigen::VectorXd& theta_0, std::ostream* msgs = nullptr, double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1) { + int solver = 1, + int do_line_search = 0, + int max_steps_line_search = 10) { using Eigen::MatrixXd; using Eigen::VectorXd; using Eigen::SparseMatrix; @@ -110,7 +124,7 @@ namespace math { Eigen::VectorXd a_new; Eigen::VectorXd theta_new; - if (hessian_block_size == 0 && compute_W_root == 0) { + if (hessian_block_size == 0 && solver != 1) { std::ostringstream message; message << "laplace_marginal_density: if treating the Hessian as diagonal" << " we assume its matrix square-root can be computed." @@ -137,7 +151,7 @@ namespace math { VectorXd b; { MatrixXd B; - if (compute_W_root) { + if (solver == 1) { if (hessian_block_size == 0) { W_r = W.cwiseSqrt(); B = MatrixXd::Identity(theta_size, theta_size) @@ -164,6 +178,18 @@ namespace math { mdivide_left_tri(L, W_r * (covariance * b))); } + } else if (solver == 2) { + // TODO -- use triangularView for K_root. + W_r = W; + K_root = cholesky_decompose(covariance); + B = MatrixXd::Identity(theta_size, theta_size) + + K_root.transpose() * W * K_root; + L = cholesky_decompose(B); + B_log_determinant = 2 * sum(L.diagonal().array().log()); + b = W * theta + l_grad.head(theta_size); + a = mdivide_left_tri(K_root.transpose(), + mdivide_left_tri(L.transpose(), + mdivide_left_tri(L, K_root.transpose() * b))); } else { W_r = W; B = MatrixXd::Identity(theta_size, theta_size) + covariance * W; @@ -188,16 +214,12 @@ namespace math { } // linesearch - int do_line_search = 1; - int max_steps_line_search = 10; + // CHECK -- does linesearch work for solver 2? int j = 0; if (do_line_search && i != 0) { - // CHECK -- no line search at first step? - // CHECK -- which convergence criterion should we use here? - // CHECK -- what do we do when theta has non-finite elements? while (j < max_steps_line_search && (objective_new < objective_old || !std::isfinite(theta.sum()))) { - a = (a + a_old) * 0.5; // CHECK -- generalize this for any reduction? + a = (a + a_old) * 0.5; // TODO -- generalize for any factor. theta = covariance * a; if (std::isfinite(theta.sum())) { @@ -206,13 +228,6 @@ namespace math { } j++; - - // NOTE -- if objective function doesn't increase, break. 
- // bool break_linesearch = (objective_new <= objective_inter); - // if (break_linesearch) objective_new = objective_inter; - // if (break_linesearch) break; - // theta = theta_new; - // a = a_new; } } @@ -279,19 +294,22 @@ namespace math { double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1) { + int solver = 1, + int do_line_search = 0, + int max_steps_line_search = 10) { Eigen::VectorXd theta, a, l_grad; - Eigen::MatrixXd L, covariance; + Eigen::MatrixXd L, covariance, K_root; Eigen::SparseMatrix W_r; Eigen::PartialPivLU LU; return laplace_marginal_density(diff_likelihood, covariance_function, phi, eta, x, delta, delta_int, covariance, - theta, W_r, L, a, l_grad, LU, + theta, W_r, L, a, l_grad, LU, K_root, value_of(theta_0), msgs, tolerance, max_num_steps, hessian_block_size, - compute_W_root); + solver, do_line_search, + max_steps_line_search); } /** @@ -336,15 +354,15 @@ namespace math { double marginal_density, const Eigen::MatrixXd& covariance, const Eigen::VectorXd& theta, - // const Eigen::MatrixXd& W_root, const Eigen::SparseMatrix& W_r, const Eigen::MatrixXd& L, const Eigen::VectorXd& a, const Eigen::VectorXd& l_grad, const Eigen::PartialPivLU LU, + const Eigen::MatrixXd& K_root, std::ostream* msgs = nullptr, int hessian_block_size = 0, - int compute_W_root = 1) + int solver = 1) : vari(marginal_density), phi_size_(phi.size()), phi_(ChainableStack::instance_->memalloc_.alloc_array( @@ -374,7 +392,7 @@ namespace math { Eigen::VectorXd partial_parm; Eigen::VectorXd s2; - if (compute_W_root == 1) { + if (solver == 1) { MatrixXd W_root_diag = W_r; R = W_r * L.transpose().triangularView() .solve(L.triangularView() @@ -393,7 +411,19 @@ namespace math { = diff_likelihood.compute_s2(theta, eta_dbl, A, block_size); s2 = partial_parm.head(theta_size); } - } else { // we have not computed W_root. + } else if (solver == 2) { + // TODO -- use triangularView for K_root. + R = W_r - W_r * K_root * L.transpose().triangularView() + .solve(L.triangularView() + .solve(K_root.transpose() * W_r)); + + Eigen::MatrixXd C = L.triangularView() + .solve(K_root.transpose()); + Eigen::MatrixXd A = C.transpose() * C; + partial_parm + = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); + s2 = partial_parm.head(theta_size); + } else { // solver with LU decomposition LU_solve_covariance = LU.solve(covariance); R = W_r - W_r * LU_solve_covariance * W_r; @@ -429,14 +459,11 @@ namespace math { VectorXd diff_eta = l_grad.tail(eta_size_); Eigen::VectorXd v; - if (compute_W_root == 1) { - Eigen::MatrixXd W = W_r * W_r; // NOTE: store W from Newton step? - v = covariance * s2 - - covariance * R * covariance * s2; - // - covariance * W - // * L.transpose().triangularView() - // . solve(L.triangularView() - // .solve(covariance * (covariance * s2))); + if (solver == 1) { + Eigen::MatrixXd W = W_r * W_r; // CHECK -- store W from Newton step? 
+ v = covariance * s2 - covariance * R * covariance * s2; + } else if (solver == 2) { + v = covariance * s2 - covariance * R * covariance * s2; } else { v = LU_solve_covariance * s2; } @@ -502,26 +529,27 @@ namespace math { double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1) { + int solver = 1, + int do_line_search = 0, + int max_steps_line_search = 10) { Eigen::VectorXd theta, a, l_grad; Eigen::SparseMatrix W_root; - Eigen::MatrixXd L; + Eigen::MatrixXd L, K_root; double marginal_density_dbl; Eigen::MatrixXd covariance; Eigen::PartialPivLU LU; - marginal_density_dbl = laplace_marginal_density(diff_likelihood, covariance_function, value_of(phi), value_of(eta), x, delta, delta_int, covariance, - theta, W_root, L, a, l_grad, LU, + theta, W_root, L, a, l_grad, LU, K_root, value_of(theta_0), msgs, tolerance, max_num_steps, hessian_block_size, - compute_W_root); + solver); // construct vari laplace_marginal_density_vari* vi0 @@ -531,8 +559,9 @@ namespace math { marginal_density_dbl, covariance, theta, W_root, L, a, l_grad, LU, + K_root, msgs, hessian_block_size, - compute_W_root); + solver); var marginal_density = var(vi0->marginal_density_[0]); diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp index 72c20451834..d52df6a33c5 100644 --- a/stan/math/laplace/laplace_marginal_lpdf.hpp +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -90,7 +90,9 @@ namespace math { double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1, + int solver = 1, + int do_line_search = 1, + int max_steps_line_search = 10, std::ostream* msgs = nullptr) { // TEST: provisional signature to agree with parser. @@ -98,7 +100,8 @@ namespace math { diff_likelihood(L_f, y, delta_int_L, msgs), K_f, phi, eta, x, delta_K, delta_int_K, theta_0, msgs, tolerance, max_num_steps, - hessian_block_size, compute_W_root); + hessian_block_size, solver, + do_line_search, max_steps_line_search); } /** @@ -122,7 +125,9 @@ namespace math { double tolerance = 1e-6, long int max_num_steps = 100, int hessian_block_size = 0, - int compute_W_root = 1, + int solver = 1, + int do_line_search = 1, + int max_steps_line_search = 10, std::ostream* msgs = nullptr) { return laplace_marginal_lpdf(delta_L, L_f, eta, y, @@ -130,7 +135,8 @@ namespace math { theta_0, tolerance, max_num_steps, hessian_block_size, - compute_W_root, msgs); + solver, do_line_search, + max_steps_line_search, msgs); } } // namespace math } // namespace stan diff --git a/stan/math/laplace/prob/laplace_base_rng.hpp b/stan/math/laplace/prob/laplace_base_rng.hpp index c3f64749b78..0fa0dea420f 100644 --- a/stan/math/laplace/prob/laplace_base_rng.hpp +++ b/stan/math/laplace/prob/laplace_base_rng.hpp @@ -49,7 +49,7 @@ laplace_base_rng VectorXd phi_dbl = value_of(phi); VectorXd eta_dbl = value_of(eta); Eigen::SparseMatrix W_r; - MatrixXd L; + MatrixXd L, K_root; Eigen::PartialPivLU LU; VectorXd l_grad; MatrixXd covariance; @@ -61,7 +61,7 @@ laplace_base_rng phi_dbl, eta_dbl, x, delta, delta_int, covariance, theta, W_r, L, a, l_grad, - LU, value_of(theta_0), msgs, + LU, K_root, value_of(theta_0), msgs, tolerance, max_num_steps, hessian_block_size, compute_W_root); } diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 19a4d91be51..94a3e4b9a3e 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -189,14 +189,13 @@ 
TEST_F(laplace_disease_map_test, lk_autodiff) { auto start = std::chrono::system_clock::now(); int hessian_block_size = 0; // 0, 1, 911 - int compute_W_root = 1; + int solver = 1; // options: 1, 2, or 3. double marginal_density_dbl = laplace_marginal_density(diff_functor, sqr_exp_kernel_functor(), value_of(phi), value_of(eta_dummy), x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size, - compute_W_root); + 0, 1e-6, 100, hessian_block_size, solver); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; @@ -208,11 +207,13 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { start = std::chrono::system_clock::now(); + hessian_block_size = 0; + solver = 1; var marginal_density = laplace_marginal_density(diff_functor, sqr_exp_kernel_functor(), phi, eta_dummy, x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size); + 0, 1e-6, 100, hessian_block_size, solver); end = std::chrono::system_clock::now(); elapsed_time = end - start; @@ -291,7 +292,7 @@ TEST_F(laplace_disease_map_test, rng_autodiff) { boost::random::mt19937 rng; int hessian_block_size = 0; - int compute_W_root = 1; + int solver = 1; auto start = std::chrono::system_clock::now(); Eigen::VectorXd @@ -300,7 +301,7 @@ TEST_F(laplace_disease_map_test, rng_autodiff) { phi, eta_dummy, x, x, delta, delta_int, theta_0, rng, 0, 1e-6, 100, hessian_block_size, - compute_W_root); + solver); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; std::cout << "LAPLACE_APPROX_RNG" << std::endl @@ -313,7 +314,7 @@ TEST_F(laplace_disease_map_test, lpmf_wrapper) { using stan::math::laplace_marginal_lpmf; int hessian_block_size = 0; - int compute_W_root = 1; + int solver = 1; var marginal_density = laplace_marginal_lpmf(n_samples, poisson_log_likelihood(), @@ -338,7 +339,7 @@ TEST_F(laplace_disease_map_test, rng_wrapper) { // TODO: put these variables in the test class. 
boost::random::mt19937 rng; int hessian_block_size = 0; - int compute_W_root = 1; + int solver = 1; Eigen::VectorXd theta_pred = laplace_rng(poisson_log_likelihood(), @@ -346,8 +347,7 @@ TEST_F(laplace_disease_map_test, rng_wrapper) { sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0, 1e-6, 100, hessian_block_size, - compute_W_root, rng); - + solver, rng); // std::cout << "theta_pred: " << theta_pred.transpose().head(5) << std::endl; } diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index 878c5fb7270..d50a9d0c7b5 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -32,16 +32,10 @@ struct covariance_motorcycle_functor { T1 sigma_g = phi(3); int n_obs = delta_int[0]; - double jitter = 1e-8; + double jitter = 1e-6; Matrix kernel_f = gp_exp_quad_cov(x, sigma_f, length_scale_f); Matrix kernel_g = gp_exp_quad_cov(x, sigma_g, length_scale_g); - // std::cout << "kernel_f: " << kernel_f.row(0).head(5) << std::endl; - // std::cout << "kernel_g: " << kernel_g.row(0).head(5) << std::endl; - // std::cout << "x: "; - // for (int i = 0; i < 5; i++) std::cout << x[i] << " "; - // std::cout << std::endl; - Matrix kernel_all = Eigen::MatrixXd::Zero(2 * n_obs, 2 * n_obs); for (int i = 0; i < n_obs; i++) { @@ -54,6 +48,9 @@ struct covariance_motorcycle_functor { } } } + + for (int i = 0; i < 2 * n_obs; i++) kernel_all(i, i) += jitter; + return kernel_all; } }; @@ -157,7 +154,7 @@ class laplace_motorcyle_gp_test : public::testing::Test { theta0(2 * i + 1) = 0; } - compute_W_root = 0; + solver = 2; } int n_obs; @@ -174,7 +171,7 @@ class laplace_motorcyle_gp_test : public::testing::Test { Eigen::VectorXd theta0; Eigen::VectorXd eta_dummy_dbl; Eigen::Matrix eta_dummy; - int compute_W_root; + int solver; }; TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { @@ -193,13 +190,17 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { diff_likelihood diff_functor(f, y, delta_int); int hessian_block_size = 2; + solver = 2; + int do_line_search = 1; + int max_steps_line_search = 100; double marginal_density_dbl = laplace_marginal_density(diff_functor, covariance_motorcycle_functor(), value_of(phi), eta_dummy_dbl, x, delta_dummy, delta_int, theta0, 0, 1e-2, 20000, hessian_block_size, - compute_W_root); + solver, do_line_search, + max_steps_line_search); std::cout << "density: " << marginal_density_dbl << std::endl; From be9f880939088991246b5c9e73dc7dc74f8049fa Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 28 Apr 2021 16:15:23 -0400 Subject: [PATCH 46/53] prototype treatment of diagonal covariance. 
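Two small points about the diagonal-covariance shortcut below. First, a diagonal K has Cholesky factor diag(sqrt(K_ii)), so K_root = covariance.cwiseSqrt() is valid in that case (and only in that case). Second, the commented-out one-liner covariance = covariance.diagonal().asDiagonal() most likely fails because the right-hand side is a view into the matrix being overwritten, i.e. an Eigen aliasing problem; going through the K_dummy temporary (or an explicit .eval()) sidesteps it. On the reverse-mode side, only the diagonal of the adjoint of K is then needed, which is what the new diagonal_covariance branch of laplace_pseudo_target accumulates.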
--- stan/math/laplace/laplace_marginal.hpp | 26 +++++++++- stan/math/laplace/laplace_pseudo_target.hpp | 49 ++++++++++++++----- test/unit/math/laplace/disease_map_test.cpp | 5 +- test/unit/math/laplace/motorcycle_gp_test.cpp | 38 ++++++++------ 4 files changed, 86 insertions(+), 32 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index c7dfab6dc93..14b883799a2 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -113,8 +113,21 @@ namespace math { using Eigen::VectorXd; using Eigen::SparseMatrix; - int theta_size = theta_0.size(); + // TEST + int diagonal_covariance = 1; + solver = 1; + hessian_block_size = 1; + covariance = covariance_function(phi, x, delta, delta_int, msgs); + + if (diagonal_covariance) { + Eigen::MatrixXd K_dummy = covariance.diagonal().asDiagonal(); + covariance = K_dummy; + // covariance = covariance.diagonal().asDiagonal(); + // CHECK -- above line doesn't work, not sure why... + } + + int theta_size = theta_0.size(); theta = theta_0; double objective_old = - 1e+10; // CHECK -- what value to use? double objective_inter = - 1e+10; @@ -181,7 +194,16 @@ namespace math { } else if (solver == 2) { // TODO -- use triangularView for K_root. W_r = W; - K_root = cholesky_decompose(covariance); + + if (diagonal_covariance) { + K_root = covariance.cwiseSqrt(); + } else { + K_root = cholesky_decompose(covariance); + } + + std::cout << "Cov: " << covariance.row(0).head(5) << std::endl; + std::cout << "K: " << K_root.row(0).head(5) << std::endl; + B = MatrixXd::Identity(theta_size, theta_size) + K_root.transpose() * W * K_root; L = cholesky_decompose(B); diff --git a/stan/math/laplace/laplace_pseudo_target.hpp b/stan/math/laplace/laplace_pseudo_target.hpp index 03ceed3bc26..bdd34e6e6da 100644 --- a/stan/math/laplace/laplace_pseudo_target.hpp +++ b/stan/math/laplace/laplace_pseudo_target.hpp @@ -10,6 +10,8 @@ namespace math { /** * Function to compute the pseudo target, $\tilde Z$, * with a custom derivative method. + * NOTE: we actually don't need to compute the pseudo-target, only its + * derivative. */ inline double laplace_pseudo_target ( const Eigen::MatrixXd& K, @@ -17,10 +19,11 @@ namespace math { const Eigen::MatrixXd& R, const Eigen::VectorXd& l_grad, const Eigen::VectorXd& s2) { - double s1 = 0.5 * quad_form(K, a) - 0.5 * sum((R * K).diagonal()); - Eigen::VectorXd b = K * l_grad; - Eigen::VectorXd s3 = b - K * (R * b); - return s1 + s2.dot(s3); + // double s1 = 0.5 * quad_form(K, a) - 0.5 * sum((R * K).diagonal()); + // Eigen::VectorXd b = K * l_grad; + // Eigen::VectorXd s3 = b - K * (R * b); + // return s1 + s2.dot(s3); + return 0; } /** @@ -35,6 +38,8 @@ namespace math { vari** pseudo_target_; /* An object to store the sensitivities of K. */ Eigen::MatrixXd K_adj_; + /* Boolean: true is K is diagonal. 
*/ + int diagonal_covariance_; template laplace_pseudo_target_vari ( @@ -43,14 +48,17 @@ namespace math { const Eigen::Matrix& K, const Eigen::VectorXd& s2, const Eigen::VectorXd& l, - double pseudo_target) + double pseudo_target, + int diagonal_covariance = 1) : vari(pseudo_target), K_size_(K.size()), K_(ChainableStack::instance_->memalloc_.alloc_array( K.size())), pseudo_target_( - ChainableStack::instance_->memalloc_.alloc_array(1)) { + ChainableStack::instance_->memalloc_.alloc_array(1)), + diagonal_covariance_(diagonal_covariance) { int dim_theta = K.rows(); + for (int j = 0; j < dim_theta; j++) for (int i = 0; i < dim_theta; i++) K_[j * dim_theta + i] = K(i, j).vi_; @@ -58,17 +66,32 @@ namespace math { pseudo_target_[0] = this; pseudo_target_[0] = new vari(pseudo_target, false); - K_adj_ = 0.5 * a * a.transpose() - 0.5 * R - + s2 * l.transpose() - - (R * (value_of(K) * s2)) * l.transpose(); + if (diagonal_covariance_) { + std::cout << "Running diagonal covariance case." << std::endl; + K_adj_ = 0.5 * a.cwiseProduct(a) - 0.5 * R.diagonal() + + l.cwiseProduct(s2 + R * + (value_of(K).diagonal().asDiagonal() * s2)); + std::cout << "Marker A" << std::endl; + } else { + K_adj_ = 0.5 * a * a.transpose() - 0.5 * R + + s2 * l.transpose() + - (R * (value_of(K) * s2)) * l.transpose(); + } } void chain() { int dim_theta = K_adj_.rows(); - for (int j = 0; j < dim_theta; j++) - for (int i = 0; i < dim_theta; i++) - K_[j * dim_theta + i]->adj_ += - pseudo_target_[0]->adj_ * K_adj_(i, j); + if (diagonal_covariance_) { + for (int j = 0; j < dim_theta; j++) { + K_[j * dim_theta + j]->adj_ += + pseudo_target_[0]->adj_ * K_adj_(j, 0); + } + } else { + for (int j = 0; j < dim_theta; j++) + for (int i = 0; i < dim_theta; i++) + K_[j * dim_theta + i]->adj_ += + pseudo_target_[0]->adj_ * K_adj_(i, j); + } } }; diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 94a3e4b9a3e..f76b6a2c616 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -131,7 +131,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { << "autodiff grad: " << g[0] << " " << g[1] << std::endl << "total time: " << elapsed_time.count() << std::endl << std::endl; - +} // Expected result // density: -2866.88 // autodiff grad: 266.501 -0.425901 @@ -141,6 +141,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { //////////////////////////////////////////////////////////////////////// // Let's now generate a sample theta from the estimated posterior +/* using stan::math::diff_poisson_log; using stan::math::to_vector; using stan::math::laplace_base_rng; @@ -350,4 +351,4 @@ TEST_F(laplace_disease_map_test, rng_wrapper) { solver, rng); // std::cout << "theta_pred: " << theta_pred.transpose().head(5) << std::endl; -} +} */ diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index d50a9d0c7b5..d445c2dca46 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -32,10 +32,17 @@ struct covariance_motorcycle_functor { T1 sigma_g = phi(3); int n_obs = delta_int[0]; + std::cout << "x: "; + for (int i = 0; i < 5; i++) std::cout << x[i] << " "; + std::cout << std::endl; + double jitter = 1e-6; Matrix kernel_f = gp_exp_quad_cov(x, sigma_f, length_scale_f); Matrix kernel_g = gp_exp_quad_cov(x, sigma_g, length_scale_g); + std::cout << "K_f: " << kernel_f.row(0).head(5) << std::endl; + std::cout << "K_g: " << kernel_g.row(0).head(5) << 
std::endl; + Matrix kernel_all = Eigen::MatrixXd::Zero(2 * n_obs, 2 * n_obs); for (int i = 0; i < n_obs; i++) { @@ -126,10 +133,10 @@ class laplace_motorcyle_gp_test : public::testing::Test { } // [0.335852,0.433641,0.335354,0.323559] - length_scale_f = 0.335852; // 0.3; - length_scale_g = 0.433641; // 0.5; - sigma_f = 0.335354; // 0.25; - sigma_g = 0.323559; // 0.25; + length_scale_f = 0.3; // 0.335852; // 0.3; + length_scale_g = 0.5; // 0.433641; // 0.5; + sigma_f = 0.25; // 0.335354; // 0.25; + sigma_g = 0.25; // 0.323559; // 0.25; phi.resize(4); phi << length_scale_f, length_scale_g, sigma_f, sigma_g; @@ -190,28 +197,28 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { diff_likelihood diff_functor(f, y, delta_int); int hessian_block_size = 2; - solver = 2; - int do_line_search = 1; - int max_steps_line_search = 100; - double marginal_density_dbl + solver = 3; + int do_line_search = 0; + int max_steps_line_search = 10; + /*double marginal_density_dbl = laplace_marginal_density(diff_functor, covariance_motorcycle_functor(), value_of(phi), eta_dummy_dbl, x, delta_dummy, delta_int, theta0, - 0, 1e-2, 20000, hessian_block_size, + 0, 1e-6, 20000, hessian_block_size, solver, do_line_search, max_steps_line_search); std::cout << "density: " << marginal_density_dbl << std::endl; -/* var marginal_density = laplace_marginal_density(diff_functor, covariance_motorcycle_functor(), phi, eta_dummy, x, delta_dummy, delta_int, theta0, 0, 1e-8, 1000, hessian_block_size, - compute_W_root); + solver, do_line_search, + max_steps_line_search); VEC g; AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3)); @@ -234,7 +241,8 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { phi_u0, eta_dummy_dbl, x, delta_dummy, delta_int, theta0, 0, 1e-6, 100, hessian_block_size, - compute_W_root); + solver, do_line_search, + max_steps_line_search); double target_l0 @@ -243,10 +251,10 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { phi_l0, eta_dummy_dbl, x, delta_dummy, delta_int, theta0, 0, 1e-6, 100, hessian_block_size, - compute_W_root); + solver, do_line_search, + max_steps_line_search); - std::cout << "g[0]: " << (target_u0 - target_l0) / (2 * eps) << std::endl; - */ + std::cout << "g[0]: " << (target_u0 - target_l0) / (2 * eps) << std::endl; */ } /* From 47e2827a8cb1c2cdb05aa4156c4c65bed4bf18ca Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 28 Apr 2021 17:27:47 -0400 Subject: [PATCH 47/53] attempt at debug diagonal K case... 
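A sanity check that may help with this debugging commit: taking the diagonal of the dense adjoint used in the general branch,

    K_adj = 0.5 * a * a' - 0.5 * R + s2 * l' - (R * (K * s2)) * l',

gives, element by element,

    0.5 * a.cwiseProduct(a) - 0.5 * R.diagonal() + l.cwiseProduct(s2) - l.cwiseProduct(R * K_diag.cwiseProduct(s2)),

whereas the diagonal branch below accumulates ... + l.cwiseProduct(s2 + R * K_diag.cwiseProduct(s2)); the last term enters with the opposite sign, which may be (part of) the discrepancy being chased here.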
--- stan/math/laplace/laplace_marginal.hpp | 6 +++--- stan/math/laplace/laplace_pseudo_target.hpp | 6 ++---- test/unit/math/laplace/disease_map_test.cpp | 17 ++++++++++++----- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 14b883799a2..9ce1db7a21e 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -114,9 +114,9 @@ namespace math { using Eigen::SparseMatrix; // TEST - int diagonal_covariance = 1; - solver = 1; - hessian_block_size = 1; + // int diagonal_covariance = 1; + // solver = 1; + // hessian_block_size = 1; covariance = covariance_function(phi, x, delta, delta_int, msgs); diff --git a/stan/math/laplace/laplace_pseudo_target.hpp b/stan/math/laplace/laplace_pseudo_target.hpp index bdd34e6e6da..04a35b468b6 100644 --- a/stan/math/laplace/laplace_pseudo_target.hpp +++ b/stan/math/laplace/laplace_pseudo_target.hpp @@ -67,11 +67,9 @@ namespace math { pseudo_target_[0] = new vari(pseudo_target, false); if (diagonal_covariance_) { - std::cout << "Running diagonal covariance case." << std::endl; + Eigen::VectorXd K_diag = value_of(K).diagonal(); K_adj_ = 0.5 * a.cwiseProduct(a) - 0.5 * R.diagonal() - + l.cwiseProduct(s2 + R * - (value_of(K).diagonal().asDiagonal() * s2)); - std::cout << "Marker A" << std::endl; + + l.cwiseProduct(s2 + R * K_diag.cwiseProduct(s2)); } else { K_adj_ = 0.5 * a * a.transpose() - 0.5 * R + s2 * l.transpose() diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index f76b6a2c616..2c31447b9d9 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -131,7 +131,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { << "autodiff grad: " << g[0] << " " << g[1] << std::endl << "total time: " << elapsed_time.count() << std::endl << std::endl; -} + // Expected result // density: -2866.88 // autodiff grad: 266.501 -0.425901 @@ -178,9 +178,9 @@ TEST_F(laplace_disease_map_test, lk_analytical) { std::cout << "LAPLACE_APPROX_POISSON_RNG" << std::endl << "total time: " << elapsed_time.count() << std::endl - << std::endl; + << std::endl; */ } - +/* TEST_F(laplace_disease_map_test, lk_autodiff) { using stan::math::var; using stan::math::laplace_marginal_density; @@ -231,7 +231,7 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { // Should return consistent evaluation of density and gradient as // previous iteration. 
// Expected run time: 0.39 s -} +} */ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { /////////////////////////////////////////////////////////////////// @@ -254,6 +254,12 @@ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { phi_l0(0) -= eps; phi_l1(1) -= eps; + double target = laplace_marginal_density(diff_functor, + sqr_exp_kernel_functor(), + phi_dbl, value_of(eta_dummy), + x, delta, delta_int, theta_0, + 0, 1e-6, 100, hessian_block_size); + double target_u0 = laplace_marginal_density(diff_functor, sqr_exp_kernel_functor(), phi_u0, value_of(eta_dummy), @@ -279,11 +285,12 @@ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { 0, 1e-6, 100, hessian_block_size); std::cout << "Finite_diff benchmark: " << std::endl + << "Value: " << target << std::endl << "grad: " << (target_u0 - target_l0) / (2 * eps) << " " << (target_u1 - target_l1) / (2 * eps) << std::endl; } - +/* TEST_F(laplace_disease_map_test, rng_autodiff) { using stan::math::var; using stan::math::laplace_base_rng; From 6bf438f224f84be82d5795f4d11567ec4792c821 Mon Sep 17 00:00:00 2001 From: Charles Margossian Date: Wed, 28 Apr 2021 17:29:36 -0400 Subject: [PATCH 48/53] return int term. --- stan/math/laplace/laplace_marginal.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 9ce1db7a21e..1e2cd905e09 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -114,7 +114,7 @@ namespace math { using Eigen::SparseMatrix; // TEST - // int diagonal_covariance = 1; + int diagonal_covariance = 0; // solver = 1; // hessian_block_size = 1; From 3985450dfac7356e7ad81f005589d99f8b8245fd Mon Sep 17 00:00:00 2001 From: Steve Bronder Date: Tue, 28 Sep 2021 13:03:08 -0400 Subject: [PATCH 49/53] move some files around and cleanup tests. 
Comment out tests that do not compile --- stan/math/laplace/block_matrix_sqrt.hpp | 14 +- stan/math/laplace/hessian_block_diag.hpp | 121 ++- stan/math/laplace/hessian_times_vector.hpp | 55 +- stan/math/laplace/laplace.hpp | 19 +- .../laplace_likelihood_bernoulli_logit.hpp | 43 +- .../laplace/laplace_likelihood_deprecated.hpp | 281 +++-- .../laplace/laplace_likelihood_general.hpp | 74 +- .../laplace_likelihood_neg_binomial_2_log.hpp | 129 +-- .../laplace_likelihood_poisson_log.hpp | 43 +- stan/math/laplace/laplace_marginal.hpp | 962 +++++++++--------- .../laplace_marginal_bernoulli_logit_lpmf.hpp | 119 +-- stan/math/laplace/laplace_marginal_lpdf.hpp | 234 ++--- .../laplace_marginal_neg_binomial_2.hpp | 84 +- .../laplace_marginal_poisson_log_lpmf.hpp | 119 +-- stan/math/laplace/laplace_pseudo_target.hpp | 173 ++-- stan/math/laplace/partial_diff_theta.hpp | 149 ++- stan/math/laplace/prob/laplace_base_rng.hpp | 58 +- .../prob/laplace_bernoulli_logit_rng.hpp | 37 +- .../laplace/prob/laplace_poisson_log_rng.hpp | 83 +- stan/math/laplace/prob/laplace_rng.hpp | 41 +- stan/math/laplace/third_diff_directional.hpp | 77 +- stan/math/mix.hpp | 10 +- test/unit/math/laplace/disease_map_test.cpp | 46 +- .../math/laplace/higher_order_diff_test.cpp | 9 +- .../laplace_bernoulli_logit_rng_test.cpp | 5 +- .../laplace_marginal_bernoulli_logit_test.cpp | 16 +- ...place_marginal_neg_binomial_2_log_test.cpp | 20 +- .../laplace_marginal_poisson_log_test.cpp | 10 +- .../laplace_marginal_student_t_test.cpp | 5 +- .../laplace/laplace_poisson_log_rng_test.cpp | 6 +- test/unit/math/laplace/laplace_skim_test.cpp | 13 +- test/unit/math/laplace/laplace_utility.hpp | 14 +- test/unit/math/laplace/motorcycle_gp_test.cpp | 18 +- 33 files changed, 1489 insertions(+), 1598 deletions(-) diff --git a/stan/math/laplace/block_matrix_sqrt.hpp b/stan/math/laplace/block_matrix_sqrt.hpp index f1b310e5247..ca8380ea115 100644 --- a/stan/math/laplace/block_matrix_sqrt.hpp +++ b/stan/math/laplace/block_matrix_sqrt.hpp @@ -1,10 +1,10 @@ #ifndef STAN_MATH_LAPLACE_BLOCK_MATRIX_SQRT_HPP #define STAN_MATH_LAPLACE_BLOCK_MATRIX_SQRT_HPP -#include - -#include +#include +#include #include +// REMOVE ME #include namespace stan { @@ -13,9 +13,8 @@ namespace math { /** * Return the matrix square-root for a block diagonal matrix. */ - Eigen::SparseMatrix - inline block_matrix_sqrt(Eigen::SparseMatrix W, - int block_size) { +Eigen::SparseMatrix inline block_matrix_sqrt( + Eigen::SparseMatrix W, int block_size) { int n_block = W.cols() / block_size; Eigen::MatrixXd local_block(block_size, block_size); Eigen::MatrixXd local_block_sqrt(block_size, block_size); @@ -37,7 +36,7 @@ namespace math { for (int j = 0; j < block_size; j++) { for (int k = 0; k < block_size; k++) { W_root.insert(i * block_size + j, i * block_size + k) - = local_block_sqrt(j, k); + = local_block_sqrt(j, k); } } } @@ -47,7 +46,6 @@ namespace math { return W_root; } - } // namespace math } // namespace stan diff --git a/stan/math/laplace/hessian_block_diag.hpp b/stan/math/laplace/hessian_block_diag.hpp index 93a13ebd7c8..c2431fd97bd 100644 --- a/stan/math/laplace/hessian_block_diag.hpp +++ b/stan/math/laplace/hessian_block_diag.hpp @@ -9,79 +9,76 @@ namespace stan { namespace math { - /** - * Returns a block diagonal Hessian by computing the relevant directional - * derivatives and storing them in a matrix. - * For m the size of each block, the operations const m calls to - * hessian_times_vector, that is m forward sweeps and m reverse sweeps. 
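For reference, a usage sketch of block_matrix_sqrt above, with a 4 x 4 block-diagonal matrix made of two symmetric positive-definite 2 x 2 blocks; the matrix values are made up for illustration.

  // Each diagonal block is replaced by its matrix square root, so
  // W_root * W_root recovers W up to numerical error.
  Eigen::SparseMatrix<double> W(4, 4);
  W.insert(0, 0) = 2.0;  W.insert(0, 1) = 0.5;
  W.insert(1, 0) = 0.5;  W.insert(1, 1) = 2.0;
  W.insert(2, 2) = 3.0;  W.insert(2, 3) = 1.0;
  W.insert(3, 2) = 1.0;  W.insert(3, 3) = 3.0;
  Eigen::SparseMatrix<double> W_root = stan::math::block_matrix_sqrt(W, 2);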
- */ - template - inline void hessian_block_diag(const F& f, - const Eigen::VectorXd& x, - const Eigen::VectorXd& eta, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - int hessian_block_size, - double& fx, - Eigen::MatrixXd& H, - std::ostream* pstream = 0) { - using Eigen::VectorXd; - using Eigen::MatrixXd; +/** + * Returns a block diagonal Hessian by computing the relevant directional + * derivatives and storing them in a matrix. + * For m the size of each block, the operations const m calls to + * hessian_times_vector, that is m forward sweeps and m reverse sweeps. + */ +template +inline void hessian_block_diag(const F& f, const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + int hessian_block_size, double& fx, + Eigen::MatrixXd& H, std::ostream* pstream = 0) { + using Eigen::MatrixXd; + using Eigen::VectorXd; - int x_size = x.size(); - VectorXd v; - H = MatrixXd::Zero(x_size, x_size); - int n_blocks = x_size / hessian_block_size; - for (int i = 0; i < hessian_block_size; ++i) { - v = VectorXd::Zero(x_size); - for (int j = i; j < x_size; j += hessian_block_size) v(j) = 1; - VectorXd Hv; - hessian_times_vector(f, x, eta, delta, delta_int, v, fx, Hv, pstream); - for (int j = 0; j < n_blocks; ++j) { - for (int k = 0; k < hessian_block_size; ++k) - H(k + j * hessian_block_size, i + j * hessian_block_size) + int x_size = x.size(); + VectorXd v; + H = MatrixXd::Zero(x_size, x_size); + int n_blocks = x_size / hessian_block_size; + for (int i = 0; i < hessian_block_size; ++i) { + v = VectorXd::Zero(x_size); + for (int j = i; j < x_size; j += hessian_block_size) + v(j) = 1; + VectorXd Hv; + hessian_times_vector(f, x, eta, delta, delta_int, v, fx, Hv, pstream); + for (int j = 0; j < n_blocks; ++j) { + for (int k = 0; k < hessian_block_size; ++k) + H(k + j * hessian_block_size, i + j * hessian_block_size) = Hv(k + j * hessian_block_size); - } } } +} - /** - * Overload for case where hessian is stored as a sparse matrix. - */ - template - inline void hessian_block_diag(const F& f, - const Eigen::VectorXd& x, - const Eigen::VectorXd& eta, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - int hessian_block_size, - double& fx, - Eigen::SparseMatrix& H, - // Eigen::MatrixXd& H, - std::ostream* pstream = 0) { - using Eigen::VectorXd; - using Eigen::MatrixXd; +/** + * Overload for case where hessian is stored as a sparse matrix. 
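To make the probe-vector scheme described above concrete (the sizes here are illustrative): with hessian_block_size m = 2 and x of size 6, the outer loop builds the two vectors

  v_0 = (1, 0, 1, 0, 1, 0)'   and   v_1 = (0, 1, 0, 1, 0, 1)',

and each call to hessian_times_vector returns the product H * v_i with one forward and one reverse sweep. Because H is assumed block diagonal with 2 x 2 blocks, H * v_0 stacks the first columns of the three blocks and H * v_1 stacks their second columns, so m = 2 products are enough to recover every nonzero entry of H.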
+ */ +template +inline void hessian_block_diag(const F& f, const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + int hessian_block_size, double& fx, + Eigen::SparseMatrix& H, + // Eigen::MatrixXd& H, + std::ostream* pstream = 0) { + using Eigen::MatrixXd; + using Eigen::VectorXd; - int x_size = x.size(); - VectorXd v; - // H = MatrixXd::Zero(x_size, x_size); - H.resize(x_size, x_size); - // H.reserve(Eigen::VectorXi::Constant(x_size, hessian_block_size)); + int x_size = x.size(); + VectorXd v; + // H = MatrixXd::Zero(x_size, x_size); + H.resize(x_size, x_size); + // H.reserve(Eigen::VectorXi::Constant(x_size, hessian_block_size)); - int n_blocks = x_size / hessian_block_size; - for (int i = 0; i < hessian_block_size; ++i) { - v = VectorXd::Zero(x_size); - for (int j = i; j < x_size; j += hessian_block_size) v(j) = 1; - VectorXd Hv; - hessian_times_vector(f, x, eta, delta, delta_int, v, fx, Hv, pstream); - for (int j = 0; j < n_blocks; ++j) { - for (int k = 0; k < hessian_block_size; ++k) { - H.insert(k + j * hessian_block_size, i + j * hessian_block_size) + int n_blocks = x_size / hessian_block_size; + for (int i = 0; i < hessian_block_size; ++i) { + v = VectorXd::Zero(x_size); + for (int j = i; j < x_size; j += hessian_block_size) + v(j) = 1; + VectorXd Hv; + hessian_times_vector(f, x, eta, delta, delta_int, v, fx, Hv, pstream); + for (int j = 0; j < n_blocks; ++j) { + for (int k = 0; k < hessian_block_size; ++k) { + H.insert(k + j * hessian_block_size, i + j * hessian_block_size) = Hv(k + j * hessian_block_size); - } } } } +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/hessian_times_vector.hpp b/stan/math/laplace/hessian_times_vector.hpp index 134da49d585..cd180874258 100644 --- a/stan/math/laplace/hessian_times_vector.hpp +++ b/stan/math/laplace/hessian_times_vector.hpp @@ -7,36 +7,35 @@ namespace stan { namespace math { - /** - * Overload Hessian_times_vector function, under stan/math/mix/functor - * to handle functions which take in arguments eta, delta, delta_int, - * and pstream. - */ - template - inline void hessian_times_vector(const F& f, - const Eigen::VectorXd& x, - const Eigen::VectorXd& eta, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - const Eigen::VectorXd& v, - double& fx, - Eigen::VectorXd& Hv, - std::ostream* pstream = 0) { - using Eigen::Matrix; - using Eigen::Dynamic; +/** + * Overload Hessian_times_vector function, under stan/math/mix/functor + * to handle functions which take in arguments eta, delta, delta_int, + * and pstream. 
+ */ +template +inline void hessian_times_vector(const F& f, const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + const Eigen::VectorXd& v, double& fx, + Eigen::VectorXd& Hv, + std::ostream* pstream = 0) { + using Eigen::Dynamic; + using Eigen::Matrix; - nested_rev_autodiff nested; + nested_rev_autodiff nested; - int x_size = x.size(); - Matrix x_var = x; - Matrix, Dynamic, 1> x_fvar(x_size); - for (int i = 0; i < x_size; i++) { - x_fvar(i) = fvar(x_var(i), v(i)); - } - fvar fx_fvar = f(x_fvar, eta, delta, delta_int, pstream); - grad(fx_fvar.d_.vi_); - Hv.resize(x_size); - for (int i = 0; i < x_size; i++) Hv(i) = x_var(i).adj(); + int x_size = x.size(); + Matrix x_var = x; + Matrix, Dynamic, 1> x_fvar(x_size); + for (int i = 0; i < x_size; i++) { + x_fvar(i) = fvar(x_var(i), v(i)); + } + fvar fx_fvar = f(x_fvar, eta, delta, delta_int, pstream); + grad(fx_fvar.d_.vi_); + Hv.resize(x_size); + for (int i = 0; i < x_size; i++) + Hv(i) = x_var(i).adj(); } } // namespace math diff --git a/stan/math/laplace/laplace.hpp b/stan/math/laplace/laplace.hpp index 61ca5c2e262..83311ffda6f 100644 --- a/stan/math/laplace/laplace.hpp +++ b/stan/math/laplace/laplace.hpp @@ -1,11 +1,26 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_HPP #define STAN_MATH_LAPLACE_LAPLACE_HPP +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include #include -#include +#include +#include +#include +#include +#include #include +#include #include #endif diff --git a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp index 477a0c60219..54f7e175766 100644 --- a/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp +++ b/stan/math/laplace/laplace_likelihood_bernoulli_logit.hpp @@ -1,7 +1,6 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_BERNOULLI_LOGIT_HPP #define STAN_MATH_LAPLACE_LAPLACE_LIKELIHOOD_BERNOULLI_LOGIT_HPP - namespace stan { namespace math { @@ -20,7 +19,7 @@ struct diff_bernoulli_logit { diff_bernoulli_logit(const Eigen::VectorXd& n_samples, const Eigen::VectorXd& sums) - : n_samples_(n_samples), sums_(sums) { } + : n_samples_(n_samples), sums_(sums) {} /** * Return the log density. @@ -29,13 +28,13 @@ struct diff_bernoulli_logit { * @return the log density. */ template - T1 log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) - const { - Eigen::VectorXd one = rep_vector(1, theta.size()); - return sum(theta.cwiseProduct(sums_) - - n_samples_.cwiseProduct(log(one + exp(theta)))); - } + T1 log_likelihood( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { + Eigen::VectorXd one = rep_vector(1, theta.size()); + return sum(theta.cwiseProduct(sums_) + - n_samples_.cwiseProduct(log(one + exp(theta)))); + } /** * Returns the gradient of the log density, and the hessian. @@ -49,20 +48,20 @@ struct diff_bernoulli_logit { * @param[in, out] hessian diagonal, so stored in a vector. 
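A small usage sketch for the hessian_times_vector overload above; the toy_likelihood functor, the include choice, and the concrete values are illustrative assumptions, not part of this patch.

  #include <stan/math/mix.hpp>  // assumed to pull in the laplace headers after this patch
  #include <Eigen/Dense>
  #include <ostream>
  #include <vector>

  // Toy functor with the (theta, eta, delta, delta_int, pstream) signature
  // expected by the overload: f(theta) = -0.5 * theta' * theta, whose Hessian
  // is -I, so the returned product should equal -v.
  struct toy_likelihood {
    template <typename T>
    T operator()(const Eigen::Matrix<T, Eigen::Dynamic, 1>& theta,
                 const Eigen::VectorXd& /* eta */,
                 const Eigen::VectorXd& /* delta */,
                 const std::vector<int>& /* delta_int */,
                 std::ostream* /* pstream */) const {
      T lp = 0.0;
      for (int i = 0; i < theta.size(); ++i)
        lp += -0.5 * theta(i) * theta(i);
      return lp;
    }
  };

  // ... inside a test body:
  Eigen::VectorXd x = Eigen::VectorXd::Constant(3, 0.5);
  Eigen::VectorXd eta(0), delta(0);
  std::vector<int> delta_int;
  Eigen::VectorXd v = Eigen::VectorXd::Ones(3);
  double fx;
  Eigen::VectorXd Hv;
  stan::math::hessian_times_vector(toy_likelihood(), x, eta, delta, delta_int,
                                   v, fx, Hv, nullptr);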
*/ template - void diff (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy, - Eigen::Matrix& gradient, - Eigen::SparseMatrix& hessian, - int block_size_dummy = 0) const { + void diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + Eigen::SparseMatrix& hessian, + int block_size_dummy = 0) const { Eigen::Matrix exp_theta = exp(theta); int theta_size = theta.size(); Eigen::VectorXd one = rep_vector(1, theta_size); gradient = sums_ - n_samples_.cwiseProduct(inv_logit(theta)); - Eigen::Matrix - hessian_diagonal = - n_samples_.cwiseProduct(elt_divide(exp_theta, - square(one + exp_theta))); + Eigen::Matrix hessian_diagonal + = -n_samples_.cwiseProduct( + elt_divide(exp_theta, square(one + exp_theta))); hessian.resize(theta_size, theta_size); hessian.reserve(Eigen::VectorXi::Constant(theta_size, 1)); for (int i = 0; i < theta_size; i++) @@ -79,14 +78,14 @@ struct diff_bernoulli_logit { * derivative tensor. */ template - Eigen::Matrix - third_diff(const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) const { + Eigen::Matrix third_diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { Eigen::VectorXd exp_theta = exp(theta); Eigen::VectorXd one = rep_vector(1, theta.size()); Eigen::VectorXd nominator = exp_theta.cwiseProduct(exp_theta - one); - Eigen::VectorXd denominator = square(one + exp_theta) - .cwiseProduct(one + exp_theta); + Eigen::VectorXd denominator + = square(one + exp_theta).cwiseProduct(one + exp_theta); return n_samples_.cwiseProduct(elt_divide(nominator, denominator)); } diff --git a/stan/math/laplace/laplace_likelihood_deprecated.hpp b/stan/math/laplace/laplace_likelihood_deprecated.hpp index 9f096cffa55..31936c3acb9 100644 --- a/stan/math/laplace/laplace_likelihood_deprecated.hpp +++ b/stan/math/laplace/laplace_likelihood_deprecated.hpp @@ -19,26 +19,27 @@ namespace math { * This structure can be passed to the the laplace_marginal function. * Uses sufficient statistics for the data. */ - // FIX ME -- cannot use the sufficient statistic to compute log density in - // because of log factorial term. +// FIX ME -- cannot use the sufficient statistic to compute log density in +// because of log factorial term. +#ifdef EXCLUDE_THIS_FOR_NOW struct diff_poisson_log { - /* The number of samples in each group. */ + // The number of samples in each group. Eigen::VectorXd n_samples_; - /* The sum of counts in each group. */ + // The sum of counts in each group. Eigen::VectorXd sums_; - /* exposure, i.e. off-set term for the latent variable. */ + // exposure, i.e. off-set term for the latent variable. Eigen::VectorXd log_exposure_; diff_poisson_log(const Eigen::VectorXd& n_samples, const Eigen::VectorXd& sums) - : n_samples_(n_samples), sums_(sums) { + : n_samples_(n_samples), sums_(sums) { log_exposure_ = Eigen::VectorXd::Zero(sums.size()); } diff_poisson_log(const Eigen::VectorXd& n_samples, const Eigen::VectorXd& sums, const Eigen::VectorXd& log_exposure) - : n_samples_(n_samples), sums_(sums), log_exposure_(log_exposure) { } + : n_samples_(n_samples), sums_(sums), log_exposure_(log_exposure) {} /** * Return the log density. @@ -48,16 +49,16 @@ struct diff_poisson_log { * @return the log density. 
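In terms of the sufficient statistics used by diff_bernoulli_logit above (s_i the sum of successes and n_i the number of trials in group i), the quantities returned by log_likelihood, diff, and third_diff are, per group,

  l(theta)        = sum_i [ s_i * theta_i - n_i * log(1 + exp(theta_i)) ]
  dl/dtheta_i     = s_i - n_i * inv_logit(theta_i)
  d2l/dtheta_i^2  = -n_i * exp(theta_i) / (1 + exp(theta_i))^2
  d3l/dtheta_i^3  = n_i * exp(theta_i) * (exp(theta_i) - 1) / (1 + exp(theta_i))^3,

and the Hessian is diagonal because each group's contribution depends on a single theta_i, which is why it can be stored one entry per diagonal element of a sparse matrix.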
*/ template - T1 log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) - const { + T1 log_likelihood( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { double factorial_term = 0; for (int i = 0; i < sums_.size(); i++) factorial_term += lgamma(sums_(i) + 1); Eigen::Matrix shifted_mean = theta + log_exposure_; - return - factorial_term - + (shifted_mean).dot(sums_) - n_samples_.dot(exp(shifted_mean)); + return -factorial_term + (shifted_mean).dot(sums_) + - n_samples_.dot(exp(shifted_mean)); } /** @@ -73,15 +74,15 @@ struct diff_poisson_log { * @param[in, out] hessian diagonal, so stored in a vector. */ template - void diff (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy, - Eigen::Matrix& gradient, - Eigen::Matrix& hessian) const { - Eigen::Matrix - common_term = n_samples_.cwiseProduct(exp(theta + log_exposure_)); + void diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + Eigen::Matrix& hessian) const { + Eigen::Matrix common_term + = n_samples_.cwiseProduct(exp(theta + log_exposure_)); gradient = sums_ - common_term; - hessian = - common_term; + hessian = -common_term; } /** @@ -94,16 +95,16 @@ struct diff_poisson_log { * derivative tensor. */ template - Eigen::Matrix - third_diff(const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) const { + Eigen::Matrix third_diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { return -n_samples_.cwiseProduct(exp(theta + log_exposure_)); } template - Eigen::Matrix, Eigen::Dynamic, 1> - diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::Matrix, Eigen::Dynamic, 1> diff_eta( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; return void_matrix; @@ -114,23 +115,22 @@ struct diff_poisson_log { diff_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + void_matrix; return void_matrix; } template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { + const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + void_matrix; return void_matrix; } }; - +#endif /** * A structure to compute the log density, first, second, * and third-order derivatives for a Bernoulli logistic likelihood @@ -146,7 +146,7 @@ struct diff_logistic_log { diff_logistic_log(const Eigen::VectorXd& n_samples, const Eigen::VectorXd& sums) - : n_samples_(n_samples), sums_(sums) { } + : n_samples_(n_samples), sums_(sums) {} /** * Return the log density. @@ -155,13 +155,13 @@ struct diff_logistic_log { * @return the log density. 
*/ template - T1 log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) - const { - Eigen::VectorXd one = rep_vector(1, theta.size()); - return sum(theta.cwiseProduct(sums_) - - n_samples_.cwiseProduct(log(one + exp(theta)))); - } + T1 log_likelihood( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { + Eigen::VectorXd one = rep_vector(1, theta.size()); + return sum(theta.cwiseProduct(sums_) + - n_samples_.cwiseProduct(log(one + exp(theta)))); + } /** * Returns the gradient of the log density, and the hessian. @@ -175,17 +175,17 @@ struct diff_logistic_log { * @param[in, out] hessian diagonal, so stored in a vector. */ template - void diff (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy, - Eigen::Matrix& gradient, - Eigen::Matrix& hessian) const { + void diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + Eigen::Matrix& hessian) const { Eigen::Matrix exp_theta = exp(theta); Eigen::VectorXd one = rep_vector(1, theta.size()); gradient = sums_ - n_samples_.cwiseProduct(inv_logit(theta)); - hessian = - n_samples_.cwiseProduct(elt_divide(exp_theta, - square(one + exp_theta))); + hessian = -n_samples_.cwiseProduct( + elt_divide(exp_theta, square(one + exp_theta))); } /** @@ -198,22 +198,22 @@ struct diff_logistic_log { * derivative tensor. */ template - Eigen::Matrix - third_diff(const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) const { + Eigen::Matrix third_diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { Eigen::VectorXd exp_theta = exp(theta); Eigen::VectorXd one = rep_vector(1, theta.size()); Eigen::VectorXd nominator = exp_theta.cwiseProduct(exp_theta - one); - Eigen::VectorXd denominator = square(one + exp_theta) - .cwiseProduct(one + exp_theta); + Eigen::VectorXd denominator + = square(one + exp_theta).cwiseProduct(one + exp_theta); return n_samples_.cwiseProduct(elt_divide(nominator, denominator)); } template - Eigen::Matrix, Eigen::Dynamic, 1> - diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::Matrix, Eigen::Dynamic, 1> diff_eta( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; return void_matrix; @@ -224,39 +224,38 @@ struct diff_logistic_log { diff_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + void_matrix; return void_matrix; } template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { + const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTIONS SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + void_matrix; return void_matrix; } }; +#ifdef EXCLUDE_THIS_FOR_NOW struct diff_neg_binomial_2_log { - /* Observed counts */ + // Observed counts Eigen::VectorXd y_; - /* Latent parameter index for each observation. */ + // Latent parameter index for each observation. std::vector y_index_; - /* The number of samples in each group. */ + // The number of samples in each group. Eigen::VectorXd n_samples_; - /* The sum of cours in each group. */ + // The sum of cours in each group. 
Eigen::VectorXd sums_; - /* Number of latent Gaussian variables. */ + // Number of latent Gaussian variables. int n_theta_; diff_neg_binomial_2_log(const Eigen::VectorXd& y, - const std::vector& y_index, - int n_theta) - : y_(y), y_index_(y_index), n_theta_(n_theta) { + const std::vector& y_index, int n_theta) + : y_(y), y_index_(y_index), n_theta_(n_theta) { sums_ = Eigen::VectorXd::Zero(n_theta); n_samples_ = Eigen::VectorXd::Zero(n_theta); @@ -267,9 +266,9 @@ struct diff_neg_binomial_2_log { } template - return_type_t - log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + return_type_t log_likelihood( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { T_eta eta_scalar = eta(0); return_type_t logp = 0; for (size_t i = 0; i < y_.size(); i++) { @@ -278,79 +277,81 @@ struct diff_neg_binomial_2_log { // CHECK -- is it better to vectorize this loop? Eigen::Matrix exp_theta = exp(theta); for (int i = 0; i < n_theta_; i++) { - return_type_t - log_eta_plus_exp_theta = log(eta_scalar + exp_theta(i)); + return_type_t log_eta_plus_exp_theta + = log(eta_scalar + exp_theta(i)); logp += sums_(i) * (theta(i) - log_eta_plus_exp_theta) - + n_samples_(i) * eta_scalar - * (log(eta_scalar) - log_eta_plus_exp_theta); + + n_samples_(i) * eta_scalar + * (log(eta_scalar) - log_eta_plus_exp_theta); } return logp; } template - void diff (const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - Eigen::Matrix, - Eigen::Dynamic, 1>& gradient, - Eigen::Matrix, - Eigen::Dynamic, 1>& hessian) const { + void diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + Eigen::Matrix, Eigen::Dynamic, 1>& gradient, + Eigen::Matrix, Eigen::Dynamic, 1>& hessian) + const { typedef return_type_t scalar; Eigen::VectorXd one = rep_vector(1, theta.size()); T_eta eta_scalar = eta(0); - Eigen::Matrix - sums_plus_n_eta = sums_ + eta_scalar * n_samples_; + Eigen::Matrix sums_plus_n_eta + = sums_ + eta_scalar * n_samples_; Eigen::Matrix exp_neg_theta = exp(-theta); - Eigen::Matrix - one_plus_exp = one + eta_scalar * exp_neg_theta; + Eigen::Matrix one_plus_exp + = one + eta_scalar * exp_neg_theta; gradient = sums_ - elt_divide(sums_plus_n_eta, one_plus_exp); - hessian = - eta_scalar * sums_plus_n_eta. - cwiseProduct(elt_divide(exp_neg_theta, square(one_plus_exp))); + hessian = -eta_scalar + * sums_plus_n_eta.cwiseProduct( + elt_divide(exp_neg_theta, square(one_plus_exp))); } template - Eigen::Matrix, Eigen::Dynamic, 1> - third_diff(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::Matrix, Eigen::Dynamic, 1> third_diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { typedef return_type_t scalar; Eigen::Matrix exp_theta = exp(theta); T_eta eta_scalar = eta(0); - Eigen::Matrix - eta_vec = rep_vector(eta_scalar, theta.size()); - Eigen::Matrix - eta_plus_exp_theta = eta_vec + exp_theta; - - return - ((sums_ + eta_scalar * n_samples_) * eta_scalar). 
- cwiseProduct(exp_theta.cwiseProduct( - elt_divide(eta_vec - exp_theta, - square(eta_plus_exp_theta).cwiseProduct(eta_plus_exp_theta)))); + Eigen::Matrix eta_vec + = rep_vector(eta_scalar, theta.size()); + Eigen::Matrix eta_plus_exp_theta + = eta_vec + exp_theta; + + return -((sums_ + eta_scalar * n_samples_) * eta_scalar) + .cwiseProduct(exp_theta.cwiseProduct( + elt_divide(eta_vec - exp_theta, + square(eta_plus_exp_theta) + .cwiseProduct(eta_plus_exp_theta)))); } template - Eigen::Matrix, Eigen::Dynamic, 1> - diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::Matrix, Eigen::Dynamic, 1> diff_eta( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); - Eigen::Matrix - y_plus_eta = y_ + rep_vector(eta_scalar, y_.size()); + Eigen::Matrix y_plus_eta + = y_ + rep_vector(eta_scalar, y_.size()); Eigen::Matrix exp_theta = exp(theta); - Eigen::Matrix - exp_theta_plus_eta = exp_theta + rep_vector(eta_scalar, theta.size()); + Eigen::Matrix exp_theta_plus_eta + = exp_theta + rep_vector(eta_scalar, theta.size()); T_eta y_plus_eta_digamma_sum = 0; for (int i = 0; i < y_.size(); i++) y_plus_eta_digamma_sum += digamma(y_plus_eta(i)); - Eigen::Matrix gradient_eta(1); - gradient_eta(0) = - y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) - - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) - + sum(n_samples_ * log(eta_scalar) - - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) - + n_samples_); - return gradient_eta; + Eigen::Matrix gradient_eta(1); + gradient_eta(0) + = y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) + - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) + + sum(n_samples_ * log(eta_scalar) + - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) + + n_samples_); + return gradient_eta; } template @@ -360,11 +361,11 @@ struct diff_neg_binomial_2_log { typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); - Eigen::Matrix - diff_matrix(theta.size(), 1); - diff_matrix.col(0) - = - elt_divide(n_samples_ - sums_.cwiseProduct(exp_neg_theta), - square(eta_scalar * exp_neg_theta + rep_vector(1, theta.size()))); + Eigen::Matrix diff_matrix( + theta.size(), 1); + diff_matrix.col(0) = -elt_divide( + n_samples_ - sums_.cwiseProduct(exp_neg_theta), + square(eta_scalar * exp_neg_theta + rep_vector(1, theta.size()))); return diff_matrix; } @@ -373,29 +374,26 @@ struct diff_neg_binomial_2_log { template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { + const Eigen::Matrix& eta) const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); Eigen::Matrix one_plus_eta_exp - = rep_vector(1, theta.size()) + eta_scalar * exp_neg_theta; + = rep_vector(1, theta.size()) + eta_scalar * exp_neg_theta; - Eigen::Matrix - diff_matrix(theta.size(), 1); + Eigen::Matrix diff_matrix( + theta.size(), 1); - diff_matrix.col(0) = - - elt_divide(exp_neg_theta.cwiseProduct( - - eta_scalar * exp_neg_theta.cwiseProduct(sums_) - + sums_ + 2 * eta_scalar * n_samples_), - square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp)); // ); + diff_matrix.col(0) = -elt_divide( + exp_neg_theta.cwiseProduct(-eta_scalar + * exp_neg_theta.cwiseProduct(sums_) + + sums_ + 2 * eta_scalar * n_samples_), + square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp)); // ); return diff_matrix; } - - }; - +#endif // NOTE: the below 
structure is incomplete... struct diff_student_t { /* Observations. */ @@ -404,18 +402,16 @@ struct diff_student_t { std::vector y_index_; // QUESTION - Save eta here too? - diff_student_t(const Eigen::VectorXd& y, - const std::vector& y_index) - : y_(y), y_index_(y_index) { } + diff_student_t(const Eigen::VectorXd& y, const std::vector& y_index) + : y_(y), y_index_(y_index) {} /** * Returns the log density. */ template - return_type_t - log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { + return_type_t log_likelihood( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { T_eta nu = eta(0); T_eta sigma = eta(1); T_eta sigma_squared = sigma * sigma; @@ -423,9 +419,10 @@ struct diff_student_t { int n = theta.size(); // CHECK -- probably don't need normalizing constant. - return_type_t - log_constant = n * (lgamma((nu + 1) / 2) - lgamma(nu / 2) - - LOG_SQRT_PI - 0.5 * log(nu) - log(sigma)); + return_type_t log_constant + = n + * (lgamma((nu + 1) / 2) - lgamma(nu / 2) - LOG_SQRT_PI - 0.5 * log(nu) + - log(sigma)); T_theta log_kernel = 0; diff --git a/stan/math/laplace/laplace_likelihood_general.hpp b/stan/math/laplace/laplace_likelihood_general.hpp index ba5b3daa04e..d502e38d5b3 100644 --- a/stan/math/laplace/laplace_likelihood_general.hpp +++ b/stan/math/laplace/laplace_likelihood_general.hpp @@ -26,26 +26,22 @@ struct diff_likelihood { /* stream to return print statements when function is called. */ std::ostream* pstream_; - diff_likelihood(const F& f, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - std::ostream* pstream = 0) - : f_(f), delta_(delta), delta_int_(delta_int), pstream_(pstream) { } + diff_likelihood(const F& f, const Eigen::VectorXd& delta, + const std::vector& delta_int, std::ostream* pstream = 0) + : f_(f), delta_(delta), delta_int_(delta_int), pstream_(pstream) {} template T1 log_likelihood(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { - return f_(theta, eta, delta_, delta_int_, pstream_); - } + const Eigen::Matrix& eta) const { + return f_(theta, eta, delta_, delta_int_, pstream_); + } - void diff (const Eigen::VectorXd& theta, - const Eigen::VectorXd& eta, - Eigen::VectorXd& gradient, - Eigen::SparseMatrix& hessian_theta, - int hessian_block_size = 1) const { - using Eigen::Matrix; + void diff(const Eigen::VectorXd& theta, const Eigen::VectorXd& eta, + Eigen::VectorXd& gradient, + Eigen::SparseMatrix& hessian_theta, + int hessian_block_size = 1) const { using Eigen::Dynamic; + using Eigen::Matrix; int theta_size = theta.size(); int eta_size = eta.size(); @@ -58,7 +54,8 @@ struct diff_likelihood { var f_var = f_(theta_var, eta_var, delta_, delta_int_, pstream_); grad(f_var.vi_); gradient.resize(theta_size + eta_size); - for (int i = 0; i < theta_size; i++) gradient(i) = theta_var(i).adj(); + for (int i = 0; i < theta_size; i++) + gradient(i) = theta_var(i).adj(); for (int i = 0; i < eta_size; i++) gradient(theta_size + i) = eta_var(i).adj(); } @@ -67,32 +64,31 @@ struct diff_likelihood { double f_theta; if (hessian_block_size == 1) { Eigen::VectorXd v(theta_size); - for (int i = 0; i < theta_size; i++) v(i) = 1; + for (int i = 0; i < theta_size; i++) + v(i) = 1; Eigen::VectorXd hessian_v; - hessian_times_vector(f_, theta, eta, delta_, delta_int_, - v, f_theta, hessian_v, pstream_); + hessian_times_vector(f_, theta, eta, delta_, delta_int_, v, f_theta, + hessian_v, pstream_); hessian_theta.reserve(Eigen::VectorXi::Constant(theta_size, 1)); for (int i = 0; i < theta_size; i++) 
hessian_theta.insert(i, i) = hessian_v(i); } else { - hessian_block_diag(f_, theta, eta, delta_, delta_int_, - hessian_block_size, + hessian_block_diag(f_, theta, eta, delta_, delta_int_, hessian_block_size, f_theta, hessian_theta, pstream_); } } Eigen::VectorXd third_diff(const Eigen::VectorXd& theta, const Eigen::VectorXd& eta) const { - int theta_size = theta.size(); Eigen::VectorXd v(theta_size); - for (int i = 0; i < theta_size; i++) v(i) = 1; + for (int i = 0; i < theta_size; i++) + v(i) = 1; double f_theta; Eigen::VectorXd third_diff_tensor; - third_diff_directional(f_, theta, eta, delta_, delta_int_, - f_theta, third_diff_tensor, - v, v, pstream_); + third_diff_directional(f_, theta, eta, delta_, delta_int_, f_theta, + third_diff_tensor, v, v, pstream_); return third_diff_tensor; } @@ -108,8 +104,8 @@ struct diff_likelihood { Eigen::VectorXd diff_eta_implicit(const Eigen::VectorXd& v, const Eigen::VectorXd& theta, const Eigen::VectorXd& eta) const { - using Eigen::Matrix; using Eigen::Dynamic; + using Eigen::Matrix; using Eigen::VectorXd; nested_rev_autodiff nested; @@ -126,21 +122,22 @@ struct diff_likelihood { // CHECK -- After merging develop branch, needed to do this. Matrix, Dynamic, 1> eta_fvar(eta_size); - for (int i = 0; i < eta_size; i++) eta_fvar(i) = fvar(eta_var(i), 0); + for (int i = 0; i < eta_size; i++) + eta_fvar(i) = fvar(eta_var(i), 0); fvar f_fvar = f_(theta_fvar, eta_fvar, delta_, delta_int_, pstream_); grad(f_fvar.d_.vi_); VectorXd diff_eta(eta_size); - for (int i = 0; i < eta_size; i++) diff_eta(i) = eta_var(i).adj(); + for (int i = 0; i < eta_size; i++) + diff_eta(i) = eta_var(i).adj(); return diff_eta; } - template - Eigen::Matrix, Eigen::Dynamic, 1> - diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::Matrix, Eigen::Dynamic, 1> diff_eta( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; Eigen::Matrix, Eigen::Dynamic, 1> void_matrix; return void_matrix; @@ -151,19 +148,18 @@ struct diff_likelihood { diff_theta_eta(const Eigen::Matrix& theta, const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + void_matrix; return void_matrix; } template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { + const Eigen::Matrix& eta) const { std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" 
<< std::endl; - Eigen::Matrix, Eigen::Dynamic, - Eigen::Dynamic> void_matrix; + Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> + void_matrix; return void_matrix; } }; diff --git a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp index 433282f2dac..c0a1cb697ef 100644 --- a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp +++ b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp @@ -19,9 +19,8 @@ struct diff_neg_binomial_2_log { int n_theta_; diff_neg_binomial_2_log(const Eigen::VectorXd& y, - const std::vector& y_index, - int n_theta) - : y_(y), y_index_(y_index), n_theta_(n_theta) { + const std::vector& y_index, int n_theta) + : y_(y), y_index_(y_index), n_theta_(n_theta) { sums_ = Eigen::VectorXd::Zero(n_theta); n_samples_ = Eigen::VectorXd::Zero(n_theta); @@ -32,9 +31,9 @@ struct diff_neg_binomial_2_log { } template - return_type_t - log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + return_type_t log_likelihood( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { T_eta eta_scalar = eta(0); return_type_t logp = 0; for (size_t i = 0; i < y_.size(); i++) { @@ -43,79 +42,82 @@ struct diff_neg_binomial_2_log { // CHECK -- is it better to vectorize this loop? Eigen::Matrix exp_theta = exp(theta); for (int i = 0; i < n_theta_; i++) { - return_type_t - log_eta_plus_exp_theta = log(eta_scalar + exp_theta(i)); + return_type_t log_eta_plus_exp_theta + = log(eta_scalar + exp_theta(i)); logp += sums_(i) * (theta(i) - log_eta_plus_exp_theta) - + n_samples_(i) * eta_scalar - * (log(eta_scalar) - log_eta_plus_exp_theta); + + n_samples_(i) * eta_scalar + * (log(eta_scalar) - log_eta_plus_exp_theta); } return logp; } template - void diff (const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - Eigen::Matrix, - Eigen::Dynamic, 1>& gradient, - Eigen::Matrix, - Eigen::Dynamic, 1>& hessian) const { + void diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + Eigen::Matrix, Eigen::Dynamic, 1>& gradient, + Eigen::Matrix, Eigen::Dynamic, 1>& hessian, + int hessian_block_size = 1) + const { typedef return_type_t scalar; Eigen::VectorXd one = rep_vector(1, theta.size()); T_eta eta_scalar = eta(0); - Eigen::Matrix - sums_plus_n_eta = sums_ + eta_scalar * n_samples_; + Eigen::Matrix sums_plus_n_eta + = sums_ + eta_scalar * n_samples_; Eigen::Matrix exp_neg_theta = exp(-theta); - Eigen::Matrix - one_plus_exp = one + eta_scalar * exp_neg_theta; + Eigen::Matrix one_plus_exp + = one + eta_scalar * exp_neg_theta; gradient = sums_ - elt_divide(sums_plus_n_eta, one_plus_exp); - hessian = - eta_scalar * sums_plus_n_eta. - cwiseProduct(elt_divide(exp_neg_theta, square(one_plus_exp))); + hessian = -eta_scalar + * sums_plus_n_eta.cwiseProduct( + elt_divide(exp_neg_theta, square(one_plus_exp))); } template - Eigen::Matrix, Eigen::Dynamic, 1> - third_diff(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::Matrix, Eigen::Dynamic, 1> third_diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { typedef return_type_t scalar; Eigen::Matrix exp_theta = exp(theta); T_eta eta_scalar = eta(0); - Eigen::Matrix - eta_vec = rep_vector(eta_scalar, theta.size()); - Eigen::Matrix - eta_plus_exp_theta = eta_vec + exp_theta; - - return - ((sums_ + eta_scalar * n_samples_) * eta_scalar). 
- cwiseProduct(exp_theta.cwiseProduct( - elt_divide(eta_vec - exp_theta, - square(eta_plus_exp_theta).cwiseProduct(eta_plus_exp_theta)))); + Eigen::Matrix eta_vec + = rep_vector(eta_scalar, theta.size()); + Eigen::Matrix eta_plus_exp_theta + = eta_vec + exp_theta; + + return -((sums_ + eta_scalar * n_samples_) * eta_scalar) + .cwiseProduct(exp_theta.cwiseProduct( + elt_divide(eta_vec - exp_theta, + square(eta_plus_exp_theta) + .cwiseProduct(eta_plus_exp_theta)))); } template - Eigen::Matrix, Eigen::Dynamic, 1> - diff_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) const { + Eigen::Matrix, Eigen::Dynamic, 1> diff_eta( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta) const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); - Eigen::Matrix - y_plus_eta = y_ + rep_vector(eta_scalar, y_.size()); + Eigen::Matrix y_plus_eta + = y_ + rep_vector(eta_scalar, y_.size()); Eigen::Matrix exp_theta = exp(theta); - Eigen::Matrix - exp_theta_plus_eta = exp_theta + rep_vector(eta_scalar, theta.size()); + Eigen::Matrix exp_theta_plus_eta + = exp_theta + rep_vector(eta_scalar, theta.size()); T_eta y_plus_eta_digamma_sum = 0; for (int i = 0; i < y_.size(); i++) y_plus_eta_digamma_sum += digamma(y_plus_eta(i)); - Eigen::Matrix gradient_eta(1); - gradient_eta(0) = - y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) - - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) - + sum(n_samples_ * log(eta_scalar) - - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) - + n_samples_); - return gradient_eta; + Eigen::Matrix gradient_eta(1); + gradient_eta(0) + = y_plus_eta_digamma_sum - y_.size() * digamma(eta_scalar) + - sum(elt_divide(sums_ + n_samples_ * eta_scalar, exp_theta_plus_eta)) + + sum(n_samples_ * log(eta_scalar) + - n_samples_.cwiseProduct(log(exp_theta_plus_eta)) + + n_samples_); + return gradient_eta; } template @@ -125,11 +127,11 @@ struct diff_neg_binomial_2_log { typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); - Eigen::Matrix - diff_matrix(theta.size(), 1); - diff_matrix.col(0) - = - elt_divide(n_samples_ - sums_.cwiseProduct(exp_neg_theta), - square(eta_scalar * exp_neg_theta + rep_vector(1, theta.size()))); + Eigen::Matrix diff_matrix( + theta.size(), 1); + diff_matrix.col(0) = -elt_divide( + n_samples_ - sums_.cwiseProduct(exp_neg_theta), + square(eta_scalar * exp_neg_theta + rep_vector(1, theta.size()))); return diff_matrix; } @@ -138,22 +140,21 @@ struct diff_neg_binomial_2_log { template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diff2_theta_eta(const Eigen::Matrix& theta, - const Eigen::Matrix& eta) - const { + const Eigen::Matrix& eta) const { typedef return_type_t scalar; T_eta eta_scalar = eta(0); Eigen::Matrix exp_neg_theta = exp(-theta); Eigen::Matrix one_plus_eta_exp - = rep_vector(1, theta.size()) + eta_scalar * exp_neg_theta; + = rep_vector(1, theta.size()) + eta_scalar * exp_neg_theta; - Eigen::Matrix - diff_matrix(theta.size(), 1); + Eigen::Matrix diff_matrix( + theta.size(), 1); - diff_matrix.col(0) = - - elt_divide(exp_neg_theta.cwiseProduct( - - eta_scalar * exp_neg_theta.cwiseProduct(sums_) - + sums_ + 2 * eta_scalar * n_samples_), - square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp)); // ); + diff_matrix.col(0) = -elt_divide( + exp_neg_theta.cwiseProduct(-eta_scalar + * exp_neg_theta.cwiseProduct(sums_) + + sums_ + 2 * eta_scalar * n_samples_), + square(one_plus_eta_exp).cwiseProduct(one_plus_eta_exp)); // ); return diff_matrix; } diff --git 
a/stan/math/laplace/laplace_likelihood_poisson_log.hpp b/stan/math/laplace/laplace_likelihood_poisson_log.hpp index 9a3497db6c6..6556a5b0fbd 100644 --- a/stan/math/laplace/laplace_likelihood_poisson_log.hpp +++ b/stan/math/laplace/laplace_likelihood_poisson_log.hpp @@ -17,8 +17,8 @@ namespace math { * This structure can be passed to the the laplace_marginal function. * Uses sufficient statistics for the data. */ - // FIX ME -- cannot use the sufficient statistic to compute log density in - // because of log factorial term. +// FIX ME -- cannot use the sufficient statistic to compute log density in +// because of log factorial term. struct diff_poisson_log { /* The number of samples in each group. */ Eigen::VectorXd n_samples_; @@ -29,14 +29,14 @@ struct diff_poisson_log { diff_poisson_log(const Eigen::VectorXd& n_samples, const Eigen::VectorXd& sums) - : n_samples_(n_samples), sums_(sums) { + : n_samples_(n_samples), sums_(sums) { log_exposure_ = Eigen::VectorXd::Zero(sums.size()); } diff_poisson_log(const Eigen::VectorXd& n_samples, const Eigen::VectorXd& sums, const Eigen::VectorXd& log_exposure) - : n_samples_(n_samples), sums_(sums), log_exposure_(log_exposure) { } + : n_samples_(n_samples), sums_(sums), log_exposure_(log_exposure) {} /** * Return the log density. @@ -46,16 +46,16 @@ struct diff_poisson_log { * @return the log density. */ template - T1 log_likelihood (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) - const { + T1 log_likelihood( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { double factorial_term = 0; for (int i = 0; i < sums_.size(); i++) factorial_term += lgamma(sums_(i) + 1); Eigen::Matrix shifted_mean = theta + log_exposure_; - return - factorial_term - + (shifted_mean).dot(sums_) - n_samples_.dot(exp(shifted_mean)); + return -factorial_term + (shifted_mean).dot(sums_) + - n_samples_.dot(exp(shifted_mean)); } /** @@ -71,23 +71,22 @@ struct diff_poisson_log { * @param[in, out] hessian diagonal, so stored in a vector. */ template - void diff (const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy, - Eigen::Matrix& gradient, - // Eigen::Matrix& hessian, - Eigen::SparseMatrix& hessian, - int hessian_block_size = 1) - const { + void diff(const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy, + Eigen::Matrix& gradient, + // Eigen::Matrix& hessian, + Eigen::SparseMatrix& hessian, + int hessian_block_size = 1) const { int theta_size = theta.size(); - Eigen::Matrix - common_term = n_samples_.cwiseProduct(exp(theta + log_exposure_)); + Eigen::Matrix common_term + = n_samples_.cwiseProduct(exp(theta + log_exposure_)); gradient = sums_ - common_term; hessian.resize(theta_size, theta_size); hessian.reserve(Eigen::VectorXi::Constant(theta_size, hessian_block_size)); // hessian.col(0) = - common_term; for (int i = 0; i < theta_size; i++) - hessian.insert(i, i) = - common_term(i); + hessian.insert(i, i) = -common_term(i); } /** @@ -100,9 +99,9 @@ struct diff_poisson_log { * derivative tensor. 
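With s_i the sum of counts, n_i the number of observations, and x_i the log exposure in group i, the Poisson likelihood coded above reduces to

  l(theta)       = sum_i [ s_i * (theta_i + x_i) - n_i * exp(theta_i + x_i) ] - sum_i lgamma(s_i + 1)
  dl/dtheta_i    = s_i - n_i * exp(theta_i + x_i)
  d2l/dtheta_i^2 = d3l/dtheta_i^3 = -n_i * exp(theta_i + x_i),

so the gradient, the sparse diagonal Hessian filled by diff, and the third-order tensor returned by third_diff just below all share the same exp(theta + log_exposure) term.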
*/ template - Eigen::Matrix - third_diff(const Eigen::Matrix& theta, - const Eigen::Matrix& eta_dummy) const { + Eigen::Matrix third_diff( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta_dummy) const { return -n_samples_.cwiseProduct(exp(theta + log_exposure_)); } diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 1e2cd905e09..28fedf671a3 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ b/stan/math/laplace/laplace_marginal.hpp @@ -1,6 +1,9 @@ #ifndef STAN_MATH_LAPLACE_LAPLACE_MARGINAL_HPP #define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_HPP +#include +#include +#include #include #include #include @@ -8,8 +11,6 @@ #include #include #include -#include -#include #include #include @@ -26,449 +27,439 @@ // TODO -- either use Eigen's .solve() or mdivide_left_tri. // The code needs to be more consistent. - namespace stan { namespace math { - /** - * For a latent Gaussian model with hyperparameters phi and eta, - * latent variables theta, and observations y, this function computes - * an approximation of the log marginal density, p(y | phi). - * This is done by marginalizing out theta, using a Laplace - * approxmation. The latter is obtained by finding the mode, - * via Newton's method, and computing the Hessian of the likelihood. - * - * The convergence criterion for the Newton is a small change in - * log marginal density. The user controls the tolerance (i.e. - * threshold under which change is deemed small enough) and - * maximum number of steps. - * TO DO: add more robust convergence criterion. - * - * This algorithm is adapted from Rasmussen and Williams, - * "Gaussian Processes for Machine Learning", second edition, - * MIT Press 2006, algorithm 3.1. - * - * Variables needed for the gradient or generating quantities - * are stored by reference. - * - * @tparam D structure type for the likelihood object. - * @tparam K structure type for the covariance object. - * @tparam Tx type of x, which can in Stan be passed as a matrix or - * an array of vectors. - * @param[in] D structure to compute and differentiate the log likelihood. - * @param[in] K structure to compute the covariance function. - * @param[in] phi hyperparameter (input for the covariance function). - * @param[in] eta hyperparameter (input for likelihood). - * @param[in] x fixed spatial data (input for the covariance function). - * @param[in] delta additional fixed real data (input for covariance - * function). - * @param[in] delta_int additional fixed integer data (input for covariance - * function). - * @param[in, out] covariance the evaluated covariance function for the - * latent gaussian variable. - * @param[in, out] theta a vector to store the mode. - * @param[in, out] W_r a vector to store the square root of the - * negative Hessian or the negative Hessian, depending - * on which solver we use. - * @param[in, out] L cholesky decomposition of stabilized inverse covariance. - * @param[in, out] a element in the Newton step - * @param[in, out] l_grad the log density of the likelihood. - * @param[in] theta_0 the initial guess for the mode. - * @param[in] tolerance the convergence criterion for the Newton solver. - * @param[in] max_num_steps maximum number of steps for the Newton solver. - * @param[in] hessian_block_size the size of the block, where we assume - * the Hessian is block-diagonal. - * @param[in] solver which Newton solver to use: - * (1) method using the root of W. - * (2) method using the root of the covariance. - * (3) method using an LU decomposition. 
- * - * @return the log marginal density, p(y | phi). - */ - template - double - laplace_marginal_density (const D& diff_likelihood, - const K& covariance_function, - const Eigen::VectorXd& phi, - const Eigen::VectorXd& eta, - const Tx& x, - const std::vector& delta, - const std::vector& delta_int, - Eigen::MatrixXd& covariance, - Eigen::VectorXd& theta, - Eigen::SparseMatrix& W_r, - Eigen::MatrixXd& L, - Eigen::VectorXd& a, - Eigen::VectorXd& l_grad, - Eigen::PartialPivLU& LU, - Eigen::MatrixXd& K_root, - const Eigen::VectorXd& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int solver = 1, - int do_line_search = 0, - int max_steps_line_search = 10) { - using Eigen::MatrixXd; - using Eigen::VectorXd; - using Eigen::SparseMatrix; - - // TEST - int diagonal_covariance = 0; - // solver = 1; - // hessian_block_size = 1; +/** + * For a latent Gaussian model with hyperparameters phi and eta, + * latent variables theta, and observations y, this function computes + * an approximation of the log marginal density, p(y | phi). + * This is done by marginalizing out theta, using a Laplace + * approximation. The latter is obtained by finding the mode, + * via Newton's method, and computing the Hessian of the likelihood. + * + * The convergence criterion for the Newton is a small change in + * log marginal density. The user controls the tolerance (i.e. + * threshold under which change is deemed small enough) and + * maximum number of steps. + * TO DO: add more robust convergence criterion. + * + * This algorithm is adapted from Rasmussen and Williams, + * "Gaussian Processes for Machine Learning", second edition, + * MIT Press 2006, algorithm 3.1. + * + * Variables needed for the gradient or generating quantities + * are stored by reference. + * + * @tparam D structure type for the likelihood object. + * @tparam K structure type for the covariance object. + * @tparam Tx type of x, which can in Stan be passed as a matrix or + * an array of vectors. + * @param[in] D structure to compute and differentiate the log likelihood. + * @param[in] K structure to compute the covariance function. + * @param[in] phi hyperparameter (input for the covariance function). + * @param[in] eta hyperparameter (input for likelihood). + * @param[in] x fixed spatial data (input for the covariance function). + * @param[in] delta additional fixed real data (input for covariance + * function). + * @param[in] delta_int additional fixed integer data (input for covariance + * function). + * @param[in, out] covariance the evaluated covariance function for the + * latent gaussian variable. + * @param[in, out] theta a vector to store the mode. + * @param[in, out] W_r a vector to store the square root of the + * negative Hessian or the negative Hessian, depending + * on which solver we use. + * @param[in, out] L cholesky decomposition of stabilized inverse covariance. + * @param[in, out] a element in the Newton step + * @param[in, out] l_grad the log density of the likelihood. + * @param[in] theta_0 the initial guess for the mode. + * @param[in] tolerance the convergence criterion for the Newton solver. + * @param[in] max_num_steps maximum number of steps for the Newton solver. + * @param[in] hessian_block_size the size of the block, where we assume + * the Hessian is block-diagonal. + * @param[in] solver which Newton solver to use: + * (1) method using the root of W. + * (2) method using the root of the covariance. 
+ * (3) method using an LU decomposition. + * + * @return the log marginal density, p(y | phi). + */ +template +double laplace_marginal_density( + const D& diff_likelihood, + const K& covariance_function, + const Eigen::VectorXd& phi, + const Eigen::VectorXd& eta, + const Tx& x, + const std::vector& delta, + const std::vector& delta_int, + Eigen::MatrixXd& covariance, + Eigen::VectorXd& theta, + Eigen::SparseMatrix& W_r, + Eigen::MatrixXd& L, + Eigen::VectorXd& a, + Eigen::VectorXd& l_grad, + Eigen::PartialPivLU& LU, + Eigen::MatrixXd& K_root, + const Eigen::VectorXd& theta_0, + std::ostream* msgs = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100, + int hessian_block_size = 0, int solver = 1, + int do_line_search = 0, int max_steps_line_search = 10) { + using Eigen::MatrixXd; + using Eigen::SparseMatrix; + using Eigen::VectorXd; + + // TEST + int diagonal_covariance = 0; + // solver = 1; + // hessian_block_size = 1; + + covariance = covariance_function(phi, x, delta, delta_int, msgs); + + if (diagonal_covariance) { + Eigen::MatrixXd K_dummy = covariance.diagonal().asDiagonal(); + covariance = K_dummy; + // covariance = covariance.diagonal().asDiagonal(); + // CHECK -- above line doesn't work, not sure why... + } - covariance = covariance_function(phi, x, delta, delta_int, msgs); + int theta_size = theta_0.size(); + theta = theta_0; + double objective_old = -1e+10; // CHECK -- what value to use? + double objective_inter = -1e+10; + double objective_new; + double B_log_determinant; + Eigen::VectorXd a_old; + Eigen::VectorXd a_new; + Eigen::VectorXd theta_new; + + if (hessian_block_size == 0 && solver != 1) { + std::ostringstream message; + message << "laplace_marginal_density: if treating the Hessian as diagonal" + << " we assume its matrix square-root can be computed." + << " If you don't want to compute the matrix square-root," + << " set hessian_block_size to 1."; + throw boost::math::evaluation_error(message.str()); + } - if (diagonal_covariance) { - Eigen::MatrixXd K_dummy = covariance.diagonal().asDiagonal(); - covariance = K_dummy; - // covariance = covariance.diagonal().asDiagonal(); - // CHECK -- above line doesn't work, not sure why... - } + int block_size + = (hessian_block_size == 0) ? hessian_block_size + 1 : hessian_block_size; - int theta_size = theta_0.size(); - theta = theta_0; - double objective_old = - 1e+10; // CHECK -- what value to use? - double objective_inter = - 1e+10; - double objective_new; - double B_log_determinant; - Eigen::VectorXd a_old; - Eigen::VectorXd a_new; - Eigen::VectorXd theta_new; - - if (hessian_block_size == 0 && solver != 1) { + for (int i = 0; i <= max_num_steps; i++) { + if (i == max_num_steps) { std::ostringstream message; - message << "laplace_marginal_density: if treating the Hessian as diagonal" - << " we assume its matrix square-root can be computed." - << " If you don't want to compute the matrix square-root," - << " set hessian_block_size to 1."; + message << "laplace_marginal_density: max number of iterations:" + << max_num_steps << " exceeded."; throw boost::math::evaluation_error(message.str()); } - int block_size = (hessian_block_size == 0) ? 
hessian_block_size + 1 - : hessian_block_size; - - for (int i = 0; i <= max_num_steps; i++) { - if (i == max_num_steps) { - std::ostringstream message; - message << "laplace_marginal_density: max number of iterations:" - << max_num_steps << " exceeded."; - throw boost::math::evaluation_error(message.str()); - } - - SparseMatrix W; - diff_likelihood.diff(theta, eta, l_grad, W, block_size); - W = - W; - - VectorXd b; - { - MatrixXd B; - if (solver == 1) { - if (hessian_block_size == 0) { - W_r = W.cwiseSqrt(); - B = MatrixXd::Identity(theta_size, theta_size) - + quad_form_diag(covariance, W_r.diagonal()); - } else { - W_r = block_matrix_sqrt(W, block_size); - B = MatrixXd::Identity(theta_size, theta_size) - + W_r * (covariance * W_r); - } - - L = cholesky_decompose(B); - B_log_determinant = 2 * sum(L.diagonal().array().log()); - - if (hessian_block_size == 0) { - b = W.diagonal().cwiseProduct(theta) + l_grad.head(theta_size); - a = b - W_r - * mdivide_left_tri(transpose(L), - mdivide_left_tri(L, - W_r.diagonal().cwiseProduct(covariance * b))); - } else { - b = W * theta + l_grad.head(theta_size); - a = b - W_r - * mdivide_left_tri(transpose(L), - mdivide_left_tri(L, - W_r * (covariance * b))); - } - } else if (solver == 2) { - // TODO -- use triangularView for K_root. - W_r = W; - - if (diagonal_covariance) { - K_root = covariance.cwiseSqrt(); - } else { - K_root = cholesky_decompose(covariance); - } - - std::cout << "Cov: " << covariance.row(0).head(5) << std::endl; - std::cout << "K: " << K_root.row(0).head(5) << std::endl; + SparseMatrix W; + diff_likelihood.diff(theta, eta, l_grad, W, block_size); + W = -W; + VectorXd b; + { + MatrixXd B; + if (solver == 1) { + if (hessian_block_size == 0) { + W_r = W.cwiseSqrt(); B = MatrixXd::Identity(theta_size, theta_size) - + K_root.transpose() * W * K_root; - L = cholesky_decompose(B); - B_log_determinant = 2 * sum(L.diagonal().array().log()); - b = W * theta + l_grad.head(theta_size); - a = mdivide_left_tri(K_root.transpose(), - mdivide_left_tri(L.transpose(), - mdivide_left_tri(L, K_root.transpose() * b))); + + quad_form_diag(covariance, W_r.diagonal()); } else { - W_r = W; - B = MatrixXd::Identity(theta_size, theta_size) + covariance * W; - LU = Eigen::PartialPivLU(B); - - // TODO: compute log determinant directly. - B_log_determinant = log(LU.determinant()); + W_r = block_matrix_sqrt(W, block_size); + B = MatrixXd::Identity(theta_size, theta_size) + + W_r * (covariance * W_r); + } + L = cholesky_decompose(B); + B_log_determinant = 2 * sum(L.diagonal().array().log()); + + if (hessian_block_size == 0) { + b = W.diagonal().cwiseProduct(theta) + l_grad.head(theta_size); + a = b + - W_r + * mdivide_left_tri( + transpose(L), + mdivide_left_tri( + L, W_r.diagonal().cwiseProduct(covariance * b))); + } else { b = W * theta + l_grad.head(theta_size); - a = b - W * LU.solve(covariance * b); + a = b + - W_r + * mdivide_left_tri( + transpose(L), mdivide_left_tri( + L, W_r * (covariance * b))); } - } + } else if (solver == 2) { + // TODO -- use triangularView for K_root. 
+ W_r = W; - // Simple Newton step - theta = covariance * a; + if (diagonal_covariance) { + K_root = covariance.cwiseSqrt(); + } else { + K_root = cholesky_decompose(covariance); + } + + std::cout << "Cov: " << covariance.row(0).head(5) << std::endl; + std::cout << "K: " << K_root.row(0).head(5) << std::endl; + + B = MatrixXd::Identity(theta_size, theta_size) + + K_root.transpose() * W * K_root; + L = cholesky_decompose(B); + B_log_determinant = 2 * sum(L.diagonal().array().log()); + b = W * theta + l_grad.head(theta_size); + a = mdivide_left_tri( + K_root.transpose(), + mdivide_left_tri( + L.transpose(), + mdivide_left_tri(L, K_root.transpose() * b))); + } else { + W_r = W; + B = MatrixXd::Identity(theta_size, theta_size) + covariance * W; + LU = Eigen::PartialPivLU(B); - if (i != 0) objective_old = objective_new; + // TODO: compute log determinant directly. + B_log_determinant = log(LU.determinant()); - if (std::isfinite(theta.sum())) { - objective_new = -0.5 * a.dot(theta) - + diff_likelihood.log_likelihood(theta, eta); + b = W * theta + l_grad.head(theta_size); + a = b - W * LU.solve(covariance * b); } + } - // linesearch - // CHECK -- does linesearch work for solver 2? - int j = 0; - if (do_line_search && i != 0) { - while (j < max_steps_line_search - && (objective_new < objective_old || !std::isfinite(theta.sum()))) { - a = (a + a_old) * 0.5; // TODO -- generalize for any factor. - theta = covariance * a; + // Simple Newton step + theta = covariance * a; - if (std::isfinite(theta.sum())) { - objective_new = - 0.5 * a.dot(theta) - + diff_likelihood.log_likelihood(theta, eta); - } + if (i != 0) + objective_old = objective_new; - j++; - } - } - - a_old = a; + if (std::isfinite(theta.sum())) { + objective_new + = -0.5 * a.dot(theta) + diff_likelihood.log_likelihood(theta, eta); + } - // Check for convergence. - double objective_diff = abs(objective_new - objective_old); + // linesearch + // CHECK -- does linesearch work for solver 2? + int j = 0; + if (do_line_search && i != 0) { + while ( + j < max_steps_line_search + && (objective_new < objective_old || !std::isfinite(theta.sum()))) { + a = (a + a_old) * 0.5; // TODO -- generalize for any factor. + theta = covariance * a; - // if (i % 500 == 0) std::cout << "obj: " << objective_new << std::endl; - // if (objective_diff < tolerance) std::cout << "iter: " << i << std::endl; + if (std::isfinite(theta.sum())) { + objective_new = -0.5 * a.dot(theta) + + diff_likelihood.log_likelihood(theta, eta); + } - if (objective_diff < tolerance) break; + j++; + } } - return objective_new - 0.5 * B_log_determinant; - } + a_old = a; + + // Check for convergence. + double objective_diff = abs(objective_new - objective_old); + + // if (i % 500 == 0) std::cout << "obj: " << objective_new << std::endl; + // if (objective_diff < tolerance) std::cout << "iter: " << i << std::endl; - /** - * For a latent Gaussian model with global parameters phi, latent - * variables theta, and observations y, this function computes - * an approximation of the log marginal density, p(y | phi). - * This is done by marginalizing out theta, using a Laplace - * approxmation. The latter is obtained by finding the mode, - * using a custom Newton method, and the Hessian of the likelihood. - * - * The convergence criterion for the Newton is a small change in - * log marginal density. The user controls the tolerance (i.e. - * threshold under which change is deemed small enough) and - * maximum number of steps. - * - * Wrapper for when the hyperparameters passed as a double. 
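For orientation, the Newton iteration above is the standard mode-finding recipe for latent Gaussian models (Rasmussen & Williams, Algorithm 3.1): form B = I + W^(1/2) K W^(1/2), factor it once per step, and set theta = K a. The snippet below is a minimal, self-contained sketch of a single update in plain Eigen, assuming a dense covariance K and a diagonal negative Hessian W (the hessian_block_size == 0 case of solver 1); newton_step and its argument names are illustrative only and not part of this patch.

#include <Eigen/Dense>

// One Newton update for the latent Gaussian mode, diagonal-W case (sketch).
Eigen::VectorXd newton_step(const Eigen::MatrixXd& K,
                            const Eigen::VectorXd& W_diag,  // -Hessian of log likelihood
                            const Eigen::VectorXd& grad,    // gradient of log likelihood
                            const Eigen::VectorXd& theta) {
  const Eigen::Index n = theta.size();
  Eigen::VectorXd W_sqrt = W_diag.cwiseSqrt();
  // B = I + W^{1/2} K W^{1/2} is symmetric positive definite, so Cholesky applies.
  Eigen::MatrixXd B = Eigen::MatrixXd::Identity(n, n)
                      + W_sqrt.asDiagonal() * K * W_sqrt.asDiagonal();
  Eigen::LLT<Eigen::MatrixXd> llt(B);
  Eigen::VectorXd b = W_diag.cwiseProduct(theta) + grad;
  Eigen::VectorXd Kb = K * b;
  // a = b - W^{1/2} B^{-1} W^{1/2} K b, as in the solver == 1 branch above.
  Eigen::VectorXd u = llt.solve(W_sqrt.cwiseProduct(Kb));
  Eigen::VectorXd a = b - W_sqrt.cwiseProduct(u);
  return K * a;  // proposed theta for the next iteration
}

The line search in the loop above then simply moves a halfway back toward its previous value whenever such a step fails to increase the objective.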
- * - * @tparam T type of the initial guess. - * @tparam D structure type for the likelihood object. - * @tparam K structure type for the covariance object. - * @tparam Tx type of spatial data for covariance: in Stan, this can - * either be a matrix or an array of vectors. - * @param[in] D structure to compute and differentiate the log likelihood. - * The object stores the sufficient stats for the observations. - * @param[in] K structure to compute the covariance function. - * @param[in] phi the global parameter (input for the covariance function). - * @param[in] x data for the covariance function. - * @param[in] delta additional fixed real data (input for covariance - * function). - * @param[in] delta_int additional fixed integer data (input for covariance - * function). - * @param[in] theta_0 the initial guess for the mode. - * @param[in] tolerance the convergence criterion for the Newton solver. - * @param[in] max_num_steps maximum number of steps for the Newton solver. - * @return the log maginal density, p(y | phi). - */ - // TODO: Operands and partials version of this. - template - double - laplace_marginal_density (const D& diff_likelihood, - const K& covariance_function, - const Eigen::VectorXd& phi, - const Eigen::VectorXd& eta, - const Tx& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int solver = 1, - int do_line_search = 0, - int max_steps_line_search = 10) { - Eigen::VectorXd theta, a, l_grad; - Eigen::MatrixXd L, covariance, K_root; - Eigen::SparseMatrix W_r; - Eigen::PartialPivLU LU; - return laplace_marginal_density(diff_likelihood, covariance_function, - phi, eta, x, delta, delta_int, - covariance, - theta, W_r, L, a, l_grad, LU, K_root, - value_of(theta_0), msgs, - tolerance, max_num_steps, - hessian_block_size, - solver, do_line_search, - max_steps_line_search); + if (objective_diff < tolerance) + break; } - /** - * The vari class for the laplace marginal density. - * The method is adapted from algorithm 5.1 in Rasmussen & Williams, - * "Gaussian Processes for Machine Learning" - * with modifications described in my (Charles Margossian) - * thesis proposal. - * - * To make computation efficient, variables produced during the - * Newton step are stored and reused. To avoid storing these variables - * for too long, the sensitivies are computed in the constructor, and - * stored for the chain method. Hence, we store a single small vector, - * instead of multiple large matrices. - */ - struct laplace_marginal_density_vari : public vari { - /* dimension of hyperparameters. */ - int phi_size_; - /* hyperparameters for covariance K. */ - vari** phi_; - /* dimension of hyperparameters for likelihood. */ - int eta_size_; - /* hyperparameters for likelihood. */ - vari** eta_; - /* the marginal density of the observation, conditional on the - * globl parameters. */ - vari** marginal_density_; - /* An object to store the sensitivities of phi. */ - Eigen::VectorXd phi_adj_; - /* An object to store the sensitivities of eta. 
*/ - Eigen::VectorXd eta_adj_; - - template - laplace_marginal_density_vari - (const D& diff_likelihood, - const K& covariance_function, - const Eigen::Matrix& phi, - const Eigen::Matrix& eta, - const Tx& x, - const std::vector& delta, - const std::vector& delta_int, - double marginal_density, - const Eigen::MatrixXd& covariance, - const Eigen::VectorXd& theta, - const Eigen::SparseMatrix& W_r, - const Eigen::MatrixXd& L, - const Eigen::VectorXd& a, - const Eigen::VectorXd& l_grad, - const Eigen::PartialPivLU LU, - const Eigen::MatrixXd& K_root, - std::ostream* msgs = nullptr, - int hessian_block_size = 0, - int solver = 1) + return objective_new - 0.5 * B_log_determinant; +} + +/** + * For a latent Gaussian model with global parameters phi, latent + * variables theta, and observations y, this function computes + * an approximation of the log marginal density, p(y | phi). + * This is done by marginalizing out theta, using a Laplace + * approxmation. The latter is obtained by finding the mode, + * using a custom Newton method, and the Hessian of the likelihood. + * + * The convergence criterion for the Newton is a small change in + * log marginal density. The user controls the tolerance (i.e. + * threshold under which change is deemed small enough) and + * maximum number of steps. + * + * Wrapper for when the hyperparameters passed as a double. + * + * @tparam T type of the initial guess. + * @tparam D structure type for the likelihood object. + * @tparam K structure type for the covariance object. + * @tparam Tx type of spatial data for covariance: in Stan, this can + * either be a matrix or an array of vectors. + * @param[in] D structure to compute and differentiate the log likelihood. + * The object stores the sufficient stats for the observations. + * @param[in] K structure to compute the covariance function. + * @param[in] phi the global parameter (input for the covariance function). + * @param[in] x data for the covariance function. + * @param[in] delta additional fixed real data (input for covariance + * function). + * @param[in] delta_int additional fixed integer data (input for covariance + * function). + * @param[in] theta_0 the initial guess for the mode. + * @param[in] tolerance the convergence criterion for the Newton solver. + * @param[in] max_num_steps maximum number of steps for the Newton solver. + * @return the log maginal density, p(y | phi). + */ +// TODO: Operands and partials version of this. +template +double laplace_marginal_density( + const D& diff_likelihood, const K& covariance_function, + const Eigen::VectorXd& phi, const Eigen::VectorXd& eta, const Tx& x, + const std::vector& delta, const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100, int hessian_block_size = 0, int solver = 1, + int do_line_search = 0, int max_steps_line_search = 10) { + Eigen::VectorXd theta, a, l_grad; + Eigen::MatrixXd L, covariance, K_root; + Eigen::SparseMatrix W_r; + Eigen::PartialPivLU LU; + return laplace_marginal_density( + diff_likelihood, covariance_function, phi, eta, x, delta, delta_int, + covariance, theta, W_r, L, a, l_grad, LU, K_root, value_of(theta_0), msgs, + tolerance, max_num_steps, hessian_block_size, solver, do_line_search, + max_steps_line_search); +} + +/** + * The vari class for the laplace marginal density. 
+ * The method is adapted from algorithm 5.1 in Rasmussen & Williams, + * "Gaussian Processes for Machine Learning" + * with modifications described in my (Charles Margossian) + * thesis proposal. + * + * To make computation efficient, variables produced during the + * Newton step are stored and reused. To avoid storing these variables + * for too long, the sensitivies are computed in the constructor, and + * stored for the chain method. Hence, we store a single small vector, + * instead of multiple large matrices. + */ +struct laplace_marginal_density_vari : public vari { + /* dimension of hyperparameters. */ + int phi_size_; + /* hyperparameters for covariance K. */ + vari** phi_; + /* dimension of hyperparameters for likelihood. */ + int eta_size_; + /* hyperparameters for likelihood. */ + vari** eta_; + /* the marginal density of the observation, conditional on the + * globl parameters. */ + vari** marginal_density_; + /* An object to store the sensitivities of phi. */ + Eigen::VectorXd phi_adj_; + /* An object to store the sensitivities of eta. */ + Eigen::VectorXd eta_adj_; + + template + laplace_marginal_density_vari( + const D& diff_likelihood, const K& covariance_function, + const Eigen::Matrix& phi, + const Eigen::Matrix& eta, const Tx& x, + const std::vector& delta, const std::vector& delta_int, + double marginal_density, const Eigen::MatrixXd& covariance, + const Eigen::VectorXd& theta, const Eigen::SparseMatrix& W_r, + const Eigen::MatrixXd& L, const Eigen::VectorXd& a, + const Eigen::VectorXd& l_grad, + const Eigen::PartialPivLU LU, + const Eigen::MatrixXd& K_root, std::ostream* msgs = nullptr, + int hessian_block_size = 0, int solver = 1) : vari(marginal_density), phi_size_(phi.size()), phi_(ChainableStack::instance_->memalloc_.alloc_array( - phi.size())), + phi.size())), eta_size_(eta.size()), eta_(ChainableStack::instance_->memalloc_.alloc_array( - eta.size())), + eta.size())), marginal_density_( - ChainableStack::instance_->memalloc_.alloc_array(1)) { - using Eigen::Matrix; - using Eigen::Dynamic; - using Eigen::MatrixXd; - using Eigen::VectorXd; - using Eigen::SparseMatrix; - - int theta_size = theta.size(); - for (int i = 0; i < phi_size_; i++) phi_[i] = phi(i).vi_; - for (int i = 0; i < eta_size_; i++) eta_[i] = eta(i).vi_; - - // CHECK -- is there a cleaner way of doing this? - marginal_density_[0] = this; - marginal_density_[0] = new vari(marginal_density, false); - - MatrixXd R; - Eigen::MatrixXd LU_solve_covariance; - Eigen::VectorXd eta_dbl = value_of(eta); - Eigen::VectorXd partial_parm; - Eigen::VectorXd s2; - - if (solver == 1) { - MatrixXd W_root_diag = W_r; - R = W_r * L.transpose().triangularView() - .solve(L.triangularView() - .solve(W_root_diag)); - - Eigen::MatrixXd C = mdivide_left_tri(L, W_r * covariance); - if (hessian_block_size == 0 && eta_size_ == 0) { - s2 = 0.5 * (covariance.diagonal() - - (C.transpose() * C).diagonal()) - .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); - } else { - int block_size = (hessian_block_size == 0) ? hessian_block_size + 1 - : hessian_block_size; - Eigen::MatrixXd A = covariance - C.transpose() * C; - partial_parm - = diff_likelihood.compute_s2(theta, eta_dbl, A, block_size); - s2 = partial_parm.head(theta_size); - } - } else if (solver == 2) { - // TODO -- use triangularView for K_root. 
- R = W_r - W_r * K_root * L.transpose().triangularView() - .solve(L.triangularView() - .solve(K_root.transpose() * W_r)); - - Eigen::MatrixXd C = L.triangularView() - .solve(K_root.transpose()); - Eigen::MatrixXd A = C.transpose() * C; - partial_parm - = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); - s2 = partial_parm.head(theta_size); - } else { // solver with LU decomposition - LU_solve_covariance = LU.solve(covariance); - R = W_r - W_r * LU_solve_covariance * W_r; + ChainableStack::instance_->memalloc_.alloc_array(1)) { + using Eigen::Dynamic; + using Eigen::Matrix; + using Eigen::MatrixXd; + using Eigen::SparseMatrix; + using Eigen::VectorXd; - Eigen::MatrixXd A = covariance - covariance * W_r * LU_solve_covariance; - // Eigen::MatrixXd A = covariance - covariance * R * covariance; + int theta_size = theta.size(); + for (int i = 0; i < phi_size_; i++) + phi_[i] = phi(i).vi_; + for (int i = 0; i < eta_size_; i++) + eta_[i] = eta(i).vi_; + + // CHECK -- is there a cleaner way of doing this? + marginal_density_[0] = this; + marginal_density_[0] = new vari(marginal_density, false); + + MatrixXd R; + Eigen::MatrixXd LU_solve_covariance; + Eigen::VectorXd eta_dbl = value_of(eta); + Eigen::VectorXd partial_parm; + Eigen::VectorXd s2; + + if (solver == 1) { + MatrixXd W_root_diag = W_r; + R = W_r + * L.transpose().triangularView().solve( + L.triangularView().solve(W_root_diag)); + + Eigen::MatrixXd C = mdivide_left_tri(L, W_r * covariance); + if (hessian_block_size == 0 && eta_size_ == 0) { + s2 = 0.5 + * (covariance.diagonal() - (C.transpose() * C).diagonal()) + .cwiseProduct(diff_likelihood.third_diff(theta, eta_dbl)); + } else { + int block_size = (hessian_block_size == 0) ? hessian_block_size + 1 + : hessian_block_size; + Eigen::MatrixXd A = covariance - C.transpose() * C; partial_parm - = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); + = diff_likelihood.compute_s2(theta, eta_dbl, A, block_size); s2 = partial_parm.head(theta_size); } + } else if (solver == 2) { + // TODO -- use triangularView for K_root. 
+ R = W_r + - W_r * K_root + * L.transpose().triangularView().solve( + L.triangularView().solve(K_root.transpose() + * W_r)); + + Eigen::MatrixXd C + = L.triangularView().solve(K_root.transpose()); + Eigen::MatrixXd A = C.transpose() * C; + partial_parm + = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); + s2 = partial_parm.head(theta_size); + } else { // solver with LU decomposition + LU_solve_covariance = LU.solve(covariance); + R = W_r - W_r * LU_solve_covariance * W_r; + + Eigen::MatrixXd A = covariance - covariance * W_r * LU_solve_covariance; + // Eigen::MatrixXd A = covariance - covariance * R * covariance; + partial_parm + = diff_likelihood.compute_s2(theta, eta_dbl, A, hessian_block_size); + s2 = partial_parm.head(theta_size); + } - phi_adj_ = Eigen::VectorXd(phi_size_); - start_nested(); - try { - Matrix phi_v = value_of(phi); - Matrix - K_var = covariance_function(phi_v, x, delta, delta_int, msgs); - Eigen::VectorXd l_grad_theta = l_grad.head(theta_size); - var Z = laplace_pseudo_target(K_var, a, R, l_grad_theta, s2); + phi_adj_ = Eigen::VectorXd(phi_size_); + start_nested(); + try { + Matrix phi_v = value_of(phi); + Matrix K_var + = covariance_function(phi_v, x, delta, delta_int, msgs); + Eigen::VectorXd l_grad_theta = l_grad.head(theta_size); + var Z = laplace_pseudo_target(K_var, a, R, l_grad_theta, s2); - set_zero_all_adjoints_nested(); - grad(Z.vi_); + set_zero_all_adjoints_nested(); + grad(Z.vi_); - for (int j = 0; j < phi_size_; j++) phi_adj_[j] = phi_v(j).adj(); + for (int j = 0; j < phi_size_; j++) + phi_adj_[j] = phi_v(j).adj(); } catch (const std::exception& e) { recover_memory_nested(); @@ -491,104 +482,85 @@ namespace math { } eta_adj_ = l_grad.tail(eta_size_) + partial_parm.tail(eta_size_) - + diff_likelihood.diff_eta_implicit(v, theta, eta_dbl); + + diff_likelihood.diff_eta_implicit(v, theta, eta_dbl); } } - void chain() { - for (int j = 0; j < phi_size_; j++) - phi_[j]->adj_ += marginal_density_[0]->adj_ * phi_adj_[j]; + void chain() { + for (int j = 0; j < phi_size_; j++) + phi_[j]->adj_ += marginal_density_[0]->adj_ * phi_adj_[j]; - for (int l = 0; l < eta_size_; l++) - eta_[l]->adj_ += marginal_density_[0]->adj_ * eta_adj_[l]; - } - }; - - /** - * For a latent Gaussian model with global parameters phi, latent - * variables theta, and observations y, this function computes - * an approximation of the log marginal density, p(y | phi). - * This is done by marginalizing out theta, using a Laplace - * approxmation. The latter is obtained by finding the mode, - * using a custom Newton method, and the Hessian of the likelihood. - * - * The convergence criterion for the Newton is a small change in - * the log marginal density. The user controls the tolerance (i.e. - * threshold under which change is deemed small enough) and - * maximum number of steps. - * - * Wrapper for when the global parameter is passed as a double. - * - * @tparam T0 type of the initial guess. - * @tparam T1 type of the global parameters. - * @tparam D structure type for the likelihood object. - * @tparam K structure type for the covariance object. - *@tparam Tx type for the spatial data passed to the covariance. - * @param[in] D structure to compute and differentiate the log likelihood. - * The object stores the sufficient stats for the observations. - * @param[in] K structure to compute the covariance function. - * @param[in] phi the global parameter (input for the covariance function). - * @param[in] x data for the covariance function. 
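For context, the constructor above obtains the sensitivities of phi by rebuilding the covariance matrix as a matrix of var inside a nested autodiff scope, running a reverse-mode pass on the pseudo-target, and caching the resulting adjoints for chain(). A stripped-down sketch of that pattern follows; my_covariance and my_pseudo_target are placeholder callables standing in for covariance_function and laplace_pseudo_target, and the whole function is illustrative rather than part of this patch.

#include <stan/math.hpp>

// Sketch: adjoints of phi from a nested reverse-mode pass over a scalar target.
template <typename CovFn, typename TargetFn>
Eigen::VectorXd phi_sensitivities(const CovFn& my_covariance,
                                  const TargetFn& my_pseudo_target,
                                  const Eigen::VectorXd& phi_dbl) {
  using stan::math::var;
  Eigen::VectorXd phi_adj(phi_dbl.size());
  stan::math::start_nested();
  try {
    Eigen::Matrix<var, Eigen::Dynamic, 1> phi_v = phi_dbl;
    var z = my_pseudo_target(my_covariance(phi_v));  // scalar target built from K(phi)
    stan::math::set_zero_all_adjoints_nested();
    stan::math::grad(z.vi_);
    for (int j = 0; j < phi_dbl.size(); ++j)
      phi_adj(j) = phi_v(j).adj();
  } catch (const std::exception& e) {
    stan::math::recover_memory_nested();
    throw;
  }
  stan::math::recover_memory_nested();
  return phi_adj;
}

Keeping the pass nested means the temporary tape built for K(phi) is freed right away, so only the small phi_adj_ vector has to survive until chain() runs.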
- * @param[in] delta addition real data for covariance function. - * @param[in] delta_int additional interger data for covariance function. - * @param[in] theta_0 the initial guess for the mode. - * @param[in] tolerance the convergence criterion for the Newton solver. - * @param[in] max_num_steps maximum number of steps for the Newton solver. - * @return the log maginal density, p(y | phi). - */ - template - T1 laplace_marginal_density - (const D& diff_likelihood, - const K& covariance_function, - const Eigen::Matrix& phi, - const Eigen::Matrix& eta, - const Tx& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int solver = 1, - int do_line_search = 0, - int max_steps_line_search = 10) { - Eigen::VectorXd theta, a, l_grad; - Eigen::SparseMatrix W_root; - Eigen::MatrixXd L, K_root; - double marginal_density_dbl; - Eigen::MatrixXd covariance; - Eigen::PartialPivLU LU; - - marginal_density_dbl - = laplace_marginal_density(diff_likelihood, - covariance_function, - value_of(phi), value_of(eta), - x, delta, delta_int, covariance, - theta, W_root, L, a, l_grad, LU, K_root, - value_of(theta_0), - msgs, - tolerance, max_num_steps, - hessian_block_size, - solver); - - // construct vari - laplace_marginal_density_vari* vi0 - = new laplace_marginal_density_vari(diff_likelihood, - covariance_function, - phi, eta, x, delta, delta_int, - marginal_density_dbl, - covariance, - theta, W_root, L, a, l_grad, LU, - K_root, - msgs, hessian_block_size, - solver); - - var marginal_density = var(vi0->marginal_density_[0]); - - return marginal_density; + for (int l = 0; l < eta_size_; l++) + eta_[l]->adj_ += marginal_density_[0]->adj_ * eta_adj_[l]; } +}; + +/** + * For a latent Gaussian model with global parameters phi, latent + * variables theta, and observations y, this function computes + * an approximation of the log marginal density, p(y | phi). + * This is done by marginalizing out theta, using a Laplace + * approxmation. The latter is obtained by finding the mode, + * using a custom Newton method, and the Hessian of the likelihood. + * + * The convergence criterion for the Newton is a small change in + * the log marginal density. The user controls the tolerance (i.e. + * threshold under which change is deemed small enough) and + * maximum number of steps. + * + * Wrapper for when the global parameter is passed as a double. + * + * @tparam T0 type of the initial guess. + * @tparam T1 type of the global parameters. + * @tparam D structure type for the likelihood object. + * @tparam K structure type for the covariance object. + *@tparam Tx type for the spatial data passed to the covariance. + * @param[in] D structure to compute and differentiate the log likelihood. + * The object stores the sufficient stats for the observations. + * @param[in] K structure to compute the covariance function. + * @param[in] phi the global parameter (input for the covariance function). + * @param[in] x data for the covariance function. + * @param[in] delta addition real data for covariance function. + * @param[in] delta_int additional interger data for covariance function. + * @param[in] theta_0 the initial guess for the mode. + * @param[in] tolerance the convergence criterion for the Newton solver. + * @param[in] max_num_steps maximum number of steps for the Newton solver. + * @return the log maginal density, p(y | phi). 
+ */ +template +T1 laplace_marginal_density( + const D& diff_likelihood, const K& covariance_function, + const Eigen::Matrix& phi, + const Eigen::Matrix& eta, const Tx& x, + const std::vector& delta, const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100, int hessian_block_size = 0, int solver = 1, + int do_line_search = 0, int max_steps_line_search = 10) { + Eigen::VectorXd theta, a, l_grad; + Eigen::SparseMatrix W_root; + Eigen::MatrixXd L, K_root; + double marginal_density_dbl; + Eigen::MatrixXd covariance; + Eigen::PartialPivLU LU; + + marginal_density_dbl = laplace_marginal_density( + diff_likelihood, covariance_function, value_of(phi), value_of(eta), x, + delta, delta_int, covariance, theta, W_root, L, a, l_grad, LU, K_root, + value_of(theta_0), msgs, tolerance, max_num_steps, hessian_block_size, + solver); + + // construct vari + laplace_marginal_density_vari* vi0 = new laplace_marginal_density_vari( + diff_likelihood, covariance_function, phi, eta, x, delta, delta_int, + marginal_density_dbl, covariance, theta, W_root, L, a, l_grad, LU, K_root, + msgs, hessian_block_size, solver); + + var marginal_density = var(vi0->marginal_density_[0]); + + return marginal_density; +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp b/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp index 01392cbc4cc..ee117b17ad6 100644 --- a/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp +++ b/stan/math/laplace/laplace_marginal_bernoulli_logit_lpmf.hpp @@ -6,76 +6,67 @@ namespace stan { namespace math { - // EXPERIMENT - // Use the squared exponential kernel, for the time defined - // in the laplace_likelihood folder. - // In the final version, the user will provide the covariance - // function. +// EXPERIMENT +// Use the squared exponential kernel, for the time defined +// in the laplace_likelihood folder. +// In the final version, the user will provide the covariance +// function. - /** - * Wrapper function around the laplace_marginal function for - * a logistic Bernoulli likelihood. Returns the marginal density - * p(y | phi) by marginalizing out the latent gaussian variable, - * with a Laplace approximation. See the laplace_marginal function - * for more details. - * - * @tparam T0 The type of the initial guess, theta_0. - * @tparam T1 The type for the global parameter, phi. - * @param[in] theta_0 the initial guess for the Laplace approximation. - * @param[in] phi model parameters for the covariance function. - * @param[in] x data for the covariance function. - * @param[in] n_samples number of samples per group. First sufficient - * statistics. - * @param[in] y total counts per group. Second sufficient statistics. - * @param[in] tolerance controls the convergence criterion when finding - * the mode in the Laplace approximation. - * @param[in] max_num_steps maximum number of steps before the Newton solver - * breaks and returns an error. - */ - template - T1 laplace_marginal_bernoulli_logit_lpmf - (const std::vector& y, - const std::vector& n_samples, - const K& covariance_function, - const Eigen::Matrix& phi, - const std::vector& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100) { - // TODO: change this to a VectorXd once we have operands & partials. 
- Eigen::Matrix eta_dummy(0); - return laplace_marginal_density( +/** + * Wrapper function around the laplace_marginal function for + * a logistic Bernoulli likelihood. Returns the marginal density + * p(y | phi) by marginalizing out the latent gaussian variable, + * with a Laplace approximation. See the laplace_marginal function + * for more details. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @param[in] theta_0 the initial guess for the Laplace approximation. + * @param[in] phi model parameters for the covariance function. + * @param[in] x data for the covariance function. + * @param[in] n_samples number of samples per group. First sufficient + * statistics. + * @param[in] y total counts per group. Second sufficient statistics. + * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. + */ +template +T1 laplace_marginal_bernoulli_logit_lpmf( + const std::vector& y, const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. + Eigen::Matrix eta_dummy(0); + return laplace_marginal_density( diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - covariance_function, - phi, eta_dummy, x, delta, delta_int, - theta_0, msgs, tolerance, max_num_steps); - } + covariance_function, phi, eta_dummy, x, delta, delta_int, theta_0, msgs, + tolerance, max_num_steps); +} - // Add signature that takes x as a matrix instead of a vector. - template - T1 laplace_marginal_bernoulli_logit_lpmf - (const std::vector& y, - const std::vector& n_samples, - const K& covariance_function, - const Eigen::Matrix& phi, - const Eigen::MatrixXd& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100) { +// Add signature that takes x as a matrix instead of a vector. +template +T1 laplace_marginal_bernoulli_logit_lpmf( + const std::vector& y, const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, const Eigen::MatrixXd& x, + const std::vector& delta, const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100) { // TODO: change this to a VectorXd once we have operands & partials. 
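As the documentation above notes, this wrapper consumes per-group sufficient statistics rather than individual outcomes: y holds the success count and n_samples the trial count for each element of theta. A hypothetical helper (not part of this patch) that collapses raw 0/1 observations into that form could look like:

#include <vector>
#include <cstddef>

// Collapse raw Bernoulli outcomes into per-group sufficient statistics (sketch).
void collapse_bernoulli(const std::vector<int>& group,    // 0-based group index per obs.
                        const std::vector<int>& outcome,  // 0/1 outcome per observation
                        int n_groups,
                        std::vector<int>& y,              // successes per group
                        std::vector<int>& n_samples) {    // trials per group
  y.assign(n_groups, 0);
  n_samples.assign(n_groups, 0);
  for (std::size_t i = 0; i < group.size(); ++i) {
    ++n_samples[group[i]];
    y[group[i]] += outcome[i];
  }
}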
Eigen::Matrix eta_dummy(0); return laplace_marginal_density( - diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - covariance_function, - phi, eta_dummy, x, delta, delta_int, - theta_0, msgs, tolerance, max_num_steps); - } + diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), + covariance_function, phi, eta_dummy, x, delta, delta_int, theta_0, msgs, + tolerance, max_num_steps); +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/laplace_marginal_lpdf.hpp b/stan/math/laplace/laplace_marginal_lpdf.hpp index d52df6a33c5..d5699cb15fa 100644 --- a/stan/math/laplace/laplace_marginal_lpdf.hpp +++ b/stan/math/laplace/laplace_marginal_lpdf.hpp @@ -6,138 +6,116 @@ namespace stan { namespace math { - /** - * Wrapper function around the laplace_marginal function. - * Returns the marginal density p(y | phi) by marginalizing out - * the latent gaussian variable, with a Laplace approximation. - * See the laplace_marginal function for more details. - * The data y is assumed to be real. - * The function is "overloaded" below for the int y and lpmf case. - * - * @tparam T0 The type of the initial guess, theta_0. - * @tparam T1 The type for the global parameter, phi. - * @tparam T2 The type of the auxiliary parameter, eta. - * @tparam K The function which returns the prior covariance matrix. - * @tparam F The function which returns the log likelihood. - * @param[in] y fixed real data to be passed to the log likelihood. - * @param[in] L_f a function which returns the log likelihood. - * @param[in] eta non-marginalized parameters for the log likelihood. - * @param[in] delta_int_f integer data to be passed to the log likelihood. - * @param[in] K_f a function which returns the prior - * covariance for the marginalized out latent Gaussian. - * @param[in] phi model parameters for the covariance function. - * @param[in] x data for the covariance function. - * @param[in] delta additional real data for the covariance matrix. - * @param[in] delta_int_k additional int data for the covariance matrix. - * @param[in] theta_0 initial guess for the Newton solver which returns - * the Laplace approximation. - * @param[in] msgs_f message stream for the log likelihood function. - * @param[in] msgs_k message stream for the covariance function. - * @param[in] tolerance controls the convergence criterion when finding - * the mode in the Laplace approximation. - * @param[in] max_num_steps maximum number of steps before the Newton solver - * breaks and returns an error. - * @param[in] hessian_block_size the size of the block for a block-diagonal - * Hessian of the log likelihood. If 0, the Hessian is stored - * inside a vector. If the Hessian is dense, this should be the - * size of the Hessian. - * @param[in] compute_W_root if 1, the Newton solver computes the root of W, - * the negative Hessian of the log likelihood, which leads to - * efficient computation. Else, a more general but slower solver - * is used. - */ - /* - template - stan::return_type_t laplace_marginal_lpdf - (const Eigen::VectorXd& y, - const L& L_f, - const Eigen::Matrix& eta, - const std::vector& delta_int_L, - const K& K_f, - const Eigen::Matrix& phi, - const Tx& x, - const std::vector& delta_K, - const std::vector& delta_int_K, - const Eigen::Matrix& theta_0, - std::ostream* msgs_L = nullptr, - std::ostream* msgs_K = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int compute_W_root = 1) { +/** + * Wrapper function around the laplace_marginal function. 
+ * Returns the marginal density p(y | phi) by marginalizing out + * the latent gaussian variable, with a Laplace approximation. + * See the laplace_marginal function for more details. + * The data y is assumed to be real. + * The function is "overloaded" below for the int y and lpmf case. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @tparam T2 The type of the auxiliary parameter, eta. + * @tparam K The function which returns the prior covariance matrix. + * @tparam F The function which returns the log likelihood. + * @param[in] y fixed real data to be passed to the log likelihood. + * @param[in] L_f a function which returns the log likelihood. + * @param[in] eta non-marginalized parameters for the log likelihood. + * @param[in] delta_int_f integer data to be passed to the log likelihood. + * @param[in] K_f a function which returns the prior + * covariance for the marginalized out latent Gaussian. + * @param[in] phi model parameters for the covariance function. + * @param[in] x data for the covariance function. + * @param[in] delta additional real data for the covariance matrix. + * @param[in] delta_int_k additional int data for the covariance matrix. + * @param[in] theta_0 initial guess for the Newton solver which returns + * the Laplace approximation. + * @param[in] msgs_f message stream for the log likelihood function. + * @param[in] msgs_k message stream for the covariance function. + * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. + * @param[in] hessian_block_size the size of the block for a block-diagonal + * Hessian of the log likelihood. If 0, the Hessian is stored + * inside a vector. If the Hessian is dense, this should be the + * size of the Hessian. + * @param[in] compute_W_root if 1, the Newton solver computes the root of W, + * the negative Hessian of the log likelihood, which leads to + * efficient computation. Else, a more general but slower solver + * is used. 
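The K_f argument documented above follows the same functor convention used throughout these files: a callable taking (phi, x, delta, delta_int, msgs) and returning the prior covariance matrix, templated so it can be instantiated with double or var hyperparameters. The functor below is only an illustration of that assumed shape; the squared-exponential kernel, the layout phi = (marginal standard deviation, length scale), and the jitter value are examples rather than anything defined by this patch.

#include <stan/math.hpp>
#include <vector>
#include <cmath>
#include <ostream>

// Example covariance functor with the call signature the wrappers expect (sketch).
struct sqr_exp_kernel_functor {
  template <typename T>
  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> operator()(
      const Eigen::Matrix<T, Eigen::Dynamic, 1>& phi,
      const std::vector<Eigen::VectorXd>& x,
      const std::vector<double>& /* delta */,
      const std::vector<int>& /* delta_int */, std::ostream* /* msgs */) const {
    using std::exp;  // double case; ADL finds stan::math::exp when T is var
    const int n = static_cast<int>(x.size());
    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> K(n, n);
    for (int i = 0; i < n; ++i)
      for (int j = 0; j < n; ++j) {
        double d2 = (x[i] - x[j]).squaredNorm();
        K(i, j) = phi(0) * phi(0) * exp(-d2 / (2.0 * phi(1) * phi(1)));
      }
    for (int i = 0; i < n; ++i)
      K(i, i) += 1e-8;  // small jitter keeps the Cholesky factorization stable
    return K;
  }
};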
+ */ +/* +template +stan::return_type_t laplace_marginal_lpdf + (const Eigen::VectorXd& y, + const L& L_f, + const Eigen::Matrix& eta, + const std::vector& delta_int_L, + const K& K_f, + const Eigen::Matrix& phi, + const Tx& x, + const std::vector& delta_K, + const std::vector& delta_int_K, + const Eigen::Matrix& theta_0, + std::ostream* msgs_L = nullptr, + std::ostream* msgs_K = nullptr, + double tolerance = 1e-6, + long int max_num_steps = 100, + int hessian_block_size = 0, + int compute_W_root = 1) { - return laplace_marginal_density( - diff_likelihood(L_f, y, delta_int_L, msgs_L), - K_f, phi, eta, x, delta_K, delta_int_K, - theta_0, msgs_K, tolerance, max_num_steps, - hessian_block_size, compute_W_root); - } */ + return laplace_marginal_density( + diff_likelihood(L_f, y, delta_int_L, msgs_L), + K_f, phi, eta, x, delta_K, delta_int_K, + theta_0, msgs_K, tolerance, max_num_steps, + hessian_block_size, compute_W_root); +} */ - template - stan::return_type_t laplace_marginal_lpdf - (const Eigen::VectorXd& y, - const L& L_f, - const Eigen::Matrix& eta, - const std::vector& delta_int_L, - const K& K_f, - const Eigen::Matrix& phi, - const Tx& x, - const std::vector& delta_K, - const std::vector& delta_int_K, - const Eigen::Matrix& theta_0, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int solver = 1, - int do_line_search = 1, - int max_steps_line_search = 10, - std::ostream* msgs = nullptr) { - // TEST: provisional signature to agree with parser. +template +stan::return_type_t laplace_marginal_lpdf( + const Eigen::VectorXd& y, const L& L_f, + const Eigen::Matrix& eta, + const std::vector& delta_int_L, const K& K_f, + const Eigen::Matrix& phi, const Tx& x, + const std::vector& delta_K, const std::vector& delta_int_K, + const Eigen::Matrix& theta_0, + double tolerance = 1e-6, long int max_num_steps = 100, + int hessian_block_size = 0, int solver = 1, int do_line_search = 1, + int max_steps_line_search = 10, std::ostream* msgs = nullptr) { + // TEST: provisional signature to agree with parser. - return laplace_marginal_density( - diff_likelihood(L_f, y, delta_int_L, msgs), - K_f, phi, eta, x, delta_K, delta_int_K, - theta_0, msgs, tolerance, max_num_steps, - hessian_block_size, solver, - do_line_search, max_steps_line_search); - } + return laplace_marginal_density( + diff_likelihood(L_f, y, delta_int_L, msgs), K_f, phi, eta, x, delta_K, + delta_int_K, theta_0, msgs, tolerance, max_num_steps, hessian_block_size, + solver, do_line_search, max_steps_line_search); +} - /** - * Overloaded function for lpmf case. The first argument - * is now a std::vector of interger and an Eigen::VectorXd - * of double is passed as data. - */ - template - stan::return_type_t laplace_marginal_lpmf - (const std::vector& y, - const L& L_f, - const Eigen::Matrix& eta, - const Eigen::VectorXd& delta_L, - const K& K_f, - const Eigen::Matrix& phi, - const Tx& x, - const std::vector& delta_K, - const std::vector& delta_int_K, - const Eigen::Matrix& theta_0, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int solver = 1, - int do_line_search = 1, - int max_steps_line_search = 10, - std::ostream* msgs = nullptr) { - - return laplace_marginal_lpdf(delta_L, L_f, eta, y, - K_f, phi, x, delta_K, delta_int_K, - theta_0, tolerance, - max_num_steps, - hessian_block_size, - solver, do_line_search, - max_steps_line_search, msgs); - } +/** + * Overloaded function for lpmf case. 
The first argument + * is now a std::vector of interger and an Eigen::VectorXd + * of double is passed as data. + */ +template +stan::return_type_t laplace_marginal_lpmf( + const std::vector& y, const L& L_f, + const Eigen::Matrix& eta, + const Eigen::VectorXd& delta_L, const K& K_f, + const Eigen::Matrix& phi, const Tx& x, + const std::vector& delta_K, const std::vector& delta_int_K, + const Eigen::Matrix& theta_0, + double tolerance = 1e-6, long int max_num_steps = 100, + int hessian_block_size = 0, int solver = 1, int do_line_search = 1, + int max_steps_line_search = 10, std::ostream* msgs = nullptr) { + return laplace_marginal_lpdf( + delta_L, L_f, eta, y, K_f, phi, x, delta_K, delta_int_K, theta_0, + tolerance, max_num_steps, hessian_block_size, solver, do_line_search, + max_steps_line_search, msgs); +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/laplace_marginal_neg_binomial_2.hpp b/stan/math/laplace/laplace_marginal_neg_binomial_2.hpp index f2e9c722b25..e2e36cd07d8 100644 --- a/stan/math/laplace/laplace_marginal_neg_binomial_2.hpp +++ b/stan/math/laplace/laplace_marginal_neg_binomial_2.hpp @@ -2,53 +2,51 @@ #define STAN_MATH_LAPLACE_LAPLACE_MARGINAL_NEG_BINOMIAL_2_HPP #include -#include +//#include +#include namespace stan { namespace math { - /** - * Wrapper function around the laplace_marginal function for - * a negative binomial likelihood. Uses the 2nd parameterization. - * Returns the marginal density p(y | phi) by marginalizing - * out the latent gaussian variable, with a Laplace approximation. - * See the laplace_marginal function for more details. - * - * @tparam T0 The type of the initial guess, theta_0. - * @tparam T1 The type for the global parameter, phi. - * @param[in] y observations. - * @param[in] y_index group to which each observation belongs. Each group - * is parameterized by one element of theta. - * @param[in] covariance a function which returns the prior covariance. - * @param[in] phi model parameters for the covariance functor. - * @param[in] eta non-marginalized model parameters for the likelihood. - * @param[in] x data for the covariance functor. - * @param[in] delta additional real data for the covariance functor. - * @param[in] delta_int additional integer data for covariance functor. - * @param[in] theta_0 the initial guess for the Laplace approximation. - * @param[in] tolerance controls the convergence criterion when finding - * the mode in the Laplace approximation. - * @param[in] max_num_steps maximum number of steps before the Newton solver - * breaks and returns an error. - */ - template - T1 laplace_marginal_neg_binomial_2_log_lpmf - (const std::vector& y, - const std::vector& y_index, - const K& covariance_function, - const Eigen::Matrix& phi, - const Eigen::Matrix& eta, - const std::vector& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100) { - return laplace_marginal_density( +/** + * Wrapper function around the laplace_marginal function for + * a negative binomial likelihood. Uses the 2nd parameterization. + * Returns the marginal density p(y | phi) by marginalizing + * out the latent gaussian variable, with a Laplace approximation. + * See the laplace_marginal function for more details. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @param[in] y observations. 
+ * @param[in] y_index group to which each observation belongs. Each group + * is parameterized by one element of theta. + * @param[in] covariance a function which returns the prior covariance. + * @param[in] phi model parameters for the covariance functor. + * @param[in] eta non-marginalized model parameters for the likelihood. + * @param[in] x data for the covariance functor. + * @param[in] delta additional real data for the covariance functor. + * @param[in] delta_int additional integer data for covariance functor. + * @param[in] theta_0 the initial guess for the Laplace approximation. + * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. + */ +template +T1 laplace_marginal_neg_binomial_2_log_lpmf( + const std::vector& y, const std::vector& y_index, + const K& covariance_function, + const Eigen::Matrix& phi, + const Eigen::Matrix& eta, + const std::vector& x, const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100) { + return laplace_marginal_density( diff_neg_binomial_2_log(to_vector(y), y_index, theta_0.size()), - covariance_function, phi, eta, x, delta, delta_int, - theta_0, msgs, tolerance, max_num_steps); - } + covariance_function, phi, eta, x, delta, delta_int, theta_0, msgs, + tolerance, max_num_steps); +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/laplace_marginal_poisson_log_lpmf.hpp b/stan/math/laplace/laplace_marginal_poisson_log_lpmf.hpp index 87a5162b2b2..2c05efd1f2a 100644 --- a/stan/math/laplace/laplace_marginal_poisson_log_lpmf.hpp +++ b/stan/math/laplace/laplace_marginal_poisson_log_lpmf.hpp @@ -6,71 +6,64 @@ namespace stan { namespace math { - /** - * Wrapper function around the laplace_marginal function for - * a log poisson likelihood. Returns the marginal density - * p(y | phi) by marginalizing out the latent gaussian variable, - * with a Laplace approximation. See the laplace_marginal function - * for more details. - * - * @tparam T0 The type of the initial guess, theta_0. - * @tparam T1 The type for the global parameter, phi. - * @param[in] y total counts per group. Second sufficient statistics. - * @param[in] n_samples number of samples per group. First sufficient - * statistics. - * @param[in] covariance a function which returns the prior covariance. - * @param[in] phi model parameters for the covariance functor. - * @param[in] x data for the covariance functor. - * @param[in] delta additional real data for the covariance functor. - * @param[in] delta_int additional integer data for covariance functor. - * @param[in] theta_0 the initial guess for the Laplace approximation. - * @param[in] tolerance controls the convergence criterion when finding - * the mode in the Laplace approximation. - * @param[in] max_num_steps maximum number of steps before the Newton solver - * breaks and returns an error. - */ - template - T1 laplace_marginal_poisson_log_lpmf - (const std::vector& y, - const std::vector& n_samples, - const K& covariance_function, - const Eigen::Matrix& phi, - const std::vector& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100) { - // TODO: change this to a VectorXd once we have operands & partials. 
- Eigen::Matrix eta_dummy(0); - return laplace_marginal_density( - diff_poisson_log(to_vector(n_samples), to_vector(y)), - covariance_function, phi, eta_dummy, x, delta, delta_int, - theta_0, msgs, tolerance, max_num_steps); - } +/** + * Wrapper function around the laplace_marginal function for + * a log poisson likelihood. Returns the marginal density + * p(y | phi) by marginalizing out the latent gaussian variable, + * with a Laplace approximation. See the laplace_marginal function + * for more details. + * + * @tparam T0 The type of the initial guess, theta_0. + * @tparam T1 The type for the global parameter, phi. + * @param[in] y total counts per group. Second sufficient statistics. + * @param[in] n_samples number of samples per group. First sufficient + * statistics. + * @param[in] covariance a function which returns the prior covariance. + * @param[in] phi model parameters for the covariance functor. + * @param[in] x data for the covariance functor. + * @param[in] delta additional real data for the covariance functor. + * @param[in] delta_int additional integer data for covariance functor. + * @param[in] theta_0 the initial guess for the Laplace approximation. + * @param[in] tolerance controls the convergence criterion when finding + * the mode in the Laplace approximation. + * @param[in] max_num_steps maximum number of steps before the Newton solver + * breaks and returns an error. + */ +template +T1 laplace_marginal_poisson_log_lpmf( + const std::vector& y, const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. + Eigen::Matrix eta_dummy(0); + return laplace_marginal_density( + diff_poisson_log(to_vector(n_samples), to_vector(y)), covariance_function, + phi, eta_dummy, x, delta, delta_int, theta_0, msgs, tolerance, + max_num_steps); +} - template - T1 laplace_marginal_poisson_log_lpmf - (const std::vector& y, - const std::vector& n_samples, - const Eigen::VectorXd& ye, - const K& covariance_function, - const Eigen::Matrix& phi, - const std::vector& x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100) { - // TODO: change this to a VectorXd once we have operands & partials. - Eigen::Matrix eta_dummy(0); - return laplace_marginal_density( +template +T1 laplace_marginal_poisson_log_lpmf( + const std::vector& y, const std::vector& n_samples, + const Eigen::VectorXd& ye, const K& covariance_function, + const Eigen::Matrix& phi, + const std::vector& x, const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100) { + // TODO: change this to a VectorXd once we have operands & partials. 
+ Eigen::Matrix eta_dummy(0); + return laplace_marginal_density( diff_poisson_log(to_vector(n_samples), to_vector(y), log(ye)), - covariance_function, phi, eta_dummy, x, delta, delta_int, - theta_0, msgs, tolerance, max_num_steps); - } + covariance_function, phi, eta_dummy, x, delta, delta_int, theta_0, msgs, + tolerance, max_num_steps); +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/laplace_pseudo_target.hpp b/stan/math/laplace/laplace_pseudo_target.hpp index 04a35b468b6..727563c1631 100644 --- a/stan/math/laplace/laplace_pseudo_target.hpp +++ b/stan/math/laplace/laplace_pseudo_target.hpp @@ -7,113 +7,102 @@ namespace stan { namespace math { - /** - * Function to compute the pseudo target, $\tilde Z$, - * with a custom derivative method. - * NOTE: we actually don't need to compute the pseudo-target, only its - * derivative. - */ - inline double laplace_pseudo_target ( - const Eigen::MatrixXd& K, - const Eigen::VectorXd& a, - const Eigen::MatrixXd& R, - const Eigen::VectorXd& l_grad, - const Eigen::VectorXd& s2) { - // double s1 = 0.5 * quad_form(K, a) - 0.5 * sum((R * K).diagonal()); - // Eigen::VectorXd b = K * l_grad; - // Eigen::VectorXd s3 = b - K * (R * b); - // return s1 + s2.dot(s3); - return 0; - } +/** + * Function to compute the pseudo target, $\tilde Z$, + * with a custom derivative method. + * NOTE: we actually don't need to compute the pseudo-target, only its + * derivative. + */ +inline double laplace_pseudo_target(const Eigen::MatrixXd& K, + const Eigen::VectorXd& a, + const Eigen::MatrixXd& R, + const Eigen::VectorXd& l_grad, + const Eigen::VectorXd& s2) { + // double s1 = 0.5 * quad_form(K, a) - 0.5 * sum((R * K).diagonal()); + // Eigen::VectorXd b = K * l_grad; + // Eigen::VectorXd s3 = b - K * (R * b); + // return s1 + s2.dot(s3); + return 0; +} - /** - * Vari class for the function. - */ - struct laplace_pseudo_target_vari : public vari { - /* number of elements in covariance matrix. */ - int K_size_; - /* covariance matrix. */ - vari** K_; - /* pseudo target. */ - vari** pseudo_target_; - /* An object to store the sensitivities of K. */ - Eigen::MatrixXd K_adj_; - /* Boolean: true is K is diagonal. */ - int diagonal_covariance_; +/** + * Vari class for the function. + */ +struct laplace_pseudo_target_vari : public vari { + /* number of elements in covariance matrix. */ + int K_size_; + /* covariance matrix. */ + vari** K_; + /* pseudo target. */ + vari** pseudo_target_; + /* An object to store the sensitivities of K. */ + Eigen::MatrixXd K_adj_; + /* Boolean: true is K is diagonal. 
*/ + int diagonal_covariance_; - template - laplace_pseudo_target_vari ( - const Eigen::VectorXd& a, - const Eigen::MatrixXd& R, + template + laplace_pseudo_target_vari( + const Eigen::VectorXd& a, const Eigen::MatrixXd& R, const Eigen::Matrix& K, - const Eigen::VectorXd& s2, - const Eigen::VectorXd& l, - double pseudo_target, + const Eigen::VectorXd& s2, const Eigen::VectorXd& l, double pseudo_target, int diagonal_covariance = 1) : vari(pseudo_target), K_size_(K.size()), - K_(ChainableStack::instance_->memalloc_.alloc_array( - K.size())), + K_(ChainableStack::instance_->memalloc_.alloc_array(K.size())), pseudo_target_( - ChainableStack::instance_->memalloc_.alloc_array(1)), + ChainableStack::instance_->memalloc_.alloc_array(1)), diagonal_covariance_(diagonal_covariance) { - int dim_theta = K.rows(); + int dim_theta = K.rows(); - for (int j = 0; j < dim_theta; j++) - for (int i = 0; i < dim_theta; i++) - K_[j * dim_theta + i] = K(i, j).vi_; + for (int j = 0; j < dim_theta; j++) + for (int i = 0; i < dim_theta; i++) + K_[j * dim_theta + i] = K(i, j).vi_; - pseudo_target_[0] = this; - pseudo_target_[0] = new vari(pseudo_target, false); + pseudo_target_[0] = this; + pseudo_target_[0] = new vari(pseudo_target, false); - if (diagonal_covariance_) { - Eigen::VectorXd K_diag = value_of(K).diagonal(); - K_adj_ = 0.5 * a.cwiseProduct(a) - 0.5 * R.diagonal() - + l.cwiseProduct(s2 + R * K_diag.cwiseProduct(s2)); - } else { - K_adj_ = 0.5 * a * a.transpose() - 0.5 * R - + s2 * l.transpose() - - (R * (value_of(K) * s2)) * l.transpose(); - } - } + if (diagonal_covariance_) { + Eigen::VectorXd K_diag = value_of(K).diagonal(); + K_adj_ = 0.5 * a.cwiseProduct(a) - 0.5 * R.diagonal() + + l.cwiseProduct(s2 + R * K_diag.cwiseProduct(s2)); + } else { + K_adj_ = 0.5 * a * a.transpose() - 0.5 * R + s2 * l.transpose() + - (R * (value_of(K) * s2)) * l.transpose(); + } + } - void chain() { - int dim_theta = K_adj_.rows(); - if (diagonal_covariance_) { - for (int j = 0; j < dim_theta; j++) { - K_[j * dim_theta + j]->adj_ += - pseudo_target_[0]->adj_ * K_adj_(j, 0); - } - } else { - for (int j = 0; j < dim_theta; j++) - for (int i = 0; i < dim_theta; i++) - K_[j * dim_theta + i]->adj_ += - pseudo_target_[0]->adj_ * K_adj_(i, j); - } - } - }; + void chain() { + int dim_theta = K_adj_.rows(); + if (diagonal_covariance_) { + for (int j = 0; j < dim_theta; j++) { + K_[j * dim_theta + j]->adj_ += pseudo_target_[0]->adj_ * K_adj_(j, 0); + } + } else { + for (int j = 0; j < dim_theta; j++) + for (int i = 0; i < dim_theta; i++) + K_[j * dim_theta + i]->adj_ += pseudo_target_[0]->adj_ * K_adj_(i, j); + } + } +}; - /** - * Overload function for case where K is passed as a matrix of var. - */ - template - inline T laplace_pseudo_target ( - const Eigen::Matrix& K, - const Eigen::VectorXd& a, - const Eigen::MatrixXd& R, - const Eigen::VectorXd& l_grad, - const Eigen::VectorXd& s2) { - double pseudo_target_dbl - = laplace_pseudo_target(value_of(K), a, R, l_grad, s2); +/** + * Overload function for case where K is passed as a matrix of var. 
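For reference, the dense (non-diagonal) branch of the constructor above stores in K_adj_ the partial derivative of the pseudo-target with respect to the covariance matrix. With a, R, and s2 as defined there and l the gradient of the log likelihood, the stored quantity is

  dZ/dK = 0.5 * a * a' - 0.5 * R + s2 * l' - (R * K * s2) * l'

which is a direct transcription of the expression assigned to K_adj_; chain() later scales each entry of it by the adjoint of the pseudo-target.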
+ */ +template +inline T laplace_pseudo_target( + const Eigen::Matrix& K, + const Eigen::VectorXd& a, const Eigen::MatrixXd& R, + const Eigen::VectorXd& l_grad, const Eigen::VectorXd& s2) { + double pseudo_target_dbl + = laplace_pseudo_target(value_of(K), a, R, l_grad, s2); - // construct vari - laplace_pseudo_target_vari* vi0 - = new laplace_pseudo_target_vari(a, R, K, s2, l_grad, - pseudo_target_dbl); + // construct vari + laplace_pseudo_target_vari* vi0 + = new laplace_pseudo_target_vari(a, R, K, s2, l_grad, pseudo_target_dbl); - var pseudo_target = var(vi0->pseudo_target_[0]); - return pseudo_target; - } + var pseudo_target = var(vi0->pseudo_target_[0]); + return pseudo_target; +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/partial_diff_theta.hpp b/stan/math/laplace/partial_diff_theta.hpp index d08a72a9db2..ee1dab0dc2c 100644 --- a/stan/math/laplace/partial_diff_theta.hpp +++ b/stan/math/laplace/partial_diff_theta.hpp @@ -2,92 +2,91 @@ #define STAN_MATH_LAPLACE_PARTIAL_DIFF_THETA_HPP // TODO: refine include. -#include #include +#include namespace stan { namespace math { - /** - * Returns the partial derivative of the approximate marginal - * distribution with respect to theta and eta. - * The derivative with respect to theta is denoted s2 in - * laplace_marginal.hpp. - */ - // TODO: rename function, since we also differentiate wrt eta. - // TODO: address case where eta / theta are doubles and we don't - // want full derivatives. - template - inline Eigen::VectorXd partial_diff_theta(const F& f, - const Eigen::VectorXd& theta, - const Eigen::VectorXd& eta, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - const Eigen::MatrixXd& A, - int hessian_block_size, - std::ostream* pstream = 0) { - using Eigen::VectorXd; - using Eigen::Matrix; - using Eigen::MatrixXd; - using Eigen::Dynamic; - - nested_rev_autodiff nested; - int theta_size = theta.size(); - int eta_size = eta.size(); - int parm_size = theta_size + eta_size; - // Matrix parm_var(parm_size); - // for (int i = 0; i < theta_size; i++) parm_var(i) = theta(i); - // for (int i = 0; i < eta_size; i++) parm_var(i + theta_size) = eta(i); - Matrix theta_var = theta; - Matrix eta_var = eta; - int n_blocks = theta_size / hessian_block_size; - - fvar> target_ffvar = 0; - - for (int i = 0; i < hessian_block_size; ++i) { - VectorXd v = VectorXd::Zero(theta_size); - for (int j = i; j < theta_size; j += hessian_block_size) v(j) = 1; - - Matrix, Dynamic, 1> theta_fvar(theta_size); - for (int j = 0; j < theta_size; ++j) - theta_fvar(j) = fvar(theta_var(j), v(j)); - - Matrix, Dynamic, 1> eta_fvar(eta_size); - for (int j = 0; j < eta_size; ++j) eta_fvar(j) = fvar(eta_var(j), 0); - - fvar f_fvar = f(theta_fvar, eta_fvar, delta, delta_int, pstream); - - VectorXd w(theta_size); - for (int j = 0; j < n_blocks; ++j) { - for (int k = 0; k < hessian_block_size; ++k) { - w(k + j * hessian_block_size) = A(k + j * hessian_block_size, - i + j * hessian_block_size); - } +/** + * Returns the partial derivative of the approximate marginal + * distribution with respect to theta and eta. + * The derivative with respect to theta is denoted s2 in + * laplace_marginal.hpp. + */ +// TODO: rename function, since we also differentiate wrt eta. +// TODO: address case where eta / theta are doubles and we don't +// want full derivatives. 
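partial_diff_theta below extracts these derivatives by nesting forward mode over reverse mode: the inner tangent of an fvar<fvar<var>> input carries a basis direction v, the outer tangent a weight vector w read from A, so the doubly-forward derivative of the target equals v' * H * w as a var, and one reverse pass then differentiates that scalar with respect to theta and eta. A stripped-down sketch of the same trick for a generic single-argument functor f (a simplification of the signature used here, not part of this patch) is:

#include <stan/math/mix.hpp>

// Sketch: d/dx of (v' * Hessian(f)(x) * w) via fvar<fvar<var>> over reverse mode.
template <typename F>
Eigen::VectorXd weighted_third_order(const F& f, const Eigen::VectorXd& x,
                                     const Eigen::VectorXd& v,
                                     const Eigen::VectorXd& w) {
  using stan::math::fvar;
  using stan::math::var;
  stan::math::nested_rev_autodiff nested;  // local tape, freed on scope exit
  const int n = static_cast<int>(x.size());
  Eigen::Matrix<var, Eigen::Dynamic, 1> x_var(n);
  Eigen::Matrix<fvar<fvar<var>>, Eigen::Dynamic, 1> x_ffvar(n);
  for (int i = 0; i < n; ++i) {
    x_var(i) = x(i);
    // inner tangent = v (direction), outer tangent = w (weight)
    x_ffvar(i) = fvar<fvar<var>>(fvar<var>(x_var(i), v(i)), fvar<var>(w(i), 0.0));
  }
  fvar<fvar<var>> fx = f(x_ffvar);
  stan::math::grad(fx.d_.d_.vi_);  // reverse pass on the scalar v' H w
  Eigen::VectorXd adj(n);
  for (int i = 0; i < n; ++i)
    adj(i) = x_var(i).adj();
  return adj;
}

In the actual function this is repeated once per offset inside a Hessian block, with w taken from the corresponding entries of A, and eta is seeded alongside theta so its adjoints come out of the same reverse pass.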
+template +inline Eigen::VectorXd partial_diff_theta( + const F& f, const Eigen::VectorXd& theta, const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, const std::vector& delta_int, + const Eigen::MatrixXd& A, int hessian_block_size, + std::ostream* pstream = 0) { + using Eigen::Dynamic; + using Eigen::Matrix; + using Eigen::MatrixXd; + using Eigen::VectorXd; + + nested_rev_autodiff nested; + int theta_size = theta.size(); + int eta_size = eta.size(); + int parm_size = theta_size + eta_size; + // Matrix parm_var(parm_size); + // for (int i = 0; i < theta_size; i++) parm_var(i) = theta(i); + // for (int i = 0; i < eta_size; i++) parm_var(i + theta_size) = eta(i); + Matrix theta_var = theta; + Matrix eta_var = eta; + int n_blocks = theta_size / hessian_block_size; + + fvar> target_ffvar = 0; + + for (int i = 0; i < hessian_block_size; ++i) { + VectorXd v = VectorXd::Zero(theta_size); + for (int j = i; j < theta_size; j += hessian_block_size) + v(j) = 1; + + Matrix, Dynamic, 1> theta_fvar(theta_size); + for (int j = 0; j < theta_size; ++j) + theta_fvar(j) = fvar(theta_var(j), v(j)); + + Matrix, Dynamic, 1> eta_fvar(eta_size); + for (int j = 0; j < eta_size; ++j) + eta_fvar(j) = fvar(eta_var(j), 0); + + fvar f_fvar = f(theta_fvar, eta_fvar, delta, delta_int, pstream); + + VectorXd w(theta_size); + for (int j = 0; j < n_blocks; ++j) { + for (int k = 0; k < hessian_block_size; ++k) { + w(k + j * hessian_block_size) + = A(k + j * hessian_block_size, i + j * hessian_block_size); } + } - Matrix>, Dynamic, 1> theta_ffvar(theta_size); - for (int j = 0; j < theta_size; ++j) - theta_ffvar(j) = fvar>(theta_fvar(j), w(j)); + Matrix>, Dynamic, 1> theta_ffvar(theta_size); + for (int j = 0; j < theta_size; ++j) + theta_ffvar(j) = fvar>(theta_fvar(j), w(j)); - Matrix>, Dynamic, 1> eta_ffvar(eta_size); - for (int j = 0; j < eta_size; ++j) - eta_ffvar(j) = fvar>(eta_fvar(j), 0); + Matrix>, Dynamic, 1> eta_ffvar(eta_size); + for (int j = 0; j < eta_size; ++j) + eta_ffvar(j) = fvar>(eta_fvar(j), 0); - target_ffvar += - f(theta_ffvar, eta_ffvar, delta, delta_int, pstream); - } - grad(target_ffvar.d_.d_.vi_); + target_ffvar += f(theta_ffvar, eta_ffvar, delta, delta_int, pstream); + } + grad(target_ffvar.d_.d_.vi_); - VectorXd parm_adj(parm_size); - for (int i = 0; i < theta_size; ++i) parm_adj(i) = theta_var(i).adj(); - for (int i = 0; i < eta_size; ++i) - parm_adj(theta_size + i) = eta_var(i).adj(); + VectorXd parm_adj(parm_size); + for (int i = 0; i < theta_size; ++i) + parm_adj(i) = theta_var(i).adj(); + for (int i = 0; i < eta_size; ++i) + parm_adj(theta_size + i) = eta_var(i).adj(); - return 0.5 * parm_adj; + return 0.5 * parm_adj; - // VectorXd theta_adj(theta_size); - // for (int i = 0; i < theta_size; ++i) theta_adj(i) = theta_var(i).adj(); - // return 0.5 * theta_adj; - } + // VectorXd theta_adj(theta_size); + // for (int i = 0; i < theta_size; ++i) theta_adj(i) = theta_var(i).adj(); + // return 0.5 * theta_adj; +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/prob/laplace_base_rng.hpp b/stan/math/laplace/prob/laplace_base_rng.hpp index 0fa0dea420f..8847e56f7b0 100644 --- a/stan/math/laplace/prob/laplace_base_rng.hpp +++ b/stan/math/laplace/prob/laplace_base_rng.hpp @@ -23,28 +23,21 @@ namespace math { * are drawn for covariates x_pred. * To sample the "original" theta's, set x_pred = x. 
*/ -template +template inline Eigen::VectorXd // CHECK -- right return type -laplace_base_rng - (const D& diff_likelihood, - const K& covariance_function, - const Eigen::Matrix& phi, - const Eigen::Matrix& eta, - const T_x& x, - const T_x_pred& x_pred, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - RNG& rng, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int compute_W_root = 1) { - using Eigen::VectorXd; +laplace_base_rng(const D& diff_likelihood, const K& covariance_function, + const Eigen::Matrix& phi, + const Eigen::Matrix& eta, + const T_x& x, const T_x_pred& x_pred, + const std::vector& delta, + const std::vector& delta_int, + const Eigen::Matrix& theta_0, + RNG& rng, std::ostream* msgs = nullptr, + double tolerance = 1e-6, long int max_num_steps = 100, + int hessian_block_size = 0, int compute_W_root = 1) { using Eigen::MatrixXd; + using Eigen::VectorXd; VectorXd phi_dbl = value_of(phi); VectorXd eta_dbl = value_of(eta); @@ -56,31 +49,28 @@ laplace_base_rng { VectorXd theta; VectorXd a; - double marginal_density - = laplace_marginal_density(diff_likelihood, covariance_function, - phi_dbl, eta_dbl, - x, delta, delta_int, - covariance, theta, W_r, L, a, l_grad, - LU, K_root, value_of(theta_0), msgs, - tolerance, max_num_steps, - hessian_block_size, compute_W_root); + double marginal_density = laplace_marginal_density( + diff_likelihood, covariance_function, phi_dbl, eta_dbl, x, delta, + delta_int, covariance, theta, W_r, L, a, l_grad, LU, K_root, + value_of(theta_0), msgs, tolerance, max_num_steps, hessian_block_size, + compute_W_root); } // Modified R&W method - MatrixXd covariance_pred = covariance_function(phi_dbl, x_pred, - delta, delta_int, msgs); + MatrixXd covariance_pred + = covariance_function(phi_dbl, x_pred, delta, delta_int, msgs); VectorXd pred_mean = covariance_pred * l_grad.head(theta_0.rows()); Eigen::MatrixXd Sigma; if (compute_W_root) { - Eigen::MatrixXd V_dec = mdivide_left_tri(L, - W_r * covariance_pred); + Eigen::MatrixXd V_dec + = mdivide_left_tri(L, W_r * covariance_pred); Sigma = covariance_pred - V_dec.transpose() * V_dec; } else { Sigma = covariance_pred - - covariance_pred * (W_r - W_r * LU.solve(covariance * W_r)) - * covariance_pred; + - covariance_pred * (W_r - W_r * LU.solve(covariance * W_r)) + * covariance_pred; } return multi_normal_rng(pred_mean, Sigma, rng); diff --git a/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp index 6abc4d2eb98..04d6f153e3c 100644 --- a/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp +++ b/stan/math/laplace/prob/laplace_bernoulli_logit_rng.hpp @@ -16,29 +16,22 @@ namespace math { * from the gaussian approximation of p(theta | y, phi), * where the likelihood is a Bernoulli with logit link. 
*/ -template +template inline Eigen::VectorXd // CHECK -- right return type - laplace_bernoulli_logit_rng - (const std::vector& y, - const std::vector& n_samples, - const K& covariance_function, - const Eigen::Matrix& phi, - const T_x x, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - RNG& rng, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100) { - Eigen::VectorXd eta_dummy; - return - laplace_base_rng(diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - covariance_function, phi, eta_dummy, - x, x, delta, delta_int, theta_0, - rng, msgs, tolerance, max_num_steps); - } +laplace_bernoulli_logit_rng( + const std::vector& y, const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, const T_x x, + const std::vector& delta, const std::vector& delta_int, + const Eigen::Matrix& theta_0, RNG& rng, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100) { + Eigen::VectorXd eta_dummy; + return laplace_base_rng( + diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), + covariance_function, phi, eta_dummy, x, x, delta, delta_int, theta_0, rng, + msgs, tolerance, max_num_steps); +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/prob/laplace_poisson_log_rng.hpp b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp index ae1a64d630f..ef0a48e1970 100644 --- a/stan/math/laplace/prob/laplace_poisson_log_rng.hpp +++ b/stan/math/laplace/prob/laplace_poisson_log_rng.hpp @@ -18,31 +18,22 @@ namespace math { */ template -inline Eigen::VectorXd - laplace_poisson_log_rng - (const std::vector& y, - const std::vector& n_samples, - const K& covariance_function, - const Eigen::Matrix& phi, - const T2& x, - // const T3& x_pred, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - RNG& rng, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int compute_W_root = 1) { - Eigen::VectorXd eta_dummy; - return - laplace_base_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), - covariance_function, phi, eta_dummy, - x, x, delta, delta_int, theta_0, - rng, msgs, tolerance, max_num_steps, - hessian_block_size, compute_W_root); - } +inline Eigen::VectorXd laplace_poisson_log_rng( + const std::vector& y, const std::vector& n_samples, + const K& covariance_function, + const Eigen::Matrix& phi, const T2& x, + // const T3& x_pred, + const std::vector& delta, const std::vector& delta_int, + const Eigen::Matrix& theta_0, RNG& rng, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100, int hessian_block_size = 0, + int compute_W_root = 1) { + Eigen::VectorXd eta_dummy; + return laplace_base_rng(diff_poisson_log(to_vector(n_samples), to_vector(y)), + covariance_function, phi, eta_dummy, x, x, delta, + delta_int, theta_0, rng, msgs, tolerance, + max_num_steps, hessian_block_size, compute_W_root); +} /** * Overload for case where user passes exposure. 
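In the overload below, the exposure enters the likelihood as a multiplicative offset on the Poisson rate, y_i ~ Poisson(E_i * exp(theta_i)), which is why log(exposure) is forwarded to diff_poisson_log. A minimal call sketch, borrowing the variable names from the disease-map test fixture (y, n_samples, ye, phi, x, delta, delta_int, theta_0) rather than anything defined in this header:

    boost::random::mt19937 rng;
    Eigen::VectorXd theta_pred = stan::math::laplace_poisson_log_rng(
        y, n_samples, ye, stan::math::test::sqr_exp_kernel_functor(),
        phi, x, delta, delta_int, theta_0, rng);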
@@ -50,32 +41,22 @@ inline Eigen::VectorXd template inline Eigen::VectorXd // CHECK -- right return type - laplace_poisson_log_rng - (const std::vector& y, - const std::vector& n_samples, - const Eigen::VectorXd& exposure, - const K& covariance_function, - const Eigen::Matrix& phi, - const T2& x, - // const T3& x_pred, - const std::vector& delta, - const std::vector& delta_int, - const Eigen::Matrix& theta_0, - RNG& rng, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int compute_W_root = 1) { - Eigen::VectorXd eta_dummy; - return - laplace_base_rng(diff_poisson_log(to_vector(n_samples), to_vector(y), - log(exposure)), - covariance_function, phi, eta_dummy, x, x, delta, - delta_int, theta_0, - rng, msgs, tolerance, max_num_steps, - hessian_block_size, compute_W_root); - } +laplace_poisson_log_rng( + const std::vector& y, const std::vector& n_samples, + const Eigen::VectorXd& exposure, const K& covariance_function, + const Eigen::Matrix& phi, const T2& x, + // const T3& x_pred, + const std::vector& delta, const std::vector& delta_int, + const Eigen::Matrix& theta_0, RNG& rng, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100, int hessian_block_size = 0, + int compute_W_root = 1) { + Eigen::VectorXd eta_dummy; + return laplace_base_rng( + diff_poisson_log(to_vector(n_samples), to_vector(y), log(exposure)), + covariance_function, phi, eta_dummy, x, x, delta, delta_int, theta_0, rng, + msgs, tolerance, max_num_steps, hessian_block_size, compute_W_root); +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/prob/laplace_rng.hpp b/stan/math/laplace/prob/laplace_rng.hpp index 9aa28b4d593..9150042d7a2 100644 --- a/stan/math/laplace/prob/laplace_rng.hpp +++ b/stan/math/laplace/prob/laplace_rng.hpp @@ -18,32 +18,21 @@ namespace math { */ template -inline Eigen::VectorXd - laplace_rng - (const L& L_f, - const Eigen::Matrix& eta, - const Eigen::VectorXd& delta_L, - const std::vector& delta_int_L, - const K& K_f, - const Eigen::Matrix& phi, - const T_x& x, - const std::vector& delta_K, - const std::vector& delta_int_K, - const Eigen::Matrix& theta_0, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, - int compute_W_root = 1, - RNG& rng = boost::random::mt19937(), - std::ostream* msgs = nullptr) { - return - laplace_base_rng( - diff_likelihood(L_f, delta_L, delta_int_L, msgs), - K_f, phi, eta, - x, x, delta_K, delta_int_K, theta_0, - rng, msgs, tolerance, max_num_steps, - hessian_block_size, compute_W_root); - } +inline Eigen::VectorXd laplace_rng( + const L& L_f, const Eigen::Matrix& eta, + const Eigen::VectorXd& delta_L, const std::vector& delta_int_L, + const K& K_f, const Eigen::Matrix& phi, + const T_x& x, const std::vector& delta_K, + const std::vector& delta_int_K, + const Eigen::Matrix& theta_0, + double tolerance = 1e-6, long int max_num_steps = 100, + int hessian_block_size = 0, int compute_W_root = 1, + RNG& rng = boost::random::mt19937(), std::ostream* msgs = nullptr) { + return laplace_base_rng(diff_likelihood(L_f, delta_L, delta_int_L, msgs), + K_f, phi, eta, x, x, delta_K, delta_int_K, theta_0, + rng, msgs, tolerance, max_num_steps, + hessian_block_size, compute_W_root); +} } // namespace math } // namespace stan diff --git a/stan/math/laplace/third_diff_directional.hpp b/stan/math/laplace/third_diff_directional.hpp index 8c904a8052a..7dd0dea470e 100644 --- a/stan/math/laplace/third_diff_directional.hpp +++ 
b/stan/math/laplace/third_diff_directional.hpp @@ -7,47 +7,44 @@ namespace stan { namespace math { - /** - * Return the third-order directional derivative of a function - * which maps to a scalar. The derivative is taken with respect - * to do two directions: v and w. - */ - template - inline void third_diff_directional( - const F& f, const Eigen::VectorXd& x, - const Eigen::VectorXd& eta, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - double& fx, - Eigen::VectorXd& third_diff, - Eigen::VectorXd& v, - Eigen::VectorXd& w, - std::ostream* pstream = 0) { - using Eigen::Matrix; - using Eigen::Dynamic; - nested_rev_autodiff nested; - - int x_size = x.size(); - Matrix x_var = x; - Matrix, Dynamic, 1> x_fvar(x_size); - for (int i = 0; i < x_size; ++i) { - x_fvar(i) = fvar(x_var(i), v(i)); - } - fvar fx_fvar = f(x_fvar, eta, delta, delta_int, pstream); - - Matrix>, Dynamic, 1> x_ffvar(x_size); - for (int i = 0; i < x_size; ++i) { - x_ffvar(i) = fvar>(x_fvar(i), w(i)); - } - fvar> fx_ffvar = f(x_ffvar, eta, delta, delta_int, pstream); - - grad(fx_ffvar.d_.d_.vi_); - - third_diff.resize(x_size); - for (int i = 0; i < x_size; ++i) { - third_diff(i) = x_var(i).adj(); - } +/** + * Return the third-order directional derivative of a function + * which maps to a scalar. The derivative is taken with respect + * to do two directions: v and w. + */ +template +inline void third_diff_directional(const F& f, const Eigen::VectorXd& x, + const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, + const std::vector& delta_int, + double& fx, Eigen::VectorXd& third_diff, + Eigen::VectorXd& v, Eigen::VectorXd& w, + std::ostream* pstream = 0) { + using Eigen::Dynamic; + using Eigen::Matrix; + nested_rev_autodiff nested; + + int x_size = x.size(); + Matrix x_var = x; + Matrix, Dynamic, 1> x_fvar(x_size); + for (int i = 0; i < x_size; ++i) { + x_fvar(i) = fvar(x_var(i), v(i)); } + fvar fx_fvar = f(x_fvar, eta, delta, delta_int, pstream); + + Matrix>, Dynamic, 1> x_ffvar(x_size); + for (int i = 0; i < x_size; ++i) { + x_ffvar(i) = fvar>(x_fvar(i), w(i)); + } + fvar> fx_ffvar = f(x_ffvar, eta, delta, delta_int, pstream); + + grad(fx_ffvar.d_.d_.vi_); + + third_diff.resize(x_size); + for (int i = 0; i < x_size; ++i) { + third_diff(i) = x_var(i).adj(); + } +} } // namespace math } // namespace stan diff --git a/stan/math/mix.hpp b/stan/math/mix.hpp index 38b6a5e9c0e..4037ef8c752 100644 --- a/stan/math/mix.hpp +++ b/stan/math/mix.hpp @@ -9,16 +9,16 @@ #include #endif -#include -#include -#include -#include - #include #include #include #include +#include +#include +#include +#include + #include #endif diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp index 2c31447b9d9..1c183efb38a 100755 --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -1,9 +1,5 @@ #include #include -#include -#include -#include -#include #include #include @@ -50,9 +46,9 @@ class laplace_disease_map_test : public::testing::Test { x2.resize(dim_theta); y.resize(n_observations); ye.resize(n_observations); - read_in_data(dim_theta, n_observations, data_directory, x1, x2, y, ye); + stan::math::test::read_in_data(dim_theta, n_observations, data_directory, x1, x2, y, ye); - if (FALSE) { + if (false) { // look at some of the data std::cout << "x_1: " << x1[0] << " " << x2[0] << std::endl << "x_2: " << x1[1] << " " << x2[1] << std::endl @@ -116,14 +112,14 @@ TEST_F(laplace_disease_map_test, lk_analytical) { var marginal_density = 
laplace_marginal_poisson_log_lpmf(y, n_samples, ye, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; - VEC g; - AVEC parm_vec = createAVEC(phi(0), phi(1)); + std::vector g; + std::vector parm_vec{phi(0), phi(1)}; marginal_density.grad(parm_vec, g); std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl @@ -154,7 +150,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { start = std::chrono::system_clock::now(); Eigen::VectorXd theta_pred = laplace_base_rng(diff_likelihood, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, eta_dummy, x, x, delta, delta_int, theta_0, rng); @@ -170,7 +166,7 @@ TEST_F(laplace_disease_map_test, lk_analytical) { start = std::chrono::system_clock::now(); theta_pred = laplace_poisson_log_rng(y, n_samples, ye, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0, rng); end = std::chrono::system_clock::now(); @@ -193,7 +189,7 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { int solver = 1; // options: 1, 2, or 3. double marginal_density_dbl = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), value_of(phi), value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size, solver); @@ -212,15 +208,15 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { solver = 1; var marginal_density = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), phi, eta_dummy, + stan::math::test::sqr_exp_kernel_functor(), phi, eta_dummy, x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size, solver); end = std::chrono::system_clock::now(); elapsed_time = end - start; - VEC g; - AVEC parm_vec = createAVEC(phi(0), phi(1)); + std::vectorg; + std::vector parm_vec{phi(0), phi(1)}; marginal_density.grad(parm_vec, g); std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl @@ -255,31 +251,31 @@ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { phi_l1(1) -= eps; double target = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi_dbl, value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size); double target_u0 = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi_u0, value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size), target_u1 = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi_u1, value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size), target_l0 = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi_l0, value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size), target_l1 = laplace_marginal_density(diff_functor, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi_l1, value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size); @@ -305,7 +301,7 @@ TEST_F(laplace_disease_map_test, rng_autodiff) { auto start = std::chrono::system_clock::now(); Eigen::VectorXd theta_pred = laplace_base_rng(diff_functor, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, eta_dummy, x, x, delta, delta_int, theta_0, 
rng, 0, 1e-6, 100, hessian_block_size, @@ -327,11 +323,11 @@ TEST_F(laplace_disease_map_test, lpmf_wrapper) { var marginal_density = laplace_marginal_lpmf(n_samples, poisson_log_likelihood(), eta_dummy, delta_lk, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0); - VEC g; - AVEC parm_vec = createAVEC(phi(0), phi(1)); + std::vectorg; + std::vector parm_vec{phi(0), phi(1)}; marginal_density.grad(parm_vec, g); std::cout << "LAPLACE MARGINAL LPMF AND VARI CLASS" << std::endl @@ -352,7 +348,7 @@ TEST_F(laplace_disease_map_test, rng_wrapper) { Eigen::VectorXd theta_pred = laplace_rng(poisson_log_likelihood(), eta_dummy, delta_lk, n_samples, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0, 1e-6, 100, hessian_block_size, solver, rng); diff --git a/test/unit/math/laplace/higher_order_diff_test.cpp b/test/unit/math/laplace/higher_order_diff_test.cpp index 5068ad83fbf..ece470b7055 100755 --- a/test/unit/math/laplace/higher_order_diff_test.cpp +++ b/test/unit/math/laplace/higher_order_diff_test.cpp @@ -1,5 +1,6 @@ #include -#include +#include +//#include #include #include #include @@ -108,7 +109,7 @@ TEST_F(neg_bin_log_diff_test, manual_calls) { std::cout << "hessian-vector: " << Hv.transpose() << std::endl; // Compute third-order derivative - if (TRUE) { + if (true) { using Eigen::Matrix; nested_rev_autodiff nested; @@ -161,7 +162,7 @@ TEST_F(neg_bin_log_diff_test, manual_calls) { std::cout << "f: " << fx << std::endl; std::cout << "third diff: " << third_diff.transpose() << std::endl; } - +/* TEST_F(neg_bin_log_diff_test, diff_likelihood) { using stan::math::diff_likelihood; using Eigen::VectorXd; @@ -177,7 +178,7 @@ TEST_F(neg_bin_log_diff_test, diff_likelihood) { << "hessian: " << hessian.transpose() << std::endl << "third diff: " << third_diff.transpose() << std::endl; } - +*/ TEST_F(neg_bin_log_diff_test, diff_block_diagonal) { using stan::math::hessian_block_diag; diff --git a/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp b/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp index 1f0692c0f32..d5fc5835852 100644 --- a/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp +++ b/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp @@ -106,6 +106,8 @@ TEST(laplace, basic_rng) { // Eigen::VectorXd W_root; Eigen::SparseMatrix W_r; Eigen::MatrixXd L; + Eigen::MatrixXd K_root; + Eigen::VectorXd theta0_val = value_of(theta_0); { Eigen::VectorXd a; Eigen::VectorXd l_grad; @@ -116,7 +118,8 @@ TEST(laplace, basic_rng) { sigma, eta_dummy, x_dummy, d0, di0, covariance, theta, W_r, L, a, l_grad, LU_dummy, - value_of(theta_0), 0, + K_root, + theta0_val, 0, tolerance, max_num_steps); } diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp index 7cf50d646dc..d968216d8bc 100755 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -88,7 +89,7 @@ TEST(laplace, logistic_lgm_dim500) { std::string data_directory = "test/unit/math/laplace/aki_synth_data/"; std::vector x1(dim_theta), x2(dim_theta); std::vector y(n_observations); - read_in_data(dim_theta, n_observations, data_directory, x1, x2, y); + stan::math::test::read_in_data(dim_theta, n_observations, data_directory, x1, x2, y); // Look a some of the data. 
// std::cout << "x_1: " << x1[0] << " " << x2[0] << std::endl @@ -117,16 +118,17 @@ TEST(laplace, logistic_lgm_dim500) { phi << 1.6, 1; // standard deviation, length scale Eigen::VectorXd eta_dummy; Eigen::PartialPivLU LU_dummy; - + Eigen::MatrixXd K_dummy; auto start_optimization = std::chrono::system_clock::now(); double marginal_density = laplace_marginal_density( diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, eta_dummy, x, delta, delta_int, covariance, theta_laplace, W_root, L, a, l_grad, LU_dummy, + K_dummy, theta_0, 0, 1e-3, 100); auto end_optimization = std::chrono::system_clock::now(); @@ -150,12 +152,12 @@ TEST(laplace, logistic_lgm_dim500) { var marginal_density_v = laplace_marginal_density( diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi_v2, eta_dummy_v, x, delta, delta_int, theta_0, 0, 1e-3, 100); - VEC g2; - AVEC parm_vec2 = createAVEC(phi_v2(0), phi_v2(1)); + std::vectorg2; + std::vector parm_vec2{phi_v2(0), phi_v2(1)}; marginal_density_v.grad(parm_vec2, g2); end_optimization = std::chrono::system_clock::now(); @@ -182,7 +184,7 @@ TEST(laplace, logistic_lgm_dim500) { double marginal_density_v2 = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, - sqr_exp_kernel_functor(), + stan::math::test::sqr_exp_kernel_functor(), phi, x, delta, delta_int, theta_0, 0, 1e-3, 100); diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index 616ec7bedf8..cae10b21548 100755 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -1,9 +1,8 @@ #include -#include -#include -#include +//#include +#include #include - +#include #include #include #include @@ -143,11 +142,11 @@ Eigen::MatrixXd compute_B(const Eigen::VectorXd& theta, return Eigen::MatrixXd::Identity(group_size, group_size) + stan::math::quad_form_diag(covariance, W_root); } - +/* TEST(laplace, neg_binomial_2_log_dbl) { using stan::math::to_vector; using stan::math::diff_neg_binomial_2_log; - using stan::math::sqr_exp_kernel_functor; + using stan::math::test::sqr_exp_kernel_functor; using stan::math::laplace_marginal_density; using stan::math::laplace_marginal_neg_binomial_2_log_lpmf; using stan::math::var; @@ -173,7 +172,7 @@ TEST(laplace, neg_binomial_2_log_dbl) { Eigen::VectorXd y = to_vector(y_obs); diff_neg_binomial_2_log diff_functor(y, y_index, dim_theta); - stan::math::sqr_exp_kernel_functor K; + stan::math::test::sqr_exp_kernel_functor K; double log_p = laplace_marginal_density(diff_functor, K, phi, eta, x, delta, delta_int, theta_0); @@ -184,8 +183,8 @@ TEST(laplace, neg_binomial_2_log_dbl) { = laplace_marginal_density(diff_functor, K, phi_v, eta_v, x, delta, delta_int, theta_0); - VEC g; - AVEC parm_vec = createAVEC(phi_v(0), phi_v(1), eta_v(0)); + std::vector g; + std::vector parm_vec{phi_v(0), phi_v(1), eta_v(0)}; target.grad(parm_vec, g); // finite diff benchmark @@ -219,7 +218,7 @@ TEST(laplace, neg_binomial_2_log_dbl) { eta_l, x, delta, delta_int, theta_0); - VEC g_finite(dim_phi + dim_eta); + std::vectorg_finite(dim_phi + dim_eta); g_finite[0] = (target_phi_1u - target_phi_1l) / (2 * diff); g_finite[1] = (target_phi_2u - target_phi_2l) / (2 * diff); g_finite[2] = (target_eta_u - target_eta_l) / (2 * diff); @@ -234,3 +233,4 @@ TEST(laplace, 
neg_binomial_2_log_dbl) { laplace_marginal_neg_binomial_2_log_lpmf(y_obs, y_index, K, phi, eta, x, delta, delta_int, theta_0)); } +*/ diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index b6e5dbc0f2c..48a79b9b23f 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include @@ -100,7 +100,7 @@ TEST(laplace, poisson_lgm_dim2) { std::vector n_samples = {1, 1}; std::vector sums = {1, 0}; - squared_kernel_functor K; + stan::math::test::squared_kernel_functor K; var target = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi, x, delta, delta_int, theta_0); @@ -113,8 +113,8 @@ TEST(laplace, poisson_lgm_dim2) { // How to test this? The best way would be to generate a few // benchmarks using gpstuff. - VEC g; - AVEC parm_vec = createAVEC(phi(0), phi(1)); + std::vector g; + std::vector parm_vec{phi(0), phi(1)}; target.grad(parm_vec, g); // finite diff test @@ -136,7 +136,7 @@ TEST(laplace, poisson_lgm_dim2) { target_2l = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_2l, x, delta, delta_int, theta_0); - VEC g_finite(dim_phi); + std::vectorg_finite(dim_phi); g_finite[0] = (target_1u - target_1l) / (2 * diff); g_finite[1] = (target_2u - target_2l) / (2 * diff); diff --git a/test/unit/math/laplace/laplace_marginal_student_t_test.cpp b/test/unit/math/laplace/laplace_marginal_student_t_test.cpp index 78f2383c212..6a6b482cd71 100755 --- a/test/unit/math/laplace/laplace_marginal_student_t_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_student_t_test.cpp @@ -1,5 +1,8 @@ #include -#include +//#include +#include + +#include #include #include diff --git a/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp index d722b420ee1..97667eaff55 100644 --- a/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp +++ b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp @@ -94,12 +94,13 @@ TEST(laplace, basic_rng) { // Method 2: Vectorized R&W method double tolerance = 1e-6; int max_num_steps = 100; - + Eigen::MatrixXd K_root; // First find the mode using the custom Newton step Eigen::MatrixXd covariance; Eigen::VectorXd theta; Eigen::SparseMatrix W_r; Eigen::MatrixXd L; + Eigen::VectorXd theta0_val = value_of(theta_0); { Eigen::VectorXd a; Eigen::VectorXd l_grad; @@ -110,7 +111,8 @@ TEST(laplace, basic_rng) { sigma, eta_dummy, x_dummy, d0, di0, covariance, theta, W_r, L, a, l_grad, LU_dummy, - value_of(theta_0), 0, + K_root, + theta0_val, 0, tolerance, max_num_steps); } diff --git a/test/unit/math/laplace/laplace_skim_test.cpp b/test/unit/math/laplace/laplace_skim_test.cpp index 0e01181c49c..5db65d2cd88 100755 --- a/test/unit/math/laplace/laplace_skim_test.cpp +++ b/test/unit/math/laplace/laplace_skim_test.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -139,9 +140,9 @@ class laplace_skim_test : public::testing::Test { y.resize(N); lambda.resize(M); - read_in_data(M, N, data_directory, X, y, lambda); + stan::math::test::read_in_data(M, N, data_directory, X, y, lambda); - if (FALSE){ + if (false){ std::cout << X << std::endl << "-----" << std::endl; std::cout << lambda.transpose() << std::endl << "------" << std::endl; std::cout << y[0] << " " << y[1] << " " << std::endl @@ -232,8 +233,8 @@ TEST_F(laplace_skim_test, lk_analytical) { auto end = std::chrono::system_clock::now(); 
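// Note on the AVEC / VEC replacement running through these test diffs: the
// deprecated helpers are swapped for standard containers. A sketch of the
// intended idiom with the template arguments spelled out; marginal_density
// and phi stand in for the local variables of each test:
//   std::vector<stan::math::var> parm_vec{phi(0), phi(1)};
//   std::vector<double> g;
//   marginal_density.grad(parm_vec, g);  // g[i] = d(marginal_density) / d(parm_vec[i])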
std::chrono::duration elapsed_time = end - start; - VEC g; - AVEC parm_vec(M + 4); + std::vector g; + std::vector parm_vec(M + 4); for (int m = 0; m < M + 4; m++) parm_vec[m] = parm(m); marginal_density.grad(parm_vec, g); @@ -288,8 +289,8 @@ TEST_F(laplace_skim_test, lk_autodiff) { auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; - VEC g; - AVEC parm_vec(M + 4); + std::vector g; + std::vector parm_vec(M + 4); for (int m = 0; m < M + 4; m++) parm_vec[m] = parm(m); marginal_density.grad(parm_vec, g); diff --git a/test/unit/math/laplace/laplace_utility.hpp b/test/unit/math/laplace/laplace_utility.hpp index 4b02ae15d26..be5297aa3bd 100644 --- a/test/unit/math/laplace/laplace_utility.hpp +++ b/test/unit/math/laplace/laplace_utility.hpp @@ -1,7 +1,14 @@ +#ifndef STAN_TEST_UNIT_MATH_LAPLACE_LAPLACE_UTILITY_HPP +#define STAN_TEST_UNIT_MATH_LAPLACE_LAPLACE_UTILITY_HPP +#include #include #include #include +namespace stan { +namespace math { +namespace test { + /* Functions and functors used in several lgp tests. */ ///////////////////////////////////////////////////////////////////// @@ -114,7 +121,7 @@ struct inla_functor { Eigen::VectorXd n_samples = to_vector(head(dat, n_groups)); Eigen::VectorXd sums = to_vector(tail(dat, dat.size() - n_groups)); Eigen::Matrix - Sigma = covariance(parm, n_groups, 1); + Sigma = stan::math::test::covariance(parm, n_groups, 1); return sums - stan::math::elt_multiply(n_samples, stan::math::exp(theta)) - stan::math::mdivide_left(Sigma, theta); @@ -334,3 +341,8 @@ void read_data(int dim_observations, x[i] = buffer; } } + +} +} +} +#endif diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp index d445c2dca46..f2a9ad528e4 100755 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -112,7 +112,7 @@ class laplace_motorcyle_gp_test : public::testing::Test { using stan::math::value_of; using stan::math::gp_exp_quad_cov; - if (FALSE) { + if (false) { n_obs = 6; Eigen::VectorXd x_vec(n_obs); x_vec << 2.4, 2.6, 3.2, 3.6, 4.0, 6.2; @@ -122,9 +122,9 @@ class laplace_motorcyle_gp_test : public::testing::Test { y << 0.0, -1.3, -2.7, 0.0, -2.7, -2.7; } - if (TRUE) { + if (true) { n_obs = 133; - read_data(n_obs, "test/unit/math/laplace/motorcycle_gp/", + stan::math::test::read_data(n_obs, "test/unit/math/laplace/motorcycle_gp/", x, y); // std::cout << "x: "; // for (int i = 0; i < n_obs; i++) std::cout << x[i] << " "; @@ -220,8 +220,8 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { solver, do_line_search, max_steps_line_search); - VEC g; - AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3)); + std::vectorg; + std::vector parm_vec{phi(0), phi(1), phi(2), phi(3)}; marginal_density.grad(parm_vec, g); std::cout << "grad: " << g[0] << " " << g[1] << " " << g[2] << " " << g[3] << std::endl; @@ -287,8 +287,8 @@ TEST_F(laplace_motorcyle_gp_test, lk_autodiff_eta) { 0, 1e-8, 100, hessian_block_size, compute_W_root); - VEC g; - AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3), eta(0)); + std::vectorg; + std::vector parm_vec{phi(0), phi(1), phi(2), phi(3)}; marginal_density.grad(parm_vec, g); std::cout << "grad: " << g[0] << " " << g[1] << " " << g[2] << " " << g[3] << " " << g[4] @@ -340,8 +340,8 @@ TEST_F(laplace_motorcyle_gp_test, wrapper_function) { std::cout << "density: " << marginal_density << std::endl; - VEC g; - AVEC parm_vec = createAVEC(phi(0), phi(1), phi(2), phi(3), eta(0)); + std::vectorg; + std::vector 
parm_vec{phi(0), phi(1), phi(2), phi(3)}; marginal_density.grad(parm_vec, g); std::cout << "grad: " << g[0] << " " << g[1] << " " << g[2] << " " << g[3] << " " << g[4] From 77ad12a7f47201bed8dbd65dba3134ef2dc409e7 Mon Sep 17 00:00:00 2001 From: Stan Jenkins Date: Fri, 1 Oct 2021 17:55:15 +0000 Subject: [PATCH 50/53] [Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.04.1 (tags/RELEASE_600/final) --- .../laplace/laplace_likelihood_deprecated.hpp | 2 +- .../laplace_likelihood_neg_binomial_2_log.hpp | 5 +- stan/math/laplace/laplace_marginal.hpp | 45 ++--- test/unit/math/laplace/disease_map_test.cpp | 189 +++++++++--------- .../math/laplace/higher_order_diff_test.cpp | 50 +++-- .../laplace_bernoulli_logit_rng_test.cpp | 103 ++++------ .../laplace_marginal_bernoulli_logit_test.cpp | 73 +++---- ...place_marginal_neg_binomial_2_log_test.cpp | 45 ++--- .../laplace_marginal_poisson_log_test.cpp | 44 ++-- .../laplace_marginal_student_t_test.cpp | 3 - .../laplace/laplace_poisson_log_rng_test.cpp | 93 ++++----- test/unit/math/laplace/laplace_skim_test.cpp | 139 +++++++------ test/unit/math/laplace/laplace_utility.hpp | 154 +++++++------- test/unit/math/laplace/motorcycle_gp_test.cpp | 84 ++++---- test/unit/math/laplace/sparse_matrix_test.cpp | 20 +- 15 files changed, 472 insertions(+), 577 deletions(-) mode change 100755 => 100644 test/unit/math/laplace/disease_map_test.cpp mode change 100755 => 100644 test/unit/math/laplace/higher_order_diff_test.cpp mode change 100755 => 100644 test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp mode change 100755 => 100644 test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp mode change 100755 => 100644 test/unit/math/laplace/laplace_marginal_student_t_test.cpp mode change 100755 => 100644 test/unit/math/laplace/laplace_skim_test.cpp mode change 100755 => 100644 test/unit/math/laplace/motorcycle_gp_test.cpp mode change 100755 => 100644 test/unit/math/laplace/sparse_matrix_test.cpp diff --git a/stan/math/laplace/laplace_likelihood_deprecated.hpp b/stan/math/laplace/laplace_likelihood_deprecated.hpp index 31936c3acb9..5ad14b69c5d 100644 --- a/stan/math/laplace/laplace_likelihood_deprecated.hpp +++ b/stan/math/laplace/laplace_likelihood_deprecated.hpp @@ -306,7 +306,7 @@ struct diff_neg_binomial_2_log { hessian = -eta_scalar * sums_plus_n_eta.cwiseProduct( - elt_divide(exp_neg_theta, square(one_plus_exp))); + elt_divide(exp_neg_theta, square(one_plus_exp))); } template diff --git a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp index c0a1cb697ef..2f5029fc98c 100644 --- a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp +++ b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp @@ -57,8 +57,7 @@ struct diff_neg_binomial_2_log { const Eigen::Matrix& eta, Eigen::Matrix, Eigen::Dynamic, 1>& gradient, Eigen::Matrix, Eigen::Dynamic, 1>& hessian, - int hessian_block_size = 1) - const { + int hessian_block_size = 1) const { typedef return_type_t scalar; Eigen::VectorXd one = rep_vector(1, theta.size()); T_eta eta_scalar = eta(0); @@ -72,7 +71,7 @@ struct diff_neg_binomial_2_log { hessian = -eta_scalar * sums_plus_n_eta.cwiseProduct( - elt_divide(exp_neg_theta, square(one_plus_exp))); + elt_divide(exp_neg_theta, square(one_plus_exp))); } template diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp index 28fedf671a3..694a1b9faa4 100644 --- a/stan/math/laplace/laplace_marginal.hpp +++ 
b/stan/math/laplace/laplace_marginal.hpp @@ -86,26 +86,15 @@ namespace math { */ template double laplace_marginal_density( - const D& diff_likelihood, - const K& covariance_function, - const Eigen::VectorXd& phi, - const Eigen::VectorXd& eta, - const Tx& x, - const std::vector& delta, - const std::vector& delta_int, - Eigen::MatrixXd& covariance, - Eigen::VectorXd& theta, - Eigen::SparseMatrix& W_r, - Eigen::MatrixXd& L, - Eigen::VectorXd& a, - Eigen::VectorXd& l_grad, - Eigen::PartialPivLU& LU, - Eigen::MatrixXd& K_root, - const Eigen::VectorXd& theta_0, - std::ostream* msgs = nullptr, - double tolerance = 1e-6, - long int max_num_steps = 100, - int hessian_block_size = 0, int solver = 1, + const D& diff_likelihood, const K& covariance_function, + const Eigen::VectorXd& phi, const Eigen::VectorXd& eta, const Tx& x, + const std::vector& delta, const std::vector& delta_int, + Eigen::MatrixXd& covariance, Eigen::VectorXd& theta, + Eigen::SparseMatrix& W_r, Eigen::MatrixXd& L, Eigen::VectorXd& a, + Eigen::VectorXd& l_grad, Eigen::PartialPivLU& LU, + Eigen::MatrixXd& K_root, const Eigen::VectorXd& theta_0, + std::ostream* msgs = nullptr, double tolerance = 1e-6, + long int max_num_steps = 100, int hessian_block_size = 0, int solver = 1, int do_line_search = 0, int max_steps_line_search = 10) { using Eigen::MatrixXd; using Eigen::SparseMatrix; @@ -181,16 +170,16 @@ double laplace_marginal_density( a = b - W_r * mdivide_left_tri( - transpose(L), - mdivide_left_tri( - L, W_r.diagonal().cwiseProduct(covariance * b))); + transpose(L), + mdivide_left_tri( + L, W_r.diagonal().cwiseProduct(covariance * b))); } else { b = W * theta + l_grad.head(theta_size); a = b - W_r * mdivide_left_tri( - transpose(L), mdivide_left_tri( - L, W_r * (covariance * b))); + transpose(L), mdivide_left_tri( + L, W_r * (covariance * b))); } } else if (solver == 2) { // TODO -- use triangularView for K_root. 
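For reference, the Newton update that the solver == 1 branch implements corresponds, in the notation of Rasmussen & Williams (2006, Algorithm 3.1), to the following sketch, assuming W_r holds a (block) square root of the negative likelihood Hessian W:

    B = I + W^{1/2} K W^{1/2},   L L^T = B
    b = W theta + \nabla log p(y | theta)
    a = b - W^{1/2} (L^T \ (L \ (W^{1/2} K b)))
    theta_new = K a

K_root and LU carry the alternative factorizations used by the other solver branches.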
@@ -406,7 +395,7 @@ struct laplace_marginal_density_vari : public vari { MatrixXd W_root_diag = W_r; R = W_r * L.transpose().triangularView().solve( - L.triangularView().solve(W_root_diag)); + L.triangularView().solve(W_root_diag)); Eigen::MatrixXd C = mdivide_left_tri(L, W_r * covariance); if (hessian_block_size == 0 && eta_size_ == 0) { @@ -426,8 +415,8 @@ struct laplace_marginal_density_vari : public vari { R = W_r - W_r * K_root * L.transpose().triangularView().solve( - L.triangularView().solve(K_root.transpose() - * W_r)); + L.triangularView().solve(K_root.transpose() + * W_r)); Eigen::MatrixXd C = L.triangularView().solve(K_root.transpose()); diff --git a/test/unit/math/laplace/disease_map_test.cpp b/test/unit/math/laplace/disease_map_test.cpp old mode 100755 new mode 100644 index 1c183efb38a..4bf929c043e --- a/test/unit/math/laplace/disease_map_test.cpp +++ b/test/unit/math/laplace/disease_map_test.cpp @@ -14,30 +14,27 @@ #include struct poisson_log_likelihood { - template - stan::return_type_t - operator()(const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - const Eigen::VectorXd& delta, - const std::vector& n_samples, - std::ostream* pstream) const { - using stan::math::to_vector; + template + stan::return_type_t operator()( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta, const Eigen::VectorXd& delta, + const std::vector& n_samples, std::ostream* pstream) const { using stan::math::log; + using stan::math::to_vector; int n = 911; Eigen::VectorXd y = delta.head(n); Eigen::VectorXd ye = delta.tail(n); // Eigen::VectorXd log_ye = ye.log(); - stan::math::diff_poisson_log - diff_functor(to_vector(n_samples), y, log(ye)); + stan::math::diff_poisson_log diff_functor(to_vector(n_samples), y, log(ye)); return diff_functor.log_likelihood(theta, eta); } }; // TODO(charlesm93): update using new function signatures. -class laplace_disease_map_test : public::testing::Test { -protected: +class laplace_disease_map_test : public ::testing::Test { + protected: void SetUp() override { dim_theta = 911; n_observations = 911; @@ -46,7 +43,8 @@ class laplace_disease_map_test : public::testing::Test { x2.resize(dim_theta); y.resize(n_observations); ye.resize(n_observations); - stan::math::test::read_in_data(dim_theta, n_observations, data_directory, x1, x2, y, ye); + stan::math::test::read_in_data(dim_theta, n_observations, data_directory, + x1, x2, y, ye); if (false) { // look at some of the data @@ -66,7 +64,8 @@ class laplace_disease_map_test : public::testing::Test { // one observation per group n_samples.resize(dim_theta); - for (int i = 0; i < dim_theta; i++) n_samples[i] = 1; + for (int i = 0; i < dim_theta; i++) + n_samples[i] = 1; theta_0 = Eigen::VectorXd::Zero(dim_theta); dim_phi = 2; @@ -74,7 +73,8 @@ class laplace_disease_map_test : public::testing::Test { phi << 0.3162278, 200; // variance, length scale delta_lk.resize(2 * n_observations); - for (int i = 0; i < n_observations; i++) delta_lk(i) = y[i]; + for (int i = 0; i < n_observations; i++) + delta_lk(i) = y[i]; for (int i = 0; i < n_observations; i++) delta_lk(n_observations + i) = ye(i); } @@ -100,20 +100,17 @@ class laplace_disease_map_test : public::testing::Test { poisson_log_likelihood f; }; - TEST_F(laplace_disease_map_test, lk_analytical) { - // Based on (Vanhatalo, Pietilainen and Vethari, 2010). 
See // https://research.cs.aalto.fi/pml/software/gpstuff/demo_spatial1.shtml - using stan::math::var; using stan::math::laplace_marginal_poisson_log_lpmf; + using stan::math::var; auto start = std::chrono::system_clock::now(); - var marginal_density - = laplace_marginal_poisson_log_lpmf(y, n_samples, ye, - stan::math::test::sqr_exp_kernel_functor(), - phi, x, delta, delta_int, theta_0); + var marginal_density = laplace_marginal_poisson_log_lpmf( + y, n_samples, ye, stan::math::test::sqr_exp_kernel_functor(), phi, x, + delta, delta_int, theta_0); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; @@ -137,44 +134,44 @@ TEST_F(laplace_disease_map_test, lk_analytical) { //////////////////////////////////////////////////////////////////////// // Let's now generate a sample theta from the estimated posterior -/* - using stan::math::diff_poisson_log; - using stan::math::to_vector; - using stan::math::laplace_base_rng; - using stan::math::laplace_poisson_log_rng; - - diff_poisson_log diff_likelihood(to_vector(n_samples), - to_vector(y), - stan::math::log(ye)); - boost::random::mt19937 rng; - start = std::chrono::system_clock::now(); - Eigen::VectorXd - theta_pred = laplace_base_rng(diff_likelihood, - stan::math::test::sqr_exp_kernel_functor(), - phi, eta_dummy, x, x, delta, delta_int, - theta_0, rng); - - end = std::chrono::system_clock::now(); - elapsed_time = end - start; - - std::cout << "LAPLACE_APPROX_RNG" << std::endl - << "total time: " << elapsed_time.count() << std::endl - << std::endl; - - // Expected result - // total time: 0.404114 (or 0.328 on new computer) - - start = std::chrono::system_clock::now(); - theta_pred = laplace_poisson_log_rng(y, n_samples, ye, - stan::math::test::sqr_exp_kernel_functor(), - phi, x, delta, delta_int, - theta_0, rng); - end = std::chrono::system_clock::now(); - elapsed_time = end - start; - - std::cout << "LAPLACE_APPROX_POISSON_RNG" << std::endl - << "total time: " << elapsed_time.count() << std::endl - << std::endl; */ + /* + using stan::math::diff_poisson_log; + using stan::math::to_vector; + using stan::math::laplace_base_rng; + using stan::math::laplace_poisson_log_rng; + + diff_poisson_log diff_likelihood(to_vector(n_samples), + to_vector(y), + stan::math::log(ye)); + boost::random::mt19937 rng; + start = std::chrono::system_clock::now(); + Eigen::VectorXd + theta_pred = laplace_base_rng(diff_likelihood, + stan::math::test::sqr_exp_kernel_functor(), + phi, eta_dummy, x, x, delta, delta_int, + theta_0, rng); + + end = std::chrono::system_clock::now(); + elapsed_time = end - start; + + std::cout << "LAPLACE_APPROX_RNG" << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; + + // Expected result + // total time: 0.404114 (or 0.328 on new computer) + + start = std::chrono::system_clock::now(); + theta_pred = laplace_poisson_log_rng(y, n_samples, ye, + stan::math::test::sqr_exp_kernel_functor(), + phi, x, delta, delta_int, + theta_0, rng); + end = std::chrono::system_clock::now(); + elapsed_time = end - start; + + std::cout << "LAPLACE_APPROX_POISSON_RNG" << std::endl + << "total time: " << elapsed_time.count() << std::endl + << std::endl; */ } /* TEST_F(laplace_disease_map_test, lk_autodiff) { @@ -208,9 +205,9 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { solver = 1; var marginal_density = laplace_marginal_density(diff_functor, - stan::math::test::sqr_exp_kernel_functor(), phi, eta_dummy, - x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size, solver); + 
stan::math::test::sqr_exp_kernel_functor(), phi, +eta_dummy, x, delta, delta_int, theta_0, 0, 1e-6, 100, hessian_block_size, +solver); end = std::chrono::system_clock::now(); elapsed_time = end - start; @@ -232,15 +229,15 @@ TEST_F(laplace_disease_map_test, lk_autodiff) { TEST_F(laplace_disease_map_test, finite_diff_benchmark) { /////////////////////////////////////////////////////////////////// // finite_diff benchmark - using stan::math::var; - using stan::math::laplace_marginal_density; using stan::math::diff_likelihood; + using stan::math::laplace_marginal_density; + using stan::math::var; diff_likelihood diff_functor(f, delta_lk, n_samples); Eigen::VectorXd phi_dbl = value_of(phi); - Eigen::VectorXd phi_u0 = phi_dbl, phi_u1 = phi_dbl, - phi_l0 = phi_dbl, phi_l1 = phi_dbl; + Eigen::VectorXd phi_u0 = phi_dbl, phi_u1 = phi_dbl, phi_l0 = phi_dbl, + phi_l1 = phi_dbl; double eps = 1e-7; int hessian_block_size = 1; @@ -250,41 +247,35 @@ TEST_F(laplace_disease_map_test, finite_diff_benchmark) { phi_l0(0) -= eps; phi_l1(1) -= eps; - double target = laplace_marginal_density(diff_functor, - stan::math::test::sqr_exp_kernel_functor(), - phi_dbl, value_of(eta_dummy), - x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size); - - double target_u0 = laplace_marginal_density(diff_functor, - stan::math::test::sqr_exp_kernel_functor(), - phi_u0, value_of(eta_dummy), - x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size), - - target_u1 = laplace_marginal_density(diff_functor, - stan::math::test::sqr_exp_kernel_functor(), - phi_u1, value_of(eta_dummy), - x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size), - - target_l0 = laplace_marginal_density(diff_functor, - stan::math::test::sqr_exp_kernel_functor(), - phi_l0, value_of(eta_dummy), - x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size), - - target_l1 = laplace_marginal_density(diff_functor, - stan::math::test::sqr_exp_kernel_functor(), - phi_l1, value_of(eta_dummy), - x, delta, delta_int, theta_0, - 0, 1e-6, 100, hessian_block_size); + double target = laplace_marginal_density( + diff_functor, stan::math::test::sqr_exp_kernel_functor(), phi_dbl, + value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, + hessian_block_size); + + double target_u0 = laplace_marginal_density( + diff_functor, stan::math::test::sqr_exp_kernel_functor(), phi_u0, + value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, + hessian_block_size), + + target_u1 = laplace_marginal_density( + diff_functor, stan::math::test::sqr_exp_kernel_functor(), phi_u1, + value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, + hessian_block_size), + + target_l0 = laplace_marginal_density( + diff_functor, stan::math::test::sqr_exp_kernel_functor(), phi_l0, + value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, + hessian_block_size), + + target_l1 = laplace_marginal_density( + diff_functor, stan::math::test::sqr_exp_kernel_functor(), phi_l1, + value_of(eta_dummy), x, delta, delta_int, theta_0, 0, 1e-6, 100, + hessian_block_size); std::cout << "Finite_diff benchmark: " << std::endl << "Value: " << target << std::endl - << "grad: " << (target_u0 - target_l0) / (2 * eps) - << " " << (target_u1 - target_l1) / (2 * eps) - << std::endl; + << "grad: " << (target_u0 - target_l0) / (2 * eps) << " " + << (target_u1 - target_l1) / (2 * eps) << std::endl; } /* TEST_F(laplace_disease_map_test, rng_autodiff) { diff --git a/test/unit/math/laplace/higher_order_diff_test.cpp 
b/test/unit/math/laplace/higher_order_diff_test.cpp old mode 100755 new mode 100644 index ece470b7055..cfea061c8da --- a/test/unit/math/laplace/higher_order_diff_test.cpp +++ b/test/unit/math/laplace/higher_order_diff_test.cpp @@ -19,14 +19,12 @@ // This is what a function define in Stan would return. struct neg_bin_log_likelihood { template - stan::return_type_t - operator()(const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - std::ostream* pstream) const { - stan::math::diff_neg_binomial_2_log - diff_functor(delta, delta_int, theta.size()); + stan::return_type_t operator()( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta, const Eigen::VectorXd& delta, + const std::vector& delta_int, std::ostream* pstream) const { + stan::math::diff_neg_binomial_2_log diff_functor(delta, delta_int, + theta.size()); return diff_functor.log_likelihood(theta, eta); } @@ -40,14 +38,15 @@ struct f_theta { std::ostream* pstream_; F f_functor_; - f_theta() { } // default constructor required for default class. + f_theta() {} // default constructor required for default class. - f_theta(const F& f_functor, - const Eigen::VectorXd& eta, - const Eigen::VectorXd& delta, - const std::vector& delta_int, - std::ostream* pstream) : - eta_(eta), delta_(delta), delta_int_(delta_int), f_functor_(f_functor) { } + f_theta(const F& f_functor, const Eigen::VectorXd& eta, + const Eigen::VectorXd& delta, const std::vector& delta_int, + std::ostream* pstream) + : eta_(eta), + delta_(delta), + delta_int_(delta_int), + f_functor_(f_functor) {} template T operator()(const Eigen::Matrix& theta) const { @@ -55,8 +54,8 @@ struct f_theta { } }; -class neg_bin_log_diff_test : public::testing::Test { -protected: +class neg_bin_log_diff_test : public ::testing::Test { + protected: void SetUp() override { theta.resize(2); theta << 1, 1; @@ -83,14 +82,13 @@ class neg_bin_log_diff_test : public::testing::Test { f_theta f; }; - TEST_F(neg_bin_log_diff_test, manual_calls) { using stan::math::fvar; - using stan::math::var; - using stan::math::value_of; using stan::math::hessian_times_vector; using stan::math::nested_rev_autodiff; using stan::math::third_diff_directional; + using stan::math::value_of; + using stan::math::var; // var log_density = likelihood(theta, eta, y, y_index, 0); var log_density = f(theta); @@ -134,8 +132,8 @@ TEST_F(neg_bin_log_diff_test, manual_calls) { var grad2_fx_dot_vv = fx_ffvar.d_.d_; std::cout << "fx: " << value_of(fx_ffvar.val_.val_) << std::endl; - std::cout << "grad_grad_fx_dot_v: " - << value_of(fx_ffvar.d_.d_) << std::endl; + std::cout << "grad_grad_fx_dot_v: " << value_of(fx_ffvar.d_.d_) + << std::endl; // var fx_var; // var grad3_f_v; @@ -150,14 +148,14 @@ TEST_F(neg_bin_log_diff_test, manual_calls) { // Test function for directional Hessian and directional third diff. 
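// For orientation (inferred from the fvar<fvar<var>> construction above, not
// a documented contract): with tangent directions v and w,
//   hessian_times_vector returns (d^2 f / d x^2) * v, and
//   third_diff_directional fills third_diff with entries
//     sum_{j,k} d^3 f / (d x_i d x_j d x_k) * v_j * w_k,
// here evaluated with v = w = tangent.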
Eigen::VectorXd hessian_v; - hessian_times_vector(likelihood, theta_dbl, eta, y, y_index, - tangent, fx, hessian_v, 0); + hessian_times_vector(likelihood, theta_dbl, eta, y, y_index, tangent, fx, + hessian_v, 0); std::cout << "hessian_v: " << hessian_v.transpose() << std::endl; Eigen::VectorXd third_diff; - third_diff_directional(likelihood, theta_dbl, eta, y, y_index, - fx, third_diff, tangent, tangent, 0); + third_diff_directional(likelihood, theta_dbl, eta, y, y_index, fx, third_diff, + tangent, tangent, 0); std::cout << "f: " << fx << std::endl; std::cout << "third diff: " << third_diff.transpose() << std::endl; diff --git a/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp b/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp index d5fc5835852..f89647322eb 100644 --- a/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp +++ b/test/unit/math/laplace/laplace_bernoulli_logit_rng_test.cpp @@ -13,30 +13,27 @@ #include struct stationary_point { - template - inline Eigen::Matrix::type, - Eigen::Dynamic, 1> - operator() (const Eigen::Matrix& theta, - const Eigen::Matrix& parms, - const std::vector& dat, - const std::vector& dat_int, - std::ostream* pstream__ = 0) const { - Eigen::Matrix::type, - Eigen::Dynamic, 1> z(2); + template + inline Eigen::Matrix::type, Eigen::Dynamic, + 1> + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& parms, + const std::vector& dat, const std::vector& dat_int, + std::ostream* pstream__ = 0) const { + Eigen::Matrix::type, Eigen::Dynamic, 1> + z(2); z(0) = 1 - exp(theta(0)) - theta(0) / (parms(0) * parms(0)); - z(1) = - exp(theta(1)) - theta(1) / (parms(1) * parms(1)); + z(1) = -exp(theta(1)) - theta(1) / (parms(1) * parms(1)); return z; } }; struct diagonal_kernel_functor { template - Eigen::Matrix - operator() (const Eigen::Matrix& phi, - const T2& x, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* msgs = nullptr) const { + Eigen::Matrix operator()( + const Eigen::Matrix& phi, const T2& x, + const std::vector& delta, const std::vector& delta_int, + std::ostream* msgs = nullptr) const { Eigen::Matrix K(2, 2); K(0, 0) = phi(0) * phi(0); K(1, 1) = phi(1) * phi(1); @@ -49,20 +46,19 @@ struct diagonal_kernel_functor { TEST(laplace, basic_rng) { // make sure the right covariance function is computed // and compare results. 
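// The covariance computations compared in this test should agree up to
// numerical error; the identity being exercised is (by Woodbury)
//   (K^{-1} + W)^{-1} = K - K W^{1/2} B^{-1} W^{1/2} K,  with  B = I + W^{1/2} K W^{1/2},
// where the right-hand side is evaluated through the Cholesky factor L of B as
//   K - V^T V,  with  V = L \ (W^{1/2} K).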
- using stan::math::laplace_rng; - using stan::math::laplace_poisson_log_rng; - using stan::math::laplace_bernoulli_logit_rng; using stan::math::diff_poisson_log; + using stan::math::laplace_bernoulli_logit_rng; + using stan::math::laplace_poisson_log_rng; + using stan::math::laplace_rng; using stan::math::algebra_solver; - using stan::math::to_vector; using stan::math::diag_matrix; - using stan::math::value_of; - using stan::math::mdivide_left_tri; using stan::math::diag_pre_multiply; using stan::math::inv; + using stan::math::mdivide_left_tri; using stan::math::square; - + using stan::math::to_vector; + using stan::math::value_of; Eigen::VectorXd theta_0(2); theta_0 << 1, 1; @@ -71,30 +67,26 @@ TEST(laplace, basic_rng) { std::vector n_samples = {1, 1}; std::vector sums = {1, 0}; - diff_poisson_log diff_likelihood(to_vector(n_samples), - to_vector(sums)); + diff_poisson_log diff_likelihood(to_vector(n_samples), to_vector(sums)); std::vector d0; std::vector di0; - // Method 1: brute force and straightforward Eigen::VectorXd theta_root - = algebra_solver(stationary_point(), - theta_0, sigma, d0, di0); + = algebra_solver(stationary_point(), theta_0, sigma, d0, di0); Eigen::VectorXd gradient, eta_dummy; Eigen::SparseMatrix W_sparse; diff_likelihood.diff(theta_root, eta_dummy, gradient, W_sparse); - Eigen::MatrixXd W = - W_sparse; + Eigen::MatrixXd W = -W_sparse; diagonal_kernel_functor covariance_function; std::vector x_dummy; Eigen::MatrixXd x_dummay_mat; Eigen::MatrixXd K = covariance_function(sigma, x_dummy, d0, di0, 0); - std::cout << "K (brute force): " - << std::endl - << (K.inverse() + W).inverse() - << std::endl << std::endl; + std::cout << "K (brute force): " << std::endl + << (K.inverse() + W).inverse() << std::endl + << std::endl; // Method 2: Vectorized R&W method double tolerance = 1e-6; @@ -112,48 +104,41 @@ TEST(laplace, basic_rng) { Eigen::VectorXd a; Eigen::VectorXd l_grad; Eigen::PartialPivLU LU_dummy; - double marginal_density - = laplace_marginal_density(diff_likelihood, - covariance_function, - sigma, eta_dummy, x_dummy, d0, di0, - covariance, theta, W_r, L, a, l_grad, - LU_dummy, - K_root, - theta0_val, 0, - tolerance, max_num_steps); + double marginal_density = laplace_marginal_density( + diff_likelihood, covariance_function, sigma, eta_dummy, x_dummy, d0, + di0, covariance, theta, W_r, L, a, l_grad, LU_dummy, K_root, theta0_val, + 0, tolerance, max_num_steps); } Eigen::VectorXd W_root(theta.size()); - for (int i = 0; i < theta.size(); i++) W_root(i) = W_r.coeff(i, i); + for (int i = 0; i < theta.size(); i++) + W_root(i) = W_r.coeff(i, i); Eigen::MatrixXd V; - V = mdivide_left_tri(L, - diag_pre_multiply(W_root, covariance)); + V = mdivide_left_tri(L, diag_pre_multiply(W_root, covariance)); std::cout << "K (method 1): " << std::endl << covariance - V.transpose() * V << std::endl << std::endl; // Method 3: Modified R&W method Eigen::VectorXd W_root_inv = inv(W_root); - Eigen::MatrixXd V_dec = mdivide_left_tri(L, - diag_matrix(W_root_inv)); + Eigen::MatrixXd V_dec + = mdivide_left_tri(L, diag_matrix(W_root_inv)); std::cout << "K (method 2): " << std::endl - << - V_dec.transpose() * V_dec + diag_matrix(square(W_root_inv)) - << std::endl << std::endl; + << -V_dec.transpose() * V_dec + diag_matrix(square(W_root_inv)) + << std::endl + << std::endl; // Check calls to rng functions compile boost::random::mt19937 rng; Eigen::MatrixXd theta_pred - = laplace_base_rng(diff_likelihood, covariance_function, - sigma, eta_dummy, x_dummy, x_dummy, d0, di0, theta_0, - rng); + = 
laplace_base_rng(diff_likelihood, covariance_function, sigma, eta_dummy, + x_dummy, x_dummy, d0, di0, theta_0, rng); theta_pred - = laplace_bernoulli_logit_rng(sums, n_samples, covariance_function, - sigma, x_dummay_mat, - d0, di0, theta_0, rng); + = laplace_bernoulli_logit_rng(sums, n_samples, covariance_function, sigma, + x_dummay_mat, d0, di0, theta_0, rng); // Bonus: make the distribution with a poisson rng also runs. - theta_pred - = laplace_poisson_log_rng(sums, n_samples, covariance_function, - sigma, x_dummy, d0, di0, theta_0, rng); + theta_pred = laplace_poisson_log_rng(sums, n_samples, covariance_function, + sigma, x_dummy, d0, di0, theta_0, rng); } diff --git a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp old mode 100755 new mode 100644 index d968216d8bc..524c5204e08 --- a/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_bernoulli_logit_test.cpp @@ -46,11 +46,11 @@ TEST(laplace, likelihood_differentiation) { theta_2u(1) = theta(1) + diff; theta_2l(1) = theta(1) - diff; double diff_1 = (diff_functor.log_likelihood(theta_1u, eta_dummy) - - diff_functor.log_likelihood(theta_1l, eta_dummy)) - / (2 * diff); + - diff_functor.log_likelihood(theta_1l, eta_dummy)) + / (2 * diff); double diff_2 = (diff_functor.log_likelihood(theta_2u, eta_dummy) - - diff_functor.log_likelihood(theta_2l, eta_dummy)) - / (2 * diff); + - diff_functor.log_likelihood(theta_2l, eta_dummy)) + / (2 * diff); EXPECT_NEAR(diff_1, gradient(0), test_tolerance); EXPECT_NEAR(diff_2, gradient(1), test_tolerance); @@ -70,26 +70,27 @@ TEST(laplace, likelihood_differentiation) { EXPECT_NEAR(diff_grad_2, hessian.coeff(1, 1), test_tolerance); // finite diff calculation for third-order derivatives - double diff_hess_1 = (hessian_1u.coeff(0, 0) - hessian_1l.coeff(0, 0)) - / (2 * diff); - double diff_hess_2 = (hessian_2u.coeff(1, 1) - hessian_2l.coeff(1, 1)) - / (2 * diff); + double diff_hess_1 + = (hessian_1u.coeff(0, 0) - hessian_1l.coeff(0, 0)) / (2 * diff); + double diff_hess_2 + = (hessian_2u.coeff(1, 1) - hessian_2l.coeff(1, 1)) / (2 * diff); EXPECT_NEAR(diff_hess_1, third_tensor(0), test_tolerance); EXPECT_NEAR(diff_hess_2, third_tensor(1), test_tolerance); } TEST(laplace, logistic_lgm_dim500) { - using stan::math::var; - using stan::math::to_vector; using stan::math::diff_bernoulli_logit; + using stan::math::to_vector; + using stan::math::var; int dim_theta = 500; int n_observations = 500; std::string data_directory = "test/unit/math/laplace/aki_synth_data/"; std::vector x1(dim_theta), x2(dim_theta); std::vector y(n_observations); - stan::math::test::read_in_data(dim_theta, n_observations, data_directory, x1, x2, y); + stan::math::test::read_in_data(dim_theta, n_observations, data_directory, x1, + x2, y); // Look a some of the data. 
// std::cout << "x_1: " << x1[0] << " " << x2[0] << std::endl @@ -121,24 +122,20 @@ TEST(laplace, logistic_lgm_dim500) { Eigen::MatrixXd K_dummy; auto start_optimization = std::chrono::system_clock::now(); - double marginal_density - = laplace_marginal_density( + double marginal_density = laplace_marginal_density( diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - stan::math::test::sqr_exp_kernel_functor(), - phi, eta_dummy, x, delta, delta_int, - covariance, theta_laplace, W_root, L, a, l_grad, - LU_dummy, - K_dummy, - theta_0, 0, 1e-3, 100); + stan::math::test::sqr_exp_kernel_functor(), phi, eta_dummy, x, delta, + delta_int, covariance, theta_laplace, W_root, L, a, l_grad, LU_dummy, + K_dummy, theta_0, 0, 1e-3, 100); auto end_optimization = std::chrono::system_clock::now(); - std::chrono::duration - elapsed_time_optimization = end_optimization - start_optimization; + std::chrono::duration elapsed_time_optimization + = end_optimization - start_optimization; std::cout << "LAPLACE MARGINAL FOR DOUBLE: " << std::endl << "density: " << marginal_density << std::endl - << "time: " << elapsed_time_optimization.count() - << std::endl << std::endl; + << "time: " << elapsed_time_optimization.count() << std::endl + << std::endl; // Expected output // density: -195.368 @@ -149,14 +146,12 @@ TEST(laplace, logistic_lgm_dim500) { Eigen::Matrix eta_dummy_v; start_optimization = std::chrono::system_clock::now(); - var marginal_density_v - = laplace_marginal_density( - diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), - stan::math::test::sqr_exp_kernel_functor(), - phi_v2, eta_dummy_v, x, delta, delta_int, - theta_0, 0, 1e-3, 100); - - std::vectorg2; + var marginal_density_v = laplace_marginal_density( + diff_bernoulli_logit(to_vector(n_samples), to_vector(y)), + stan::math::test::sqr_exp_kernel_functor(), phi_v2, eta_dummy_v, x, delta, + delta_int, theta_0, 0, 1e-3, 100); + + std::vector g2; std::vector parm_vec2{phi_v2(0), phi_v2(1)}; marginal_density_v.grad(parm_vec2, g2); @@ -165,10 +160,9 @@ TEST(laplace, logistic_lgm_dim500) { std::cout << "LAPLACE MARGINAL AND VARI CLASS" << std::endl << "density: " << value_of(marginal_density_v) << std::endl - << "autodiff grad: " << g2[0] << " " << g2[1] - << std::endl - << "total time: " << elapsed_time_optimization.count() - << std::endl << std::endl; + << "autodiff grad: " << g2[0] << " " << g2[1] << std::endl + << "total time: " << elapsed_time_optimization.count() << std::endl + << std::endl; // EXPECTED // density: -195.368 @@ -181,12 +175,9 @@ TEST(laplace, logistic_lgm_dim500) { using stan::math::laplace_marginal_bernoulli_logit_lpmf; using stan::math::value_of; - - double marginal_density_v2 - = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, - stan::math::test::sqr_exp_kernel_functor(), - phi, x, delta, delta_int, - theta_0, 0, 1e-3, 100); + double marginal_density_v2 = laplace_marginal_bernoulli_logit_lpmf( + y, n_samples, stan::math::test::sqr_exp_kernel_functor(), phi, x, delta, + delta_int, theta_0, 0, 1e-3, 100); EXPECT_FLOAT_EQ(marginal_density, marginal_density_v2); } diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp old mode 100755 new mode 100644 index cae10b21548..75fb0187be1 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -40,21 +40,21 @@ TEST(laplace, likelihood_differentiation) { // Benchmark against finite diff double 
epsilon = 1e-6; - Eigen::VectorXd theta_l0 = theta, theta_u0 = theta, - theta_l1 = theta, theta_u1 = theta; + Eigen::VectorXd theta_l0 = theta, theta_u0 = theta, theta_l1 = theta, + theta_u1 = theta; theta_u0(0) += epsilon; theta_l0(0) -= epsilon; theta_u1(1) += epsilon; theta_l1(1) -= epsilon; Eigen::VectorXd finite_gradient(2); - finite_gradient(0) = - (diff_functor.log_likelihood(theta_u0, eta) - - diff_functor.log_likelihood(theta_l0, eta)) / (2 * epsilon); + finite_gradient(0) = (diff_functor.log_likelihood(theta_u0, eta) + - diff_functor.log_likelihood(theta_l0, eta)) + / (2 * epsilon); - finite_gradient(1) = - (diff_functor.log_likelihood(theta_u1, eta) - - diff_functor.log_likelihood(theta_l1, eta)) / (2 * epsilon); + finite_gradient(1) = (diff_functor.log_likelihood(theta_u1, eta) + - diff_functor.log_likelihood(theta_l1, eta)) + / (2 * epsilon); Eigen::VectorXd gradient_l0, gradient_u0, gradient_l1, gradient_u1; Eigen::VectorXd hessian_l0, hessian_u0, hessian_l1, hessian_u1; @@ -76,7 +76,6 @@ TEST(laplace, likelihood_differentiation) { std::cout << "hessian: " << hessian << std::endl; std::cout << "third_diff: " << third_diff << std::endl; - EXPECT_FLOAT_EQ(finite_gradient(0), gradient(0)); EXPECT_FLOAT_EQ(finite_gradient(1), gradient(1)); EXPECT_FLOAT_EQ(finite_hessian(0), hessian(0)); @@ -90,25 +89,23 @@ TEST(laplace, likelihood_differentiation) { Eigen::VectorXd eta_l(1), eta_u(1); eta_l(0) = eta(0) - epsilon; eta_u(0) = eta(0) + epsilon; - double finite_gradient_eta = - (diff_functor.log_likelihood(theta, eta_u) - - diff_functor.log_likelihood(theta, eta_l)) / (2 * epsilon); + double finite_gradient_eta = (diff_functor.log_likelihood(theta, eta_u) + - diff_functor.log_likelihood(theta, eta_l)) + / (2 * epsilon); std::cout << "diff_eta: " << diff_eta.transpose() << std::endl; - EXPECT_FLOAT_EQ(finite_gradient_eta, diff_eta(0)); + EXPECT_FLOAT_EQ(finite_gradient_eta, diff_eta(0)); Eigen::MatrixXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta); - Eigen::VectorXd gradient_theta_l, - gradient_theta_u, - hessian_theta_u, - hessian_theta_l; + Eigen::VectorXd gradient_theta_l, gradient_theta_u, hessian_theta_u, + hessian_theta_l; diff_functor.diff(theta, eta_l, gradient_theta_l, hessian_theta_l); diff_functor.diff(theta, eta_u, gradient_theta_u, hessian_theta_u); Eigen::VectorXd finite_gradient_theta_eta - = (gradient_theta_u - gradient_theta_l) / (2 * epsilon); + = (gradient_theta_u - gradient_theta_l) / (2 * epsilon); std::cout << "diff_theta_eta: " << diff_theta_eta.transpose() << std::endl; @@ -116,11 +113,10 @@ TEST(laplace, likelihood_differentiation) { EXPECT_FLOAT_EQ(finite_gradient_theta_eta(1), diff_theta_eta(1, 0)); // Eigen::VectorXd W_root = (-hessian).cwiseSqrt(); - Eigen::MatrixXd diff2_theta_eta - = diff_functor.diff2_theta_eta(theta, eta); + Eigen::MatrixXd diff2_theta_eta = diff_functor.diff2_theta_eta(theta, eta); Eigen::VectorXd finite_hessian_theta_eta - = (hessian_theta_u - hessian_theta_l) / (2 * epsilon); + = (hessian_theta_u - hessian_theta_l) / (2 * epsilon); std::cout << "diff2_theta_eta: " << diff2_theta_eta.transpose() << std::endl; @@ -132,15 +128,14 @@ TEST(laplace, likelihood_differentiation) { template Eigen::MatrixXd compute_B(const Eigen::VectorXd& theta, const Eigen::VectorXd& eta, - const Eigen::MatrixXd& covariance, - T diff_functor) { + const Eigen::MatrixXd& covariance, T diff_functor) { int group_size = theta.size(); Eigen::VectorXd l_grad, hessian; diff_functor.diff(theta, eta, l_grad, hessian); - Eigen::VectorXd W_root = (- 
hessian).cwiseSqrt(); + Eigen::VectorXd W_root = (-hessian).cwiseSqrt(); return Eigen::MatrixXd::Identity(group_size, group_size) - + stan::math::quad_form_diag(covariance, W_root); + + stan::math::quad_form_diag(covariance, W_root); } /* TEST(laplace, neg_binomial_2_log_dbl) { diff --git a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp index 48a79b9b23f..270e1223db1 100644 --- a/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_poisson_log_test.cpp @@ -10,7 +10,6 @@ #include #include - TEST(laplace, likelihood_differentiation) { using stan::math::diff_poisson_log; using stan::math::to_vector; @@ -21,8 +20,7 @@ TEST(laplace, likelihood_differentiation) { std::vector sums = {1, 0}; Eigen::VectorXd eta_dummy; - diff_poisson_log diff_functor(to_vector(n_samples), - to_vector(sums)); + diff_poisson_log diff_functor(to_vector(n_samples), to_vector(sums)); double log_density = diff_functor.log_likelihood(theta, eta_dummy); Eigen::VectorXd gradient; Eigen::SparseMatrix hessian; @@ -38,7 +36,6 @@ TEST(laplace, likelihood_differentiation) { EXPECT_FLOAT_EQ(-2.718282, third_tensor(1)); } - TEST(laplace, likelihood_differentiation2) { // Test exposure argument using stan::math::diff_poisson_log; @@ -51,8 +48,7 @@ TEST(laplace, likelihood_differentiation2) { std::vector log_exposure = {log(0.5), log(2)}; Eigen::VectorXd eta_dummy; - diff_poisson_log diff_functor(to_vector(n_samples), - to_vector(sums), + diff_poisson_log diff_functor(to_vector(n_samples), to_vector(sums), to_vector(log_exposure)); double log_density = diff_functor.log_likelihood(theta, eta_dummy); @@ -68,14 +64,13 @@ TEST(laplace, likelihood_differentiation2) { EXPECT_FLOAT_EQ(-5.436564, hessian.coeff(1, 1)); EXPECT_FLOAT_EQ(-1.359141, third_tensor(0)); EXPECT_FLOAT_EQ(-5.436564, third_tensor(1)); - } TEST(laplace, poisson_lgm_dim2) { using stan::math::laplace_marginal_poisson_log_lpmf; - using stan::math::var; using stan::math::to_vector; using stan::math::value_of; + using stan::math::var; int dim_phi = 2; Eigen::Matrix phi(dim_phi); @@ -88,7 +83,7 @@ TEST(laplace, poisson_lgm_dim2) { int dim_x = 2; std::vector x(dim_theta); Eigen::VectorXd x_0(2); - x_0 << 0.05100797, 0.16086164; + x_0 << 0.05100797, 0.16086164; Eigen::VectorXd x_1(2); x_1 << -0.59823393, 0.98701425; x[0] = x_0; @@ -101,15 +96,14 @@ TEST(laplace, poisson_lgm_dim2) { std::vector sums = {1, 0}; stan::math::test::squared_kernel_functor K; - var target - = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi, x, delta, - delta_int, theta_0); + var target = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi, x, + delta, delta_int, theta_0); // Test with exposure argument Eigen::VectorXd ye(2); ye << 1, 1; - target = laplace_marginal_poisson_log_lpmf(sums, n_samples, ye, K, phi, x, delta, - delta_int, theta_0); + target = laplace_marginal_poisson_log_lpmf(sums, n_samples, ye, K, phi, x, + delta, delta_int, theta_0); // How to test this? The best way would be to generate a few // benchmarks using gpstuff. 
@@ -120,23 +114,23 @@ TEST(laplace, poisson_lgm_dim2) { // finite diff test double diff = 1e-7; Eigen::VectorXd phi_dbl = value_of(phi); - Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl, - phi_2l = phi_dbl, phi_2u = phi_dbl; + Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl, phi_2l = phi_dbl, + phi_2u = phi_dbl; phi_1l(0) -= diff; phi_1u(0) += diff; phi_2l(1) -= diff; phi_2u(1) += diff; - double target_1u = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_1u, x, - delta, delta_int, theta_0), - target_1l = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_1l, x, - delta, delta_int, theta_0), - target_2u = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_2u, x, - delta, delta_int, theta_0), - target_2l = laplace_marginal_poisson_log_lpmf(sums, n_samples, K, phi_2l, x, - delta, delta_int, theta_0); + double target_1u = laplace_marginal_poisson_log_lpmf( + sums, n_samples, K, phi_1u, x, delta, delta_int, theta_0), + target_1l = laplace_marginal_poisson_log_lpmf( + sums, n_samples, K, phi_1l, x, delta, delta_int, theta_0), + target_2u = laplace_marginal_poisson_log_lpmf( + sums, n_samples, K, phi_2u, x, delta, delta_int, theta_0), + target_2l = laplace_marginal_poisson_log_lpmf( + sums, n_samples, K, phi_2l, x, delta, delta_int, theta_0); - std::vectorg_finite(dim_phi); + std::vector g_finite(dim_phi); g_finite[0] = (target_1u - target_1l) / (2 * diff); g_finite[1] = (target_2u - target_2l) / (2 * diff); diff --git a/test/unit/math/laplace/laplace_marginal_student_t_test.cpp b/test/unit/math/laplace/laplace_marginal_student_t_test.cpp old mode 100755 new mode 100644 index 6a6b482cd71..a930d9a007a --- a/test/unit/math/laplace/laplace_marginal_student_t_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_student_t_test.cpp @@ -36,9 +36,6 @@ TEST(laplace, likelihood_differentiation) { // benchmark against R EXPECT_NEAR(-7.375673, log_density, test_tolerance); - - - // diff_logistic_log diff_functor(n_samples, y); // double log_density = diff_functor.log_likelihood(theta); // Eigen::VectorXd gradient, hessian; diff --git a/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp index 97667eaff55..ee21d306b75 100644 --- a/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp +++ b/test/unit/math/laplace/laplace_poisson_log_rng_test.cpp @@ -12,30 +12,27 @@ #include struct stationary_point { - template - inline Eigen::Matrix::type, - Eigen::Dynamic, 1> - operator() (const Eigen::Matrix& theta, - const Eigen::Matrix& parms, - const std::vector& dat, - const std::vector& dat_int, - std::ostream* pstream__ = 0) const { - Eigen::Matrix::type, - Eigen::Dynamic, 1> z(2); + template + inline Eigen::Matrix::type, Eigen::Dynamic, + 1> + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& parms, + const std::vector& dat, const std::vector& dat_int, + std::ostream* pstream__ = 0) const { + Eigen::Matrix::type, Eigen::Dynamic, 1> + z(2); z(0) = 1 - exp(theta(0)) - theta(0) / (parms(0) * parms(0)); - z(1) = - exp(theta(1)) - theta(1) / (parms(1) * parms(1)); + z(1) = -exp(theta(1)) - theta(1) / (parms(1) * parms(1)); return z; } }; struct diagonal_kernel_functor { template - Eigen::Matrix - operator() (const Eigen::Matrix& phi, - const T2& x, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* msgs = nullptr) const { + Eigen::Matrix operator()( + const Eigen::Matrix& phi, const T2& x, + const std::vector& delta, const std::vector& delta_int, + std::ostream* msgs = nullptr) const { 
Eigen::Matrix K(2, 2); K(0, 0) = phi(0) * phi(0); K(1, 1) = phi(1) * phi(1); @@ -47,17 +44,16 @@ struct diagonal_kernel_functor { TEST(laplace, basic_rng) { using stan::math::algebra_solver; - using stan::math::diff_poisson_log; - using stan::math::to_vector; using stan::math::diag_matrix; - using stan::math::laplace_rng; - using stan::math::laplace_poisson_log_rng; - using stan::math::value_of; - using stan::math::mdivide_left_tri; using stan::math::diag_pre_multiply; + using stan::math::diff_poisson_log; using stan::math::inv; + using stan::math::laplace_poisson_log_rng; + using stan::math::laplace_rng; + using stan::math::mdivide_left_tri; using stan::math::square; - + using stan::math::to_vector; + using stan::math::value_of; Eigen::VectorXd theta_0(2); theta_0 << 1, 1; @@ -66,30 +62,26 @@ TEST(laplace, basic_rng) { std::vector n_samples = {1, 1}; std::vector sums = {1, 0}; - diff_poisson_log diff_likelihood(to_vector(n_samples), - to_vector(sums)); + diff_poisson_log diff_likelihood(to_vector(n_samples), to_vector(sums)); std::vector d0; std::vector di0; - // Method 1: brute force and straightforward Eigen::VectorXd theta_root - = algebra_solver(stationary_point(), - theta_0, sigma, d0, di0); + = algebra_solver(stationary_point(), theta_0, sigma, d0, di0); Eigen::VectorXd gradient; Eigen::SparseMatrix W_sparse; Eigen::VectorXd eta_dummy; diff_likelihood.diff(theta_root, eta_dummy, gradient, W_sparse); - Eigen::MatrixXd W = - W_sparse; + Eigen::MatrixXd W = -W_sparse; diagonal_kernel_functor covariance_function; std::vector x_dummy; Eigen::MatrixXd K = covariance_function(sigma, x_dummy, d0, di0, 0); - std::cout << "K (brute force): " - << std::endl - << (K.inverse() + W).inverse() - << std::endl << std::endl; + std::cout << "K (brute force): " << std::endl + << (K.inverse() + W).inverse() << std::endl + << std::endl; // Method 2: Vectorized R&W method double tolerance = 1e-6; @@ -105,38 +97,33 @@ TEST(laplace, basic_rng) { Eigen::VectorXd a; Eigen::VectorXd l_grad; Eigen::PartialPivLU LU_dummy; - double marginal_density - = laplace_marginal_density(diff_likelihood, - covariance_function, - sigma, eta_dummy, x_dummy, d0, di0, - covariance, theta, W_r, L, a, l_grad, - LU_dummy, - K_root, - theta0_val, 0, - tolerance, max_num_steps); + double marginal_density = laplace_marginal_density( + diff_likelihood, covariance_function, sigma, eta_dummy, x_dummy, d0, + di0, covariance, theta, W_r, L, a, l_grad, LU_dummy, K_root, theta0_val, + 0, tolerance, max_num_steps); } Eigen::MatrixXd V; Eigen::VectorXd W_root(theta.size()); - for (int i = 0; i < theta.size(); i++) W_root(i) = W_r.coeff(i, i); - V = mdivide_left_tri(L, - diag_pre_multiply(W_root, covariance)); + for (int i = 0; i < theta.size(); i++) + W_root(i) = W_r.coeff(i, i); + V = mdivide_left_tri(L, diag_pre_multiply(W_root, covariance)); std::cout << "K (method 1): " << std::endl << covariance - V.transpose() * V << std::endl << std::endl; // Method 3: Modified R&W method Eigen::VectorXd W_root_inv = inv(W_root); - Eigen::MatrixXd V_dec = mdivide_left_tri(L, - diag_matrix(W_root_inv)); + Eigen::MatrixXd V_dec + = mdivide_left_tri(L, diag_matrix(W_root_inv)); std::cout << "K (method 2): " << std::endl - << - V_dec.transpose() * V_dec + diag_matrix(square(W_root_inv)) - << std::endl << std::endl; - + << -V_dec.transpose() * V_dec + diag_matrix(square(W_root_inv)) + << std::endl + << std::endl; // Call to rng function boost::random::mt19937 rng; Eigen::MatrixXd theta_pred - = laplace_poisson_log_rng(sums, n_samples, covariance_function, - 
sigma, x_dummy, d0, di0, theta_0, rng); + = laplace_poisson_log_rng(sums, n_samples, covariance_function, sigma, + x_dummy, d0, di0, theta_0, rng); } diff --git a/test/unit/math/laplace/laplace_skim_test.cpp b/test/unit/math/laplace/laplace_skim_test.cpp old mode 100755 new mode 100644 index 5db65d2cd88..fd6078f33aa --- a/test/unit/math/laplace/laplace_skim_test.cpp +++ b/test/unit/math/laplace/laplace_skim_test.cpp @@ -15,15 +15,14 @@ struct K_functor { template - Eigen::Matrix - operator()(const Eigen::Matrix& parm, - const std::vector& x_tot, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* pstream) const { + Eigen::Matrix operator()(const Eigen::Matrix& parm, + const std::vector& x_tot, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* pstream) const { using stan::math::add; - using stan::math::multiply; using stan::math::diag_post_multiply; + using stan::math::multiply; using stan::math::square; using stan::math::transpose; @@ -31,7 +30,8 @@ struct K_functor { int M = delta_int[1]; Eigen::Matrix lambda_tilde(M); - for (int m = 0; m < M; m++) lambda_tilde[m] = parm[m]; + for (int m = 0; m < M; m++) + lambda_tilde[m] = parm[m]; T eta = parm[M]; T alpha = parm[M + 1]; @@ -56,19 +56,20 @@ struct K_functor { X2(n, m) = x_tot[N + n](m); } - Eigen::Matrix - K1 = multiply(diag_post_multiply(X, lambda_tilde), transpose(X)); - Eigen::Matrix - K2 = multiply(diag_post_multiply(X2, lambda_tilde), transpose(X2)); + Eigen::Matrix K1 + = multiply(diag_post_multiply(X, lambda_tilde), transpose(X)); + Eigen::Matrix K2 + = multiply(diag_post_multiply(X2, lambda_tilde), transpose(X2)); Eigen::Matrix K; - K = square(eta) * square(add(K1, 1)) + - (square(alpha) - 0.5 * square(eta)) * K2 + - (square(phi) - square(eta)) * K1; + K = square(eta) * square(add(K1, 1)) + + (square(alpha) - 0.5 * square(eta)) * K2 + + (square(phi) - square(eta)) * K1; K = add(0.5 + square(psi) - 0.5 * square(eta), K); // Add jitter to make linear algebra more numerically stable - for (int n = 0; n < N; n++) K(n, n) += square(sigma) + 1e-7; + for (int n = 0; n < N; n++) + K(n, n) += square(sigma) + 1e-7; return K; } }; @@ -76,15 +77,14 @@ struct K_functor { // Overload structure for case where x is passed as a matrix. 
struct K_functor2 { template - Eigen::Matrix - operator()(const Eigen::Matrix& parm, - const Eigen::MatrixXd& x_tot, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* pstream) const { + Eigen::Matrix operator()(const Eigen::Matrix& parm, + const Eigen::MatrixXd& x_tot, + const std::vector& delta, + const std::vector& delta_int, + std::ostream* pstream) const { using stan::math::add; - using stan::math::multiply; using stan::math::diag_post_multiply; + using stan::math::multiply; using stan::math::square; using stan::math::transpose; @@ -92,7 +92,8 @@ struct K_functor2 { int M = delta_int[1]; Eigen::Matrix lambda_tilde(M); - for (int m = 0; m < M; m++) lambda_tilde[m] = parm[m]; + for (int m = 0; m < M; m++) + lambda_tilde[m] = parm[m]; T eta = parm[M]; T alpha = parm[M + 1]; @@ -103,31 +104,31 @@ struct K_functor2 { Eigen::MatrixXd X = x_tot.block(0, 0, N, M); Eigen::MatrixXd X2 = x_tot.block(N, 0, N, M); - Eigen::Matrix - K1 = multiply(diag_post_multiply(X, lambda_tilde), transpose(X)); - Eigen::Matrix - K2 = multiply(diag_post_multiply(X2, lambda_tilde), transpose(X2)); + Eigen::Matrix K1 + = multiply(diag_post_multiply(X, lambda_tilde), transpose(X)); + Eigen::Matrix K2 + = multiply(diag_post_multiply(X2, lambda_tilde), transpose(X2)); Eigen::Matrix K; - K = square(eta) * square(add(K1, 1)) + - (square(alpha) - 0.5 * square(eta)) * K2 + - (square(phi) - square(eta)) * K1; + K = square(eta) * square(add(K1, 1)) + + (square(alpha) - 0.5 * square(eta)) * K2 + + (square(phi) - square(eta)) * K1; K = add(0.5 + square(psi) - 0.5 * square(eta), K); // Add jitter to make linear algebra more numerically stable - for (int n = 0; n < N; n++) K(n, n) += square(sigma) + 1e-7; + for (int n = 0; n < N; n++) + K(n, n) += square(sigma) + 1e-7; return K; } }; -class laplace_skim_test : public::testing::Test { -protected: +class laplace_skim_test : public ::testing::Test { + protected: void SetUp() override { + using stan::math::add; + using stan::math::elt_divide; using stan::math::square; using stan::math::var; - using stan::math::square; - using stan::math::elt_divide; - using stan::math::add; N = 100; M = 2; // options: 2, 50, 100, 150, 200 @@ -142,11 +143,11 @@ class laplace_skim_test : public::testing::Test { stan::math::test::read_in_data(M, N, data_directory, X, y, lambda); - if (false){ + if (false) { std::cout << X << std::endl << "-----" << std::endl; std::cout << lambda.transpose() << std::endl << "------" << std::endl; std::cout << y[0] << " " << y[1] << " " << std::endl - << "------" << std::endl; + << "------" << std::endl; } alpha_base = 0; @@ -184,8 +185,9 @@ class laplace_skim_test : public::testing::Test { eta = square(phi) / c2 * eta_base; alpha = square(phi) / c2 * alpha_base; - lambda_tilde = c2 * elt_divide(square(lambda), - add(c2, multiply(square(phi), square(lambda)))); + lambda_tilde = c2 + * elt_divide(square(lambda), + add(c2, multiply(square(phi), square(lambda)))); parm.resize(M + 4); parm.head(M) = lambda_tilde; @@ -211,31 +213,27 @@ class laplace_skim_test : public::testing::Test { Eigen::MatrixXd X2; Eigen::MatrixXd x_tot_m; - stan::math::var c2_tilde, tau_tilde, sigma, eta_base, - phi, c2, eta, alpha; + stan::math::var c2_tilde, tau_tilde, sigma, eta_base, phi, c2, eta, alpha; Eigen::Matrix lambda_tilde; Eigen::Matrix parm; }; - TEST_F(laplace_skim_test, lk_analytical) { - using stan::math::var; using stan::math::laplace_marginal_bernoulli_logit_lpmf; - + using stan::math::var; auto start = std::chrono::system_clock::now(); - var 
marginal_density - = laplace_marginal_bernoulli_logit_lpmf(y, n_samples, K_functor2(), - parm, x_tot_m, delta, delta_int, - theta_0); + var marginal_density = laplace_marginal_bernoulli_logit_lpmf( + y, n_samples, K_functor2(), parm, x_tot_m, delta, delta_int, theta_0); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; std::vector g; std::vector parm_vec(M + 4); - for (int m = 0; m < M + 4; m++) parm_vec[m] = parm(m); + for (int m = 0; m < M + 4; m++) + parm_vec[m] = parm(m); marginal_density.grad(parm_vec, g); // std::cout << parm << std::endl; @@ -245,53 +243,53 @@ TEST_F(laplace_skim_test, lk_analytical) { << "M: " << M << std::endl << "density: " << marginal_density << std::endl << "autodiff grad: "; - for (size_t i = 0; i < 10; i++) std::cout << g[i] << " "; + for (size_t i = 0; i < 10; i++) + std::cout << g[i] << " "; std::cout << std::endl << "total time: " << elapsed_time.count() << std::endl << std::endl; } struct bernoulli_logit_likelihood { - template - stan::return_type_t - operator()(const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - const Eigen::VectorXd& sums, // sums - const std::vector& n_samples, // n_samples - std::ostream* pstream) const { + template + stan::return_type_t operator()( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta, + const Eigen::VectorXd& sums, // sums + const std::vector& n_samples, // n_samples + std::ostream* pstream) const { using stan::math::to_vector; - stan::math::diff_bernoulli_logit - diff_functor(to_vector(n_samples), sums); + stan::math::diff_bernoulli_logit diff_functor(to_vector(n_samples), sums); return diff_functor.log_likelihood(theta, eta); } }; - TEST_F(laplace_skim_test, lk_autodiff) { - using stan::math::var; - using stan::math::laplace_marginal_density; using stan::math::diff_likelihood; + using stan::math::laplace_marginal_density; using stan::math::to_vector; using stan::math::value_of; + using stan::math::var; bernoulli_logit_likelihood f; - diff_likelihood - diff_functor(f, to_vector(y), n_samples); + diff_likelihood diff_functor(f, to_vector(y), + n_samples); auto start = std::chrono::system_clock::now(); Eigen::Matrix eta_dummy; var marginal_density - = laplace_marginal_density(diff_functor, K_functor2(), parm, eta_dummy, - x_tot_m, delta, delta_int, theta_0); + = laplace_marginal_density(diff_functor, K_functor2(), parm, eta_dummy, + x_tot_m, delta, delta_int, theta_0); auto end = std::chrono::system_clock::now(); std::chrono::duration elapsed_time = end - start; std::vector g; std::vector parm_vec(M + 4); - for (int m = 0; m < M + 4; m++) parm_vec[m] = parm(m); + for (int m = 0; m < M + 4; m++) + parm_vec[m] = parm(m); marginal_density.grad(parm_vec, g); // Expected density: - 10.9795 @@ -299,7 +297,8 @@ TEST_F(laplace_skim_test, lk_autodiff) { << "M: " << M << std::endl << "density: " << marginal_density << std::endl << "autodiff grad: "; - for (size_t i = 0; i < 10; i++) std::cout << g[i] << " "; + for (size_t i = 0; i < 10; i++) + std::cout << g[i] << " "; std::cout << std::endl << "total time: " << elapsed_time.count() << std::endl << std::endl; diff --git a/test/unit/math/laplace/laplace_utility.hpp b/test/unit/math/laplace/laplace_utility.hpp index be5297aa3bd..854b7bafe1c 100644 --- a/test/unit/math/laplace/laplace_utility.hpp +++ b/test/unit/math/laplace/laplace_utility.hpp @@ -16,9 +16,9 @@ namespace test { // Function to construct spatial covariance matrix. 
template -Eigen::Matrix -covariance (Eigen::Matrix phi, int M, - bool space_matters = false) { +Eigen::Matrix covariance( + Eigen::Matrix phi, int M, + bool space_matters = false) { using std::pow; T sigma = phi[0]; T rho = phi[1]; @@ -28,7 +28,11 @@ covariance (Eigen::Matrix phi, int M, for (int i = 0; i < M; i++) { for (int j = 0; j < i; j++) { - if (space_matters) {exponent = i - j;} else {exponent = 1;} + if (space_matters) { + exponent = i - j; + } else { + exponent = 1; + } Sigma(i, j) = pow(rho, exponent) * sigma; Sigma(j, i) = Sigma(i, j); } @@ -40,12 +44,11 @@ covariance (Eigen::Matrix phi, int M, struct spatial_covariance { template - Eigen::Matrix::type, - Eigen::Dynamic, Eigen::Dynamic> - operator() (const Eigen::Matrix& phi, - const std::vector>& x, - int M = 0) const { + Eigen::Matrix::type, Eigen::Dynamic, + Eigen::Dynamic> + operator()(const Eigen::Matrix& phi, + const std::vector>& x, + int M = 0) const { typedef typename stan::return_type::type scalar; int space_matters = true; using std::pow; @@ -57,7 +60,11 @@ struct spatial_covariance { for (int i = 0; i < M; i++) { for (int j = 0; j < i; j++) { - if (space_matters) {exponent = i - j;} else {exponent = 1;} + if (space_matters) { + exponent = i - j; + } else { + exponent = 1; + } Sigma(i, j) = pow(rho, exponent) * sigma; Sigma(j, i) = Sigma(i, j); } @@ -69,14 +76,12 @@ struct spatial_covariance { struct squared_kernel_functor { template - Eigen::Matrix - operator() (const Eigen::Matrix& phi, - const T2& x, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* msgs = nullptr) const { + Eigen::Matrix operator()( + const Eigen::Matrix& phi, const T2& x, + const std::vector& delta, const std::vector& delta_int, + std::ostream* msgs = nullptr) const { return stan::math::gp_exp_quad_cov(x, phi(0), phi(1)) - + 1e-9 * Eigen::MatrixXd::Identity(x.size(), x.size()); + + 1e-9 * Eigen::MatrixXd::Identity(x.size(), x.size()); } }; @@ -86,15 +91,13 @@ struct squared_kernel_functor { // function. struct sqr_exp_kernel_functor { template - Eigen::Matrix - operator() (const Eigen::Matrix& phi, - const T2& x, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* msgs = nullptr) const { + Eigen::Matrix operator()( + const Eigen::Matrix& phi, const T2& x, + const std::vector& delta, const std::vector& delta_int, + std::ostream* msgs = nullptr) const { double jitter = 1e-8; - Eigen::Matrix - kernel = stan::math::gp_exp_quad_cov(x, phi(0), phi(1)); + Eigen::Matrix kernel + = stan::math::gp_exp_quad_cov(x, phi(0), phi(1)); for (int i = 0; i < kernel.cols(); i++) kernel(i, i) += jitter; @@ -106,38 +109,36 @@ struct sqr_exp_kernel_functor { // precomputes the covariance matrix). 
struct inla_functor { template - inline Eigen::Matrix::type, - Eigen::Dynamic, 1> - operator() (const Eigen::Matrix& theta, - const Eigen::Matrix& parm, - const std::vector& dat, - const std::vector& dat_int, - std::ostream* pstream__ = 0) const { - using stan::math::to_vector; + inline Eigen::Matrix::type, Eigen::Dynamic, + 1> + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& parm, + const std::vector& dat, const std::vector& dat_int, + std::ostream* pstream__ = 0) const { using stan::math::head; using stan::math::tail; + using stan::math::to_vector; int n_groups = theta.size(); Eigen::VectorXd n_samples = to_vector(head(dat, n_groups)); Eigen::VectorXd sums = to_vector(tail(dat, dat.size() - n_groups)); - Eigen::Matrix - Sigma = stan::math::test::covariance(parm, n_groups, 1); + Eigen::Matrix Sigma + = stan::math::test::covariance(parm, n_groups, 1); - return sums - stan::math::elt_multiply(n_samples, stan::math::exp(theta)) - - stan::math::mdivide_left(Sigma, theta); + return sums - stan::math::elt_multiply(n_samples, stan::math::exp(theta)) + - stan::math::mdivide_left(Sigma, theta); } }; // simple case where the covariance matrix is diagonal. struct lgp_functor { template - inline Eigen::Matrix::type, - Eigen::Dynamic, 1> - operator ()(const Eigen::Matrix& theta, - const Eigen::Matrix& phi, - const std::vector& dat, - const std::vector& dat_int, - std::ostream* pstream__) const { + inline Eigen::Matrix::type, Eigen::Dynamic, + 1> + operator()(const Eigen::Matrix& theta, + const Eigen::Matrix& phi, + const std::vector& dat, const std::vector& dat_int, + std::ostream* pstream__) const { typedef typename stan::return_type::type scalar; Eigen::Matrix fgrad; int dim_theta = 2; @@ -150,9 +151,8 @@ struct lgp_functor { sums(0) = dat[2]; sums(1) = dat[3]; - return sums - stan::math::elt_multiply(n_samples, - stan::math::exp(theta)) - - theta / phi(0); + return sums - stan::math::elt_multiply(n_samples, stan::math::exp(theta)) + - theta / phi(0); } }; @@ -160,22 +160,17 @@ struct lgp_functor { // Note y and index are only required to compute the likelihood, // although it is more efficient to do this using sufficient // statistics. -void read_in_data (int dim_theta, - int n_observations, - std::string data_directory, - std::vector& y, - std::vector& index, - std::vector& sums, - std::vector& n_samples, - bool get_raw_data = false) { +void read_in_data(int dim_theta, int n_observations, std::string data_directory, + std::vector& y, std::vector& index, + std::vector& sums, std::vector& n_samples, + bool get_raw_data = false) { std::ifstream input_data; std::string dim_theta_string = std::to_string(dim_theta); std::string file_y = data_directory + "y_" + dim_theta_string + ".csv"; - std::string file_index = data_directory + "index_" + - dim_theta_string + ".csv"; + std::string file_index + = data_directory + "index_" + dim_theta_string + ".csv"; std::string file_m = data_directory + "m_" + dim_theta_string + ".csv"; - std::string file_sums = data_directory + "sums_" + - dim_theta_string + ".csv"; + std::string file_sums = data_directory + "sums_" + dim_theta_string + ".csv"; input_data.open(file_m); double buffer = 0.0; @@ -214,12 +209,9 @@ void read_in_data (int dim_theta, // Overload function to read data from Aki's experiment // using a logistic and latent Gaussian process. 
-void read_in_data (int dim_theta, - int n_observations, - std::string data_directory, - std::vector& x1, - std::vector& x2, - std::vector& y) { +void read_in_data(int dim_theta, int n_observations, std::string data_directory, + std::vector& x1, std::vector& x2, + std::vector& y) { std::ifstream input_data; std::string file_x1 = data_directory + "x1.csv"; std::string file_x2 = data_directory + "x2.csv"; @@ -252,12 +244,9 @@ void read_in_data (int dim_theta, // Overload function to read in disease mapping data. // Same as above, but in addition include an exposure term. -void read_in_data(int dim_theta, - int dim_observations, - std::string data_directory, - std::vector& x1, - std::vector& x2, - std::vector& y, +void read_in_data(int dim_theta, int dim_observations, + std::string data_directory, std::vector& x1, + std::vector& x2, std::vector& y, Eigen::VectorXd& ye) { read_in_data(dim_theta, dim_observations, data_directory, x1, x2, y); @@ -275,12 +264,9 @@ void read_in_data(int dim_theta, // Overload function to read in skim data. // The covariates have a different structure. -void read_in_data(int dim_theta, - int dim_observations, - std::string data_directory, - Eigen::MatrixXd& X, - std::vector& y, - Eigen::VectorXd& lambda) { +void read_in_data(int dim_theta, int dim_observations, + std::string data_directory, Eigen::MatrixXd& X, + std::vector& y, Eigen::VectorXd& lambda) { std::ifstream input_data; std::string file_y = data_directory + "y.csv"; std::string file_X = data_directory + "X.csv"; @@ -313,10 +299,8 @@ void read_in_data(int dim_theta, // TODO: write a more general data reader, rather than overload. // Overload function to read in gp motorcycle data. -void read_data(int dim_observations, - std::string data_directory, - std::vector& x, - Eigen::VectorXd& y) { +void read_data(int dim_observations, std::string data_directory, + std::vector& x, Eigen::VectorXd& y) { std::ifstream input_data; std::string file_y = data_directory + "y_vec.csv"; std::string file_x = data_directory + "x_vec.csv"; @@ -342,7 +326,7 @@ void read_data(int dim_observations, } } -} -} -} +} // namespace test +} // namespace math +} // namespace stan #endif diff --git a/test/unit/math/laplace/motorcycle_gp_test.cpp b/test/unit/math/laplace/motorcycle_gp_test.cpp old mode 100755 new mode 100644 index f2a9ad528e4..657baf2d557 --- a/test/unit/math/laplace/motorcycle_gp_test.cpp +++ b/test/unit/math/laplace/motorcycle_gp_test.cpp @@ -17,14 +17,12 @@ struct covariance_motorcycle_functor { template - Eigen::Matrix - operator() (const Eigen::Matrix& phi, - const T2& x, - const std::vector& delta, - const std::vector& delta_int, - std::ostream* msgs = nullptr) const { - using Eigen::Matrix; - using stan::math::gp_exp_quad_cov; + Eigen::Matrix operator()( + const Eigen::Matrix& phi, const T2& x, + const std::vector& delta, const std::vector& delta_int, + std::ostream* msgs = nullptr) const { + using Eigen::Matrix; + using stan::math::gp_exp_quad_cov; T1 length_scale_f = phi(0); T1 length_scale_g = phi(1); @@ -33,7 +31,8 @@ struct covariance_motorcycle_functor { int n_obs = delta_int[0]; std::cout << "x: "; - for (int i = 0; i < 5; i++) std::cout << x[i] << " "; + for (int i = 0; i < 5; i++) + std::cout << x[i] << " "; std::cout << std::endl; double jitter = 1e-6; @@ -43,8 +42,7 @@ struct covariance_motorcycle_functor { std::cout << "K_f: " << kernel_f.row(0).head(5) << std::endl; std::cout << "K_g: " << kernel_g.row(0).head(5) << std::endl; - Matrix kernel_all - = Eigen::MatrixXd::Zero(2 * n_obs, 2 * n_obs); + Matrix 
kernel_all = Eigen::MatrixXd::Zero(2 * n_obs, 2 * n_obs); for (int i = 0; i < n_obs; i++) { for (int j = 0; j <= i; j++) { kernel_all(2 * i, 2 * j) = kernel_f(i, j); @@ -56,20 +54,19 @@ struct covariance_motorcycle_functor { } } - for (int i = 0; i < 2 * n_obs; i++) kernel_all(i, i) += jitter; + for (int i = 0; i < 2 * n_obs; i++) + kernel_all(i, i) += jitter; return kernel_all; } }; struct normal_likelihood { - template - stan::return_type_t - operator()(const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - const Eigen::VectorXd& y, - const std::vector& delta_int, - std::ostream* pstream) const { + template + stan::return_type_t operator()( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta, const Eigen::VectorXd& y, + const std::vector& delta_int, std::ostream* pstream) const { int n_obs = delta_int[0]; Eigen::Matrix mu(n_obs); Eigen::Matrix sigma(n_obs); @@ -84,13 +81,11 @@ struct normal_likelihood { // include a global variance (passed through eta) struct normal_likelihood2 { - template - stan::return_type_t - operator()(const Eigen::Matrix& theta, - const Eigen::Matrix& eta, - const Eigen::VectorXd& y, - const std::vector& delta_int, - std::ostream* pstream) const { + template + stan::return_type_t operator()( + const Eigen::Matrix& theta, + const Eigen::Matrix& eta, const Eigen::VectorXd& y, + const std::vector& delta_int, std::ostream* pstream) const { using stan::math::multiply; int n_obs = delta_int[0]; Eigen::Matrix mu(n_obs); @@ -106,26 +101,27 @@ struct normal_likelihood2 { } }; -class laplace_motorcyle_gp_test : public::testing::Test { -protected: +class laplace_motorcyle_gp_test : public ::testing::Test { + protected: void SetUp() override { - using stan::math::value_of; using stan::math::gp_exp_quad_cov; + using stan::math::value_of; if (false) { n_obs = 6; Eigen::VectorXd x_vec(n_obs); x_vec << 2.4, 2.6, 3.2, 3.6, 4.0, 6.2; x.resize(n_obs); - for (int i = 0; i < n_obs; i++) x[i] = x_vec(i); + for (int i = 0; i < n_obs; i++) + x[i] = x_vec(i); y.resize(n_obs); - y << 0.0, -1.3, -2.7, 0.0, -2.7, -2.7; + y << 0.0, -1.3, -2.7, 0.0, -2.7, -2.7; } if (true) { n_obs = 133; - stan::math::test::read_data(n_obs, "test/unit/math/laplace/motorcycle_gp/", - x, y); + stan::math::test::read_data( + n_obs, "test/unit/math/laplace/motorcycle_gp/", x, y); // std::cout << "x: "; // for (int i = 0; i < n_obs; i++) std::cout << x[i] << " "; // std::cout << " ..." << std::endl; @@ -135,8 +131,8 @@ class laplace_motorcyle_gp_test : public::testing::Test { // [0.335852,0.433641,0.335354,0.323559] length_scale_f = 0.3; // 0.335852; // 0.3; length_scale_g = 0.5; // 0.433641; // 0.5; - sigma_f = 0.25; // 0.335354; // 0.25; - sigma_g = 0.25; // 0.323559; // 0.25; + sigma_f = 0.25; // 0.335354; // 0.25; + sigma_g = 0.25; // 0.323559; // 0.25; phi.resize(4); phi << length_scale_f, length_scale_g, sigma_f, sigma_g; @@ -148,12 +144,11 @@ class laplace_motorcyle_gp_test : public::testing::Test { // theta0 << -10, 0, -10, 0, -10, 0, -10, // 0, -10, 0, -10, 0; - Eigen::MatrixXd - K_plus_I = gp_exp_quad_cov(x, value_of(sigma_f), value_of(length_scale_f)) - + Eigen::MatrixXd::Identity(n_obs, n_obs); + Eigen::MatrixXd K_plus_I + = gp_exp_quad_cov(x, value_of(sigma_f), value_of(length_scale_f)) + + Eigen::MatrixXd::Identity(n_obs, n_obs); - Eigen::VectorXd mu_hat - = K_plus_I.colPivHouseholderQr().solve(y); + Eigen::VectorXd mu_hat = K_plus_I.colPivHouseholderQr().solve(y); // Remark: finds optimal point with or without informed initial guess. 
for (int i = 0; i < n_obs; i++) { @@ -182,15 +177,14 @@ class laplace_motorcyle_gp_test : public::testing::Test { }; TEST_F(laplace_motorcyle_gp_test, lk_autodiff) { - using stan::math::var; - using stan::math::value_of; - using stan::math::laplace_marginal_density; using stan::math::diff_likelihood; + using stan::math::laplace_marginal_density; + using stan::math::value_of; + using stan::math::var; covariance_motorcycle_functor K_f; Eigen::VectorXd phi_dbl_ = value_of(phi); - Eigen::MatrixXd K_eval - = K_f(phi_dbl_, x, delta_dummy, delta_int, 0); + Eigen::MatrixXd K_eval = K_f(phi_dbl_, x, delta_dummy, delta_int, 0); std::cout << "K_eval: " << K_eval.row(0).head(5) << std::endl; normal_likelihood f; diff --git a/test/unit/math/laplace/sparse_matrix_test.cpp b/test/unit/math/laplace/sparse_matrix_test.cpp old mode 100755 new mode 100644 index bf815516064..59840fa9e8e --- a/test/unit/math/laplace/sparse_matrix_test.cpp +++ b/test/unit/math/laplace/sparse_matrix_test.cpp @@ -10,12 +10,11 @@ #include #include - TEST(sparse_matrix, eigen_example) { typedef Eigen::Triplet trp; + using Eigen::MatrixXd; using Eigen::SparseMatrix; using Eigen::VectorXi; - using Eigen::MatrixXd; int m = 2; // size of each block @@ -55,10 +54,7 @@ TEST(sparse_matrix, eigen_example) { std::cout << "A * B: " << A * B << std::endl; MatrixXd C(4, 4); - C << 1, 3, 4, 5, - 3, 4, 4, 1, - 8, 1, 0, 12, - 3, 4, 5, 1; + C << 1, 3, 4, 5, 3, 4, 4, 1, 8, 1, 0, 12, 3, 4, 5, 1; std::cout << "A * C: " << A * C << std::endl; @@ -68,7 +64,6 @@ TEST(sparse_matrix, eigen_example) { std::cout << "Check we recover A: " << sqrt_A * sqrt_A << std::endl; - SparseMatrix D(4, 4); std::cout << "sqrt(D): " << stan::math::block_matrix_sqrt(D, 2) << std::endl; @@ -87,8 +82,8 @@ TEST(LU_decomposition, eigen_example) { Matrix5x5 m = Matrix5x5::Random(); cout << "Here is the matrix m:" << endl << m << endl; Eigen::FullPivLU lu(m); - cout << "Here is, up to permutations, its LU decomposition matrix:" - << endl << lu.matrixLU() << endl; + cout << "Here is, up to permutations, its LU decomposition matrix:" << endl + << lu.matrixLU() << endl; cout << "Here is the L part:" << endl; // Matrix5x5 l = Matrix5x5::Identity(); // l.block<5,3>(0,0).triangularView() = lu.matrixLU(); @@ -110,7 +105,6 @@ TEST(LU_decomposition, eigen_example) { L.triangularView() = lu.permutationP().inverse() * l; std::cout << "L: " << L << std::endl; - Eigen::MatrixXd U(5, 5); U = u * lu.permutationQ().inverse(); std::cout << "U: " << U << std::endl; @@ -133,9 +127,7 @@ TEST(LU_decomposition, eigen_example_2) { std::cout << "LU determinant: " << LU.determinant() << std::endl; std::cout << "A determinant: " << A.determinant() << std::endl; - std::cout << "A.solve(B): " << std::endl - << LU.solve(B) << std::endl; + std::cout << "A.solve(B): " << std::endl << LU.solve(B) << std::endl; - std::cout << "Check solution: " << std::endl - << A * LU.solve(B) << std::endl; + std::cout << "Check solution: " << std::endl << A * LU.solve(B) << std::endl; } From c1cf1b978faca07aab2db9629cee693451884be6 Mon Sep 17 00:00:00 2001 From: Steve Bronder Date: Fri, 1 Oct 2021 15:26:52 -0400 Subject: [PATCH 51/53] cleanup more of neg_binomial_2 tests --- .../laplace_likelihood_neg_binomial_2_log.hpp | 30 ++++++- ...place_marginal_neg_binomial_2_log_test.cpp | 81 +++++++++++-------- 2 files changed, 77 insertions(+), 34 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp index 2f5029fc98c..1dc08483615 100644 --- 
a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp +++ b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp @@ -56,10 +56,11 @@ struct diff_neg_binomial_2_log { const Eigen::Matrix& theta, const Eigen::Matrix& eta, Eigen::Matrix, Eigen::Dynamic, 1>& gradient, - Eigen::Matrix, Eigen::Dynamic, 1>& hessian, + Eigen::SparseMatrix& hessian, int hessian_block_size = 1) const { typedef return_type_t scalar; Eigen::VectorXd one = rep_vector(1, theta.size()); + int theta_size = theta.size(); T_eta eta_scalar = eta(0); Eigen::Matrix sums_plus_n_eta = sums_ + eta_scalar * n_samples_; @@ -68,10 +69,19 @@ struct diff_neg_binomial_2_log { Eigen::Matrix one_plus_exp = one + eta_scalar * exp_neg_theta; gradient = sums_ - elt_divide(sums_plus_n_eta, one_plus_exp); - + Eigen::MatrixXd hessian_val = eta_scalar + * sums_plus_n_eta.cwiseProduct( + elt_divide(exp_neg_theta, square(one_plus_exp))); + hessian.resize(theta_size, theta_size); + hessian.reserve(Eigen::VectorXi::Constant(theta_size, hessian_block_size)); + // hessian.col(0) = - common_term; + for (int i = 0; i < theta_size; i++) + hessian.insert(i, i) = -hessian_val(i); +/* hessian = -eta_scalar * sums_plus_n_eta.cwiseProduct( elt_divide(exp_neg_theta, square(one_plus_exp))); +*/ } template @@ -157,6 +167,22 @@ struct diff_neg_binomial_2_log { return diff_matrix; } + Eigen::VectorXd compute_s2(const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta, + const Eigen::MatrixXd& A, + int hessian_block_size) const { + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!" << std::endl; + Eigen::MatrixXd void_matrix; + return void_matrix; + } + + Eigen::VectorXd diff_eta_implicit(const Eigen::VectorXd& v, + const Eigen::VectorXd& theta, + const Eigen::VectorXd& eta) const { + std::cout << "THIS FUNCTION SHOULD NEVER GET CALLED!"
<< std::endl; + Eigen::MatrixXd void_matrix; + return void_matrix; + } }; } // namespace math diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index 75fb0187be1..6cb8149f16f 100644 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -1,6 +1,7 @@ #include //#include #include +#include #include #include #include @@ -33,7 +34,8 @@ TEST(laplace, likelihood_differentiation) { // benchmark against R EXPECT_FLOAT_EQ(-3.023328, log_density); - Eigen::VectorXd gradient, hessian; + Eigen::VectorXd gradient; + Eigen::SparseMatrix hessian; diff_functor.diff(theta, eta, gradient, hessian); Eigen::VectorXd third_diff = diff_functor.third_diff(theta, eta); @@ -57,8 +59,7 @@ TEST(laplace, likelihood_differentiation) { / (2 * epsilon); Eigen::VectorXd gradient_l0, gradient_u0, gradient_l1, gradient_u1; - Eigen::VectorXd hessian_l0, hessian_u0, hessian_l1, hessian_u1; - Eigen::VectorXd hessian_dummy; + Eigen::SparseMatrix hessian_l0, hessian_u0, hessian_l1, hessian_u1; diff_functor.diff(theta_l0, eta, gradient_l0, hessian_l0); diff_functor.diff(theta_u0, eta, gradient_u0, hessian_u0); diff_functor.diff(theta_l1, eta, gradient_l1, hessian_l1); @@ -69,8 +70,8 @@ TEST(laplace, likelihood_differentiation) { finite_hessian(1) = (gradient_u1 - gradient_l1)(1) / (2 * epsilon); Eigen::VectorXd finite_third_diff(2); - finite_third_diff(0) = (hessian_u0 - hessian_l0)(0) / (2 * epsilon); - finite_third_diff(1) = (hessian_u1 - hessian_l1)(1) / (2 * epsilon); + finite_third_diff(0) = (hessian_u0 - hessian_l0).eval().coeff(0, 0) / (2 * epsilon); + finite_third_diff(1) = (hessian_u1 - hessian_l1).eval().coeff(1, 1) / (2 * epsilon); std::cout << "gradient: " << gradient << std::endl; std::cout << "hessian: " << hessian << std::endl; @@ -78,8 +79,8 @@ TEST(laplace, likelihood_differentiation) { EXPECT_FLOAT_EQ(finite_gradient(0), gradient(0)); EXPECT_FLOAT_EQ(finite_gradient(1), gradient(1)); - EXPECT_FLOAT_EQ(finite_hessian(0), hessian(0)); - EXPECT_FLOAT_EQ(finite_hessian(1), hessian(1)); + EXPECT_FLOAT_EQ(finite_hessian(0), hessian.coeff(0, 0)); + EXPECT_FLOAT_EQ(finite_hessian(1), hessian.coeff(1, 1)); EXPECT_FLOAT_EQ(finite_third_diff(0), third_diff(0)); EXPECT_FLOAT_EQ(finite_third_diff(1), third_diff(1)); @@ -99,7 +100,8 @@ TEST(laplace, likelihood_differentiation) { Eigen::MatrixXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta); - Eigen::VectorXd gradient_theta_l, gradient_theta_u, hessian_theta_u, + Eigen::VectorXd gradient_theta_l, gradient_theta_u; + Eigen::SparseMatrix hessian_theta_u, hessian_theta_l; diff_functor.diff(theta, eta_l, gradient_theta_l, hessian_theta_l); @@ -108,15 +110,19 @@ TEST(laplace, likelihood_differentiation) { = (gradient_theta_u - gradient_theta_l) / (2 * epsilon); std::cout << "diff_theta_eta: " << diff_theta_eta.transpose() << std::endl; - + std::cout << "finite_gradient_theta_eta: " << finite_gradient_theta_eta.transpose() << std::endl; + Eigen::VectorXd diff_theta_eta1 = diff_theta_eta.col(0); + EXPECT_MATRIX_FLOAT_EQ(finite_gradient_theta_eta, diff_theta_eta1); + /* EXPECT_FLOAT_EQ(finite_gradient_theta_eta(0), diff_theta_eta(0, 0)); + std::cout << "Got Here-1"; EXPECT_FLOAT_EQ(finite_gradient_theta_eta(1), diff_theta_eta(1, 0)); - +*/ // Eigen::VectorXd W_root = (-hessian).cwiseSqrt(); Eigen::MatrixXd diff2_theta_eta = diff_functor.diff2_theta_eta(theta, eta); 
Eigen::VectorXd finite_hessian_theta_eta - = (hessian_theta_u - hessian_theta_l) / (2 * epsilon); + = (hessian_theta_u.diagonal() - hessian_theta_l.diagonal()) / (2 * epsilon); std::cout << "diff2_theta_eta: " << diff2_theta_eta.transpose() << std::endl; @@ -137,7 +143,7 @@ Eigen::MatrixXd compute_B(const Eigen::VectorXd& theta, return Eigen::MatrixXd::Identity(group_size, group_size) + stan::math::quad_form_diag(covariance, W_root); } -/* + TEST(laplace, neg_binomial_2_log_dbl) { using stan::math::to_vector; using stan::math::diff_neg_binomial_2_log; @@ -152,12 +158,12 @@ TEST(laplace, neg_binomial_2_log_dbl) { phi << 1.6, 0.45; eta << 1; theta_0 << 0, 0; - std::vector x(dim_theta); - Eigen::VectorXd x_0(2), x_1(2); + Eigen::VectorXd x_0(2); x_0 << 0.05100797, 0.16086164; - x_1 << -0.59823393, 0.98701425; x[0] = x_0; + Eigen::VectorXd x_1(2); + x_1 << -0.59823393, 0.98701425; x[1] = x_1; std::vector delta; @@ -169,15 +175,19 @@ TEST(laplace, neg_binomial_2_log_dbl) { diff_neg_binomial_2_log diff_functor(y, y_index, dim_theta); stan::math::test::sqr_exp_kernel_functor K; + std::cout << "here1" << std::endl; double log_p = laplace_marginal_density(diff_functor, K, phi, eta, x, delta, delta_int, theta_0); + std::cout << "here2" << std::endl; Eigen::Matrix phi_v = phi, eta_v = eta; + std::cout << "here3" << std::endl; var target = laplace_marginal_density(diff_functor, K, phi_v, eta_v, x, delta, delta_int, theta_0); + std::cout << "here4" << std::endl; std::vector g; std::vector parm_vec{phi_v(0), phi_v(1), eta_v(0)}; target.grad(parm_vec, g); @@ -194,24 +204,32 @@ TEST(laplace, neg_binomial_2_log_dbl) { eta_l(0) -= diff; eta_u(0) += diff; - double target_phi_1u = laplace_marginal_density(diff_functor, K, phi_1u, - eta_dbl, x, delta, - delta_int, theta_0), - target_phi_1l = laplace_marginal_density(diff_functor, K, phi_1l, - eta_dbl, x, delta, - delta_int, theta_0), - target_phi_2u = laplace_marginal_density(diff_functor, K, phi_2u, +std::cout << "here5" << std::endl; +double target_phi_1u = laplace_marginal_density(diff_functor, K, phi_1u, eta_dbl, x, delta, - delta_int, theta_0), - target_phi_2l = laplace_marginal_density(diff_functor, K, phi_2l, - eta_dbl, x, delta, - delta_int, theta_0), - target_eta_u = laplace_marginal_density(diff_functor, K, phi_dbl, - eta_u, x, delta, - delta_int, theta_0), - target_eta_l = laplace_marginal_density(diff_functor, K, phi_dbl, - eta_l, x, delta, delta_int, theta_0); +std::cout << "here6" << std::endl; +double target_phi_1l = laplace_marginal_density(diff_functor, K, phi_1l, + eta_dbl, x, delta, + delta_int, theta_0); +std::cout << "here7" << std::endl; +double target_phi_2u = laplace_marginal_density(diff_functor, K, phi_2u, + eta_dbl, x, delta, + delta_int, theta_0); +std::cout << "here8" << std::endl; +double target_phi_2l = laplace_marginal_density(diff_functor, K, phi_2l, + eta_dbl, x, delta, + delta_int, theta_0); + +std::cout << "here9" << std::endl; +double target_eta_u = laplace_marginal_density(diff_functor, K, phi_dbl, + eta_u, x, delta, + delta_int, theta_0); + +std::cout << "here10" << std::endl; +double target_eta_l = laplace_marginal_density(diff_functor, K, phi_dbl, + eta_l, x, delta, + delta_int, theta_0); std::vectorg_finite(dim_phi + dim_eta); g_finite[0] = (target_phi_1u - target_phi_1l) / (2 * diff); @@ -228,4 +246,3 @@ TEST(laplace, neg_binomial_2_log_dbl) { laplace_marginal_neg_binomial_2_log_lpmf(y_obs, y_index, K, phi, eta, x, delta, delta_int, theta_0)); } -*/ From 3f310e79f58c315f148540f0938a6c3f4ea09ac7 Mon Sep 17 00:00:00 
2001 From: Stan Jenkins Date: Fri, 1 Oct 2021 19:28:31 +0000 Subject: [PATCH 52/53] [Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.04.1 (tags/RELEASE_600/final) --- .../laplace_likelihood_neg_binomial_2_log.hpp | 17 ++-- ...place_marginal_neg_binomial_2_log_test.cpp | 86 +++++++++---------- 2 files changed, 49 insertions(+), 54 deletions(-) diff --git a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp index 1dc08483615..7464d2df5f8 100644 --- a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp +++ b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp @@ -56,8 +56,7 @@ struct diff_neg_binomial_2_log { const Eigen::Matrix& theta, const Eigen::Matrix& eta, Eigen::Matrix, Eigen::Dynamic, 1>& gradient, - Eigen::SparseMatrix& hessian, - int hessian_block_size = 1) const { + Eigen::SparseMatrix& hessian, int hessian_block_size = 1) const { typedef return_type_t scalar; Eigen::VectorXd one = rep_vector(1, theta.size()); int theta_size = theta.size(); @@ -70,18 +69,18 @@ struct diff_neg_binomial_2_log { = one + eta_scalar * exp_neg_theta; gradient = sums_ - elt_divide(sums_plus_n_eta, one_plus_exp); Eigen::MatrixXd hessian_val = eta_scalar - * sums_plus_n_eta.cwiseProduct( - elt_divide(exp_neg_theta, square(one_plus_exp))); + * sums_plus_n_eta.cwiseProduct(elt_divide( + exp_neg_theta, square(one_plus_exp))); hessian.resize(theta_size, theta_size); hessian.reserve(Eigen::VectorXi::Constant(theta_size, hessian_block_size)); // hessian.col(0) = - common_term; for (int i = 0; i < theta_size; i++) hessian.insert(i, i) = -hessian_val(i); -/* - hessian = -eta_scalar - * sums_plus_n_eta.cwiseProduct( - elt_divide(exp_neg_theta, square(one_plus_exp))); -*/ + /* + hessian = -eta_scalar + * sums_plus_n_eta.cwiseProduct( + elt_divide(exp_neg_theta, square(one_plus_exp))); + */ } template diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp index 6cb8149f16f..77a2dadb1f7 100644 --- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp +++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp @@ -70,8 +70,10 @@ TEST(laplace, likelihood_differentiation) { finite_hessian(1) = (gradient_u1 - gradient_l1)(1) / (2 * epsilon); Eigen::VectorXd finite_third_diff(2); - finite_third_diff(0) = (hessian_u0 - hessian_l0).eval().coeff(0, 0) / (2 * epsilon); - finite_third_diff(1) = (hessian_u1 - hessian_l1).eval().coeff(1, 1) / (2 * epsilon); + finite_third_diff(0) + = (hessian_u0 - hessian_l0).eval().coeff(0, 0) / (2 * epsilon); + finite_third_diff(1) + = (hessian_u1 - hessian_l1).eval().coeff(1, 1) / (2 * epsilon); std::cout << "gradient: " << gradient << std::endl; std::cout << "hessian: " << hessian << std::endl; @@ -101,8 +103,7 @@ TEST(laplace, likelihood_differentiation) { Eigen::MatrixXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta); Eigen::VectorXd gradient_theta_l, gradient_theta_u; - Eigen::SparseMatrix hessian_theta_u, - hessian_theta_l; + Eigen::SparseMatrix hessian_theta_u, hessian_theta_l; diff_functor.diff(theta, eta_l, gradient_theta_l, hessian_theta_l); diff_functor.diff(theta, eta_u, gradient_theta_u, hessian_theta_u); @@ -110,7 +111,8 @@ TEST(laplace, likelihood_differentiation) { = (gradient_theta_u - gradient_theta_l) / (2 * epsilon); std::cout << "diff_theta_eta: " << diff_theta_eta.transpose() << std::endl; - std::cout << "finite_gradient_theta_eta: " << 
diff --git a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp
index 6cb8149f16f..77a2dadb1f7 100644
--- a/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp
+++ b/test/unit/math/laplace/laplace_marginal_neg_binomial_2_log_test.cpp
@@ -70,8 +70,10 @@ TEST(laplace, likelihood_differentiation) {
   finite_hessian(1) = (gradient_u1 - gradient_l1)(1) / (2 * epsilon);
 
   Eigen::VectorXd finite_third_diff(2);
-  finite_third_diff(0) = (hessian_u0 - hessian_l0).eval().coeff(0, 0) / (2 * epsilon);
-  finite_third_diff(1) = (hessian_u1 - hessian_l1).eval().coeff(1, 1) / (2 * epsilon);
+  finite_third_diff(0)
+      = (hessian_u0 - hessian_l0).eval().coeff(0, 0) / (2 * epsilon);
+  finite_third_diff(1)
+      = (hessian_u1 - hessian_l1).eval().coeff(1, 1) / (2 * epsilon);
 
   std::cout << "gradient: " << gradient << std::endl;
   std::cout << "hessian: " << hessian << std::endl;
@@ -101,8 +103,7 @@ TEST(laplace, likelihood_differentiation) {
   Eigen::MatrixXd diff_theta_eta = diff_functor.diff_theta_eta(theta, eta);
 
   Eigen::VectorXd gradient_theta_l, gradient_theta_u;
-  Eigen::SparseMatrix hessian_theta_u,
-    hessian_theta_l;
+  Eigen::SparseMatrix hessian_theta_u, hessian_theta_l;
   diff_functor.diff(theta, eta_l, gradient_theta_l, hessian_theta_l);
   diff_functor.diff(theta, eta_u, gradient_theta_u, hessian_theta_u);
@@ -110,7 +111,8 @@ TEST(laplace, likelihood_differentiation) {
       = (gradient_theta_u - gradient_theta_l) / (2 * epsilon);
 
   std::cout << "diff_theta_eta: " << diff_theta_eta.transpose() << std::endl;
-  std::cout << "finite_gradient_theta_eta: " << finite_gradient_theta_eta.transpose() << std::endl;
+  std::cout << "finite_gradient_theta_eta: "
+            << finite_gradient_theta_eta.transpose() << std::endl;
 
   Eigen::VectorXd diff_theta_eta1 = diff_theta_eta.col(0);
   EXPECT_MATRIX_FLOAT_EQ(finite_gradient_theta_eta, diff_theta_eta1);
   /*
@@ -122,7 +124,8 @@ TEST(laplace, likelihood_differentiation) {
   Eigen::MatrixXd diff2_theta_eta = diff_functor.diff2_theta_eta(theta, eta);
 
   Eigen::VectorXd finite_hessian_theta_eta
-      = (hessian_theta_u.diagonal() - hessian_theta_l.diagonal()) / (2 * epsilon);
+      = (hessian_theta_u.diagonal() - hessian_theta_l.diagonal())
+        / (2 * epsilon);
 
   std::cout << "diff2_theta_eta: " << diff2_theta_eta.transpose() << std::endl;
@@ -145,13 +148,13 @@ Eigen::MatrixXd compute_B(const Eigen::VectorXd& theta,
 }
 
 TEST(laplace, neg_binomial_2_log_dbl) {
-  using stan::math::to_vector;
   using stan::math::diff_neg_binomial_2_log;
-  using stan::math::test::sqr_exp_kernel_functor;
   using stan::math::laplace_marginal_density;
   using stan::math::laplace_marginal_neg_binomial_2_log_lpmf;
-  using stan::math::var;
+  using stan::math::to_vector;
   using stan::math::value_of;
+  using stan::math::var;
+  using stan::math::test::sqr_exp_kernel_functor;
 
   int dim_phi = 2, dim_eta = 1, dim_theta = 2;
   Eigen::VectorXd phi(dim_phi), eta(dim_eta), theta_0(dim_theta);
@@ -160,7 +163,7 @@ TEST(laplace, neg_binomial_2_log_dbl) {
   theta_0 << 0, 0;
   std::vector x(dim_theta);
   Eigen::VectorXd x_0(2);
-  x_0 << 0.05100797, 0.16086164; 
+  x_0 << 0.05100797, 0.16086164;
   x[0] = x_0;
   Eigen::VectorXd x_1(2);
   x_1 << -0.59823393, 0.98701425;
@@ -183,9 +186,8 @@ TEST(laplace, neg_binomial_2_log_dbl) {
   Eigen::Matrix phi_v = phi, eta_v = eta;
 
   std::cout << "here3" << std::endl;
-  var target
-    = laplace_marginal_density(diff_functor, K, phi_v, eta_v, x, delta,
-                               delta_int, theta_0);
+  var target = laplace_marginal_density(diff_functor, K, phi_v, eta_v, x, delta,
+                                        delta_int, theta_0);
   std::cout << "here4" << std::endl;
 
   std::vector g;
@@ -195,8 +197,8 @@ TEST(laplace, neg_binomial_2_log_dbl) {
   // finite diff benchmark
   double diff = 1e-7;
   Eigen::VectorXd phi_dbl = value_of(phi), eta_dbl = value_of(eta);
-  Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl,
-    phi_2l = phi_dbl, phi_2u = phi_dbl, eta_l = eta_dbl, eta_u = eta_dbl;
+  Eigen::VectorXd phi_1l = phi_dbl, phi_1u = phi_dbl, phi_2l = phi_dbl,
+                  phi_2u = phi_dbl, eta_l = eta_dbl, eta_u = eta_dbl;
   phi_1l(0) -= diff;
   phi_1u(0) += diff;
   phi_2l(1) -= diff;
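The perturbations set up in the hunk above feed the central finite-difference check used throughout this test. As a standalone illustration of that recipe (the helper name finite_diff_gradient, the functional f, and the toy quadratic objective are assumptions made for this sketch, not part of stan::math), the quotient (f(phi + h e_i) - f(phi - h e_i)) / (2 h) approximates each partial derivative to O(h^2):

#include <Eigen/Dense>
#include <functional>
#include <iostream>

// Sketch only: central finite-difference gradient of a scalar function,
// one coordinate at a time, mirroring what the test does with
// laplace_marginal_density evaluated at phi_1l/phi_1u, phi_2l/phi_2u, etc.
Eigen::VectorXd finite_diff_gradient(
    const std::function<double(const Eigen::VectorXd&)>& f,
    const Eigen::VectorXd& phi, double h = 1e-7) {
  Eigen::VectorXd grad(phi.size());
  for (int i = 0; i < phi.size(); ++i) {
    Eigen::VectorXd phi_u = phi, phi_l = phi;
    phi_u(i) += h;
    phi_l(i) -= h;
    grad(i) = (f(phi_u) - f(phi_l)) / (2 * h);  // O(h^2) central difference
  }
  return grad;
}

int main() {
  // Toy check on f(phi) = 0.5 * ||phi||^2, whose exact gradient is phi.
  auto f = [](const Eigen::VectorXd& v) { return 0.5 * v.squaredNorm(); };
  Eigen::VectorXd phi(2);
  phi << 1.3, -0.7;
  std::cout << finite_diff_gradient(f, phi).transpose() << std::endl;
  return 0;
}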
@@ -204,34 +206,28 @@ TEST(laplace, neg_binomial_2_log_dbl) {
   eta_l(0) -= diff;
   eta_u(0) += diff;
 
-std::cout << "here5" << std::endl;
-double target_phi_1u = laplace_marginal_density(diff_functor, K, phi_1u,
-                                                eta_dbl, x, delta,
-                                                delta_int, theta_0);
-std::cout << "here6" << std::endl;
-double target_phi_1l = laplace_marginal_density(diff_functor, K, phi_1l,
-                                                eta_dbl, x, delta,
-                                                delta_int, theta_0);
-std::cout << "here7" << std::endl;
-double target_phi_2u = laplace_marginal_density(diff_functor, K, phi_2u,
-                                                eta_dbl, x, delta,
-                                                delta_int, theta_0);
-std::cout << "here8" << std::endl;
-double target_phi_2l = laplace_marginal_density(diff_functor, K, phi_2l,
-                                                eta_dbl, x, delta,
-                                                delta_int, theta_0);
-
-std::cout << "here9" << std::endl;
-double target_eta_u = laplace_marginal_density(diff_functor, K, phi_dbl,
-                                               eta_u, x, delta,
-                                               delta_int, theta_0);
-
-std::cout << "here10" << std::endl;
-double target_eta_l = laplace_marginal_density(diff_functor, K, phi_dbl,
-                                               eta_l, x, delta,
-                                               delta_int, theta_0);
-
-  std::vectorg_finite(dim_phi + dim_eta);
+  std::cout << "here5" << std::endl;
+  double target_phi_1u = laplace_marginal_density(
+      diff_functor, K, phi_1u, eta_dbl, x, delta, delta_int, theta_0);
+  std::cout << "here6" << std::endl;
+  double target_phi_1l = laplace_marginal_density(
+      diff_functor, K, phi_1l, eta_dbl, x, delta, delta_int, theta_0);
+  std::cout << "here7" << std::endl;
+  double target_phi_2u = laplace_marginal_density(
+      diff_functor, K, phi_2u, eta_dbl, x, delta, delta_int, theta_0);
+  std::cout << "here8" << std::endl;
+  double target_phi_2l = laplace_marginal_density(
+      diff_functor, K, phi_2l, eta_dbl, x, delta, delta_int, theta_0);
+
+  std::cout << "here9" << std::endl;
+  double target_eta_u = laplace_marginal_density(
+      diff_functor, K, phi_dbl, eta_u, x, delta, delta_int, theta_0);
+
+  std::cout << "here10" << std::endl;
+  double target_eta_l = laplace_marginal_density(
+      diff_functor, K, phi_dbl, eta_l, x, delta, delta_int, theta_0);
+
+  std::vector g_finite(dim_phi + dim_eta);
   g_finite[0] = (target_phi_1u - target_phi_1l) / (2 * diff);
   g_finite[1] = (target_phi_2u - target_phi_2l) / (2 * diff);
   g_finite[2] = (target_eta_u - target_eta_l) / (2 * diff);
@@ -243,6 +239,6 @@ double target_eta_l = laplace_marginal_density(diff_functor, K, phi_dbl,
 
   // Check wrapper.
   EXPECT_EQ(target,
-            laplace_marginal_neg_binomial_2_log_lpmf(y_obs, y_index, K, phi, eta, x,
-                                                     delta, delta_int, theta_0));
+            laplace_marginal_neg_binomial_2_log_lpmf(
+                y_obs, y_index, K, phi, eta, x, delta, delta_int, theta_0));
 }
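The laplace_marginal.hpp hunks in the next patch only re-indent nested triangular solves against the Cholesky factor L, but the underlying pattern is worth seeing on its own. The sketch below is illustrative only (the matrix names B, L, v and the 2x2 example values are made up here; the Stan code additionally folds in W^(1/2) factors and covariance terms): it applies the inverse of a symmetric positive-definite matrix through a forward solve with L followed by a backward solve with L^T, the same triangularView().solve() idiom that the patch touches.

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Small SPD matrix and right-hand side, values chosen arbitrarily.
  Eigen::MatrixXd B(2, 2);
  B << 4.0, 1.0,
       1.0, 3.0;
  Eigen::VectorXd v(2);
  v << 1.0, 2.0;

  // Cholesky factorization B = L * L^T.
  Eigen::LLT<Eigen::MatrixXd> llt(B);
  Eigen::MatrixXd L = llt.matrixL();

  // Two triangular solves: y = L^{-1} v, then x = L^{-T} y, so that B x = v.
  // Equivalent to llt.solve(v), but written out to mirror the explicit
  // triangularView() calls in the patch below.
  Eigen::VectorXd y = L.triangularView<Eigen::Lower>().solve(v);
  Eigen::VectorXd x = L.transpose().triangularView<Eigen::Upper>().solve(y);

  std::cout << (B * x - v).norm() << std::endl;  // ~0 up to rounding error
  return 0;
}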
From 6148a7dd2f96cb37d577b8d47e9afe7f108e6cfb Mon Sep 17 00:00:00 2001
From: Stan BuildBot
Date: Wed, 13 Apr 2022 11:16:01 -0400
Subject: [PATCH 53/53] [Jenkins] auto-formatting by clang-format version
 10.0.0-4ubuntu1

---
 .../laplace/laplace_likelihood_deprecated.hpp |  2 +-
 .../laplace_likelihood_neg_binomial_2_log.hpp |  2 +-
 stan/math/laplace/laplace_marginal.hpp        | 16 ++++++++--------
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/stan/math/laplace/laplace_likelihood_deprecated.hpp b/stan/math/laplace/laplace_likelihood_deprecated.hpp
index 5ad14b69c5d..31936c3acb9 100644
--- a/stan/math/laplace/laplace_likelihood_deprecated.hpp
+++ b/stan/math/laplace/laplace_likelihood_deprecated.hpp
@@ -306,7 +306,7 @@ struct diff_neg_binomial_2_log {
     hessian = -eta_scalar
               * sums_plus_n_eta.cwiseProduct(
-                  elt_divide(exp_neg_theta, square(one_plus_exp)));
+                    elt_divide(exp_neg_theta, square(one_plus_exp)));
   }
 
   template
diff --git a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp
index 7464d2df5f8..bb7f4917d48 100644
--- a/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp
+++ b/stan/math/laplace/laplace_likelihood_neg_binomial_2_log.hpp
@@ -70,7 +70,7 @@ struct diff_neg_binomial_2_log {
     gradient = sums_ - elt_divide(sums_plus_n_eta, one_plus_exp);
     Eigen::MatrixXd hessian_val = eta_scalar
                                   * sums_plus_n_eta.cwiseProduct(elt_divide(
-                                      exp_neg_theta, square(one_plus_exp)));
+                                        exp_neg_theta, square(one_plus_exp)));
     hessian.resize(theta_size, theta_size);
     hessian.reserve(Eigen::VectorXi::Constant(theta_size, hessian_block_size));
     // hessian.col(0) = - common_term;
diff --git a/stan/math/laplace/laplace_marginal.hpp b/stan/math/laplace/laplace_marginal.hpp
index 694a1b9faa4..ed17c89ea17 100644
--- a/stan/math/laplace/laplace_marginal.hpp
+++ b/stan/math/laplace/laplace_marginal.hpp
@@ -170,16 +170,16 @@ double laplace_marginal_density(
         a = b
             - W_r
                   * mdivide_left_tri(
-                      transpose(L),
-                      mdivide_left_tri(
-                          L, W_r.diagonal().cwiseProduct(covariance * b)));
+                        transpose(L),
+                        mdivide_left_tri(
+                            L, W_r.diagonal().cwiseProduct(covariance * b)));
       } else {
         b = W * theta + l_grad.head(theta_size);
         a = b
             - W_r
                   * mdivide_left_tri(
-                      transpose(L), mdivide_left_tri(
-                                        L, W_r * (covariance * b)));
+                        transpose(L), mdivide_left_tri(
+                                          L, W_r * (covariance * b)));
       }
     } else if (solver == 2) {
       // TODO -- use triangularView for K_root.
@@ -395,7 +395,7 @@ struct laplace_marginal_density_vari : public vari {
       MatrixXd W_root_diag = W_r;
       R = W_r
           * L.transpose().triangularView().solve(
-              L.triangularView().solve(W_root_diag));
+                L.triangularView().solve(W_root_diag));
       Eigen::MatrixXd C = mdivide_left_tri(L, W_r * covariance);
 
       if (hessian_block_size == 0 && eta_size_ == 0) {
@@ -415,8 +415,8 @@ struct laplace_marginal_density_vari : public vari {
         R = W_r
             - W_r * K_root
                   * L.transpose().triangularView().solve(
-                      L.triangularView().solve(K_root.transpose()
-                                               * W_r));
+                        L.triangularView().solve(K_root.transpose()
+                                                 * W_r));
 
         Eigen::MatrixXd C
             = L.triangularView().solve(K_root.transpose());