diff --git a/src/simulators/qasm/qasm_controller.hpp b/src/simulators/qasm/qasm_controller.hpp
index 335d616051..46ecf17358 100755
--- a/src/simulators/qasm/qasm_controller.hpp
+++ b/src/simulators/qasm/qasm_controller.hpp
@@ -137,6 +137,12 @@ class QasmController : public Base::Controller {
     tensor_network
   };
 
+  // Simulation precision
+  enum class Precision {
+    double_precision,
+    single_precision
+  };
+
   //-----------------------------------------------------------------------
   // Base class abstract method override
   //-----------------------------------------------------------------------
@@ -233,9 +239,12 @@ class QasmController : public Base::Controller {
   //-----------------------------------------------------------------------
   size_t required_memory_mb(const Circuit& circ) const override;
 
-  // Simulation method
+  // Simulation method
   Method simulation_method_ = Method::automatic;
 
+  // Simulation precision
+  Precision simulation_precision_ = Precision::double_precision;
+
   // Qubit threshold for running circuit optimizations
   uint_t circuit_opt_ideal_threshold_ = 5;
   uint_t circuit_opt_noise_threshold_ = 12;
@@ -293,6 +302,16 @@ void QasmController::set_config(const json_t &config) {
     }
   }
 
+  std::string precision;
+  if (JSON::get_value(precision, "precision", config)) {
+    if (precision == "double") {
+      simulation_precision_ = Precision::double_precision;
+    } else if (precision == "single") {
+      simulation_precision_ = Precision::single_precision;
+    }
+  }
+
+
   // Check for circuit optimization threshold
   JSON::get_value(circuit_opt_ideal_threshold_,
                   "optimize_ideal_threshold", config);
@@ -343,12 +362,22 @@ OutputData QasmController::run_circuit(const Circuit &circ,
 
   switch (simulation_method(circ)) {
     case Method::statevector:
-      // Statevector simulation
-      return run_circuit_helper<Statevector::State<>>(
-          circ,
-          shots,
-          rng_seed,
-          initial_statevector_); // allow custom initial state
+
+      if (simulation_precision_ == Precision::double_precision) {
+        // Statevector simulation
+        return run_circuit_helper<Statevector::State<QV::QubitVector<double>>>(
+            circ,
+            shots,
+            rng_seed,
+            initial_statevector_); // allow custom initial state
+      } else {
+        // Statevector simulation
+        return run_circuit_helper<Statevector::State<QV::QubitVector<float>>>(
+            circ,
+            shots,
+            rng_seed,
+            initial_statevector_); // allow custom initial state
+      }
     case Method::stabilizer:
       // Stabilizer simulation
       // TODO: Stabilizer doesn't yet support custom state initialization
diff --git a/src/simulators/statevector/qubitvector.hpp b/src/simulators/statevector/qubitvector.hpp
index 66c518ef52..61d396f66b 100755
--- a/src/simulators/statevector/qubitvector.hpp
+++ b/src/simulators/statevector/qubitvector.hpp
@@ -37,10 +37,10 @@ using uint_t = uint64_t;
 using int_t = int64_t;
 using reg_t = std::vector<uint_t>;
 using indexes_t = std::unique_ptr<uint_t[]>;
-using complex_t = std::complex<double>;
-using cvector_t = std::vector<complex_t>;
-using rvector_t = std::vector<double>;
 template <size_t N> using areg_t = std::array<uint_t, N>;
+template <typename data_t> using complex_t = std::complex<data_t>;
+template <typename data_t> using cvector_t = std::vector<complex_t<data_t>>;
+template <typename data_t> using rvector_t = std::vector<data_t>;
 
 //============================================================================
 // BIT MASKS and indexing
@@ -107,11 +107,11 @@ const std::array<uint_t, 64> MASKS {{
 // The following methods may also need to be template specialized:
 //  * set_num_qubits(size_t)
 //  * initialize()
-//  * initialize_from_vector(cvector_t)
+//  * initialize_from_vector(cvector_t<data_t>)
 // If the template argument does not have these methods then template
 // specialization must be used to override the default implementations.
-template +template class QubitVector { public: @@ -131,14 +131,14 @@ class QubitVector { //----------------------------------------------------------------------- // Element access - complex_t &operator[](uint_t element); - complex_t operator[](uint_t element) const; + complex_t &operator[](uint_t element); + complex_t operator[](uint_t element) const; // Returns a reference to the underlying data_t data class - data_t &data() {return data_;} + complex_t* &data() {return data_;} // Returns a copy of the underlying data_t data class - data_t data() const {return data_;} + complex_t* data() const {return data_;} //----------------------------------------------------------------------- // Utility functions @@ -154,7 +154,7 @@ class QubitVector { uint_t size() const {return data_size_;} // Returns a copy of the underlying data_t data as a complex vector - cvector_t vector() const; + cvector_t vector() const; // Return JSON serialization of QubitVector; json_t json() const; @@ -162,6 +162,9 @@ class QubitVector { // Set all entries in the vector to 0. void zero(); + // convert vector type to data type of this qubit vector + cvector_t convert(const cvector_t& v) const; + // index0 returns the integer representation of a number of bits set // to zero inserted into an arbitrary bit string. // Eg: for qubits 0,2 in a state k = ba ( ba = 00 => k=0, etc). @@ -202,7 +205,7 @@ class QubitVector { // (leaving the other qubits in their current state) // assuming the qubits being initialized have already been reset to the zero state // (using apply_reset) - void initialize_component(const reg_t &qubits, const cvector_t &state); + void initialize_component(const reg_t &qubits, const cvector_t &state); //----------------------------------------------------------------------- // Check point operations @@ -215,7 +218,7 @@ class QubitVector { void revert(bool keep); // Compute the inner product of current state with checkpoint state - complex_t inner_product() const; + complex_t inner_product() const; //----------------------------------------------------------------------- // Initialization @@ -227,11 +230,11 @@ class QubitVector { // Initializes the vector to a custom initial state. // If the length of the data vector does not match the number of qubits // an exception is raised. - void initialize_from_vector(const cvector_t &data); + void initialize_from_vector(const cvector_t &data); // Initializes the vector to a custom initial state. // If num_states does not match the number of qubits an exception is raised. - void initialize_from_data(const data_t &data, const size_t num_states); + void initialize_from_data(const complex_t* data, const size_t num_states); //----------------------------------------------------------------------- // Apply Matrices @@ -239,23 +242,23 @@ class QubitVector { // Apply a 1-qubit matrix to the state vector. // The matrix is input as vector of the column-major vectorized 1-qubit matrix. - void apply_matrix(const uint_t qubit, const cvector_t &mat); + void apply_matrix(const uint_t qubit, const cvector_t &mat); // Apply a N-qubit matrix to the state vector. // The matrix is input as vector of the column-major vectorized N-qubit matrix. - void apply_matrix(const reg_t &qubits, const cvector_t &mat); + void apply_matrix(const reg_t &qubits, const cvector_t &mat); // Apply a stacked set of 2^control_count target_count--qubit matrix to the state vector. // The matrix is input as vector of the column-major vectorized N-qubit matrix. 
- void apply_multiplexer(const reg_t &control_qubits, const reg_t &target_qubits, const cvector_t &mat); + void apply_multiplexer(const reg_t &control_qubits, const reg_t &target_qubits, const cvector_t &mat); // Apply a 1-qubit diagonal matrix to the state vector. // The matrix is input as vector of the matrix diagonal. - void apply_diagonal_matrix(const uint_t qubit, const cvector_t &mat); + void apply_diagonal_matrix(const uint_t qubit, const cvector_t &mat); // Apply a N-qubit diagonal matrix to the state vector. // The matrix is input as vector of the matrix diagonal. - void apply_diagonal_matrix(const reg_t &qubits, const cvector_t &mat); + void apply_diagonal_matrix(const reg_t &qubits, const cvector_t &mat); // Swap pairs of indicies in the underlying vector void apply_permutation_matrix(const reg_t &qubits, @@ -283,13 +286,13 @@ class QubitVector { // If N=2 this implements an optimized CPhase gate // If N=3 this implements an optimized CCPhase gate // if phase = -1 this is a Z, CZ, CCZ gate - void apply_mcphase(const reg_t &qubits, const complex_t phase); + void apply_mcphase(const reg_t &qubits, const complex_t phase); // Apply a general multi-controlled single-qubit unitary gate // If N=1 this implements an optimized single-qubit U gate // If N=2 this implements an optimized CU gate // If N=3 this implements an optimized CCU gate - void apply_mcu(const reg_t &qubits, const cvector_t &mat); + void apply_mcu(const reg_t &qubits, const cvector_t &mat); // Apply a general multi-controlled SWAP gate // If N=2 this implements an optimized SWAP gate @@ -307,20 +310,20 @@ class QubitVector { // Return the probabilities for all measurement outcomes in the current vector // This is equivalent to returning a new vector with new[i]=|orig[i]|^2. // Eg. For 2-qubits this is [P(00), P(01), P(010), P(11)] - rvector_t probabilities() const; + rvector_t probabilities() const; // Return the Z-basis measurement outcome probabilities [P(0), P(1)] // for measurement of specified qubit - rvector_t probabilities(const uint_t qubit) const; + rvector_t probabilities(const uint_t qubit) const; // Return the Z-basis measurement outcome probabilities [P(0), ..., P(2^N-1)] // for measurement of N-qubits. - rvector_t probabilities(const reg_t &qubits) const; + rvector_t probabilities(const reg_t &qubits) const; // Return M sampled outcomes for Z-basis measurement of all qubits // The input is a length M list of random reals between [0, 1) used for // generating samples. - std::vector sample_measure(const std::vector &rnds) const; + std::vector sample_measure(const rvector_t &rnds) const; //----------------------------------------------------------------------- // Norms @@ -337,22 +340,22 @@ class QubitVector { // Return the norm for of the vector obtained after apply the 1-qubit // matrix mat to the vector. // The matrix is input as vector of the column-major vectorized 1-qubit matrix. - double norm(const uint_t qubit, const cvector_t &mat) const; + double norm(const uint_t qubit, const cvector_t &mat) const; // Return the norm for of the vector obtained after apply the N-qubit // matrix mat to the vector. // The matrix is input as vector of the column-major vectorized N-qubit matrix. - double norm(const reg_t &qubits, const cvector_t &mat) const; + double norm(const reg_t &qubits, const cvector_t &mat) const; // Return the norm for of the vector obtained after apply the 1-qubit // diagonal matrix mat to the vector. // The matrix is input as vector of the matrix diagonal. 
- double norm_diagonal(const uint_t qubit, const cvector_t &mat) const; + double norm_diagonal(const uint_t qubit, const cvector_t &mat) const; // Return the norm for of the vector obtained after apply the N-qubit // diagonal matrix mat to the vector. // The matrix is input as vector of the matrix diagonal. - double norm_diagonal(const reg_t &qubits, const cvector_t &mat) const; + double norm_diagonal(const reg_t &qubits, const cvector_t &mat) const; //----------------------------------------------------------------------- // JSON configuration settings @@ -398,8 +401,8 @@ class QubitVector { //----------------------------------------------------------------------- size_t num_qubits_; size_t data_size_; - data_t data_; - data_t checkpoint_; + complex_t* data_; + complex_t* checkpoint_; //----------------------------------------------------------------------- // Config settings @@ -415,8 +418,8 @@ class QubitVector { //----------------------------------------------------------------------- void check_qubit(const uint_t qubit) const; - void check_vector(const cvector_t &diag, uint_t nqubits) const; - void check_matrix(const cvector_t &mat, uint_t nqubits) const; + void check_vector(const cvector_t &diag, uint_t nqubits) const; + void check_matrix(const cvector_t &mat, uint_t nqubits) const; void check_dimension(const QubitVector &qv) const; void check_checkpoint() const; @@ -476,9 +479,9 @@ class QubitVector { // // where k is the index of the vector, val_re and val_im are the doubles // to store the reduction. - // Returns complex_t(val_re, val_im) + // Returns complex_t(val_re, val_im) template - complex_t apply_reduction_lambda(Lambda&& func) const; + complex_t apply_reduction_lambda(Lambda&& func) const; //----------------------------------------------------------------------- // Statevector block reduction with Lambda function @@ -486,7 +489,7 @@ class QubitVector { // These functions loop through the indexes of the qubitvector data and // apply a reduction lambda function to each block specified by the qubits // argument. The reduction lambda stores the reduction in two doubles - // (val_re, val_im) and returns the complex result complex_t(val_re, val_im) + // (val_re, val_im) and returns the complex result complex_t(val_re, val_im) // // NOTE: The lambda functions can use the dynamic or static indexes // signature however if N is known at compile time the static case should @@ -503,9 +506,9 @@ class QubitVector { // // where `inds` are the 2 ** N indexes for each N-qubit block returned by // the `indexes` function, `val_re` and `val_im` are the doubles to - // store the reduction returned as complex_t(val_re, val_im). + // store the reduction returned as complex_t(val_re, val_im). template - complex_t apply_reduction_lambda(Lambda&& func, + complex_t apply_reduction_lambda(Lambda&& func, const list_t &qubits) const; // Apply a N-qubit complex matrix reduction lambda function to all blocks @@ -520,9 +523,9 @@ class QubitVector { // where `inds` are the 2 ** N indexes for each N-qubit block returned by // the `indexe`s function, `params` is a templated parameter class // (typically a complex vector), `val_re` and `val_im` are the doubles to - // store the reduction returned as complex_t(val_re, val_im). + // store the reduction returned as complex_t(val_re, val_im). 
template - complex_t apply_reduction_lambda(Lambda&& func, + complex_t apply_reduction_lambda(Lambda&& func, const list_t &qubits, const param_t ¶ms) const; }; @@ -545,7 +548,7 @@ inline void to_json(json_t &js, const QubitVector &qv) { template json_t QubitVector::json() const { const int_t END = data_size_; - const json_t ZERO = complex_t(0.0, 0.0); + const json_t ZERO = complex_t(0.0, 0.0); json_t js = json_t(data_size_, ZERO); if (json_chop_threshold_ > 0) { @@ -580,7 +583,7 @@ void QubitVector::check_qubit(const uint_t qubit) const { } template -void QubitVector::check_matrix(const cvector_t &vec, uint_t nqubits) const { +void QubitVector::check_matrix(const cvector_t &vec, uint_t nqubits) const { const size_t DIM = BITS[nqubits]; const auto SIZE = vec.size(); if (SIZE != DIM * DIM) { @@ -591,7 +594,7 @@ void QubitVector::check_matrix(const cvector_t &vec, uint_t nqubits) con } template -void QubitVector::check_vector(const cvector_t &vec, uint_t nqubits) const { +void QubitVector::check_vector(const cvector_t &vec, uint_t nqubits) const { const size_t DIM = BITS[nqubits]; const auto SIZE = vec.size(); if (SIZE != DIM) { @@ -644,7 +647,7 @@ QubitVector::~QubitVector() { //------------------------------------------------------------------------------ template -complex_t &QubitVector::operator[](uint_t element) { +complex_t &QubitVector::operator[](uint_t element) { // Error checking #ifdef DEBUG if (element > data_size_) { @@ -657,7 +660,7 @@ complex_t &QubitVector::operator[](uint_t element) { } template -complex_t QubitVector::operator[](uint_t element) const { +complex_t QubitVector::operator[](uint_t element) const { // Error checking #ifdef DEBUG if (element > data_size_) { @@ -670,8 +673,8 @@ complex_t QubitVector::operator[](uint_t element) const { } template -cvector_t QubitVector::vector() const { - cvector_t ret(data_size_, 0.); +cvector_t QubitVector::vector() const { + cvector_t ret(data_size_, 0.); const int_t END = data_size_; #pragma omp parallel for if (num_qubits_ > omp_threshold_ && omp_threads_ > 1) num_threads(omp_threads_) for (int_t j=0; j < END; j++) { @@ -734,13 +737,15 @@ indexes_t QubitVector::indexes(const reg_t& qubits, // State initialize component //------------------------------------------------------------------------------ template -void QubitVector::initialize_component(const reg_t &qubits, const cvector_t &state) { +void QubitVector::initialize_component(const reg_t &qubits, const cvector_t &state0) { + + cvector_t state = convert(state0); // Lambda function for initializing component const size_t N = qubits.size(); - auto lambda = [&](const indexes_t &inds, const cvector_t &_state)->void { + auto lambda = [&](const indexes_t &inds, const cvector_t &_state)->void { const uint_t DIM = 1ULL << N; - complex_t cache = data_[inds[0]]; // the k-th component of non-initialized vector + complex_t cache = data_[inds[0]]; // the k-th component of non-initialized vector for (size_t i = 0; i < DIM; i++) { data_[inds[i]] = cache * _state[i]; // set component to psi[k] * state[i] } // (where psi is is the post-reset state of the non-initialized qubits) @@ -763,6 +768,15 @@ void QubitVector::zero() { } } +template +cvector_t QubitVector::convert(const cvector_t& v) const { + cvector_t ret(v.size()); + for (size_t i = 0; i < v.size(); ++i) + ret[i] = v[i]; + return ret; +} + + template void QubitVector::set_num_qubits(size_t num_qubits) { @@ -785,14 +799,14 @@ void QubitVector::set_num_qubits(size_t num_qubits) { // Allocate memory for new vector if (data_ == 
nullptr) - data_ = reinterpret_cast(malloc(sizeof(complex_t) * data_size_)); + data_ = reinterpret_cast*>(malloc(sizeof(complex_t) * data_size_)); } template void QubitVector::checkpoint() { if (!checkpoint_) - checkpoint_ = reinterpret_cast(malloc(sizeof(complex_t) * data_size_)); + checkpoint_ = reinterpret_cast*>(malloc(sizeof(complex_t) * data_size_)); const int_t END = data_size_; // end for k loop #pragma omp parallel for if (num_qubits_ > omp_threshold_ && omp_threads_ > 1) num_threads(omp_threads_) @@ -820,14 +834,14 @@ void QubitVector::revert(bool keep) { } template -complex_t QubitVector::inner_product() const { +complex_t QubitVector::inner_product() const { #ifdef DEBUG check_checkpoint(); #endif // Lambda function for inner product with checkpoint state auto lambda = [&](int_t k, double &val_re, double &val_im)->void { - const complex_t z = data_[k] * std::conj(checkpoint_[k]); + const complex_t z = data_[k] * std::conj(checkpoint_[k]); val_re += std::real(z); val_im += std::imag(z); }; @@ -845,7 +859,7 @@ void QubitVector::initialize() { } template -void QubitVector::initialize_from_vector(const cvector_t &statevec) { +void QubitVector::initialize_from_vector(const cvector_t &statevec) { if (data_size_ != statevec.size()) { std::string error = "QubitVector::initialize input vector is incorrect length (" + std::to_string(data_size_) + "!=" + @@ -861,7 +875,7 @@ void QubitVector::initialize_from_vector(const cvector_t &statevec) { } template -void QubitVector::initialize_from_data(const data_t &statevec, const size_t num_states) { +void QubitVector::initialize_from_data(const complex_t* statevec, const size_t num_states) { if (data_size_ != num_states) { std::string error = "QubitVector::initialize input vector is incorrect length (" + std::to_string(data_size_) + "!=" + std::to_string(num_states) + ")"; @@ -982,7 +996,7 @@ void QubitVector::apply_lambda(Lambda&& func, template template -complex_t QubitVector::apply_reduction_lambda(Lambda &&func) const { +complex_t QubitVector::apply_reduction_lambda(Lambda &&func) const { // Reduction variables double val_re = 0.; double val_im = 0.; @@ -995,13 +1009,13 @@ complex_t QubitVector::apply_reduction_lambda(Lambda &&func) const { std::forward(func)(k, val_re, val_im); } } // end omp parallel - return complex_t(val_re, val_im); + return complex_t(val_re, val_im); } template template -complex_t QubitVector::apply_reduction_lambda(Lambda&& func, +complex_t QubitVector::apply_reduction_lambda(Lambda&& func, const list_t &qubits) const { // Error checking @@ -1027,13 +1041,13 @@ complex_t QubitVector::apply_reduction_lambda(Lambda&& func, std::forward(func)(inds, val_re, val_im); } } // end omp parallel - return complex_t(val_re, val_im); + return complex_t(val_re, val_im); } template template -complex_t QubitVector::apply_reduction_lambda(Lambda&& func, +complex_t QubitVector::apply_reduction_lambda(Lambda&& func, const list_t &qubits, const param_t ¶ms) const { @@ -1060,7 +1074,7 @@ complex_t QubitVector::apply_reduction_lambda(Lambda&& func, std::forward(func)(inds, params, val_re, val_im); } } // end omp parallel - return complex_t(val_re, val_im); + return complex_t(val_re, val_im); } @@ -1071,7 +1085,7 @@ complex_t QubitVector::apply_reduction_lambda(Lambda&& func, ******************************************************************************/ template void QubitVector::apply_matrix(const reg_t &qubits, - const cvector_t &mat) { + const cvector_t &mat) { const size_t N = qubits.size(); // Error checking @@ -1086,8 +1100,8 @@ void 
QubitVector::apply_matrix(const reg_t &qubits, return; case 2: { // Lambda function for 2-qubit matrix multiplication - auto lambda = [&](const areg_t<4> &inds, const cvector_t &_mat)->void { - std::array cache; + auto lambda = [&](const areg_t<4> &inds, const cvector_t &_mat)->void { + std::array, 4> cache; for (size_t i = 0; i < 4; i++) { const auto ii = inds[i]; cache[i] = data_[ii]; @@ -1098,13 +1112,13 @@ void QubitVector::apply_matrix(const reg_t &qubits, for (size_t j = 0; j < 4; j++) data_[inds[i]] += _mat[i + 4 * j] * cache[j]; }; - apply_lambda(lambda, areg_t<2>({{qubits[0], qubits[1]}}), mat); + apply_lambda(lambda, areg_t<2>({{qubits[0], qubits[1]}}), convert(mat)); return; } case 3: { // Lambda function for 3-qubit matrix multiplication - auto lambda = [&](const areg_t<8> &inds, const cvector_t &_mat)->void { - std::array cache; + auto lambda = [&](const areg_t<8> &inds, const cvector_t &_mat)->void { + std::array, 8> cache; for (size_t i = 0; i < 8; i++) { const auto ii = inds[i]; cache[i] = data_[ii]; @@ -1115,13 +1129,13 @@ void QubitVector::apply_matrix(const reg_t &qubits, for (size_t j = 0; j < 8; j++) data_[inds[i]] += _mat[i + 8 * j] * cache[j]; }; - apply_lambda(lambda, areg_t<3>({{qubits[0], qubits[1], qubits[2]}}), mat); + apply_lambda(lambda, areg_t<3>({{qubits[0], qubits[1], qubits[2]}}), convert(mat)); return; } case 4: { // Lambda function for 4-qubit matrix multiplication - auto lambda = [&](const areg_t<16> &inds, const cvector_t &_mat)->void { - std::array cache; + auto lambda = [&](const areg_t<16> &inds, const cvector_t &_mat)->void { + std::array, 16> cache; for (size_t i = 0; i < 16; i++) { const auto ii = inds[i]; cache[i] = data_[ii]; @@ -1132,14 +1146,14 @@ void QubitVector::apply_matrix(const reg_t &qubits, for (size_t j = 0; j < 16; j++) data_[inds[i]] += _mat[i + 16 * j] * cache[j]; }; - apply_lambda(lambda, areg_t<4>({{qubits[0], qubits[1], qubits[2], qubits[3]}}), mat); + apply_lambda(lambda, areg_t<4>({{qubits[0], qubits[1], qubits[2], qubits[3]}}), convert(mat)); return; } default: { const uint_t DIM = BITS[N]; // Lambda function for N-qubit matrix multiplication - auto lambda = [&](const indexes_t &inds, const cvector_t &_mat)->void { - auto cache = std::make_unique(DIM); + auto lambda = [&](const indexes_t &inds, const cvector_t &_mat)->void { + auto cache = std::make_unique[]>(DIM); for (size_t i = 0; i < DIM; i++) { const auto ii = inds[i]; cache[i] = data_[ii]; @@ -1150,15 +1164,15 @@ void QubitVector::apply_matrix(const reg_t &qubits, for (size_t j = 0; j < DIM; j++) data_[inds[i]] += _mat[i + DIM * j] * cache[j]; }; - apply_lambda(lambda, qubits, mat); + apply_lambda(lambda, qubits, convert(mat)); } } // end switch } template void QubitVector::apply_multiplexer(const reg_t &control_qubits, - const reg_t &target_qubits, - const cvector_t &mat) { + const reg_t &target_qubits, + const cvector_t &mat) { // General implementation const size_t control_count = control_qubits.size(); @@ -1167,8 +1181,8 @@ void QubitVector::apply_multiplexer(const reg_t &control_qubits, const uint_t columns = BITS[target_count]; const uint_t blocks = BITS[control_count]; // Lambda function for stacked matrix multiplication - auto lambda = [&](const indexes_t &inds, const cvector_t &_mat)->void { - auto cache = std::make_unique(DIM); + auto lambda = [&](const indexes_t &inds, const cvector_t &_mat)->void { + auto cache = std::make_unique[]>(DIM); for (uint_t i = 0; i < DIM; i++) { const auto ii = inds[i]; cache[i] = data_[ii]; @@ -1186,12 +1200,13 @@ void 
QubitVector::apply_multiplexer(const reg_t &control_qubits, // Use the lambda function auto qubits = target_qubits; for (const auto &q : control_qubits) {qubits.push_back(q);} - apply_lambda(lambda, qubits, mat); + apply_lambda(lambda, qubits, convert(mat)); } template void QubitVector::apply_diagonal_matrix(const reg_t &qubits, - const cvector_t &diag) { + const cvector_t &diag) { + const int_t N = qubits.size(); // Error checking #ifdef DEBUG @@ -1203,18 +1218,18 @@ void QubitVector::apply_diagonal_matrix(const reg_t &qubits, return; } - auto lambda = [&](const areg_t<2> &inds, const cvector_t &_diag)->void { + auto lambda = [&](const areg_t<2> &inds, const cvector_t &_diag)->void { for (int_t i = 0; i < 2; ++i) { const int_t k = inds[i]; int_t iv = 0; for (int_t j = 0; j < N; j++) if ((k & (1ULL << qubits[j])) != 0) iv += (1 << j); - if (diag[iv] != 1.0) - data_[k] *= diag[iv]; + if (_diag[iv] != (data_t) 1.0) + data_[k] *= _diag[iv]; } }; - apply_lambda(lambda, areg_t<1>({{qubits[0]}}), diag); + apply_lambda(lambda, areg_t<1>({{qubits[0]}}), convert(diag)); } template @@ -1340,13 +1355,13 @@ void QubitVector::apply_mcy(const reg_t &qubits) { const size_t N = qubits.size(); const size_t pos0 = MASKS[N - 1]; const size_t pos1 = MASKS[N]; - const complex_t I(0., 1.); + const complex_t I(0., 1.); switch (N) { case 1: { // Lambda function for Y gate auto lambda = [&](const areg_t<2> &inds)->void { - const complex_t cache = data_[inds[pos0]]; + const complex_t cache = data_[inds[pos0]]; data_[inds[pos0]] = -I * data_[inds[pos1]]; data_[inds[pos1]] = I * cache; }; @@ -1356,7 +1371,7 @@ void QubitVector::apply_mcy(const reg_t &qubits) { case 2: { // Lambda function for CY gate auto lambda = [&](const areg_t<4> &inds)->void { - const complex_t cache = data_[inds[pos0]]; + const complex_t cache = data_[inds[pos0]]; data_[inds[pos0]] = -I * data_[inds[pos1]]; data_[inds[pos1]] = I * cache; }; @@ -1366,7 +1381,7 @@ void QubitVector::apply_mcy(const reg_t &qubits) { case 3: { // Lambda function for CCY gate auto lambda = [&](const areg_t<8> &inds)->void { - const complex_t cache = data_[inds[pos0]]; + const complex_t cache = data_[inds[pos0]]; data_[inds[pos0]] = -I * data_[inds[pos1]]; data_[inds[pos1]] = I * cache; }; @@ -1376,7 +1391,7 @@ void QubitVector::apply_mcy(const reg_t &qubits) { default: { // Lambda function for general multi-controlled Y gate auto lambda = [&](const indexes_t &inds)->void { - const complex_t cache = data_[inds[pos0]]; + const complex_t cache = data_[inds[pos0]]; data_[inds[pos0]] = -I * data_[inds[pos1]]; data_[inds[pos1]] = I * cache; }; @@ -1421,7 +1436,7 @@ void QubitVector::apply_mcswap(const reg_t &qubits) { } template -void QubitVector::apply_mcphase(const reg_t &qubits, const complex_t phase) { +void QubitVector::apply_mcphase(const reg_t &qubits, const complex_t phase) { const size_t N = qubits.size(); switch (N) { case 1: { @@ -1460,7 +1475,8 @@ void QubitVector::apply_mcphase(const reg_t &qubits, const complex_t pha template void QubitVector::apply_mcu(const reg_t &qubits, - const cvector_t &mat) { + const cvector_t &mat) { + // Calculate the permutation positions for the last qubit. 
const size_t N = qubits.size(); const size_t pos0 = MASKS[N - 1]; @@ -1476,7 +1492,7 @@ void QubitVector::apply_mcu(const reg_t &qubits, return; } // Otherwise apply general diagonal gate - const cvector_t diag = {{mat[0], mat[3]}}; + const cvector_t diag = {{mat[0], mat[3]}}; // Diagonal version switch (N) { case 1: { @@ -1487,31 +1503,31 @@ void QubitVector::apply_mcu(const reg_t &qubits, case 2: { // Lambda function for CU gate auto lambda = [&](const areg_t<4> &inds, - const cvector_t &_diag)->void { + const cvector_t &_diag)->void { data_[inds[pos0]] = _diag[0] * data_[inds[pos0]]; data_[inds[pos1]] = _diag[1] * data_[inds[pos1]]; }; - apply_lambda(lambda, areg_t<2>({{qubits[0], qubits[1]}}), diag); + apply_lambda(lambda, areg_t<2>({{qubits[0], qubits[1]}}), convert(diag)); return; } case 3: { // Lambda function for CCU gate auto lambda = [&](const areg_t<8> &inds, - const cvector_t &_diag)->void { + const cvector_t &_diag)->void { data_[inds[pos0]] = _diag[0] * data_[inds[pos0]]; data_[inds[pos1]] = _diag[1] * data_[inds[pos1]]; }; - apply_lambda(lambda, areg_t<3>({{qubits[0], qubits[1], qubits[2]}}), diag); + apply_lambda(lambda, areg_t<3>({{qubits[0], qubits[1], qubits[2]}}), convert(diag)); return; } default: { // Lambda function for general multi-controlled U gate auto lambda = [&](const indexes_t &inds, - const cvector_t &_diag)->void { + const cvector_t &_diag)->void { data_[inds[pos0]] = _diag[0] * data_[inds[pos0]]; data_[inds[pos1]] = _diag[1] * data_[inds[pos1]]; }; - apply_lambda(lambda, qubits, diag); + apply_lambda(lambda, qubits, convert(diag)); return; } } // end switch @@ -1527,34 +1543,34 @@ void QubitVector::apply_mcu(const reg_t &qubits, case 2: { // Lambda function for CU gate auto lambda = [&](const areg_t<4> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto cache = data_[inds[pos0]]; data_[inds[pos0]] = _mat[0] * data_[inds[pos0]] + _mat[2] * data_[inds[pos1]]; data_[inds[pos1]] = _mat[1] * cache + _mat[3] * data_[inds[pos1]]; }; - apply_lambda(lambda, areg_t<2>({{qubits[0], qubits[1]}}), mat); + apply_lambda(lambda, areg_t<2>({{qubits[0], qubits[1]}}), convert(mat)); return; } case 3: { // Lambda function for CCU gate auto lambda = [&](const areg_t<8> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto cache = data_[inds[pos0]]; data_[inds[pos0]] = _mat[0] * data_[inds[pos0]] + _mat[2] * data_[inds[pos1]]; data_[inds[pos1]] = _mat[1] * cache + _mat[3] * data_[inds[pos1]]; }; - apply_lambda(lambda, areg_t<3>({{qubits[0], qubits[1], qubits[2]}}), mat); + apply_lambda(lambda, areg_t<3>({{qubits[0], qubits[1], qubits[2]}}), convert(mat)); return; } default: { // Lambda function for general multi-controlled U gate auto lambda = [&](const indexes_t &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto cache = data_[inds[pos0]]; data_[inds[pos0]] = _mat[0] * data_[inds[pos0]] + _mat[2] * data_[inds[pos1]]; data_[inds[pos1]] = _mat[1] * cache + _mat[3] * data_[inds[pos1]]; }; - apply_lambda(lambda, qubits, mat); + apply_lambda(lambda, qubits, convert(mat)); return; } } // end switch @@ -1566,10 +1582,11 @@ void QubitVector::apply_mcu(const reg_t &qubits, template void QubitVector::apply_matrix(const uint_t qubit, - const cvector_t& mat) { + const cvector_t& mat) { + // Check if matrix is diagonal and if so use optimized lambda if (mat[1] == 0.0 && mat[2] == 0.0) { - const cvector_t diag = {{mat[0], mat[3]}}; + const cvector_t diag = {{mat[0], mat[3]}}; 
apply_diagonal_matrix(qubit, diag); return; } @@ -1590,142 +1607,146 @@ void QubitVector::apply_matrix(const uint_t qubit, if (mat[2] == 0.0) { // Non-unitary projector // possibly used in measure/reset/kraus update - auto lambda = [&](const areg_t<2> &inds)->void { - data_[inds[1]] = mat[1] * data_[inds[0]]; + auto lambda = [&](const areg_t<2> &inds, + const cvector_t &_mat)->void { + data_[inds[1]] = _mat[1] * data_[inds[0]]; data_[inds[0]] = 0.0; }; - apply_lambda(lambda, qubits); + apply_lambda(lambda, qubits, convert(mat)); return; } if (mat[1] == 0.0) { // Non-unitary projector // possibly used in measure/reset/kraus update - auto lambda = [&](const areg_t<2> &inds)->void { - data_[inds[0]] = mat[2] * data_[inds[1]]; + auto lambda = [&](const areg_t<2> &inds, + const cvector_t &_mat)->void { + data_[inds[0]] = _mat[2] * data_[inds[1]]; data_[inds[1]] = 0.0; }; - apply_lambda(lambda, qubits); + apply_lambda(lambda, qubits, convert(mat)); return; } // else we have a general anti-diagonal matrix - auto lambda = [&](const areg_t<2> &inds)->void { - const complex_t cache = data_[inds[0]]; - data_[inds[0]] = mat[2] * data_[inds[1]]; - data_[inds[1]] = mat[1] * cache; + auto lambda = [&](const areg_t<2> &inds, + const cvector_t &_mat)->void { + const complex_t cache = data_[inds[0]]; + data_[inds[0]] = _mat[2] * data_[inds[1]]; + data_[inds[1]] = _mat[1] * cache; }; - apply_lambda(lambda, qubits); + apply_lambda(lambda, qubits, convert(mat)); return; } // Otherwise general single-qubit matrix multiplication - auto lambda = [&](const areg_t<2> &inds)->void { + auto lambda = [&](const areg_t<2> &inds, const cvector_t &_mat)->void { const auto cache = data_[inds[0]]; - data_[inds[0]] = mat[0] * cache + mat[2] * data_[inds[1]]; - data_[inds[1]] = mat[1] * cache + mat[3] * data_[inds[1]]; + data_[inds[0]] = _mat[0] * cache + _mat[2] * data_[inds[1]]; + data_[inds[1]] = _mat[1] * cache + _mat[3] * data_[inds[1]]; }; - apply_lambda(lambda, qubits); + apply_lambda(lambda, qubits, convert(mat)); } template void QubitVector::apply_diagonal_matrix(const uint_t qubit, - const cvector_t& diag) { + const cvector_t& diag) { + // TODO: This should be changed so it isn't checking doubles with == if (diag[0] == 1.0) { // [[1, 0], [0, z]] matrix if (diag[1] == 1.0) return; // Identity - if (diag[1] == complex_t(0., -1.)) { // [[1, 0], [0, -i]] + if (diag[1] == (0., -1.)) { // [[1, 0], [0, -i]] auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto k = inds[1]; double cache = data_[k].imag(); data_[k].imag(data_[k].real() * -1.); data_[k].real(cache); }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } - if (diag[1] == complex_t(0., 1.)) { + if (diag[1] == (0., 1.)) { // [[1, 0], [0, i]] auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto k = inds[1]; double cache = data_[k].imag(); data_[k].imag(data_[k].real()); data_[k].real(cache * -1.); }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } if (diag[0] == 0.0) { // [[1, 0], [0, 0]] auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { data_[inds[1]] = 0.0; }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } // general [[1, 0], [0, z]] auto lambda = [&](const 
areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto k = inds[1]; data_[k] *= _mat[1]; }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } else if (diag[1] == 1.0) { // [[z, 0], [0, 1]] matrix - if (diag[0] == complex_t(0., -1.)) { + if (diag[0] == (0., -1.)) { // [[-i, 0], [0, 1]] auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto k = inds[1]; double cache = data_[k].imag(); data_[k].imag(data_[k].real() * -1.); data_[k].real(cache); }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } - if (diag[0] == complex_t(0., 1.)) { + if (diag[0] == (0., 1.)) { // [[i, 0], [0, 1]] auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto k = inds[1]; double cache = data_[k].imag(); data_[k].imag(data_[k].real()); data_[k].real(cache * -1.); }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } if (diag[0] == 0.0) { // [[0, 0], [0, 1]] auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { data_[inds[0]] = 0.0; }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } // general [[z, 0], [0, 1]] auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto k = inds[0]; data_[k] *= _mat[0]; }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); return; } else { // Lambda function for diagonal matrix multiplication auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat)->void { + const cvector_t &_mat)->void { const auto k0 = inds[0]; const auto k1 = inds[1]; data_[k0] *= _mat[0]; data_[k1] *= _mat[1]; }; - apply_lambda(lambda, areg_t<1>({{qubit}}), diag); + apply_lambda(lambda, areg_t<1>({{qubit}}), convert(diag)); } } @@ -1745,7 +1766,7 @@ double QubitVector::norm() const { } template -double QubitVector::norm(const reg_t &qubits, const cvector_t &mat) const { +double QubitVector::norm(const reg_t &qubits, const cvector_t &mat) const { const uint_t N = qubits.size(); @@ -1760,70 +1781,70 @@ double QubitVector::norm(const reg_t &qubits, const cvector_t &mat) cons return norm(qubits[0], mat); case 2: { // Lambda function for 2-qubit matrix norm - auto lambda = [&](const areg_t<4> &inds, const cvector_t &_mat, + auto lambda = [&](const areg_t<4> &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < 4; i++) { - complex_t vi = 0; + complex_t vi = 0; for (size_t j = 0; j < 4; j++) vi += _mat[i + 4 * j] * data_[inds[j]]; val_re += std::real(vi * std::conj(vi)); } }; areg_t<2> qubits_arr = {{qubits[0], qubits[1]}}; - return std::real(apply_reduction_lambda(lambda, qubits_arr, mat)); + return std::real(apply_reduction_lambda(lambda, qubits_arr, convert(mat))); } case 3: { // Lambda function for 3-qubit matrix norm - auto lambda = [&](const areg_t<8> &inds, const cvector_t &_mat, + auto lambda = [&](const areg_t<8> &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < 8; i++) { - complex_t vi = 0; + complex_t vi = 0; for (size_t j = 0; j < 8; j++) vi += _mat[i + 8 
* j] * data_[inds[j]]; val_re += std::real(vi * std::conj(vi)); } }; areg_t<3> qubits_arr = {{qubits[0], qubits[1], qubits[2]}}; - return std::real(apply_reduction_lambda(lambda, qubits_arr, mat)); + return std::real(apply_reduction_lambda(lambda, qubits_arr, convert(mat))); } case 4: { // Lambda function for 4-qubit matrix norm - auto lambda = [&](const areg_t<16> &inds, const cvector_t &_mat, + auto lambda = [&](const areg_t<16> &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < 16; i++) { - complex_t vi = 0; + complex_t vi = 0; for (size_t j = 0; j < 16; j++) vi += _mat[i + 16 * j] * data_[inds[j]]; val_re += std::real(vi * std::conj(vi)); } }; areg_t<4> qubits_arr = {{qubits[0], qubits[1], qubits[2], qubits[3]}}; - return std::real(apply_reduction_lambda(lambda, qubits_arr, mat)); + return std::real(apply_reduction_lambda(lambda, qubits_arr, convert(mat))); } default: { // Lambda function for N-qubit matrix norm const uint_t DIM = BITS[N]; - auto lambda = [&](const indexes_t &inds, const cvector_t &_mat, + auto lambda = [&](const indexes_t &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < DIM; i++) { - complex_t vi = 0; + complex_t vi = 0; for (size_t j = 0; j < DIM; j++) vi += _mat[i + DIM * j] * data_[inds[j]]; val_re += std::real(vi * std::conj(vi)); } }; // Use the lambda function - return std::real(apply_reduction_lambda(lambda, qubits, mat)); + return std::real(apply_reduction_lambda(lambda, qubits, convert(mat))); } } // end switch } template -double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t &mat) const { +double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t &mat) const { const uint_t N = qubits.size(); @@ -1838,7 +1859,7 @@ double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t & return norm_diagonal(qubits[0], mat); case 2: { // Lambda function for 2-qubit matrix norm - auto lambda = [&](const areg_t<4> &inds, const cvector_t &_mat, + auto lambda = [&](const areg_t<4> &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < 4; i++) { @@ -1847,11 +1868,11 @@ double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t & } }; areg_t<2> qubits_arr = {{qubits[0], qubits[1]}}; - return std::real(apply_reduction_lambda(lambda, qubits_arr, mat)); + return std::real(apply_reduction_lambda(lambda, qubits_arr, convert(mat))); } case 3: { // Lambda function for 3-qubit matrix norm - auto lambda = [&](const areg_t<8> &inds, const cvector_t &_mat, + auto lambda = [&](const areg_t<8> &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < 8; i++) { @@ -1860,11 +1881,11 @@ double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t & } }; areg_t<3> qubits_arr = {{qubits[0], qubits[1], qubits[2]}}; - return std::real(apply_reduction_lambda(lambda, qubits_arr, mat)); + return std::real(apply_reduction_lambda(lambda, qubits_arr, convert(mat))); } case 4: { // Lambda function for 4-qubit matrix norm - auto lambda = [&](const areg_t<16> &inds, const cvector_t &_mat, + auto lambda = [&](const areg_t<16> &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < 16; i++) { @@ -1873,12 +1894,12 @@ double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t & } }; areg_t<4> qubits_arr = {{qubits[0], 
qubits[1], qubits[2], qubits[3]}}; - return std::real(apply_reduction_lambda(lambda, qubits_arr, mat)); + return std::real(apply_reduction_lambda(lambda, qubits_arr, convert(mat))); } default: { // Lambda function for N-qubit matrix norm const uint_t DIM = BITS[N]; - auto lambda = [&](const indexes_t &inds, const cvector_t &_mat, + auto lambda = [&](const indexes_t &inds, const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused for (size_t i = 0; i < DIM; i++) { @@ -1887,7 +1908,7 @@ double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t & } }; // Use the lambda function - return std::real(apply_reduction_lambda(lambda, qubits, mat)); + return std::real(apply_reduction_lambda(lambda, qubits, convert(mat))); } } // end switch } @@ -1896,7 +1917,7 @@ double QubitVector::norm_diagonal(const reg_t &qubits, const cvector_t & // Single-qubit specialization //------------------------------------------------------------------------------ template -double QubitVector::norm(const uint_t qubit, const cvector_t &mat) const { +double QubitVector::norm(const uint_t qubit, const cvector_t &mat) const { // Error handling #ifdef DEBUG check_vector(mat, 2); @@ -1904,13 +1925,13 @@ double QubitVector::norm(const uint_t qubit, const cvector_t &mat) const // Check if input matrix is diagonal, and if so use diagonal function. if (mat[1] == 0.0 && mat[2] == 0.0) { - const cvector_t diag = {{mat[0], mat[3]}}; + const cvector_t diag = {{mat[0], mat[3]}}; return norm_diagonal(qubit, diag); } // Lambda function for norm reduction to real value. auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat, + const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused @@ -1918,18 +1939,18 @@ double QubitVector::norm(const uint_t qubit, const cvector_t &mat) const const auto v1 = _mat[1] * data_[inds[0]] + _mat[3] * data_[inds[1]]; val_re += std::real(v0 * std::conj(v0)) + std::real(v1 * std::conj(v1)); }; - return std::real(apply_reduction_lambda(lambda, areg_t<1>({{qubit}}), mat)); + return std::real(apply_reduction_lambda(lambda, areg_t<1>({{qubit}}), convert(mat))); } template -double QubitVector::norm_diagonal(const uint_t qubit, const cvector_t &mat) const { +double QubitVector::norm_diagonal(const uint_t qubit, const cvector_t &mat) const { // Error handling #ifdef DEBUG check_vector(mat, 1); #endif // Lambda function for norm reduction to real value. 
auto lambda = [&](const areg_t<2> &inds, - const cvector_t &_mat, + const cvector_t &_mat, double &val_re, double &val_im)->void { (void)val_im; // unused @@ -1937,7 +1958,7 @@ double QubitVector::norm_diagonal(const uint_t qubit, const cvector_t &m const auto v1 = _mat[1] * data_[inds[1]]; val_re += std::real(v0 * std::conj(v0)) + std::real(v1 * std::conj(v1)); }; - return std::real(apply_reduction_lambda(lambda, areg_t<1>({{qubit}}), mat)); + return std::real(apply_reduction_lambda(lambda, areg_t<1>({{qubit}}), convert(mat))); } @@ -1954,8 +1975,8 @@ double QubitVector::probability(const uint_t outcome) const { } template -rvector_t QubitVector::probabilities() const { - rvector_t probs(data_size_); +rvector_t QubitVector::probabilities() const { + rvector_t probs(data_size_); const int_t END = data_size_; probs.assign(data_size_, 0.); @@ -1967,7 +1988,7 @@ rvector_t QubitVector::probabilities() const { } template -rvector_t QubitVector::probabilities(const reg_t &qubits) const { +rvector_t QubitVector::probabilities(const reg_t &qubits) const { const size_t N = qubits.size(); @@ -1984,17 +2005,17 @@ rvector_t QubitVector::probabilities(const reg_t &qubits) const { #endif if (N == 0) - return rvector_t({norm()}); + return rvector_t({norm()}); auto qubits_sorted = qubits; std::sort(qubits_sorted.begin(), qubits_sorted.end()); if ((N == num_qubits_) && (qubits == qubits_sorted)) return probabilities(); - rvector_t probs(DIM, 0.); + rvector_t probs(DIM, 0.); #pragma omp parallel if (num_qubits_ > omp_threshold_ && omp_threads_ > 1) num_threads(omp_threads_) { - rvector_t probs_private(DIM, 0.); + rvector_t probs_private(DIM, 0.); #pragma omp for for (int_t k = 0; k < END; k++) { auto idx = indexes(qubits, qubits_sorted, k); @@ -2016,7 +2037,7 @@ rvector_t QubitVector::probabilities(const reg_t &qubits) const { //------------------------------------------------------------------------------ template -rvector_t QubitVector::probabilities(const uint_t qubit) const { +rvector_t QubitVector::probabilities(const uint_t qubit) const { // Error handling #ifdef DEBUG @@ -2032,7 +2053,7 @@ rvector_t QubitVector::probabilities(const uint_t qubit) const { val_p1 += probability(inds[1]); }; auto p0p1 = apply_reduction_lambda(lambda, areg_t<1>({{qubit}})); - return rvector_t({std::real(p0p1), std::imag(p0p1)}); + return rvector_t({std::real(p0p1), std::imag(p0p1)}); } @@ -2040,7 +2061,7 @@ rvector_t QubitVector::probabilities(const uint_t qubit) const { // Sample measure outcomes //------------------------------------------------------------------------------ template -reg_t QubitVector::sample_measure(const std::vector &rnds) const { +reg_t QubitVector::sample_measure(const rvector_t &rnds) const { const int_t END = data_size_; const int_t SHOTS = rnds.size(); @@ -2070,7 +2091,7 @@ reg_t QubitVector::sample_measure(const std::vector &rnds) const // Qubit number is above index size, loop over index blocks else { // Initialize indexes - std::vector idxs; + rvector_t idxs; idxs.assign(INDEX_END, 0.0); uint_t loop = (END >> INDEX_SIZE); #pragma omp parallel if (num_qubits_ > omp_threshold_ && omp_threads_ > 1) num_threads(omp_threads_) diff --git a/src/simulators/statevector/statevector_state.hpp b/src/simulators/statevector/statevector_state.hpp index b7b1176150..72a731b4ab 100755 --- a/src/simulators/statevector/statevector_state.hpp +++ b/src/simulators/statevector/statevector_state.hpp @@ -47,7 +47,7 @@ enum class Snapshots { // QubitVector State subclass 
//========================================================================= -template > +template > class State : public Base::State { public: using BaseState = Base::State; diff --git a/src/simulators/unitary/unitary_state.hpp b/src/simulators/unitary/unitary_state.hpp index efc5172ead..1ef2796922 100755 --- a/src/simulators/unitary/unitary_state.hpp +++ b/src/simulators/unitary/unitary_state.hpp @@ -40,7 +40,7 @@ enum class Gates { // QubitUnitary State subclass //========================================================================= -template +template class State : public Base::State> { public: using BaseState = Base::State>; diff --git a/src/simulators/unitary/unitarymatrix.hpp b/src/simulators/unitary/unitarymatrix.hpp index 45754240a3..fd25688002 100755 --- a/src/simulators/unitary/unitarymatrix.hpp +++ b/src/simulators/unitary/unitarymatrix.hpp @@ -31,7 +31,7 @@ namespace QV { // convention left-matrix multiplication on qubit-n is equal to multiplication // of the vectorized 2*N qubit vector also on qubit-n. -template +template class UnitaryMatrix : public QubitVector { public: @@ -64,7 +64,7 @@ class UnitaryMatrix : public QubitVector { AER::cmatrix_t matrix() const; // Return the trace of the unitary - complex_t trace() const; + complex_t trace() const; // Return JSON serialization of UnitaryMatrix; json_t json() const; @@ -130,7 +130,7 @@ template json_t UnitaryMatrix::json() const { const int_t nrows = rows_; // Initialize empty matrix - const json_t ZERO = complex_t(0.0, 0.0); + const json_t ZERO = complex_t(0.0, 0.0); json_t js = json_t(nrows, json_t(nrows, ZERO)); if (BaseVector::json_chop_threshold_ > 0) { @@ -253,7 +253,7 @@ void UnitaryMatrix::set_num_qubits(size_t num_qubits) { } template -complex_t UnitaryMatrix::trace() const { +complex_t UnitaryMatrix::trace() const { const int_t NROWS = rows_; const int_t DIAG = NROWS + 1; double val_re = 0.; @@ -266,7 +266,7 @@ complex_t UnitaryMatrix::trace() const { val_im += std::imag(BaseVector::data_[k * DIAG]); } } - return complex_t(val_re, val_im); + return complex_t(val_re, val_im); }
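The sketch below illustrates the precision-templating pattern used throughout this patch: the state vector is parameterized on a floating-point type data_t, while gate matrices are still passed in as double precision and converted to the vector's native precision at the call boundary (the role played by convert() above). This is a minimal, self-contained example, not the Aer implementation; MiniStateVector, its members, and apply_matrix are illustrative names only.

// Simplified sketch (assumed names, not Aer API) of a precision-templated
// state vector with a convert() step at the double-precision boundary.
#include <cmath>
#include <complex>
#include <cstddef>
#include <iostream>
#include <vector>

template <typename data_t = double>
class MiniStateVector {
public:
  using complex_t = std::complex<data_t>;
  using cvector_t = std::vector<complex_t>;

  explicit MiniStateVector(size_t num_qubits)
      : state_(size_t{1} << num_qubits, complex_t(0, 0)) {
    state_[0] = complex_t(1, 0); // start in |0...0>
  }

  // Convert a double-precision vectorized matrix into this vector's precision.
  cvector_t convert(const std::vector<std::complex<double>> &v) const {
    cvector_t ret(v.size());
    for (size_t i = 0; i < v.size(); ++i)
      ret[i] = complex_t(static_cast<data_t>(v[i].real()),
                         static_cast<data_t>(v[i].imag()));
    return ret;
  }

  // Apply a single-qubit gate given as a column-major 2x2 matrix of doubles.
  void apply_matrix(size_t qubit, const std::vector<std::complex<double>> &mat) {
    const cvector_t m = convert(mat);       // convert once, at the boundary
    const size_t stride = size_t{1} << qubit;
    for (size_t k = 0; k < state_.size(); k += 2 * stride)
      for (size_t i = 0; i < stride; ++i) {
        const complex_t a0 = state_[k + i];
        const complex_t a1 = state_[k + i + stride];
        state_[k + i]          = m[0] * a0 + m[2] * a1; // column-major layout
        state_[k + i + stride] = m[1] * a0 + m[3] * a1;
      }
  }

  const cvector_t &data() const { return state_; }

private:
  cvector_t state_; // amplitudes stored in data_t precision
};

int main() {
  const double s = 1.0 / std::sqrt(2.0);
  const std::vector<std::complex<double>> hadamard = {s, s, s, -s};

  // Single-precision instance: roughly half the memory of the double version.
  MiniStateVector<float> psi(1);
  psi.apply_matrix(0, hadamard);
  for (const auto &amp : psi.data())
    std::cout << amp << "\n"; // both amplitudes ~0.707
}

Keeping the public interface in double while storing amplitudes in data_t, as in the patch, means callers (noise models, snapshots, JSON input) stay unchanged and only pay a one-time conversion per matrix rather than per amplitude update.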