Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

bppp: align terminology with paper #226

Merged
merged 2 commits into from
Apr 20, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
160 changes: 80 additions & 80 deletions src/modules/bppp/bppp_norm_product_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ static int secp256k1_scalar_inner_product(
/* Computes the q-weighted inner product of two vectors of scalars
* for elements starting from offset a and offset b respectively with the
* given step.
* Returns: Sum_{i=0..len-1}(a[offset_a + step*i] * b[offset_b2 + step*i]*q^(i+1)) */
* Returns: Sum_{i=0..len-1}(a[a_offset + step*i] * b[b_offset + step*i]*mu^(i+1)) */
static int secp256k1_weighted_scalar_inner_product(
secp256k1_scalar* res,
const secp256k1_scalar* a_vec,
Expand All @@ -52,29 +52,29 @@ static int secp256k1_weighted_scalar_inner_product(
const size_t b_offset,
const size_t step,
const size_t len,
const secp256k1_scalar* q
const secp256k1_scalar* mu
) {
secp256k1_scalar q_pow;
secp256k1_scalar mu_pow;
size_t i;
secp256k1_scalar_set_int(res, 0);
q_pow = *q;
mu_pow = *mu;
for (i = 0; i < len; i++) {
secp256k1_scalar term;
secp256k1_scalar_mul(&term, &a_vec[a_offset + step*i], &b_vec[b_offset + step*i]);
secp256k1_scalar_mul(&term, &term, &q_pow);
secp256k1_scalar_mul(&q_pow, &q_pow, q);
secp256k1_scalar_mul(&term, &term, &mu_pow);
secp256k1_scalar_mul(&mu_pow, &mu_pow, mu);
secp256k1_scalar_add(res, res, &term);
}
return 1;
}

/* Compute the powers of r as r, r^2, r^4 ... r^(2^(n-1)) */
static void secp256k1_bppp_powers_of_r(secp256k1_scalar *powers, const secp256k1_scalar *r, size_t n) {
/* Compute the powers of rho as rho, rho^2, rho^4 ... rho^(2^(n-1)) */
static void secp256k1_bppp_powers_of_rho(secp256k1_scalar *powers, const secp256k1_scalar *rho, size_t n) {
size_t i;
if (n == 0) {
return;
}
powers[0] = *r;
powers[0] = *rho;
for (i = 1; i < n; i++) {
secp256k1_scalar_sqr(&powers[i], &powers[i - 1]);
}
Expand All @@ -99,7 +99,7 @@ static int ecmult_bp_commit_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t id
}

/* Create a commitment `commit` = vG + n_vec*G_vec + l_vec*H_vec where
v = |n_vec*n_vec|_q + <l_vec, c_vec>. |w|_q denotes q-weighted norm of w and
v = |n_vec*n_vec|_mu + <l_vec, c_vec>. |w|_mu denotes mu-weighted norm of w and
<l, r> denotes inner product of l and r.
*/
static int secp256k1_bppp_commit(
Expand All @@ -113,7 +113,7 @@ static int secp256k1_bppp_commit(
size_t l_vec_len,
const secp256k1_scalar* c_vec,
size_t c_vec_len,
const secp256k1_scalar* q
const secp256k1_scalar* mu
) {
secp256k1_scalar v, l_c;
/* First n_vec_len generators are Gs, rest are Hs*/
Expand All @@ -125,8 +125,8 @@ static int secp256k1_bppp_commit(
VERIFY_CHECK(secp256k1_is_power_of_two(n_vec_len));
VERIFY_CHECK(secp256k1_is_power_of_two(c_vec_len));

/* Compute v = n_vec*n_vec*q + l_vec*c_vec */
secp256k1_weighted_scalar_inner_product(&v, n_vec, 0 /*a offset */, n_vec, 0 /*b offset*/, 1 /*step*/, n_vec_len, q);
/* Compute v = n_vec*n_vec*mu + l_vec*c_vec */
secp256k1_weighted_scalar_inner_product(&v, n_vec, 0 /*a offset */, n_vec, 0 /*b offset*/, 1 /*step*/, n_vec_len, mu);
secp256k1_scalar_inner_product(&l_c, l_vec, 0 /*a offset */, c_vec, 0 /*b offset*/, 1 /*step*/, l_vec_len);
secp256k1_scalar_add(&v, &v, &l_c);

Expand All @@ -150,8 +150,8 @@ typedef struct ecmult_x_cb_data {
const secp256k1_scalar *n;
const secp256k1_ge *g;
const secp256k1_scalar *l;
const secp256k1_scalar *r;
const secp256k1_scalar *r_inv;
const secp256k1_scalar *rho;
const secp256k1_scalar *rho_inv;
size_t G_GENS_LEN; /* Figure out initialization syntax so that this can also be const */
size_t n_len;
} ecmult_x_cb_data;
Expand All @@ -160,10 +160,10 @@ static int ecmult_x_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void
ecmult_x_cb_data *data = (ecmult_x_cb_data*) cbdata;
if (idx < data->n_len) {
if (idx % 2 == 0) {
secp256k1_scalar_mul(sc, &data->n[idx + 1], data->r);
secp256k1_scalar_mul(sc, &data->n[idx + 1], data->rho);
*pt = data->g[idx];
} else {
secp256k1_scalar_mul(sc, &data->n[idx - 1], data->r_inv);
secp256k1_scalar_mul(sc, &data->n[idx - 1], data->rho_inv);
*pt = data->g[idx];
}
} else {
Expand Down Expand Up @@ -201,11 +201,11 @@ static int ecmult_r_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void
}

/* Recursively compute the norm argument proof satisfying the relation
* <n_vec, n_vec>_q + <c_vec, l_vec> = v for some commitment
* C = v*G + <n_vec, G_vec> + <l_vec, H_vec>. <x, x>_q is the weighted inner
* product of x with itself, where the weights are the first n powers of q.
* <x, x>_q = q*x_1^2 + q^2*x_2^2 + q^3*x_3^2 + ... + q^n*x_n^2.
* The API computes q as square of the r challenge (`r^2`).
* <n_vec, n_vec>_mu + <c_vec, l_vec> = v for some commitment
* C = v*G + <n_vec, G_vec> + <l_vec, H_vec>. <x, x>_mu is the weighted inner
* product of x with itself, where the weights are the first n powers of mu.
* <x, x>_mu = mu*x_1^2 + mu^2*x_2^2 + mu^3*x_3^2 + ... + mu^n*x_n^2.
* The API computes mu as square of the rho challenge (`rho^2`).
*
* The norm argument is not zero knowledge and does not operate on any secret data.
* Thus the following code uses variable time operations while computing the proof.
Expand All @@ -222,7 +222,7 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
unsigned char* proof,
size_t *proof_len,
secp256k1_sha256* transcript, /* Transcript hash of the parent protocol */
const secp256k1_scalar* r,
const secp256k1_scalar* rho,
secp256k1_ge* g_vec,
size_t g_vec_len,
secp256k1_scalar* n_vec,
Expand All @@ -232,7 +232,7 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
secp256k1_scalar* c_vec,
size_t c_vec_len
) {
secp256k1_scalar q_f, r_f = *r;
secp256k1_scalar mu_f, rho_f = *rho;
size_t proof_idx = 0;
ecmult_x_cb_data x_cb_data;
ecmult_r_cb_data r_cb_data;
Expand All @@ -259,38 +259,38 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
r_cb_data.g1 = g_vec;
r_cb_data.l1 = l_vec;
r_cb_data.G_GENS_LEN = G_GENS_LEN;
secp256k1_scalar_sqr(&q_f, &r_f);
secp256k1_scalar_sqr(&mu_f, &rho_f);


while (g_len > 1 || h_len > 1) {
size_t i, num_points;
secp256k1_scalar q_sq, r_inv, c0_l1, c1_l0, x_v, c1_l1, r_v;
secp256k1_scalar mu_sq, rho_inv, c0_l1, c1_l0, x_v, c1_l1, r_v;
secp256k1_gej rj, xj;
secp256k1_ge r_ge, x_ge;
secp256k1_scalar e;
secp256k1_scalar gamma;

secp256k1_scalar_inverse_var(&r_inv, &r_f);
secp256k1_scalar_sqr(&q_sq, &q_f);
secp256k1_scalar_inverse_var(&rho_inv, &rho_f);
secp256k1_scalar_sqr(&mu_sq, &mu_f);

/* Compute the X commitment X = WIP(r_inv*n0,n1)_q2 * g + r<n1,G> + <r_inv*x0, G1> */
/* Compute the X commitment X = WIP(rho_inv*n0,n1)_mu2 * g + r<n1,G> + <rho_inv*x0, G1> */
secp256k1_scalar_inner_product(&c0_l1, c_vec, 0, l_vec, 1, 2, h_len/2);
secp256k1_scalar_inner_product(&c1_l0, c_vec, 1, l_vec, 0, 2, h_len/2);
secp256k1_weighted_scalar_inner_product(&x_v, n_vec, 0, n_vec, 1, 2, g_len/2, &q_sq);
secp256k1_scalar_mul(&x_v, &x_v, &r_inv);
secp256k1_weighted_scalar_inner_product(&x_v, n_vec, 0, n_vec, 1, 2, g_len/2, &mu_sq);
secp256k1_scalar_mul(&x_v, &x_v, &rho_inv);
secp256k1_scalar_add(&x_v, &x_v, &x_v);
secp256k1_scalar_add(&x_v, &x_v, &c0_l1);
secp256k1_scalar_add(&x_v, &x_v, &c1_l0);

x_cb_data.r = &r_f;
x_cb_data.r_inv = &r_inv;
x_cb_data.rho = &rho_f;
x_cb_data.rho_inv = &rho_inv;
x_cb_data.n_len = g_len >= 2 ? g_len : 0;
num_points = x_cb_data.n_len + (h_len >= 2 ? h_len : 0);

if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &xj, &x_v, ecmult_x_cb, (void*)&x_cb_data, num_points)) {
return 0;
}

secp256k1_weighted_scalar_inner_product(&r_v, n_vec, 1, n_vec, 1, 2, g_len/2, &q_sq);
secp256k1_weighted_scalar_inner_product(&r_v, n_vec, 1, n_vec, 1, 2, g_len/2, &mu_sq);
secp256k1_scalar_inner_product(&c1_l1, c_vec, 1, l_vec, 1, 2, h_len/2);
secp256k1_scalar_add(&r_v, &r_v, &c1_l1);

Expand All @@ -314,22 +314,22 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
secp256k1_bppp_serialize_points(&proof[proof_idx], &x_ge, &r_ge);
proof_idx += 65;

/* Obtain challenge e for the the next round */
/* Obtain challenge gamma for the next round */
secp256k1_sha256_write(transcript, &proof[proof_idx - 65], 65);
secp256k1_bppp_challenge_scalar(&e, transcript, 0);
secp256k1_bppp_challenge_scalar(&gamma, transcript, 0);

if (g_len > 1) {
for (i = 0; i < g_len; i = i + 2) {
secp256k1_scalar nl, nr;
secp256k1_gej gl, gr;
secp256k1_scalar_mul(&nl, &n_vec[i], &r_inv);
secp256k1_scalar_mul(&nr, &n_vec[i + 1], &e);
secp256k1_scalar_mul(&nl, &n_vec[i], &rho_inv);
secp256k1_scalar_mul(&nr, &n_vec[i + 1], &gamma);
secp256k1_scalar_add(&n_vec[i/2], &nl, &nr);

secp256k1_gej_set_ge(&gl, &g_vec[i]);
secp256k1_ecmult(&gl, &gl, &r_f, NULL);
secp256k1_ecmult(&gl, &gl, &rho_f, NULL);
secp256k1_gej_set_ge(&gr, &g_vec[i + 1]);
secp256k1_ecmult(&gr, &gr, &e, NULL);
secp256k1_ecmult(&gr, &gr, &gamma, NULL);
secp256k1_gej_add_var(&gl, &gl, &gr, NULL);
secp256k1_ge_set_gej_var(&g_vec[i/2], &gl);
}
Expand All @@ -339,22 +339,22 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
for (i = 0; i < h_len; i = i + 2) {
secp256k1_scalar temp1;
secp256k1_gej grj;
secp256k1_scalar_mul(&temp1, &c_vec[i + 1], &e);
secp256k1_scalar_mul(&temp1, &c_vec[i + 1], &gamma);
secp256k1_scalar_add(&c_vec[i/2], &c_vec[i], &temp1);

secp256k1_scalar_mul(&temp1, &l_vec[i + 1], &e);
secp256k1_scalar_mul(&temp1, &l_vec[i + 1], &gamma);
secp256k1_scalar_add(&l_vec[i/2], &l_vec[i], &temp1);

secp256k1_gej_set_ge(&grj, &g_vec[G_GENS_LEN + i + 1]);
secp256k1_ecmult(&grj, &grj, &e, NULL);
secp256k1_ecmult(&grj, &grj, &gamma, NULL);
secp256k1_gej_add_ge_var(&grj, &grj, &g_vec[G_GENS_LEN + i], NULL);
secp256k1_ge_set_gej_var(&g_vec[G_GENS_LEN + i/2], &grj);
}
}
g_len = g_len / 2;
h_len = h_len / 2;
r_f = q_f;
q_f = q_sq;
rho_f = mu_f;
mu_f = mu_sq;
}

secp256k1_scalar_get_b32(&proof[proof_idx], &n_vec[0]);
Expand All @@ -367,7 +367,7 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
/* Callback payload for the verifier's first multi-exponentiation:
 * the serialized proof bytes, the input commitment point, and the
 * per-round gamma challenges collected from the transcript.
 * (The diff residue listed both the old `challenges` member and its
 * renamed successor `gammas`; the merged code keeps only `gammas`.) */
typedef struct ec_mult_verify_cb_data1 {
    const unsigned char *proof;
    const secp256k1_ge *commit;
    const secp256k1_scalar *gammas;
} ec_mult_verify_cb_data1;

static int ec_mult_verify_cb1(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
Expand All @@ -381,7 +381,7 @@ static int ec_mult_verify_cb1(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx
if (idx % 2 == 0) {
unsigned char pk_buf[33];
idx /= 2;
*sc = data->challenges[idx];
*sc = data->gammas[idx];
pk_buf[0] = 2 | (data->proof[65*idx] >> 1);
memcpy(&pk_buf[1], &data->proof[65*idx + 1], 32);
if (!secp256k1_eckey_pubkey_parse(pt, pk_buf, sizeof(pk_buf))) {
Expand All @@ -393,7 +393,7 @@ static int ec_mult_verify_cb1(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx
idx /= 2;
secp256k1_scalar_set_int(&neg_one, 1);
secp256k1_scalar_negate(&neg_one, &neg_one);
*sc = data->challenges[idx];
*sc = data->gammas[idx];
secp256k1_scalar_sqr(sc, sc);
secp256k1_scalar_add(sc, sc, &neg_one);
pk_buf[0] = 2 | data->proof[65*idx];
Expand Down Expand Up @@ -432,15 +432,15 @@ static int secp256k1_bppp_rangeproof_norm_product_verify(
const unsigned char* proof,
size_t proof_len,
secp256k1_sha256* transcript,
const secp256k1_scalar* r,
const secp256k1_scalar* rho,
const secp256k1_bppp_generators* g_vec,
size_t g_len,
const secp256k1_scalar* c_vec,
size_t c_vec_len,
const secp256k1_ge* commit
) {
secp256k1_scalar r_f, q_f, v, n, l, r_inv, h_c;
secp256k1_scalar *es, *s_g, *s_h, *r_inv_pows;
secp256k1_scalar rho_f, mu_f, v, n, l, rho_inv, h_c;
secp256k1_scalar *gammas, *s_g, *s_h, *rho_inv_pows;
secp256k1_gej res1, res2;
size_t i = 0, scratch_checkpoint;
int overflow;
Expand All @@ -467,69 +467,69 @@ static int secp256k1_bppp_rangeproof_norm_product_verify(
if (overflow) return 0;
secp256k1_scalar_set_b32(&l, &proof[n_rounds*65 + 32], &overflow); /* l */
if (overflow) return 0;
if (secp256k1_scalar_is_zero(r)) return 0;
if (secp256k1_scalar_is_zero(rho)) return 0;

/* Collect the challenges in a new vector */
/* Collect the gammas in a new vector */
scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch);
es = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_rounds * sizeof(secp256k1_scalar));
gammas = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_rounds * sizeof(secp256k1_scalar));
s_g = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, g_len * sizeof(secp256k1_scalar));
s_h = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, h_len * sizeof(secp256k1_scalar));
r_inv_pows = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, log_g_len * sizeof(secp256k1_scalar));
if (es == NULL || s_g == NULL || s_h == NULL || r_inv_pows == NULL) {
rho_inv_pows = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, log_g_len * sizeof(secp256k1_scalar));
if (gammas == NULL || s_g == NULL || s_h == NULL || rho_inv_pows == NULL) {
secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
return 0;
}

/* Compute powers of r_inv. Later used in g_factor computations*/
secp256k1_scalar_inverse_var(&r_inv, r);
secp256k1_bppp_powers_of_r(r_inv_pows, &r_inv, log_g_len);
/* Compute powers of rho_inv. Later used in g_factor computations*/
secp256k1_scalar_inverse_var(&rho_inv, rho);
secp256k1_bppp_powers_of_rho(rho_inv_pows, &rho_inv, log_g_len);

/* Compute r_f = r^(2^log_g_len) */
r_f = *r;
/* Compute rho_f = rho^(2^log_g_len) */
rho_f = *rho;
for (i = 0; i < log_g_len; i++) {
secp256k1_scalar_sqr(&r_f, &r_f);
secp256k1_scalar_sqr(&rho_f, &rho_f);
}

for (i = 0; i < n_rounds; i++) {
secp256k1_scalar e;
secp256k1_scalar gamma;
secp256k1_sha256_write(transcript, &proof[i * 65], 65);
secp256k1_bppp_challenge_scalar(&e, transcript, 0);
es[i] = e;
secp256k1_bppp_challenge_scalar(&gamma, transcript, 0);
gammas[i] = gamma;
}
/* s_g[0] = n * \prod_{j=0}^{log_g_len - 1} r^(2^j)
* = n * r^(2^log_g_len - 1)
* = n * r_f * r_inv */
secp256k1_scalar_mul(&s_g[0], &n, &r_f);
secp256k1_scalar_mul(&s_g[0], &s_g[0], &r_inv);
/* s_g[0] = n * \prod_{j=0}^{log_g_len - 1} rho^(2^j)
* = n * rho^(2^log_g_len - 1)
* = n * rho_f * rho_inv */
secp256k1_scalar_mul(&s_g[0], &n, &rho_f);
secp256k1_scalar_mul(&s_g[0], &s_g[0], &rho_inv);
for (i = 1; i < g_len; i++) {
size_t log_i = secp256k1_bppp_log2(i);
size_t nearest_pow_of_two = (size_t)1 << log_i;
/* This combines the two multiplications of challenges and r_invs in a
/* This combines the two multiplications of gammas and rho_invs in a
* single loop.
* s_g[i] = s_g[i - nearest_pow_of_two]
* * e[log_i] * r_inv^(2^log_i) */
secp256k1_scalar_mul(&s_g[i], &s_g[i - nearest_pow_of_two], &es[log_i]);
secp256k1_scalar_mul(&s_g[i], &s_g[i], &r_inv_pows[log_i]);
* * gamma[log_i] * rho_inv^(2^log_i) */
secp256k1_scalar_mul(&s_g[i], &s_g[i - nearest_pow_of_two], &gammas[log_i]);
secp256k1_scalar_mul(&s_g[i], &s_g[i], &rho_inv_pows[log_i]);
}
s_h[0] = l;
secp256k1_scalar_set_int(&h_c, 0);
for (i = 1; i < h_len; i++) {
size_t log_i = secp256k1_bppp_log2(i);
size_t nearest_pow_of_two = (size_t)1 << log_i;
secp256k1_scalar_mul(&s_h[i], &s_h[i - nearest_pow_of_two], &es[log_i]);
secp256k1_scalar_mul(&s_h[i], &s_h[i - nearest_pow_of_two], &gammas[log_i]);
}
secp256k1_scalar_inner_product(&h_c, c_vec, 0 /* a_offset */ , s_h, 0 /* b_offset */, 1 /* step */, h_len);
/* Compute v = n*n*q_f + l*h_c where q_f = r_f^2 */
secp256k1_scalar_sqr(&q_f, &r_f);
/* Compute v = n*n*mu_f + l*h_c where mu_f = rho_f^2 */
secp256k1_scalar_sqr(&mu_f, &rho_f);
secp256k1_scalar_mul(&v, &n, &n);
secp256k1_scalar_mul(&v, &v, &q_f);
secp256k1_scalar_mul(&v, &v, &mu_f);
secp256k1_scalar_add(&v, &v, &h_c);

{
ec_mult_verify_cb_data1 data;
data.proof = proof;
data.commit = commit;
data.challenges = es;
data.gammas = gammas;

if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &res1, NULL, ec_mult_verify_cb1, &data, 2*n_rounds + 1)) {
secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
Expand Down
Loading