diff --git a/constantine/math/elliptic/ec_endomorphism_accel.nim b/constantine/math/elliptic/ec_endomorphism_accel.nim index bb518a88..d4be13b5 100644 --- a/constantine/math/elliptic/ec_endomorphism_accel.nim +++ b/constantine/math/elliptic/ec_endomorphism_accel.nim @@ -44,11 +44,12 @@ type template decomposeEndoImpl[scalBits: static int]( scalar: BigInt[scalBits], - F: typedesc[Fp or Fp2], + frBits: static int, + Name: static Algebra, + G: static Subgroup, copyMiniScalarsResult: untyped) = static: doAssert scalBits >= L, "Cannot decompose a scalar smaller than a mini-scalar or the decomposition coefficient" # Equal when no window or no negative handling, greater otherwise - const frBits = Fr[F.Name].bits() static: doAssert frBits >= scalBits static: doAssert L >= ceilDiv_vartime(frBits, M) + 1 const w = frBits.wordsRequired() @@ -59,24 +60,24 @@ template decomposeEndoImpl[scalBits: static int]( when M == 2: var alphas{.noInit, inject.}: ( - BigInt[frBits + babai(F)[0][0].bits], - BigInt[frBits + babai(F)[1][0].bits] + BigInt[frBits + babai(Name, G)[0][0].bits], + BigInt[frBits + babai(Name, G)[1][0].bits] ) elif M == 4: var alphas{.noInit, inject.}: ( - BigInt[frBits + babai(F)[0][0].bits], - BigInt[frBits + babai(F)[1][0].bits], - BigInt[frBits + babai(F)[2][0].bits], - BigInt[frBits + babai(F)[3][0].bits] + BigInt[frBits + babai(Name, G)[0][0].bits], + BigInt[frBits + babai(Name, G)[1][0].bits], + BigInt[frBits + babai(Name, G)[2][0].bits], + BigInt[frBits + babai(Name, G)[3][0].bits] ) else: {.error: "The decomposition degree " & $M & " is not configured".} staticFor i, 0, M: - when bool babai(F)[i][0].isZero(): + when bool babai(Name, G)[i][0].isZero(): alphas[i].setZero() else: - alphas[i].prod_high_words(babai(F)[i][0], scalar, w) + alphas[i].prod_high_words(babai(Name, G)[i][0], scalar, w) # We have k0 = s - 𝛼0 b00 - 𝛼1 b10 ... - 𝛼m bm0 # and kj = 0 - 𝛼j b0j - 𝛼1 b1j ... - 𝛼m bmj @@ -86,13 +87,13 @@ template decomposeEndoImpl[scalBits: static int]( k[0].copyTruncatedFrom(scalar) staticFor miniScalarIdx, 0, M: staticFor basisIdx, 0, M: - when not bool lattice(F)[basisIdx][miniScalarIdx][0].isZero(): - when bool lattice(F)[basisIdx][miniScalarIdx][0].isOne(): + when not bool lattice(Name, G)[basisIdx][miniScalarIdx][0].isZero(): + when bool lattice(Name, G)[basisIdx][miniScalarIdx][0].isOne(): alphaB.copyTruncatedFrom(alphas[basisIdx]) else: - alphaB.prod(alphas[basisIdx], lattice(F)[basisIdx][miniScalarIdx][0]) + alphaB.prod(alphas[basisIdx], lattice(Name, G)[basisIdx][miniScalarIdx][0]) - when lattice(F)[basisIdx][miniScalarIdx][1] xor babai(F)[basisIdx][1]: + when lattice(Name, G)[basisIdx][miniScalarIdx][1] xor babai(Name, G)[basisIdx][1]: k[miniScalarIdx] += alphaB else: k[miniScalarIdx] -= alphaB @@ -103,7 +104,9 @@ func decomposeEndo*[M, scalBits, L: static int]( miniScalars: var MultiScalar[M, L], negatePoints: var array[M, SecretBool], scalar: BigInt[scalBits], - F: typedesc[Fp or Fp2]) = + frBits: static int, + Name: static Algebra, + G: static Subgroup) = ## Decompose a secret scalar into M mini-scalars ## using a curve endomorphism(s) characteristics. ## @@ -119,7 +122,7 @@ func decomposeEndo*[M, scalBits, L: static int]( ## and negate it as well. ## ## This implements solution 1. 
- decomposeEndoImpl(scalar, F): + decomposeEndoImpl(scalar, frBits, Name, G): # Negative miniscalars are turned positive # Caller should negate the corresponding Elliptic Curve points let isNeg = k[miniScalarIdx].isMsbSet() @@ -130,7 +133,9 @@ func decomposeEndo*[M, scalBits, L: static int]( func decomposeEndo*[M, scalBits, L: static int]( miniScalars: var MultiScalar[M, L], scalar: BigInt[scalBits], - F: typedesc[Fp or Fp2]) = + frBits: static int, + Name: static Algebra, + G: static Subgroup) = ## Decompose a secret scalar into M mini-scalars ## using a curve endomorphism(s) characteristics. ## @@ -150,7 +155,7 @@ func decomposeEndo*[M, scalBits, L: static int]( ## Also for partitioned GLV-SAC (with 8-way decomposition) it is necessary. ## ## This implements solution 2. - decomposeEndoImpl(scalar, F): + decomposeEndoImpl(scalar, frBits, Name, G): miniScalars[miniScalarIdx].copyTruncatedFrom(k[miniScalarIdx]) # Secret scalar + dynamic point @@ -339,6 +344,8 @@ func scalarMulEndo*[scalBits; EC]( const M = when P.F is Fp: 2 elif P.F is Fp2: 4 else: {.error: "Unconfigured".} + const G = when EC isnot EC_ShortW_Aff|EC_ShortW_Jac|EC_ShortW_Prj: G1 + else: EC.G var endos {.noInit.}: array[M-1, EC] endos.computeEndomorphisms(P) @@ -347,7 +354,7 @@ func scalarMulEndo*[scalBits; EC]( const L = EC.getScalarField().bits().ceilDiv_vartime(M) + 1 var miniScalars {.noInit.}: array[M, BigInt[L]] var negatePoints {.noInit.}: array[M, SecretBool] - miniScalars.decomposeEndo(negatePoints, scalar, P.F) + miniScalars.decomposeEndo(negatePoints, scalar, EC.getScalarField().bits(), EC.getName(), G) # 3. Handle negative mini-scalars # A scalar decomposition might lead to negative miniscalar. @@ -510,6 +517,8 @@ func scalarMulGLV_m2w2*[scalBits; EC](P0: var EC, scalar: BigInt[scalBits]) {.me mixin affine const C = P0.F.Name # curve static: doAssert: scalBits <= EC.getScalarField().bits() + const G = when EC isnot EC_ShortW_Aff|EC_ShortW_Jac|EC_ShortW_Prj: G1 + else: EC.G # 1. Compute endomorphisms var P1 {.noInit.}: EC @@ -519,7 +528,7 @@ func scalarMulGLV_m2w2*[scalBits; EC](P0: var EC, scalar: BigInt[scalBits]) {.me const L = computeRecodedLength(EC.getScalarField().bits(), 2) var miniScalars {.noInit.}: array[2, BigInt[L]] var negatePoints {.noInit.}: array[2, SecretBool] - miniScalars.decomposeEndo(negatePoints, scalar, P0.F) + miniScalars.decomposeEndo(negatePoints, scalar, EC.getScalarField().bits(), EC.getName(), G) # 3. 
Handle negative mini-scalars # Either negate the associated base and the scalar (in the `endomorphisms` array) diff --git a/constantine/math/elliptic/ec_multi_scalar_mul.nim b/constantine/math/elliptic/ec_multi_scalar_mul.nim index a9541601..cee871f4 100644 --- a/constantine/math/elliptic/ec_multi_scalar_mul.nim +++ b/constantine/math/elliptic/ec_multi_scalar_mul.nim @@ -400,6 +400,8 @@ proc applyEndomorphism[bits: static int, ECaff]( const M = when ECaff.F is Fp: 2 elif ECaff.F is Fp2: 4 else: {.error: "Unconfigured".} + const G = when ECaff isnot EC_ShortW_Aff: G1 + else: ECaff.G const L = ECaff.getScalarField().bits().ceilDiv_vartime(M) + 1 let splitCoefs = allocHeapArray(array[M, BigInt[L]], N) @@ -407,7 +409,7 @@ proc applyEndomorphism[bits: static int, ECaff]( for i in 0 ..< N: var negatePoints {.noinit.}: array[M, SecretBool] - splitCoefs[i].decomposeEndo(negatePoints, coefs[i], ECaff.F) + splitCoefs[i].decomposeEndo(negatePoints, coefs[i], ECaff.getScalarField().bits(), ECaff.getName(), G) if negatePoints[0].bool: endoBasis[i][0].neg(points[i]) else: diff --git a/constantine/math/elliptic/ec_multi_scalar_mul_parallel.nim b/constantine/math/elliptic/ec_multi_scalar_mul_parallel.nim index a132caff..4dcbaea4 100644 --- a/constantine/math/elliptic/ec_multi_scalar_mul_parallel.nim +++ b/constantine/math/elliptic/ec_multi_scalar_mul_parallel.nim @@ -462,6 +462,8 @@ proc applyEndomorphism_parallel[bits: static int, ECaff]( const M = when ECaff.F is Fp: 2 elif ECaff.F is Fp2: 4 else: {.error: "Unconfigured".} + const G = when ECaff isnot EC_ShortW_Aff: G1 + else: ECaff.G const L = ECaff.getScalarField().bits().ceilDiv_vartime(M) + 1 let splitCoefs = allocHeapArray(array[M, BigInt[L]], N) @@ -472,7 +474,7 @@ proc applyEndomorphism_parallel[bits: static int, ECaff]( captures: {coefs, points, splitCoefs, endoBasis} var negatePoints {.noinit.}: array[M, SecretBool] - splitCoefs[i].decomposeEndo(negatePoints, coefs[i], ECaff.F) + splitCoefs[i].decomposeEndo(negatePoints, coefs[i], ECaff.getScalarField().bits(), ECaff.getName(), G) if negatePoints[0].bool: endoBasis[i][0].neg(points[i]) else: diff --git a/constantine/math/elliptic/ec_scalar_mul_vartime.nim b/constantine/math/elliptic/ec_scalar_mul_vartime.nim index b2727daa..7486d39f 100644 --- a/constantine/math/elliptic/ec_scalar_mul_vartime.nim +++ b/constantine/math/elliptic/ec_scalar_mul_vartime.nim @@ -213,7 +213,7 @@ func scalarMul_minHammingWeight_windowed_vartime*[EC](P: var EC, scalar: BigInt, # Odd-only divides precomputation table size by another 2 const precompSize = 1 shl (window - 2) - static: doAssert window < 8, "Window is too large and precomputation would use " & $(precompSize * sizeof(EC)) & " stack space." + static: doAssert window < 8, "Window of size " & $window & " is too large and precomputation would use " & $(precompSize * sizeof(EC)) & " stack space." var tabEC {.noinit.}: array[precompSize, EC] var P2{.noInit.}: EC @@ -252,12 +252,14 @@ func scalarMulEndo_minHammingWeight_windowed_vartime*[scalBits: static int; EC]( # Signed digits divides precomputation table size by 2 # Odd-only divides precomputation table size by another 2 const precompSize = 1 shl (window - 2) - static: doAssert window < 8, "Window is too large and precomputation would use " & $(precompSize * sizeof(EC)) & " stack space." + static: doAssert window < 8, "Window of size " & $window & " is too large and precomputation would use " & $(precompSize * sizeof(EC)) & " stack space." # 1. 
Compute endomorphisms const M = when P.F is Fp: 2 elif P.F is Fp2: 4 else: {.error: "Unconfigured".} + const G = when EC isnot EC_ShortW_Aff|EC_ShortW_Jac|EC_ShortW_Prj: G1 + else: EC.G var endos {.noInit.}: array[M-1, EC] endos.computeEndomorphisms(P) @@ -266,7 +268,7 @@ func scalarMulEndo_minHammingWeight_windowed_vartime*[scalBits: static int; EC]( const L = EC.getScalarField().bits().ceilDiv_vartime(M) + 1 var miniScalars {.noInit.}: array[M, BigInt[L]] var negatePoints {.noInit.}: array[M, SecretBool] - miniScalars.decomposeEndo(negatePoints, scalar, EC.F) + miniScalars.decomposeEndo(negatePoints, scalar, EC.getScalarField().bits(), EC.getName(), G) # 3. Handle negative mini-scalars if negatePoints[0].bool: diff --git a/constantine/math/pairings/gt_exponentiations_vartime.nim b/constantine/math/pairings/gt_exponentiations_vartime.nim index f41cedb8..cbc9fb31 100644 --- a/constantine/math/pairings/gt_exponentiations_vartime.nim +++ b/constantine/math/pairings/gt_exponentiations_vartime.nim @@ -18,6 +18,8 @@ import constantine/named/algebras, ./cyclotomic_subgroups +from constantine/math/elliptic/ec_shortweierstrass_affine import G2 + {.push raises: [].} # No exceptions allowed in core cryptographic operations {.push checks: off.} # No defects due to array bound checking or signed integer overflow allowed @@ -26,7 +28,7 @@ iterator unpackBE(scalarByte: byte): bool = yield bool((scalarByte shr i) and 1) func gtExp_sqrmul_vartime*[Gt: ExtensionField](r: var Gt, a: Gt, scalar: BigInt) {.tags:[VarTime], meter.} = - ## **Variable-time** Exponentiation in Gt + ## **Variable-time** Exponentiation in 𝔾ₜ ## ## r <- aᵏ ## @@ -45,7 +47,7 @@ func gtExp_sqrmul_vartime*[Gt: ExtensionField](r: var Gt, a: Gt, scalar: BigInt) for scalarByte in scalarCanonical: for bit in unpackBE(scalarByte): if not isNeutral: - r.square() + r.cyclotomic_square() if bit: if isNeutral: r = a @@ -54,7 +56,7 @@ func gtExp_sqrmul_vartime*[Gt: ExtensionField](r: var Gt, a: Gt, scalar: BigInt) r *= a func gtExp_addchain_4bit_vartime[Gt: ExtensionField](r: var Gt, a: Gt, scalar: BigInt) {.tags:[VarTime], meter.} = - ## **Variable-time** Exponentiation in Gt + ## **Variable-time** Exponentiation in 𝔾ₜ ## This can only handle for small scalars up to 2⁴ = 16 excluded let s = uint scalar.limbs[0] @@ -64,88 +66,88 @@ func gtExp_addchain_4bit_vartime[Gt: ExtensionField](r: var Gt, a: Gt, scalar: B of 1: discard of 2: - r.square(a) + r.cyclotomic_square(a) of 3: var t {.noInit.}: Gt - t.square(a) + t.cyclotomic_square(a) r.prod(a, t) of 4: - r.square(a) - r.square() + r.cyclotomic_square(a) + r.cyclotomic_square() of 5: var t {.noInit.}: Gt - t.square(a) - t.square() + t.cyclotomic_square(a) + t.cyclotomic_square() r.prod(a, t) of 6: var t {.noInit.}: Gt - t.square(a) + t.cyclotomic_square(a) r.prod(a, t) - r.square() + r.cyclotomic_square() of 7: var t {.noInit.}: Gt - t.square(a) - t.square() - t.square() + t.cyclotomic_square(a) + t.cyclotomic_square() + t.cyclotomic_square() r.cyclotomic_inv(a) r *= t of 8: - r.square(a) - r.square() - r.square() + r.cyclotomic_square(a) + r.cyclotomic_square() + r.cyclotomic_square() of 9: var t {.noInit.}: Gt - t.square(a) - t.square() - t.square() + t.cyclotomic_square(a) + t.cyclotomic_square() + t.cyclotomic_square() r.prod(a, t) of 10: var t {.noInit.}: Gt - t.square(a) - t.square() + t.cyclotomic_square(a) + t.cyclotomic_square() r.prod(a, t) - r.square() + r.cyclotomic_square() of 11: var t1 {.noInit.}, t2 {.noInit.}: Gt - t1.square(a) # [2]P - t2.square(t1) - t2.square() # 
[8]P + t1.cyclotomic_square(a) # [2]P + t2.cyclotomic_square(t1) + t2.cyclotomic_square() # [8]P t1 *= t2 r.prod(a, t1) of 12: var t1 {.noInit.}, t2 {.noInit.}: Gt - t1.square(a) - t1.square() # [4]P - t2.square(t1) # [8]P + t1.cyclotomic_square(a) + t1.cyclotomic_square() # [4]P + t2.cyclotomic_square(t1) # [8]P r.prod(t1, t2) of 13: var t1 {.noInit.}, t2 {.noInit.}: Gt - t1.square(a) - t1.square() # [4]P - t2.square(t1) # [8]P + t1.cyclotomic_square(a) + t1.cyclotomic_square() # [4]P + t2.cyclotomic_square(t1) # [8]P t1 *= t2 r.prod(a, t1) of 14: var t {.noInit.}: Gt - t.square(a) - t.square() - t.square() + t.cyclotomic_square(a) + t.cyclotomic_square() + t.cyclotomic_square() r.cyclotomic_inv(a) t *= r # [7]P - r.square(t) + r.cyclotomic_square(t) of 15: var t {.noInit.}: Gt - t.square(a) - t.square() - t.square() - t.square() + t.cyclotomic_square(a) + t.cyclotomic_square() + t.cyclotomic_square() + t.cyclotomic_square() r.cyclotomic_inv(a) r *= t else: unreachable() func gtExp_minHammingWeight_vartime*[Gt: ExtensionField](r: var Gt, a: Gt, scalar: BigInt) {.tags:[VarTime].} = - ## **Variable-time** Exponentiation in Gt + ## **Variable-time** Exponentiation in 𝔾ₜ ## ## r <- aᵏ ## @@ -160,7 +162,7 @@ func gtExp_minHammingWeight_vartime*[Gt: ExtensionField](r: var Gt, a: Gt, scala r.setOne() for bit in recoding_l2r_signed_vartime(scalar): - r.square() + r.cyclotomic_square() if bit == 1: r *= a elif bit == -1: @@ -199,7 +201,7 @@ func accumNAF[precompSize, NafMax: static int, Gt: ExtensionField]( func gtExp_minHammingWeight_windowed_vartime*[Gt: ExtensionField]( r: var Gt, a: Gt, scalar: BigInt, window: static int) {.tags:[VarTime], meter.} = - ## **Variable-time** Exponentiation in Gt + ## **Variable-time** Exponentiation in 𝔾ₜ ## ## r <- aᵏ ## @@ -210,14 +212,13 @@ func gtExp_minHammingWeight_windowed_vartime*[Gt: ExtensionField]( # Signed digits divides precomputation table size by 2 # Odd-only divides precomputation table size by another 2 - const precompSize = 1 shl (window - 2) - static: doAssert window < 8, "Window is too large and precomputation would use " & $(precompSize * sizeof(Gt)) & " stack space." + static: doAssert window <= 4, "Window of size " & $window & " is too large and precomputation would use " & $(precompSize * sizeof(Gt)) & " stack space." var tab {.noinit.}: array[precompSize, Gt] var a2{.noInit.}: Gt tab[0] = a - a2.square(a) + a2.cyclotomic_square(a) for i in 1 ..< tab.len: tab[i].prod(tab[i-1], a2) @@ -227,7 +228,85 @@ func gtExp_minHammingWeight_windowed_vartime*[Gt: ExtensionField]( var isInit = false for i in 0 ..< nafLen: if isInit: - r.square() + r.cyclotomic_square() r.accumNAF(tab, naf, nafLen, i) else: isInit = r.initNAF(tab, naf, nafLen, i) + +func gtExpEndo_minHammingWeight_windowed_vartime*[Gt: ExtensionField, scalBits: static int]( + r: var Gt, a: Gt, scalar: BigInt[scalBits], window: static int) {.tags:[VarTime], meter.} = + ## Endomorphism accelerated **Variable-time** Exponentiation in 𝔾ₜ + ## + ## r <- aᵏ + ## + ## This uses windowed-NAF (wNAF) + ## This MUST NOT be used with secret data. + ## + ## This is highly VULNERABLE to timing attacks and power analysis attacks + + # Signed digits divides precomputation table size by 2 + # Odd-only divides precomputation table size by another 2 + const precompSize = 1 shl (window - 2) + static: doAssert window <= 4, "Window of size " & $window & " is too large and precomputation would use " & $(precompSize * sizeof(Gt)) & " stack space." + + # 1. 
Compute endomorphisms + const M = when Gt is Fp6: 2 + elif Gt is Fp12: 4 + else: {.error: "Unconfigured".} + + var endos {.noInit.}: array[M-1, Gt] + endos.computeEndomorphisms(a) + + # 2. Decompose scalar into mini-scalars + const L = Fr[Gt.Name].bits().ceilDiv_vartime(M) + 1 + var miniScalars {.noInit.}: array[M, BigInt[L]] + var negateElems {.noInit.}: array[M, SecretBool] + miniScalars.decomposeEndo(negateElems, scalar, Fr[Gt.Name].bits(), Gt.Name, G2) # 𝔾ₜ has same decomposition as 𝔾₂ + + # 3. Handle negative mini-scalars + if negateElems[0].bool: + r.cyclotomic_inv(a) + else: + r = a + for m in 1 ..< M: + if negateElems[m].bool: + endos[m-1].cyclotomic_inv() + + # It's OK if r aliases a, we don't need a anymore + + # 4. Precomputed table + var tab {.noinit.}: array[M, array[precompSize, Gt]] + for m in 0 ..< M: + var a2{.noInit.}: Gt + if m == 0: + tab[0][0] = r + a2.cyclotomic_square(r) + else: + tab[m][0] = endos[m-1] + a2.cyclotomic_square(endos[m-1]) + for i in 1 ..< tab[m].len: + tab[m][i].prod(tab[m][i-1], a2) + + # 5. wNAF precomputed tables + const NafLen = L+1 + var tabNaf {.noinit.}: array[M, array[NafLen, int8]] + + for m in 0 ..< M: + # tabNaf returns NAF from least-significant to most significant bits + let miniScalarLen = tabNaf[m].recode_r2l_signed_window_vartime(miniScalars[m], window) + # We compute from most significant to least significant + # so we pad with 0 + for i in miniScalarLen ..< NafLen: + tabNaf[m][i] = 0 + + # 6. Compute + var isInit = false + + for i in 0 ..< NafLen: + if isInit: + r.cyclotomic_square() + for m in 0 ..< M: + if isInit: + r.accumNAF(tab[m], tabNaf[m], NafLen, i) + else: + isInit = r.initNAF(tab[m], tabNaf[m], NafLen, i) diff --git a/constantine/named/zoo_endomorphisms.nim b/constantine/named/zoo_endomorphisms.nim index 429f949c..f49fd365 100644 --- a/constantine/named/zoo_endomorphisms.nim +++ b/constantine/named/zoo_endomorphisms.nim @@ -35,17 +35,13 @@ import macro dispatch(Name: static Algebra, tag: static string, G: static string): untyped = result = bindSym($Name & "_" & tag & "_" & G) -template babai*(F: typedesc[Fp or Fp2]): untyped = +template babai*(Name: static Algebra, G: static Subgroup): untyped = ## Return the GLV Babai roundings vector - const G = if F is Fp: "G1" - else: "G2" - dispatch(F.Name, "Babai", G) + dispatch(Name, "Babai", $G) -template lattice*(F: typedesc[Fp or Fp2]): untyped = +template lattice*(Name: static Algebra, G: static Subgroup): untyped = ## Returns the GLV Decomposition Lattice - const G = if F is Fp: "G1" - else: "G2" - dispatch(F.Name, "Lattice", G) + dispatch(Name, "Lattice", $G) macro getCubicRootOfUnity_mod_p*(Name: static Algebra): untyped = ## Get a non-trivial cubic root of unity (mod p) with p the prime field @@ -91,7 +87,7 @@ func computeEndomorphism*[EC](endo: var EC, P: EC) = else: # For BW6-761, both G1 and G2 are on Fp endo.frobenius_psi(P, 2) -func computeEndomorphisms*[EC; M: static int](endos: var array[M-1, EC], P: EC) = +func computeEndomorphisms*[EC: not ExtensionField; M: static int](endos: var array[M-1, EC], P: EC) = ## An endomorphism decomposes M-way. 
when P.F is Fp: static: doAssert M == 2 diff --git a/tests/math_pairings/t_pairing_template.nim b/tests/math_pairings/t_pairing_template.nim index 1d88c1f8..fb3d3de7 100644 --- a/tests/math_pairings/t_pairing_template.nim +++ b/tests/math_pairings/t_pairing_template.nim @@ -193,6 +193,15 @@ template runGTexponentiationTests*(Iters: static int, GT: typedesc): untyped {.d r_wNAF.gtExp_minHammingWeight_windowed_vartime(a, k, window = 4) doAssert bool(r_ref == r_wNAF) + # Windowed NAF + endomorphism acceleration + var r_endoWNAF {.noInit.}: GT + r_endoWNAF.gtExpEndo_minHammingWeight_windowed_vartime(a, k, window = 2) + doAssert bool(r_ref == r_endoWNAF) + r_endoWNAF.gtExpEndo_minHammingWeight_windowed_vartime(a, k, window = 3) + doAssert bool(r_ref == r_endoWNAF) + r_endoWNAF.gtExpEndo_minHammingWeight_windowed_vartime(a, k, window = 4) + doAssert bool(r_ref == r_endoWNAF) + stdout.write '.' stdout.write '\n'
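
A minimal cross-check sketch, outside the patch itself, mirroring the new test template: it compares the endomorphism-accelerated wNAF exponentiation against the square-and-multiply reference. The import paths below are assumptions, and `a` is assumed to already lie in the cyclotomic subgroup 𝔾ₜ (e.g. a pairing output), since both routines now use cyclotomic_square/cyclotomic_inv.

import
  constantine/math/arithmetic,
  constantine/math/extension_fields,
  constantine/math/pairings/gt_exponentiations_vartime

proc endoExpMatchesReference[Gt; bits: static int](a: Gt, k: BigInt[bits]): bool =
  ## Compare the endomorphism-accelerated wNAF exponentiation (window = 4,
  ## the largest size allowed by the new doAssert) against the
  ## square-and-multiply reference for one (element, scalar) pair.
  var r_ref {.noInit.}, r_endo {.noInit.}: Gt
  r_ref.gtExp_sqrmul_vartime(a, k)                                      # reference
  r_endo.gtExpEndo_minHammingWeight_windowed_vartime(a, k, window = 4)  # accelerated
  result = bool(r_ref == r_endo)

The test template above exercises the same comparison for window sizes 2, 3 and 4.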
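
And a sketch of the updated decomposeEndo call shape seen from a caller, here a 2-way 𝔾₁ split on BLS12_381 picked as an arbitrary example; module paths and the import locations of ceilDiv_vartime and SecretBool are assumptions, while the argument order matches the updated call sites in scalarMulEndo.

import
  constantine/named/algebras,
  constantine/math/arithmetic,
  constantine/math/elliptic/[ec_shortweierstrass_affine, ec_endomorphism_accel],
  constantine/platforms/abstractions

proc splitScalarG1[bits: static int](s: BigInt[bits]) =
  const M = 2                                            # 2-way GLV split, 𝔾₁ has Fp coordinates
  const L = Fr[BLS12_381].bits().ceilDiv_vartime(M) + 1  # mini-scalar bit length, as in scalarMulEndo
  var minis {.noInit.}: array[M, BigInt[L]]
  var negate {.noInit.}: array[M, SecretBool]
  # The field typedesc argument is gone: pass the scalar-field bit size,
  # the curve name and the subgroup whose Babai/lattice constants to use.
  minis.decomposeEndo(negate, s, Fr[BLS12_381].bits(), BLS12_381, G1)
  # negate[i] tells the caller to negate the matching point or endomorphism image.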