secp256k1Modinv64UpdateDe62 static method

void secp256k1Modinv64UpdateDe62(
  Secp256k1ModinvSigned d,
  Secp256k1ModinvSigned e,
  Secp256k1ModinvTrans t,
  Secp256k1ModinvInfo modinfo,
)

Computes (t/2^62) * [d, e] mod modulus, where t is a transition matrix whose coefficients are scaled by 2^62, updating d and e in place; on entry and exit both values lie in the range (-2*modulus, modulus), as the assertions in the implementation verify.

Implementation

static void secp256k1Modinv64UpdateDe62(
    Secp256k1ModinvSigned d,
    Secp256k1ModinvSigned e,
    Secp256k1ModinvTrans t,
    Secp256k1ModinvInfo modinfo) {
  final BigInt m62 = Secp256k1Const.mask62;
  final BigInt d0 = d[0], d1 = d[1], d2 = d[2], d3 = d[3], d4 = d[4];
  final BigInt e0 = e[0], e1 = e[1], e2 = e[2], e3 = e[3], e4 = e[4];
  final BigInt u = t.u, v = t.v, q = t.q, r = t.r;
  BigInt md, me, sd, se;
  Secp256k1Int128 cd = Secp256k1Int128(), ce = Secp256k1Int128();
  _cond(secp256k1Modinv64MulCmp62(d, 5, modinfo.modulus, (-2).toBigInt) > 0,
      "secp256k1Modinv64UpdateDe62");
  _cond(secp256k1Modinv64MulCmp62(d, 5, modinfo.modulus, BigInt.one) < 0,
      "secp256k1Modinv64UpdateDe62");
  _cond(secp256k1Modinv64MulCmp62(e, 5, modinfo.modulus, (-2).toBigInt) > 0,
      "secp256k1Modinv64UpdateDe62");
  _cond(secp256k1Modinv64MulCmp62(e, 5, modinfo.modulus, BigInt.one) < 0,
      "secp256k1Modinv64UpdateDe62");
  _cond(
      secp256k1Modinv64Abs(u) <=
          ((BigInt.one << 62).toSigned64 - secp256k1Modinv64Abs(v)),
      "secp256k1Modinv64UpdateDe62");
  _cond(
      secp256k1Modinv64Abs(q) <=
          ((BigInt.one << 62).toSigned64 - secp256k1Modinv64Abs(r)),
      "secp256k1Modinv64UpdateDe62");

  /// [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative.
  sd = (d4 >> 63).toSigned64;
  se = (e4 >> 63).toSigned64;
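  /// sd and se are all-zero or all-one masks (0 or -1) taken from the sign
  /// bits of d and e, so the masked sums below add u,q when d is negative
  /// and v,r when e is negative.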
  md = ((u & sd).toSigned64 + (v & se).toSigned64).toSigned64;
  me = ((q & sd).toSigned64 + (r & se).toSigned64).toSigned64;

  /// Begin computing t*[d,e].
  secp256k1I128Mul(cd, u, d0);
  secp256k1I128AccumMul(cd, v, e0);
  secp256k1I128Mul(ce, q, d0);
  secp256k1I128AccumMul(ce, r, e0);

  /// Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits.
  md = (md -
          (((modinfo.modulusInv * secp256k1I128ToU64(cd)).toUnsigned64 + md) &
              m62))
      .toSigned64;
  me = (me -
          (((modinfo.modulusInv * secp256k1I128ToU64(ce)).toUnsigned64 + me) &
              m62))
      .toSigned64;

  /// Update the beginning of computation for t*[d,e]+modulus*[md,me] now that md,me are known.
  secp256k1I128AccumMul(cd, modinfo.modulus[0], md);
  secp256k1I128AccumMul(ce, modinfo.modulus[0], me);
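  /// modinfo.modulusInv holds the inverse of the modulus modulo 2^62, so the
  /// md,me adjustment above makes modulus*md congruent to -cd and modulus*me
  /// congruent to -ce (mod 2^62); the two accumulations just performed thus
  /// clear the bottom 62 bits of cd and ce, which is verified below before
  /// they are shifted away.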

  /// Verify that the low 62 bits of the computation are indeed zero, and then throw them away.
  _cond((secp256k1I128ToU64(cd) & m62) == BigInt.zero,
      "secp256k1Modinv64UpdateDe62");
  secp256k1I128Rshift(cd, 62);
  _cond((secp256k1I128ToU64(ce) & m62) == BigInt.zero,
      "secp256k1Modinv64UpdateDe62");
  secp256k1I128Rshift(ce, 62);

  /// Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift).
  secp256k1I128AccumMul(cd, u, d1);
  secp256k1I128AccumMul(cd, v, e1);
  secp256k1I128AccumMul(ce, q, d1);
  secp256k1I128AccumMul(ce, r, e1);
  /// Optimize for the case where this limb of the modulus is zero.
  if (modinfo.modulus[1].toBool) {
    secp256k1I128AccumMul(cd, modinfo.modulus[1], md);
    secp256k1I128AccumMul(ce, modinfo.modulus[1], me);
  }
  d[0] = secp256k1I128ToU64(cd) & m62;
  secp256k1I128Rshift(cd, 62);
  e[0] = secp256k1I128ToU64(ce) & m62;
  secp256k1I128Rshift(ce, 62);

  /// Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1.
  secp256k1I128AccumMul(cd, u, d2);
  secp256k1I128AccumMul(cd, v, e2);
  secp256k1I128AccumMul(ce, q, d2);
  secp256k1I128AccumMul(ce, r, e2);
  /// Optimize for the case where this limb of the modulus is zero.
  if (modinfo.modulus[2].toBool) {
    secp256k1I128AccumMul(cd, modinfo.modulus[2], md);
    secp256k1I128AccumMul(ce, modinfo.modulus[2], me);
  }
  d[1] = secp256k1I128ToU64(cd) & m62;
  secp256k1I128Rshift(cd, 62);
  e[1] = secp256k1I128ToU64(ce) & m62;
  secp256k1I128Rshift(ce, 62);

  /// Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2.
  secp256k1I128AccumMul(cd, u, d3);
  secp256k1I128AccumMul(cd, v, e3);
  secp256k1I128AccumMul(ce, q, d3);
  secp256k1I128AccumMul(ce, r, e3);
  /// Optimize for the case where this limb of the modulus is zero.
  if (modinfo.modulus[3].toBool) {
    secp256k1I128AccumMul(cd, modinfo.modulus[3], md);
    secp256k1I128AccumMul(ce, modinfo.modulus[3], me);
  }
  d[2] = secp256k1I128ToU64(cd) & m62;
  secp256k1I128Rshift(cd, 62);
  e[2] = secp256k1I128ToU64(ce) & m62;
  secp256k1I128Rshift(ce, 62);

  /// Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3.
  secp256k1I128AccumMul(cd, u, d4);
  secp256k1I128AccumMul(cd, v, e4);
  secp256k1I128AccumMul(ce, q, d4);
  secp256k1I128AccumMul(ce, r, e4);
  secp256k1I128AccumMul(cd, modinfo.modulus[4], md);
  secp256k1I128AccumMul(ce, modinfo.modulus[4], me);
  d[3] = secp256k1I128ToU64(cd) & m62;
  secp256k1I128Rshift(cd, 62);
  e[3] = secp256k1I128ToU64(ce) & m62;
  secp256k1I128Rshift(ce, 62);

  /// What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4.
  d[4] = secp256k1I128ToI64(cd);
  e[4] = secp256k1I128ToI64(ce);
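  /// Unlike the lower limbs, the top limb is read back as a signed value:
  /// d and e may be negative at this point, but remain within
  /// (-2*modulus, modulus) as the final checks confirm.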

  _cond(secp256k1Modinv64MulCmp62(d, 5, modinfo.modulus, (-2).toBigInt) > 0,
      "secp256k1Modinv64UpdateDe62");
  _cond(secp256k1Modinv64MulCmp62(d, 5, modinfo.modulus, BigInt.one) < 0,
      "secp256k1Modinv64UpdateDe62");
  _cond(secp256k1Modinv64MulCmp62(e, 5, modinfo.modulus, (-2).toBigInt) > 0,
      "secp256k1Modinv64UpdateDe62");
  _cond(secp256k1Modinv64MulCmp62(e, 5, modinfo.modulus, BigInt.one) < 0,
      "secp256k1Modinv64UpdateDe62");
}
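
Note on the representation: d and e above are held as five limbs, where limbs 0 through 3 each carry 62 bits of the value and limb 4 is signed, mirroring the upstream signed62 layout. The standalone sketch below (plain Dart, independent of the library's Secp256k1ModinvSigned type; the helper names are illustrative only) shows how a BigInt maps to and from that layout.

// Illustrative only: pack and unpack a signed BigInt using the 5x62-bit
// limb layout that d and e use above. Helper names are hypothetical.
final BigInt mask62 = (BigInt.one << 62) - BigInt.one;

List<BigInt> toSigned62Limbs(BigInt x) {
  final limbs = List<BigInt>.filled(5, BigInt.zero);
  var value = x;
  for (var i = 0; i < 4; i++) {
    limbs[i] = value & mask62; // low 62 bits, always non-negative
    value >>= 62;              // arithmetic shift, preserves the sign
  }
  limbs[4] = value;            // top limb keeps the sign
  return limbs;
}

BigInt fromSigned62Limbs(List<BigInt> limbs) {
  var value = limbs[4];
  for (var i = 3; i >= 0; i--) {
    value = (value << 62) | limbs[i];
  }
  return value;
}

void main() {
  final x = BigInt.parse('-123456789123456789123456789');
  print(fromSigned62Limbs(toSigned62Limbs(x)) == x); // true
}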