diff --git a/CHANGELOG.md b/CHANGELOG.md
index be9784379e..a142c31e13 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,35 +1,72 @@
-Note: this version will be partially audited (bn254 & bls12-381 packages).
+## [v0.8.0] - 2022-08-03
-## [v0.8.0] - 2022-05-31
+This version was partially audited by Kudelski Security for the Algorand Foundation (TODO: insert report link).
+The audit covered the `bn254` and `bls12-381` packages (including field arithmetic).
### Feat
-- field/generator suppors 1-limb modulus ([#175](https://github.com/ConsenSys/gnark-crypto/issues/175))
- field/goldilocks (more efficient 1-limb modulus arith) ([#177](https://github.com/ConsenSys/gnark-crypto/issues/177))
-- **fri:** FRI protocol support and improvments
-- field.SetRandom: use random uniform sampling instead of mod reduce (zero-allocs)
-- adds BLS24-317 curve optimized for KZG ([#179](https://github.com/ConsenSys/gnark-crypto/pull/179))
+- field/generator supports 1-limb modulus ([#175](https://github.com/ConsenSys/gnark-crypto/issues/175))
+- field.SetRandom zero-alloc uniform sampling
+- **E6/E12/E24:** GT torus-based batch compression/decompression
+- **fri:** modified challenge generation so it fits in a snark variable
+- **fri:** added check of correctness between rounds
### Fix
+- Handle edge case in Karabina decompression ([#219](https://github.com/ConsenSys/gnark-crypto/issues/219))
+- check nbTasks config when running msm; test all possible c-bit windows when testing.Short is not set ([#226](https://github.com/ConsenSys/gnark-crypto/issues/226))
+- element.SetString(_) returns an error on invalid input instead of panicking
+- fix expand_msg_xmd copy bug, add a few tests ([#201](https://github.com/ConsenSys/gnark-crypto/issues/201))
- closes [#199](https://github.com/ConsenSys/gnark-crypto/issues/199). Correct bound in eddsa key gen template
-### Refactor & Cosmetics
+### Perf
+
+- remove unnecessary inverse in KZG-verify
+- faster GLV scalar decomposition
+
+### Refactor & Docs
+- moved consensys/goff into field/goff ([#204](https://github.com/ConsenSys/gnark-crypto/issues/204))
- clean comments in curves ([#193](https://github.com/ConsenSys/gnark-crypto/issues/193))
-- replace modulus generated by constants ([#194](https://github.com/ConsenSys/gnark-crypto/issues/194))
+- remove dead code ([#230](https://github.com/ConsenSys/gnark-crypto/issues/230))
+- cosmetic changes ([#197](https://github.com/ConsenSys/gnark-crypto/issues/197))
+- replace modulus generated by constants, add zero-alloc SetRandom ([#194](https://github.com/ConsenSys/gnark-crypto/issues/194))
- remove uneeded x86 asm and files ([#192](https://github.com/ConsenSys/gnark-crypto/issues/192))
-- code cleaning & cosmetic changes ([#197](https://github.com/ConsenSys/gnark-crypto/issues/197))
-- clean HashToCurve APIs ([#188](https://github.com/ConsenSys/gnark-crypto/pull/188))
+- polish readme.md with updated godoc subpackage links ([#235](https://github.com/ConsenSys/gnark-crypto/issues/235))
+- document the convention inv(0) == 0 in comments ([#233](https://github.com/ConsenSys/gnark-crypto/issues/233))
+- add note in pairing godoc: inputs are not checked to be in the correct subgroup ([#231](https://github.com/ConsenSys/gnark-crypto/issues/231))
+- add security estimates of implemented curves in comments
+
+### Test
+
+- fix [#205](https://github.com/ConsenSys/gnark-crypto/issues/205) - msm bench with different bases ([#206](https://github.com/ConsenSys/gnark-crypto/issues/206))
+- vectors generated using
+- **all curves:** compress/decompress pairing result
### Pull Requests
+- Merge pull request [#232](https://github.com/ConsenSys/gnark-crypto/issues/232) from ConsenSys/docs/comments
+- Merge pull request [#229](https://github.com/ConsenSys/gnark-crypto/issues/229) from ConsenSys/update_deps
+- Merge pull request [#227](https://github.com/ConsenSys/gnark-crypto/issues/227) from ConsenSys/fix/element_setstring
+- Merge pull request [#228](https://github.com/ConsenSys/gnark-crypto/issues/228) from ConsenSys/fix/race/test
+- Merge pull request [#224](https://github.com/ConsenSys/gnark-crypto/issues/224) from ConsenSys/refactor/scalarmul
+- Merge pull request [#220](https://github.com/ConsenSys/gnark-crypto/issues/220) from ConsenSys/perf/kzg-verify
+- Merge pull request [#223](https://github.com/ConsenSys/gnark-crypto/issues/223) from ConsenSys/doc/security-estimates-curves
+- Merge pull request [#216](https://github.com/ConsenSys/gnark-crypto/issues/216) from ConsenSys/feat/poly
+- Merge pull request [#217](https://github.com/ConsenSys/gnark-crypto/issues/217) from ConsenSys/string-utils
+- Merge pull request [#213](https://github.com/ConsenSys/gnark-crypto/issues/213) from ConsenSys/perf/glv
+- Merge pull request [#129](https://github.com/ConsenSys/gnark-crypto/issues/129) from ConsenSys/feat/GT-compression
+- Merge pull request [#209](https://github.com/ConsenSys/gnark-crypto/issues/209) from ConsenSys/codegen/svdw-not-e4
+- Merge pull request [#203](https://github.com/ConsenSys/gnark-crypto/issues/203) from ConsenSys/tests/bn254-vectors
+- Merge pull request [#196](https://github.com/ConsenSys/gnark-crypto/issues/196) from ConsenSys/patch/hashToFpGeneric
- Merge pull request [#202](https://github.com/ConsenSys/gnark-crypto/issues/202) from ConsenSys/gbotrel/issue199
- Merge pull request [#200](https://github.com/ConsenSys/gnark-crypto/issues/200) from tyGavinZJU/develop
- Merge pull request [#85](https://github.com/ConsenSys/gnark-crypto/issues/85) from ConsenSys/feat/fri
+
## [v0.7.0] - 2022-03-24
diff --git a/README.md b/README.md
index aaecff50cd..71b4e075b4 100644
--- a/README.md
+++ b/README.md
@@ -2,19 +2,33 @@
[![Twitter URL](https://img.shields.io/twitter/url/https/twitter.com/gnark_team.svg?style=social&label=Follow%20%40gnark_team)](https://twitter.com/gnark_team) [![License](https://img.shields.io/badge/license-Apache%202-blue)](LICENSE) [![Go Report Card](https://goreportcard.com/badge/github.com/ConsenSys/gnark-crypto)](https://goreportcard.com/badge/github.com/ConsenSys/gnark-crypto) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/consensys/gnark-crypto)](https://pkg.go.dev/mod/github.com/consensys/gnark-crypto) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5815453.svg)](https://doi.org/10.5281/zenodo.5815453)
-`gnark-crypto` provides:
-* [Elliptic curve cryptography](ecc/ecc.md) (+pairing) on BN254, BLS12-381, BLS12-377, BW6-761, BLS24-315, BLS24-317, BW6-633, BLS12-378 and BW6-756
-* [Finite field arithmetic](field/field.md) (fast big.Int)
-* FFT
-* Polynomial commitment schemes
-* MiMC
-* EdDSA (on the "companion" twisted edwards curves)
+`gnark-crypto` provides efficient cryptographic primitives, in Go:
+
+* Elliptic curve cryptography & **Pairing** on:
+ * [`bn254`] ([audit report]())
+ * [`bls12-381`] ([audit report]())
+ * [`bls24-317`]
+ * [`bls12-377`] / [`bw6-761`]
+ * [`bls24-315`] / [`bw6-633`]
+ * [`bls12-378`] / [`bw6-756`]
+ * Each of these curves has a [`twistededwards`] sub-package with its companion curve, which allows efficient elliptic curve cryptography inside zkSNARK circuits.
+* [`field/goff`] - Finite field arithmetic code generator (blazingly fast big.Int)
+* [`fft`] - Fast Fourier Transform
+* [`fri`] - FRI (multiplicative) commitment scheme
+* [`fiatshamir`] - Fiat-Shamir transcript builder
+* [`mimc`] - MiMC hash function using Miyaguchi-Preneel construction
+* [`kzg`] - KZG commitment scheme
+* [`permutation`] - Permutation proofs
+* [`plookup`] - Plookup proofs
+* [`eddsa`] - EdDSA signatures (on the companion [`twistededwards`] curves)
`gnark-crypto` is actively developed and maintained by the team (gnark@consensys.net | [HackMD](https://hackmd.io/@gnark)) behind:
-* [`gnark`: a framework to execute (and verify) algorithms in zero-knowledge](https://github.com/consensys/gnark)
+
+* [`gnark`: a framework to execute (and verify) algorithms in zero-knowledge](https://github.com/consensys/gnark)
## Warning
-**`gnark-crypto` has not been audited and is provided as-is, use at your own risk. In particular, `gnark-crypto` makes no security guarantees such as constant time implementation or side-channel attack resistance.**
+
+**`gnark-crypto` is not fully audited and is provided as-is, use at your own risk. In particular, `gnark-crypto` makes no security guarantees such as constant time implementation or side-channel attack resistance.**
**To report a security bug, please refer to [`gnark` Security Policy](https://github.com/ConsenSys/gnark/blob/master/SECURITY.md).**
@@ -24,7 +38,7 @@
### Go version
-`gnark-crypto` is tested with the last 2 major releases of Go (1.16 and 1.17).
+`gnark-crypto` is tested with the last 2 major releases of Go (1.17 and 1.18).
### Install `gnark-crypto`
@@ -34,24 +48,19 @@ go get github.com/consensys/gnark-crypto
Note if that if you use go modules, in `go.mod` the module path is case sensitive (use `consensys` and not `ConsenSys`).
-### Documentation
-
-[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/consensys/gnark-crypto)](https://pkg.go.dev/mod/github.com/consensys/gnark-crypto)
-
-The APIs are consistent accross the curves. For example, [here is `bn254` godoc](https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254#pkg-overview).
-
### Development
Most (but not all) of the code is generated from the templates in `internal/generator`.
-The generated code contains little to no interfaces and is strongly typed with a base field (generated by the `gnark-crypto/field`). The two main factors driving this design choice are:
+The generated code contains little to no interfaces and is strongly typed with a field (generated by the `gnark-crypto/field` package). The two main factors driving this design choice are:
1. Performance: `gnark-crypto` algorithms manipulates millions (if not billions) of field elements. Interface indirection at this level, plus garbage collection indexing takes a heavy toll on perf.
-2. No generics in Go: need to derive (mostly) identical code for various moduli and curves, with consistent APIs
+2. Need to derive (mostly) identical code for various moduli and curves, with consistent APIs. Generics introduce significant performance overhead and are not yet suited for high performance computing.
To regenerate the files, see `internal/generator/main.go`. Run:
-```
-go generate ./internal/...
+
+```bash
+go generate ./...
```
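
For illustration, here is a minimal sketch of what the generated, strongly typed field API looks like from a caller's perspective (the bn254 `fr` package is assumed here; the snippet is not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	// fr.Element is a concrete, fixed-size type: no interfaces, no big.Int on the hot path
	var a, b, c fr.Element
	a.SetUint64(3)
	b.SetUint64(5)

	c.Mul(&a, &b) // c = a*b mod r
	c.Add(&c, &a) // c = c+a mod r

	fmt.Println(c.String()) // 18
}
```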
## Benchmarks
@@ -86,7 +95,26 @@ Please use the following BibTeX to cite the most recent release.
We use [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://github.com/consensys/gnark-crypto/tags).
-
## License
-This project is licensed under the Apache 2 License - see the [LICENSE](LICENSE) file for details
+This project is licensed under the Apache 2 License - see the [LICENSE](LICENSE) file for details.
+
+[`field/goff`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/field/goff
+[`bn254`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254
+[`bls12-381`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bls12-381
+[`bls24-317`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bls24-317
+[`bls12-377`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bls12-377
+[`bls24-315`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bls24-315
+[`bls12-378`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bls12-378
+[`bw6-761`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bw6-761
+[`bw6-633`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bw6-633
+[`bw6-756`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bw6-756
+[`twistededwards`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/twistededwards
+[`eddsa`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/twistededwards/eddsa
+[`fft`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/fr/fft
+[`fri`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/fr/fri
+[`mimc`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc
+[`kzg`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/fr/kzg
+[`plookup`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/fr/plookup
+[`permutation`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/ecc/bn254/fr/permutation
+[`fiatshamir`]: https://pkg.go.dev/github.com/consensys/gnark-crypto/fiat-shamir
\ No newline at end of file
diff --git a/ecc/bls12-377/bls12-377.go b/ecc/bls12-377/bls12-377.go
index d4c503c506..f42bca4324 100644
--- a/ecc/bls12-377/bls12-377.go
+++ b/ecc/bls12-377/bls12-377.go
@@ -1,3 +1,25 @@
+// Package bls12377 efficient elliptic curve, pairing and hash to curve implementation for bls12-377.
+//
+// bls12-377: A Barreto--Lynn--Scott curve with
+// embedding degree k=12
+// seed x₀=9586122913090633729
+// 𝔽r: r=8444461749428370424248824938781546531375899335154063827935233455917409239041 (x₀⁴-x₀²+1)
+// 𝔽p: p=258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 ((x₀-1)² ⋅ r(x₀)/3+x₀)
+// (E/𝔽p): Y²=X³+1
+// (Eₜ/𝔽p²): Y² = X³+1/u (D-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
+// Extension fields tower:
+// 𝔽p²[u] = 𝔽p/u²+5
+// 𝔽p⁶[v] = 𝔽p²/v³-u
+// 𝔽p¹²[w] = 𝔽p⁶/w²-v
+// optimal Ate loop size:
+// x₀
+// Security: estimated 126-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 253 bits and p¹² is 4521 bits)
+//
+// Warning
+//
+// This code has not been audited and is provided as-is. In particular, there are no security guarantees such as constant time implementation or side-channel attack resistance.
package bls12377
import (
@@ -9,18 +31,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-377/internal/fptower"
)
-// BLS12-377: A Barreto--Lynn--Scott curve of embedding degree k=12 with seed x₀=9586122913090633729
-// 𝔽r: r=8444461749428370424248824938781546531375899335154063827935233455917409239041 (x₀⁴-x₀²+1)
-// 𝔽p: p=258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 ((x₀-1)² ⋅ r(x₀)/3+x₀)
-// (E/𝔽p): Y²=X³+1
-// (Eₜ/𝔽p²): Y² = X³+1/u (D-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
-// Extension fields tower:
-// 𝔽p²[u] = 𝔽p/u²+5
-// 𝔽p⁶[v] = 𝔽p²/v³-u
-// 𝔽p¹²[w] = 𝔽p⁶/w²-v
-// optimal Ate loop size: x₀
-
// ID bls377 ID
const ID = ecc.BLS12_377
@@ -89,7 +99,7 @@ func init() {
g1Gen.X.SetString("81937999373150964239938255573465948239988671502647976594219695644855304257327692006745978603320413799295628339695")
g1Gen.Y.SetString("241266749859715473739788878240585681733927191168601896383759122102112907357779751001206799952863815012735208165030")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
g2Gen.X.SetString("233578398248691099356572568220835526895379068987715365179118596935057653620464273615301663571204657964920925606294",
"140913150380207355837477652521042157274541796891053068589147167627541651775299824604154852141315666357241556069118")
diff --git a/ecc/bls12-377/doc.go b/ecc/bls12-377/doc.go
deleted file mode 100644
index 190041df52..0000000000
--- a/ecc/bls12-377/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bls12377 efficient elliptic curve and pairing implementation for bls12-377.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bls12377
diff --git a/ecc/bls12-377/fp/element.go b/ecc/bls12-377/fp/element.go
index 4788dde843..9873a533ab 100644
--- a/ecc/bls12-377/fp/element.go
+++ b/ecc/bls12-377/fp/element.go
@@ -181,7 +181,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1086,12 +1086,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1099,7 +1100,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
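
A minimal sketch of how callers migrate to the two-value form introduced above (assuming the `fp` package of this curve; `parseElement` is a hypothetical helper):

```go
import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-377/fp"
)

// parseElement wraps the v0.8.0 error-returning SetString; in v0.7.x the same call panicked.
func parseElement(s string) (fp.Element, error) {
	var v fp.Element
	if _, err := v.SetString(s); err != nil {
		return fp.Element{}, fmt.Errorf("invalid field element %q: %w", s, err)
	}
	return v, nil
}
```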
diff --git a/ecc/bls12-377/fr/element.go b/ecc/bls12-377/fr/element.go
index f78823e606..1b60b54a38 100644
--- a/ecc/bls12-377/fr/element.go
+++ b/ecc/bls12-377/fr/element.go
@@ -175,7 +175,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -944,12 +944,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -957,7 +958,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bls12-377/fr/kzg/kzg.go b/ecc/bls12-377/fr/kzg/kzg.go
index 1cc6531feb..ea8ee346eb 100644
--- a/ecc/bls12-377/fr/kzg/kzg.go
+++ b/ecc/bls12-377/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bls12377.G1Affine
+ var claimedValueG1Aff bls12377.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bls12377.G1Jac
+ var fminusfaG1Jac bls12377.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bls12377.G1Affine
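
The change above folds the claimed-value scalar multiplication directly into a Jacobian point, removing an affine round trip inside Verify. For context, a sketch of the commit/open/verify flow around it (Open and Verify signatures as in this file; the NewSRS and Commit signatures are assumptions):

```go
import (
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr/kzg"
)

func kzgRoundTrip() error {
	// toy, insecure setup for illustration only
	srs, err := kzg.NewSRS(16, big.NewInt(42))
	if err != nil {
		return err
	}

	p := make([]fr.Element, 8) // polynomial in coefficient form: p(X) = 7X
	p[1].SetUint64(7)

	var point fr.Element
	point.SetUint64(3)

	commitment, err := kzg.Commit(p, srs)
	if err != nil {
		return err
	}
	proof, err := kzg.Open(p, point, srs)
	if err != nil {
		return err
	}
	return kzg.Verify(&commitment, &proof, point, srs) // nil iff the proof opens the commitment at point
}
```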
diff --git a/ecc/bls12-377/fr/polynomial/multilin.go b/ecc/bls12-377/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..f7ad79d4b7
--- /dev/null
+++ b/ecc/bls12-377/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is a partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] obtained by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// on the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to q' == h' for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
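
A minimal usage sketch of the new MultiLin type (the 2-variable setup and values are illustrative only):

```go
import (
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr/polynomial"
)

func multilinExample() fr.Element {
	// hypercube evaluations of f(X₁, X₂): f(0,0)=1, f(0,1)=2, f(1,0)=3, f(1,1)=4
	m := make(polynomial.MultiLin, 4)
	m[0].SetUint64(1)
	m[1].SetUint64(2)
	m[2].SetUint64(3)
	m[3].SetUint64(4)

	// evaluate the multilinear extension at (r₁, r₂) = (5, 7);
	// Evaluate clones m, then folds X₁ := r₁ and X₂ := r₂
	var r1, r2 fr.Element
	r1.SetUint64(5)
	r2.SetUint64(7)

	return m.Evaluate([]fr.Element{r1, r2}) // = 18 for these toy values
}
```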
diff --git a/ecc/bls12-377/fr/polynomial/pool.go b/ecc/bls12-377/fr/polynomial/pool.go
new file mode 100644
index 0000000000..4470597593
--- /dev/null
+++ b/ecc/bls12-377/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool Clears the pool completely, shields against memory leaks
+// Eg: if we forgot to dump a polynomial at some point, this will ensure the value gets dumped eventually
+// Returns how many polynomials were cleared that way
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool Returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+ // If the rC did not register, then
+ // either the array was allocated somewhere else which can be ignored
+ // otherwise a double put which MUST be ignored
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+ panic(fmt.Sprintf("can't cast to large or small array, the put array's is %v it should have capacity %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
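
A sketch of the intended Make/Dump lifecycle (the size is illustrative; 64 ≤ 256, so the small pool is used):

```go
import "github.com/consensys/gnark-crypto/ecc/bls12-377/fr/polynomial"

func poolExample() {
	// borrow a scratch []fr.Element from the pool
	scratch := polynomial.Make(64)

	// ... use scratch ...

	// hand it back; Dump ignores slices that were not obtained via Make
	polynomial.Dump(scratch)
}
```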
diff --git a/ecc/bls12-377/g1.go b/ecc/bls12-377/g1.go
index 73d42aed11..4a39ccf7c5 100644
--- a/ecc/bls12-377/g1.go
+++ b/ecc/bls12-377/g1.go
@@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -336,7 +344,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -814,9 +822,9 @@ func (p *g1JacExtended) doubleMixed(q *G1Affine) *g1JacExtended {
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -836,7 +844,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -847,7 +855,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
var a, b fp.Element
@@ -859,6 +867,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -922,8 +931,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take value in the base table, double it c time, voilà.
@@ -965,7 +973,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
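
A sketch of the updated call-site pattern for the signature change above — the affine slice is now allocated and returned by the function (curve package assumed to be bls12377, as in this file):

```go
import bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"

func toAffine(points []bls12377.G1Jac) []bls12377.G1Affine {
	// v0.7.x: result := make([]bls12377.G1Affine, len(points)); bls12377.BatchJacobianToAffineG1(points, result)
	// v0.8.0: a single call, still one field inversion for the whole batch
	return bls12377.BatchJacobianToAffineG1(points)
}
```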
diff --git a/ecc/bls12-377/g1_test.go b/ecc/bls12-377/g1_test.go
index 907d1541cd..3209de0cd2 100644
--- a/ecc/bls12-377/g1_test.go
+++ b/ecc/bls12-377/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BLS12-377] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls12-377/g2.go b/ecc/bls12-377/g2.go
index ac68e3c2e4..c80b5bdee0 100644
--- a/ecc/bls12-377/g2.go
+++ b/ecc/bls12-377/g2.go
@@ -341,7 +341,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bls12-377/g2_test.go b/ecc/bls12-377/g2_test.go
index df1c0da2f8..d3b0af12be 100644
--- a/ecc/bls12-377/g2_test.go
+++ b/ecc/bls12-377/g2_test.go
@@ -99,7 +99,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fptower.E2) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -505,7 +505,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -533,7 +533,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls12-377/hash_to_g1.go b/ecc/bls12-377/hash_to_g1.go
index 48a91c07c5..d63191b36b 100644
--- a/ecc/bls12-377/hash_to_g1.go
+++ b/ecc/bls12-377/hash_to_g1.go
@@ -89,65 +89,58 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
tv1 := fp.Element{7563926049028936178, 2688164645460651601, 12112688591437172399, 3177973240564633687, 14764383749841851163, 52487407124055189} //tv1 = c6
var tv2, tv3, tv4, tv5 fp.Element
var exp big.Int
- // c4 = 70368744177663 = 2^46 - 1
+ // c4 = 70368744177663 = 2⁴⁶ - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte{63, 255, 255, 255, 255, 255})
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = 1837921289030710838195067919506396475074392872918698035817074744121558668640693829665401097909504529
exp.SetBytes([]byte{3, 92, 116, 140, 47, 138, 33, 213, 140, 118, 11, 128, 217, 66, 146, 118, 52, 69, 179, 230, 1, 234, 39, 30, 61, 230, 196, 95, 116, 18, 144, 0, 46, 22, 186, 136, 96, 0, 0, 1, 10, 17})
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = 35184372088832
exp.SetBytes([]byte{32, 0, 0, 0, 0, 0})
- tv5.Exp(tv4, &exp)
-
- isQNr := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &fp.Element{13262060633605929793, 16269117706405780335, 1787999441809606207, 11078968899094441280, 17534011895423012165, 96686002316065324})
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh(big.NewInt(1), 46-2)
-
- for i := 46; i >= 2; i-- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp, 1)
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := g1NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := fp.Element{13262060633605929793, 16269117706405780335, 1787999441809606207, 11078968899094441280, 17534011895423012165, 96686002316065324}
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh(big.NewInt(1), 46-2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := 46; i >= 2; i-- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := g1NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
@@ -161,12 +154,6 @@ func g1NotOne(x *fp.Element) uint64 {
}
-/*
-// g1SetZ sets z to [5].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {9871116327010172167, 9167007004823125620, 18338974479346628539, 5649234265355377548, 13442091487463296847, 77904398905292312} )
-}*/
-
// g1MulByZ multiplies x by [5] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -179,30 +166,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{17252667382019449424, 8408110001211059699, 18415587021986261264, 10797086888535946954, 9462758283094809199, 54995354010328751}
+ var sswuIsoCurveCoeffB = fp.Element{11130294635325289193, 6502679372128844082, 15863297759487624914, 16270683149854112145, 3560014356538878812, 27923742146399959}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{11130294635325289193, 6502679372128844082, 15863297759487624914, 16270683149854112145, 3560014356538878812, 27923742146399959})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -210,48 +196,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{9871116327010172167, 9167007004823125620, 18338974479346628539, 5649234265355377548, 13442091487463296847, 77904398905292312}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{17252667382019449424, 8408110001211059699, 18415587021986261264, 10797086888535946954, 9462758283094809199, 54995354010328751}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{17252667382019449424, 8408110001211059699, 18415587021986261264, 10797086888535946954, 9462758283094809199, 54995354010328751})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{11130294635325289193, 6502679372128844082, 15863297759487624914, 16270683149854112145, 3560014356538878812, 27923742146399959})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -294,13 +277,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -317,7 +300,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -337,7 +320,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -347,7 +330,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
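
For reference, a minimal sketch of the exported entry points this file implements (the domain separation tag below is only a placeholder):

```go
import bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"

func hashExample(msg []byte) (bls12377.G1Affine, error) {
	// dst must be unique to the application/ciphersuite; this value is illustrative
	dst := []byte("MYAPP-V01-CS01-with-BLS12377G1_XMD:SHA-256_SSWU_RO_")

	// HashToG1 is uniform and usable as a random oracle; EncodeToG1(msg, dst) is the faster, non-uniform variant
	return bls12377.HashToG1(msg, dst)
}
```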
diff --git a/ecc/bls12-377/hash_to_g2.go b/ecc/bls12-377/hash_to_g2.go
index d60e32370b..46e6c62ae3 100644
--- a/ecc/bls12-377/hash_to_g2.go
+++ b/ecc/bls12-377/hash_to_g2.go
@@ -530,10 +530,11 @@ func g2Isogeny(p *G2Affine) {
// g2SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g2SqrtRatio(z *fptower.E2, u *fptower.E2, v *fptower.E2) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
tv1 := fptower.E2{
A0: fp.Element{0},
@@ -542,59 +543,51 @@ func g2SqrtRatio(z *fptower.E2, u *fptower.E2, v *fptower.E2) uint64 {
var tv2, tv3, tv4, tv5 fptower.E2
var exp big.Int
- // c4 = 140737488355327 = 2^47 - 1
+ // c4 = 140737488355327 = 2⁴⁷ - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte{127, 255, 255, 255, 255, 255})
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = 237702427642072544657662731610863241996908072983433720914596829155825880635712864411696995402952020023758239370111403151139377997388748144480691770738487127695940799749981367718443673617185911789718419957467908625
exp.SetBytes([]byte{2, 211, 7, 208, 187, 175, 251, 34, 86, 145, 59, 179, 97, 38, 60, 75, 184, 184, 125, 164, 174, 233, 63, 31, 94, 113, 65, 61, 218, 77, 92, 9, 208, 24, 175, 185, 6, 96, 205, 192, 20, 231, 18, 80, 42, 77, 108, 70, 10, 170, 170, 139, 183, 10, 224, 49, 131, 36, 185, 88, 99, 140, 157, 107, 203, 251, 210, 53, 241, 192, 154, 74, 218, 38, 143, 46, 27, 216, 0, 115, 56, 210, 84, 240, 0, 0, 1, 10, 17})
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = 70368744177664
exp.SetBytes([]byte{64, 0, 0, 0, 0, 0})
- tv5.Exp(tv4, &exp)
-
- isQNr := g2NotOne(&tv5)
-
- tv2.Mul(&tv3, &fptower.E2{
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := g2NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := fptower.E2{
A0: fp.Element{1479358275892676257, 2814264704614556731, 13691179386454739330, 7530671302001941842, 60362263363904715, 37906327945374822},
A1: fp.Element{5350190547200862053, 10822704806123199611, 5122684409451163826, 10616884767534481491, 1436196917100294910, 20226740120672211},
- })
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh(big.NewInt(1), 47-2)
-
- for i := 47; i >= 2; i-- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := g2NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp, 1)
+ }
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh(big.NewInt(1), 47-2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := 47; i >= 2; i-- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := g2NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
@@ -609,15 +602,6 @@ func g2NotOne(x *fptower.E2) uint64 {
}
-/*
-// g2SetZ sets z to [12, 1].
-func g2SetZ(z *fptower.E2) {
- z.Set( &fptower.E2 {
-A0: fp.Element{10560307807486212317, 9936456306313395274, 2092561269709285211, 8738829082964617622, 5243865315912343348, 114311569748804731},
-A1: fp.Element{202099033278250856, 5854854902718660529, 11492539364873682930, 8885205928937022213, 5545221690922665192, 39800542322357402},
-} )
-}*/
-
// g2MulByZ multiplies x by [12, 1] and stores the result in z
func g2MulByZ(z *fptower.E2, x *fptower.E2) {
@@ -628,33 +612,35 @@ func g2MulByZ(z *fptower.E2, x *fptower.E2) {
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve2 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve2(u *fptower.E2) G2Affine {
+ var sswuIsoCurveCoeffA = fptower.E2{
+ A0: fp.Element{4274545572028848265, 14157081418478689358, 13123833976752631407, 4466041663276938746, 9062541850312583986, 90030181981586611},
+ A1: fp.Element{4627353644986202063, 14941155654691983603, 14266958733709189881, 10264689865410103271, 10052798319587953375, 111844286035220969},
+ }
+ var sswuIsoCurveCoeffB = fptower.E2{
+ A0: fp.Element{10237434857876739089, 8476639787604822147, 6641637803208190023, 1721529389316620686, 8656544759275761743, 38999476160258021},
+ A1: fp.Element{2360755569119276357, 10390833517265838837, 12467133771585386911, 8219721226907645480, 3130947551623757939, 83517800164149569},
+ }
+
var tv1 fptower.E2
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g2MulByZ(&tv1, &tv1)
+ g2MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fptower.E2
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fptower.E2
- //Standard doc line 5
var tv4 fptower.E2
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fptower.E2{
- A0: fp.Element{10237434857876739089, 8476639787604822147, 6641637803208190023, 1721529389316620686, 8656544759275761743, 38999476160258021},
- A1: fp.Element{2360755569119276357, 10390833517265838837, 12467133771585386911, 8219721226907645480, 3130947551623757939, 83517800164149569},
- })
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g2NotZero(&tv2)
@@ -665,57 +651,45 @@ func mapToCurve2(u *fptower.E2) G2Affine {
}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fptower.E2{
- A0: fp.Element{4274545572028848265, 14157081418478689358, 13123833976752631407, 4466041663276938746, 9062541850312583986, 90030181981586611},
- A1: fp.Element{4627353644986202063, 14941155654691983603, 14266958733709189881, 10264689865410103271, 10052798319587953375, 111844286035220969},
- }
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fptower.E2
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fptower.E2
- tv5.Mul(&tv6, &fptower.E2{
- A0: fp.Element{4274545572028848265, 14157081418478689358, 13123833976752631407, 4466041663276938746, 9062541850312583986, 90030181981586611},
- A1: fp.Element{4627353644986202063, 14941155654691983603, 14266958733709189881, 10264689865410103271, 10052798319587953375, 111844286035220969},
- })
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fptower.E2{
- A0: fp.Element{10237434857876739089, 8476639787604822147, 6641637803208190023, 1721529389316620686, 8656544759275761743, 38999476160258021},
- A1: fp.Element{2360755569119276357, 10390833517265838837, 12467133771585386911, 8219721226907645480, 3130947551623757939, 83517800164149569},
- })
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fptower.E2
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fptower.E2
- gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fptower.E2
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g2Sgn0(u)^g2Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G2Affine{x, y}
}
@@ -737,28 +711,29 @@ func g2EvalPolynomial(z *fptower.E2, monic bool, coefficients []fptower.E2, x *f
// g2Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g2Sgn0(z *fptower.E2) uint64 {
nonMont := *z
nonMont.FromMont()
- sign := uint64(0)
- zero := uint64(1)
+ sign := uint64(0) // 1. sign = 0
+ zero := uint64(1) // 2. zero = 1
var signI uint64
var zeroI uint64
- signI = nonMont.A0[0] % 2
- sign = sign | (zero & signI)
-
+ // 3. i = 1
+ signI = nonMont.A0[0] % 2 // 4. sign_i = x_i mod 2
zeroI = g1NotZero(&nonMont.A0)
- zeroI = 1 ^ (zeroI|-zeroI)>>63
- zero = zero & zeroI
-
- signI = nonMont.A1[0] % 2
- sign = sign | (zero & signI)
-
+ zeroI = 1 ^ (zeroI|-zeroI)>>63 // 5. zero_i = x_i == 0
+ sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i) # Avoid short-circuit logic ops
+ zero = zero & zeroI // 7. zero = zero AND zero_i
+ // 3. i = 2
+ signI = nonMont.A1[0] % 2 // 4. sign_i = x_i mod 2
+ // 5. zero_i = x_i == 0
+ sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i) # Avoid short-circuit logic ops
+ // 7. zero = zero AND zero_i
return sign
}
@@ -775,7 +750,7 @@ func MapToG2(u fptower.E2) G2Affine {
// EncodeToG2 hashes a message to a point on the G2 curve using the SSWU map.
// It is faster than HashToG2, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG2(msg, dst []byte) (G2Affine, error) {
var res G2Affine
@@ -798,7 +773,7 @@ func EncodeToG2(msg, dst []byte) (G2Affine, error) {
// HashToG2 hashes a message to a point on the G2 curve using the SSWU map.
// Slower than EncodeToG2, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG2(msg, dst []byte) (G2Affine, error) {
u, err := hashToFp(msg, dst, 2*2)
if err != nil {
@@ -814,7 +789,7 @@ func HashToG2(msg, dst []byte) (G2Affine, error) {
A1: u[2+1],
})
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g2Isogeny(&Q0)
g2Isogeny(&Q1)
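
A minimal usage sketch of the two hash-to-G2 constructions documented above (the import path, message and dst values are illustrative assumptions, not taken from this diff):

package main

import (
	"fmt"

	bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
)

func main() {
	msg := []byte("some message")
	// dst is a domain separation tag; this value is only an example
	dst := []byte("EXAMPLE-DST_BLS12377G2_XMD:SHA-256_SSWU_RO_")

	// HashToG2: usable as a random oracle (uniform output), slower
	p, err := bls12377.HashToG2(msg, dst)
	if err != nil {
		panic(err)
	}

	// EncodeToG2: faster, but the output is not uniformly distributed
	q, err := bls12377.EncodeToG2(msg, dst)
	if err != nil {
		panic(err)
	}

	fmt.Println(p.IsOnCurve(), q.IsOnCurve()) // both true
}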
diff --git a/ecc/bls12-377/internal/fptower/e12.go b/ecc/bls12-377/internal/fptower/e12.go
index 723902e2bf..b3edf3a17e 100644
--- a/ecc/bls12-377/internal/fptower/e12.go
+++ b/ecc/bls12-377/internal/fptower/e12.go
@@ -225,28 +225,45 @@ func (z *E12) CyclotomicSquareCompressed(x *E12) *E12 {
}
// DecompressKarabina decompresses Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/(4*g3)
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// The formulas above (Theorem 3.1) are well-defined for all x in Gϕₙ\{1}
func (z *E12) DecompressKarabina(x *E12) *E12 {
var t [3]E2
var one E2
one.SetOne()
- // t0 = g1^2
- t[0].Square(&x.C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t[1].Sub(&t[0], &x.C0.B2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5^2 + t1
- t[2].Square(&x.C1.B2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.C1.B0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.C1.B2.IsZero() {
+ t[0].Mul(&x.C0.B1, &x.C1.B2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.C0.B2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.C1.B2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 4 * g3
+ t[1].Double(&x.C1.B0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.C1.B1.Mul(&t[0], &t[1])
+ z.C1.B1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.C0.B2, &x.C0.B1)
@@ -255,7 +272,7 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.C1.B0, &x.C1.B2)
// c_0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -271,6 +288,15 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
}
// BatchDecompressKarabina decompresses multiple Karabina cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/(4*g3)
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// The formulas above (Theorem 3.1) are well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using Montgomery batch inversion
func BatchDecompressKarabina(x []E12) []E12 {
n := len(x)
@@ -286,19 +312,29 @@ func BatchDecompressKarabina(x []E12) []E12 {
one.SetOne()
for i := 0; i < n; i++ {
- // t0 = g1^2
- t0[i].Square(&x[i].C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t1[i].Sub(&t0[i], &x[i].C0.B2).
- Double(&t1[i]).
- Add(&t1[i], &t0[i])
- // t0 = E * g5^2 + t1
- t2[i].Square(&x[i].C1.B2)
- t0[i].MulByNonResidue(&t2[i]).
- Add(&t0[i], &t1[i])
- // t1 = 4 * g3
- t1[i].Double(&x[i].C1.B0).
- Double(&t1[i])
+ // g3 == 0
+ if x[i].C1.B2.IsZero() {
+ t0[i].Mul(&x[i].C0.B1, &x[i].C1.B2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t0[i].Square(&x[i].C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].C0.B2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5^2 + t1
+ t2[i].Square(&x[i].C1.B2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 4 * g3
+ t1[i].Double(&x[i].C1.B0).
+ Double(&t1[i])
+ }
}
t1 = BatchInvertE2(t1) // costs 1 inverse
@@ -315,7 +351,7 @@ func BatchDecompressKarabina(x []E12) []E12 {
t2[i].Double(&t2[i])
t2[i].Sub(&t2[i], &t1[i])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3s can be 0s)
t1[i].Mul(&x[i].C1.B0, &x[i].C1.B2)
// z0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t2[i].Add(&t2[i], &t1[i])
@@ -366,6 +402,8 @@ func (z *E12) CyclotomicSquare(x *E12) *E12 {
}
// Inverse set z to the inverse of x in E12 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E12) Inverse(x *E12) *E12 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -383,6 +421,8 @@ func (z *E12) Inverse(x *E12) *E12 {
// BatchInvertE12 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE12(a []E12) []E12 {
res := make([]E12, len(a))
if len(a) == 0 {
@@ -425,7 +465,7 @@ func (z *E12) Exp(x E12, k *big.Int) *E12 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q¹²) == (x⁻¹)⁻ᵏ (mod q¹²)
x.Inverse(&x)
// we negate k in a temp big.Int since
@@ -778,13 +818,14 @@ func (z *E12) CompressTorus() (E6, error) {
return res, nil
}
-// BatchCompressTorus GT/E12 elements to half their size
-// using a batch inversion
+// BatchCompressTorus compresses GT/E12 elements to half their size using a batch inversion.
+//
+// if len(x) == 0 or if any of the x[i].C1 coordinates is 0, this function returns an error.
func BatchCompressTorus(x []E12) ([]E6, error) {
n := len(x)
if n == 0 {
- return []E6{}, errors.New("invalid input size")
+ return nil, errors.New("invalid input size")
}
var one E6
@@ -793,6 +834,10 @@ func BatchCompressTorus(x []E12) ([]E6, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].C1)
+ // throw an error if any of the x[i].C1 is 0
+ if res[i].IsZero() {
+ return nil, errors.New("invalid input; C1 is 0")
+ }
}
t := BatchInvertE6(res) // costs 1 inverse
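
A minimal sketch of how the (batch) Karabina compression is meant to be exercised, written in the spirit of the tests below; it assumes the caller sits in the internal fptower package and that the inputs are already in the cyclotomic subgroup Gϕₙ. Only methods appearing in this diff are used:

// round trip for a single element x (assumed in the cyclotomic subgroup)
func karabinaRoundTrip(x E12) bool {
	// plain cyclotomic square
	var want E12
	want.CyclotomicSquare(&x)

	// compressed square followed by decompression; since this release the
	// decompression also handles the g3 == 0 edge case
	var got E12
	got.CyclotomicSquareCompressed(&x).DecompressKarabina(&got)

	return want.Equal(&got)
}

// the batch variant amortizes the costly division over several compressed results
func karabinaBatch(xs []E12) []E12 {
	compressed := make([]E12, len(xs))
	for i := range xs {
		compressed[i].CyclotomicSquareCompressed(&xs[i])
	}
	// one field inverse for the whole batch (Montgomery batch inversion)
	return BatchDecompressKarabina(compressed)
}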
diff --git a/ecc/bls12-377/internal/fptower/e12_test.go b/ecc/bls12-377/internal/fptower/e12_test.go
index 6fe75b79f2..c0141ed179 100644
--- a/ecc/bls12-377/internal/fptower/e12_test.go
+++ b/ecc/bls12-377/internal/fptower/e12_test.go
@@ -339,13 +339,29 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BLS12-377] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E12) bool {
- var b, c, d E12
+ var _a, b, c, d, _c, _d E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
- return c.Equal(&d)
+ return c.Equal(&d) && _c.Equal(&_d)
},
genA,
@@ -353,18 +369,26 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BLS12-377] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
func(a *E12) bool {
- var b E12
- // put in the cyclotomic subgroup
+ var _a, b E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
var a2, a4, a17 E12
- a2.Set(a)
+ a2.Set(&_a)
a4.Set(a)
a17.Set(a)
- a2.nSquareCompressed(2)
+ a2.nSquareCompressed(2) // case g3 == 0
a4.nSquareCompressed(4)
a17.nSquareCompressed(17)
batch := BatchDecompressKarabina([]E12{a2, a4, a17})
diff --git a/ecc/bls12-377/internal/fptower/e2.go b/ecc/bls12-377/internal/fptower/e2.go
index fc300952ca..c617faf6ed 100644
--- a/ecc/bls12-377/internal/fptower/e2.go
+++ b/ecc/bls12-377/internal/fptower/e2.go
@@ -246,6 +246,8 @@ func (z *E2) Sqrt(x *E2) *E2 {
// BatchInvertE2 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE2(a []E2) []E2 {
res := make([]E2, len(a))
if len(a) == 0 {
diff --git a/ecc/bls12-377/internal/fptower/e2_test.go b/ecc/bls12-377/internal/fptower/e2_test.go
index ff750ad336..3818ab929c 100644
--- a/ecc/bls12-377/internal/fptower/e2_test.go
+++ b/ecc/bls12-377/internal/fptower/e2_test.go
@@ -189,12 +189,6 @@ func TestE2ReceiverIsOperand(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
func TestE2MulMaxed(t *testing.T) {
@@ -404,12 +398,6 @@ func TestE2Ops(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
// ------------------------------------------------------------
diff --git a/ecc/bls12-377/internal/fptower/e6.go b/ecc/bls12-377/internal/fptower/e6.go
index 2ed48dc26e..4da093f5f0 100644
--- a/ecc/bls12-377/internal/fptower/e6.go
+++ b/ecc/bls12-377/internal/fptower/e6.go
@@ -242,6 +242,8 @@ func (z *E6) Square(x *E6) *E6 {
}
// Inverse an element in E6
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -270,6 +272,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
diff --git a/ecc/bls12-377/multiexp.go b/ecc/bls12-377/multiexp.go
index 23d49a8aaa..6d4f14f13b 100644
--- a/ecc/bls12-377/multiexp.go
+++ b/ecc/bls12-377/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then we borrow 2^c from the next window and subtract
// 2^{c} from the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates whether the provided scalars are in Montgomery form
// returns smallValues, which represents the number of scalars that meet the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
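
A small illustration of the signed-digit trick described in the comment above. toSignedDigits is a hypothetical stand-alone helper (not part of gnark-crypto) that recodes a small scalar the same way: with c = 4, the window value 13 > 2³ becomes 13 - 16 = -3 with a carry of 1 into the next window, so 13 = -3 + 1·2⁴.

// toSignedDigits recodes k into signed c-bit digits dᵢ with |dᵢ| <= 2^(c-1),
// such that k = Σ dᵢ·2^(c·i); negative digits map to "add -G" in the bucket method.
// e.g. toSignedDigits(13, 4) == []int64{-3, 1}
func toSignedDigits(k uint64, c uint) []int64 {
	digits := []int64{}
	carry := int64(0)
	mask := uint64(1)<<c - 1
	for k != 0 || carry != 0 {
		d := int64(k&mask) + carry
		k >>= c
		carry = 0
		if d > 1<<(c-1) {
			d -= 1 << c // borrow 2^c from the next window
			carry = 1
		}
		digits = append(digits, d)
	}
	return digits
}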
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
@@ -333,9 +339,6 @@ func msmInnerG1Jac(p *G1Jac, c int, points []G1Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -1180,59 +1183,9 @@ func (p *G1Jac) msmC21(points []G1Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG1Affine(p, c, chChunks[:])
}
-func (p *G1Jac) msmC22(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g1JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g1JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G1Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g1JacExtended
- msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) {
- var buckets [1 << (c - 1)]g1JacExtended
- msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g1JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG1Affine(p, c, chChunks[:])
-}
-
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -1243,6 +1196,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -1279,13 +1234,15 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
// we split recursively until nbChunks(c) >= nbTasks,
bestC := func(nbPoints int) uint64 {
// implemented msmC methods (the c we use must be in this slice)
- implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22}
+ implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
var C uint64
// approximate cost (in group operations)
// cost = bits/c * (nbPoints + 2^{c})
@@ -1403,9 +1360,6 @@ func msmInnerG2Jac(p *G2Jac, c int, points []G2Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -2249,55 +2203,3 @@ func (p *G2Jac) msmC21(points []G2Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG2Affine(p, c, chChunks[:])
}
-
-func (p *G2Jac) msmC22(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g2JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g2JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G2Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g2JacExtended
- msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) {
- var buckets [1 << (c - 1)]g2JacExtended
- msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g2JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG2Affine(p, c, chChunks[:])
-}
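
A minimal sketch of a MultiExp call with an explicit config, reflecting the new NbTasks validation above; the points and scalars are placeholders and the import paths are the usual gnark-crypto ones (an assumption, not shown in this diff):

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc"
	bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
)

func main() {
	_, _, g1Aff, _ := bls12377.Generators()

	points := make([]bls12377.G1Affine, 8)
	scalars := make([]fr.Element, 8)
	for i := range points {
		points[i] = g1Aff
		scalars[i].SetUint64(uint64(i + 1))
	}

	var res bls12377.G1Affine
	// NbTasks <= 0 means "use all CPUs"; values above 1024 are now rejected with an error
	if _, err := res.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: 4}); err != nil {
		panic(err)
	}
	fmt.Println(res) // Σᵢ scalars[i]·points[i]
}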
diff --git a/ecc/bls12-377/multiexp_test.go b/ecc/bls12-377/multiexp_test.go
index fc7e810a55..7f87124d9f 100644
--- a/ecc/bls12-377/multiexp_test.go
+++ b/ecc/bls12-377/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
+ if testing.Short() {
+ // test only "odd" and "even" (ie windows size divide word size vs not)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ // for G2, CI struggles with large values of c since the buckets require allocating a lot of memory.
+ // test only "odd" and "even" (i.e. window sizes that divide the word size vs. those that don't)
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bls12-377/pairing.go b/ecc/bls12-377/pairing.go
index 54245331c2..b1ed68e1a1 100644
--- a/ecc/bls12-377/pairing.go
+++ b/ecc/bls12-377/pairing.go
@@ -30,7 +30,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -41,6 +43,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
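
Since Pair and PairingCheck now document that they do not verify subgroup membership, a hypothetical wrapper that checks untrusted inputs first could look like the sketch below. checkedPair is not part of the library; it only uses Pair, GT and IsInSubGroup from the public bls12-377 package:

package main

import (
	"errors"
	"fmt"

	bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
)

// checkedPair rejects inputs outside the r-torsion before pairing them
func checkedPair(P []bls12377.G1Affine, Q []bls12377.G2Affine) (bls12377.GT, error) {
	for i := range P {
		if !P[i].IsInSubGroup() {
			return bls12377.GT{}, errors.New("G1 point not in the correct subgroup")
		}
	}
	for i := range Q {
		if !Q[i].IsInSubGroup() {
			return bls12377.GT{}, errors.New("G2 point not in the correct subgroup")
		}
	}
	return bls12377.Pair(P, Q)
}

func main() {
	_, _, g1, g2 := bls12377.Generators()
	gt, err := checkedPair([]bls12377.G1Affine{g1}, []bls12377.G2Affine{g2})
	fmt.Println(err == nil, gt) // true, e(g1, g2)
}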
diff --git a/ecc/bls12-377/twistededwards/eddsa/eddsa.go b/ecc/bls12-377/twistededwards/eddsa/eddsa.go
index 605deee20e..82c7839eb4 100644
--- a/ecc/bls12-377/twistededwards/eddsa/eddsa.go
+++ b/ecc/bls12-377/twistededwards/eddsa/eddsa.go
@@ -89,7 +89,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -137,7 +137,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -223,8 +223,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -232,9 +232,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bls12-377/twistededwards/point.go b/ecc/bls12-377/twistededwards/point.go
index c1e68421c0..808cfbee54 100644
--- a/ecc/bls12-377/twistededwards/point.go
+++ b/ecc/bls12-377/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
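
A minimal sketch of the renamed twisted Edwards API (ScalarMul → ScalarMultiplication); the scalar value is arbitrary and only methods shown in this diff (plus GetEdwardsCurve, used in the tests below) are called:

package main

import (
	"fmt"
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bls12-377/twistededwards"
)

func main() {
	params := twistededwards.GetEdwardsCurve()

	var p twistededwards.PointAffine
	s := big.NewInt(10)
	// formerly p.ScalarMul(&params.Base, s)
	p.ScalarMultiplication(&params.Base, s)

	fmt.Println(p.IsOnCurve()) // true
}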
diff --git a/ecc/bls12-377/twistededwards/point_test.go b/ecc/bls12-377/twistededwards/point_test.go
index f427167fb3..950ea15ac7 100644
--- a/ecc/bls12-377/twistededwards/point_test.go
+++ b/ecc/bls12-377/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bls12-378/bls12-378.go b/ecc/bls12-378/bls12-378.go
index 22a796091b..5b6cc91c38 100644
--- a/ecc/bls12-378/bls12-378.go
+++ b/ecc/bls12-378/bls12-378.go
@@ -1,3 +1,25 @@
+// Package bls12378 provides an efficient elliptic curve, pairing and hash-to-curve implementation for bls12-378.
+//
+// bls12-378: A Barreto--Lynn--Scott curve
+// embedding degree k=12
+// seed x₀=11045256207009841153
+// 𝔽r: r=14883435066912132899950318861128167269793560281114003360875131245101026639873 (x₀⁴-x₀²+1)
+// 𝔽p: p=605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417 ((x₀-1)² ⋅ r(x₀)/3+x₀)
+// (E/𝔽p): Y²=X³+1
+// (Eₜ/𝔽p²): Y² = X³+u (M-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
+// Extension fields tower:
+// 𝔽p²[u] = 𝔽p/u²+5
+// 𝔽p⁶[v] = 𝔽p²/v³-u
+// 𝔽p¹²[w] = 𝔽p⁶/w²-v
+// optimal Ate loop size:
+// x₀
+// Security: estimated 126-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 254 bits and p¹² is 4536 bits)
+//
+// Warning
+//
+// This code has not been audited and is provided as-is. In particular, there are no security guarantees such as constant-time implementation or side-channel attack resistance.
package bls12378
import (
@@ -9,18 +31,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-378/internal/fptower"
)
-// BLS12-378: A Barreto--Lynn--Scott curve of embedding degree k=12 with seed x₀=11045256207009841153
-// 𝔽r: r=14883435066912132899950318861128167269793560281114003360875131245101026639873 (x₀⁴-x₀²+1)
-// 𝔽p: p=605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417 ((x₀-1)² ⋅ r(x₀)/3+x₀)
-// (E/𝔽p): Y²=X³+1
-// (Eₜ/𝔽p²): Y² = X³+u (M-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
-// Extension fields tower:
-// 𝔽p²[u] = 𝔽p/u²+5
-// 𝔽p⁶[v] = 𝔽p²/v³-u
-// 𝔽p¹²[w] = 𝔽p⁶/w²-v
-// optimal Ate loop size: x₀
-
// ID bls378 ID
const ID = ecc.BLS12_378
@@ -85,7 +95,7 @@ func init() {
// E(3,y) * cofactor
g1Gen.X.SetString("302027100877540500544138164010696035562809807233645104772290911818386302983750063098216015456036850656714568735197")
g1Gen.Y.SetString("232851047397483214541821965369374725182070455016459237170823497053622811786333462699984177726412751508198874482530")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
// E_t(1,y) * cofactor'
g2Gen.X.SetString("470810816643554779222760025249941413452299198622737082648784137654933833261310635469274149014014206108405592809732",
diff --git a/ecc/bls12-378/doc.go b/ecc/bls12-378/doc.go
deleted file mode 100644
index 973fe0a7b7..0000000000
--- a/ecc/bls12-378/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bls12378 efficient elliptic curve and pairing implementation for bls12-378.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bls12378
diff --git a/ecc/bls12-378/fp/element.go b/ecc/bls12-378/fp/element.go
index 152dc9a51c..4161fec824 100644
--- a/ecc/bls12-378/fp/element.go
+++ b/ecc/bls12-378/fp/element.go
@@ -181,7 +181,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1086,12 +1086,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1099,7 +1100,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
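
A minimal sketch of the new SetString behaviour (error instead of panic); the input strings are arbitrary examples:

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-378/fp"
)

func main() {
	var z fp.Element

	// valid input: z is set and err is nil
	if _, err := z.SetString("0x2a"); err != nil {
		panic(err)
	}

	// invalid input: z is left unchanged and an error is returned (no panic)
	if _, err := z.SetString("not a number"); err != nil {
		fmt.Println("rejected:", err)
	}
}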
diff --git a/ecc/bls12-378/fr/element.go b/ecc/bls12-378/fr/element.go
index 3b645e0994..0bd32b5a75 100644
--- a/ecc/bls12-378/fr/element.go
+++ b/ecc/bls12-378/fr/element.go
@@ -175,7 +175,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -944,12 +944,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -957,7 +958,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bls12-378/fr/kzg/kzg.go b/ecc/bls12-378/fr/kzg/kzg.go
index b624ab7e4e..6385590992 100644
--- a/ecc/bls12-378/fr/kzg/kzg.go
+++ b/ecc/bls12-378/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bls12378.G1Affine
+ var claimedValueG1Aff bls12378.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bls12378.G1Jac
+ var fminusfaG1Jac bls12378.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bls12378.G1Affine
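
A minimal sketch of the helper the KZG verifier now relies on (ScalarMultiplicationAffine, added to the bls12-378 G1 code later in this diff): both paths below are expected to produce the same Jacobian point, the new one skipping the intermediate affine conversion. The scalar is arbitrary.

package main

import (
	"fmt"
	"math/big"

	bls12378 "github.com/consensys/gnark-crypto/ecc/bls12-378"
)

func main() {
	_, _, g1Aff, _ := bls12378.Generators()
	s := big.NewInt(42)

	// old path: scalar multiplication in affine, then conversion to Jacobian
	var aff bls12378.G1Affine
	aff.ScalarMultiplication(&g1Aff, s)
	var viaAffine bls12378.G1Jac
	viaAffine.FromAffine(&aff)

	// new path: affine input, Jacobian output, no intermediate conversion
	var direct bls12378.G1Jac
	direct.ScalarMultiplicationAffine(&g1Aff, s)

	fmt.Println(viaAffine.Equal(&direct)) // true
}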
diff --git a/ecc/bls12-378/fr/polynomial/multilin.go b/ecc/bls12-378/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..ddfa198336
--- /dev/null
+++ b/ecc/bls12-378/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bls12-378/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is the partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] obtained by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// on the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + 2xy interpolates
+//              _________________
+//              |       |       |
+//              |   0   |   1   |
+//              |_______|_______|
+//      y       |       |       |
+//              |   1   |   0   |
+//              |_______|_______|
+//
+//                      x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// the indicator of q' == h' for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // res <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
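
A minimal sketch of the new MultiLin type in use; the evaluation point values are arbitrary:

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-378/fr"
	"github.com/consensys/gnark-crypto/ecc/bls12-378/fr/polynomial"
)

func main() {
	// f is the multilinear polynomial with f(0,0)=1, f(0,1)=2, f(1,0)=3, f(1,1)=4
	f := make(polynomial.MultiLin, 4)
	for i := range f {
		f[i].SetUint64(uint64(i + 1))
	}

	// evaluate f outside the hypercube; Evaluate folds one variable at a time internally
	var r1, r2 fr.Element
	r1.SetUint64(5)
	r2.SetUint64(7)
	v := f.Evaluate([]fr.Element{r1, r2})

	fmt.Println(v.String())
}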
diff --git a/ecc/bls12-378/fr/polynomial/pool.go b/ecc/bls12-378/fr/polynomial/pool.go
new file mode 100644
index 0000000000..15c16d53fd
--- /dev/null
+++ b/ecc/bls12-378/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bls12-378/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool clears the pool completely and shields against memory leaks.
+// E.g.: if we forgot to dump a polynomial at some point, this ensures the value eventually gets dumped.
+// Returns how many polynomials were cleared that way.
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+ rC.Store(ptr, struct{}{}) // remember that this pointer is being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+ // If rC did not register the pointer, then either
+ // the array was allocated somewhere else and can be ignored,
+ // or this is a double put, which MUST be ignored
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+ panic(fmt.Sprintf("can't cast to large or small array, the put array's is %v it should have capacity %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
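
A minimal sketch of the pool workflow added above (Make borrows a pooled backing array, Dump hands it back); the length 64 is arbitrary:

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-378/fr/polynomial"
)

func main() {
	buf := polynomial.Make(64) // []fr.Element backed by a pooled array
	for i := range buf {
		buf[i].SetUint64(uint64(i))
	}

	// ... use buf ...

	freed := polynomial.Dump(buf) // return the backing array to the pool
	fmt.Println(freed)            // 1
}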
diff --git a/ecc/bls12-378/g1.go b/ecc/bls12-378/g1.go
index 41c829ba69..6384c93dfb 100644
--- a/ecc/bls12-378/g1.go
+++ b/ecc/bls12-378/g1.go
@@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -336,7 +344,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -814,9 +822,9 @@ func (p *g1JacExtended) doubleMixed(q *G1Affine) *g1JacExtended {
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -836,7 +844,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -847,7 +855,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
var a, b fp.Element
@@ -859,6 +867,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -922,8 +931,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take the value in the base table, double it c times, voilà.
@@ -965,7 +973,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
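
A minimal sketch of the new BatchJacobianToAffineG1 signature, which now allocates and returns the affine slice instead of filling a caller-provided one; the points are placeholders:

package main

import (
	"fmt"

	bls12378 "github.com/consensys/gnark-crypto/ecc/bls12-378"
)

func main() {
	g1Jac, _, _, _ := bls12378.Generators()
	jacs := []bls12378.G1Jac{g1Jac, g1Jac}

	// previously: affs := make([]G1Affine, len(jacs)); BatchJacobianToAffineG1(jacs, affs)
	affs := bls12378.BatchJacobianToAffineG1(jacs)

	fmt.Println(len(affs) == len(jacs), affs[0].Equal(&affs[1])) // true true
}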
diff --git a/ecc/bls12-378/g1_test.go b/ecc/bls12-378/g1_test.go
index ca3840383d..dccaca15c7 100644
--- a/ecc/bls12-378/g1_test.go
+++ b/ecc/bls12-378/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BLS12-378] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls12-378/g2.go b/ecc/bls12-378/g2.go
index 44789a1269..143474f59f 100644
--- a/ecc/bls12-378/g2.go
+++ b/ecc/bls12-378/g2.go
@@ -341,7 +341,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bls12-378/g2_test.go b/ecc/bls12-378/g2_test.go
index 89ebff3cc0..21e79d7238 100644
--- a/ecc/bls12-378/g2_test.go
+++ b/ecc/bls12-378/g2_test.go
@@ -99,7 +99,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fptower.E2) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -505,7 +505,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -533,7 +533,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls12-378/hash_to_g1.go b/ecc/bls12-378/hash_to_g1.go
index ccc9468dcc..e67e9b40c2 100644
--- a/ecc/bls12-378/hash_to_g1.go
+++ b/ecc/bls12-378/hash_to_g1.go
@@ -89,65 +89,58 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is a non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
tv1 := fp.Element{3422016347327078217, 15952935974507985473, 10210560017327941857, 6195437588884472512, 1531492004832937820, 17090488542823369} //tv1 = c6
var tv2, tv3, tv4, tv5 fp.Element
var exp big.Int
- // c4 = 2199023255551 = 2^41 - 1
+ // c4 = 2199023255551 = 2⁴¹ - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte{1, 255, 255, 255, 255, 255})
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = 137617509170765099891752579783724504691201148437113468788429769127729045045134922651478473733013131816
exp.SetBytes([]byte{251, 172, 16, 89, 161, 52, 100, 20, 242, 215, 73, 3, 180, 65, 232, 161, 1, 103, 173, 145, 196, 8, 201, 166, 3, 112, 216, 52, 41, 39, 95, 243, 165, 253, 218, 160, 139, 0, 0, 38, 82, 40})
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = 1099511627776
exp.SetBytes([]byte{1, 0, 0, 0, 0, 0})
- tv5.Exp(tv4, &exp)
-
- isQNr := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &fp.Element{17614810958234635860, 11393801269165528284, 8781501035240632779, 8106712880529013806, 4971838157288047198, 122121039825317715})
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh(big.NewInt(1), 41-2)
-
- for i := 41; i >= 2; i-- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp, 1)
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := g1NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := fp.Element{17614810958234635860, 11393801269165528284, 8781501035240632779, 8106712880529013806, 4971838157288047198, 122121039825317715}
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh(big.NewInt(1), 41-2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := 41; i >= 2; i-- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := g1NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
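The trick relied on above: since v² is a nonzero square, u/v and u·v have the same quadratic-residue status, so the residue test (and ultimately the square root) never needs 1/v. A standalone math/big sketch of that equivalence, with toy values (p, u, v are illustrative only and unrelated to the field above):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(1009) // toy odd prime
	u := big.NewInt(5)
	v := big.NewInt(42)

	// u/v is a square mod p  <=>  u*v is a square mod p, because u/v = (u*v) * (v^-1)^2
	uv := new(big.Int).Mod(new(big.Int).Mul(u, v), p)
	viaProduct := big.Jacobi(uv, p) == 1

	// naive version that does invert v, for comparison
	vInv := new(big.Int).ModInverse(v, p)
	uOverV := new(big.Int).Mod(new(big.Int).Mul(u, vInv), p)
	viaInverse := big.Jacobi(uOverV, p) == 1

	fmt.Println(viaProduct, viaInverse) // both agree
}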
@@ -161,12 +154,6 @@ func g1NotOne(x *fp.Element) uint64 {
}
-/*
-// g1SetZ sets z to [11].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {5249763402351377716, 3384457438931451475, 13367120442609335946, 13855353052415766542, 11761008755492169078, 30127809456627797} )
-}*/
-
// g1MulByZ multiplies x by [11] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -181,30 +168,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{15314533651602404840, 3999629397495592995, 17991228730268553058, 13253234862282888158, 4784493033884022421, 276795783356562829}
+ var sswuIsoCurveCoeffB = fp.Element{10499526804702755432, 6768914877862902950, 8287496811509120276, 9263962031121981469, 5075273437274786541, 60255618913255595}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{10499526804702755432, 6768914877862902950, 8287496811509120276, 9263962031121981469, 5075273437274786541, 60255618913255595})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -212,48 +198,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{5249763402351377716, 3384457438931451475, 13367120442609335946, 13855353052415766542, 11761008755492169078, 30127809456627797}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{15314533651602404840, 3999629397495592995, 17991228730268553058, 13253234862282888158, 4784493033884022421, 276795783356562829}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{15314533651602404840, 3999629397495592995, 17991228730268553058, 13253234862282888158, 4784493033884022421, 276795783356562829})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{10499526804702755432, 6768914877862902950, 8287496811509120276, 9263962031121981469, 5075273437274786541, 60255618913255595})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -296,13 +279,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -319,7 +302,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -339,7 +322,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -349,7 +332,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
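For reference, the two entry points whose godoc links are updated above are used as follows; a minimal sketch assuming the public bls12-378 package API (the dst tag is only an example value, applications should pick their own):

package main

import (
	"fmt"

	bls12378 "github.com/consensys/gnark-crypto/ecc/bls12-378"
)

func main() {
	msg := []byte("some message")
	dst := []byte("EXAMPLE-APP-V01-CS01") // domain separation tag, example only

	// uniform hash to G1, usable as a random oracle
	p, err := bls12378.HashToG1(msg, dst)
	if err != nil {
		panic(err)
	}

	// faster, non-uniform encoding
	q, err := bls12378.EncodeToG1(msg, dst)
	if err != nil {
		panic(err)
	}

	fmt.Println(p.IsOnCurve(), q.IsOnCurve()) // true true
}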
diff --git a/ecc/bls12-378/internal/fptower/e12.go b/ecc/bls12-378/internal/fptower/e12.go
index 0169ee5054..f3dc8789f5 100644
--- a/ecc/bls12-378/internal/fptower/e12.go
+++ b/ecc/bls12-378/internal/fptower/e12.go
@@ -225,28 +225,45 @@ func (z *E12) CyclotomicSquareCompressed(x *E12) *E12 {
}
// DecompressKarabina Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
func (z *E12) DecompressKarabina(x *E12) *E12 {
var t [3]E2
var one E2
one.SetOne()
- // t0 = g1^2
- t[0].Square(&x.C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t[1].Sub(&t[0], &x.C0.B2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5^2 + t1
- t[2].Square(&x.C1.B2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.C1.B0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.C1.B2.IsZero() {
+ t[0].Mul(&x.C0.B1, &x.C1.B2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.C0.B2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.C1.B2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 4 * g3
+ t[1].Double(&x.C1.B0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.C1.B1.Mul(&t[0], &t[1])
+ z.C1.B1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.C0.B2, &x.C0.B1)
@@ -255,7 +272,7 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.C1.B0, &x.C1.B2)
// c_0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
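A round-trip sketch of the path this edge case fixes: squaring a GT element via the compressed cyclotomic square and then decompressing should match a plain square. This assumes the curve package's GT alias exposes these E12 methods and that Generators and Pair are available, as elsewhere in the library:

package main

import (
	"fmt"

	bls12378 "github.com/consensys/gnark-crypto/ecc/bls12-378"
)

func main() {
	_, _, g1, g2 := bls12378.Generators()

	// pairing outputs live in the cyclotomic subgroup, where Karabina compression applies
	gt, err := bls12378.Pair([]bls12378.G1Affine{g1}, []bls12378.G2Affine{g2})
	if err != nil {
		panic(err)
	}

	var plain, compressed bls12378.GT
	plain.Square(&gt)
	compressed.CyclotomicSquareCompressed(&gt).DecompressKarabina(&compressed)

	fmt.Println(plain.Equal(&compressed)) // true
}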
@@ -271,6 +288,15 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
}
// BatchDecompressKarabina multiple Karabina's cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using the Montgomery batch inverse
func BatchDecompressKarabina(x []E12) []E12 {
n := len(x)
@@ -286,19 +312,29 @@ func BatchDecompressKarabina(x []E12) []E12 {
one.SetOne()
for i := 0; i < n; i++ {
- // t0 = g1^2
- t0[i].Square(&x[i].C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t1[i].Sub(&t0[i], &x[i].C0.B2).
- Double(&t1[i]).
- Add(&t1[i], &t0[i])
- // t0 = E * g5^2 + t1
- t2[i].Square(&x[i].C1.B2)
- t0[i].MulByNonResidue(&t2[i]).
- Add(&t0[i], &t1[i])
- // t1 = 4 * g3
- t1[i].Double(&x[i].C1.B0).
- Double(&t1[i])
+ // g3 == 0
+ if x[i].C1.B2.IsZero() {
+ t0[i].Mul(&x[i].C0.B1, &x[i].C1.B2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t0[i].Square(&x[i].C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].C0.B2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5^2 + t1
+ t2[i].Square(&x[i].C1.B2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 4 * g3
+ t1[i].Double(&x[i].C1.B0).
+ Double(&t1[i])
+ }
}
t1 = BatchInvertE2(t1) // costs 1 inverse
@@ -315,7 +351,7 @@ func BatchDecompressKarabina(x []E12) []E12 {
t2[i].Double(&t2[i])
t2[i].Sub(&t2[i], &t1[i])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (the g3 values can be 0)
t1[i].Mul(&x[i].C1.B0, &x[i].C1.B2)
// z0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t2[i].Add(&t2[i], &t1[i])
@@ -366,6 +402,8 @@ func (z *E12) CyclotomicSquare(x *E12) *E12 {
}
// Inverse set z to the inverse of x in E12 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E12) Inverse(x *E12) *E12 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -383,6 +421,8 @@ func (z *E12) Inverse(x *E12) *E12 {
// BatchInvertE12 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE12(a []E12) []E12 {
res := make([]E12, len(a))
if len(a) == 0 {
@@ -425,7 +465,7 @@ func (z *E12) Exp(x E12, k *big.Int) *E12 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q¹²) == (x⁻¹)ᵏ (mod q¹²)
x.Inverse(&x)
// we negate k in a temp big.Int since
@@ -778,13 +818,14 @@ func (z *E12) CompressTorus() (E6, error) {
return res, nil
}
-// BatchCompressTorus GT/E12 elements to half their size
-// using a batch inversion
+// BatchCompressTorus compresses GT/E12 elements to half their size using a batch inversion.
+//
+// if len(x) == 0 or if any of the x[i].C1 coordinates is 0, this function returns an error.
func BatchCompressTorus(x []E12) ([]E6, error) {
n := len(x)
if n == 0 {
- return []E6{}, errors.New("invalid input size")
+ return nil, errors.New("invalid input size")
}
var one E6
@@ -793,6 +834,10 @@ func BatchCompressTorus(x []E12) ([]E6, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].C1)
+ // return an error if any of the x[i].C1 is 0
+ if res[i].IsZero() {
+ return nil, errors.New("invalid input; C1 is 0")
+ }
}
t := BatchInvertE6(res) // costs 1 inverse
diff --git a/ecc/bls12-378/internal/fptower/e12_test.go b/ecc/bls12-378/internal/fptower/e12_test.go
index 2ce5f01057..51a5042c7b 100644
--- a/ecc/bls12-378/internal/fptower/e12_test.go
+++ b/ecc/bls12-378/internal/fptower/e12_test.go
@@ -339,13 +339,29 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BLS12-378] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E12) bool {
- var b, c, d E12
+ var _a, b, c, d, _c, _d E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
return c.Equal(&d)
},
genA,
@@ -353,18 +369,26 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BLS12-378] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
func(a *E12) bool {
- var b E12
- // put in the cyclotomic subgroup
+ var _a, b E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
var a2, a4, a17 E12
- a2.Set(a)
+ a2.Set(&_a)
a4.Set(a)
a17.Set(a)
- a2.nSquareCompressed(2)
+ a2.nSquareCompressed(2) // case g3 == 0
a4.nSquareCompressed(4)
a17.nSquareCompressed(17)
batch := BatchDecompressKarabina([]E12{a2, a4, a17})
diff --git a/ecc/bls12-378/internal/fptower/e2.go b/ecc/bls12-378/internal/fptower/e2.go
index 55fd82e0b5..4de9ceea91 100644
--- a/ecc/bls12-378/internal/fptower/e2.go
+++ b/ecc/bls12-378/internal/fptower/e2.go
@@ -246,6 +246,8 @@ func (z *E2) Sqrt(x *E2) *E2 {
// BatchInvertE2 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE2(a []E2) []E2 {
res := make([]E2, len(a))
if len(a) == 0 {
diff --git a/ecc/bls12-378/internal/fptower/e2_test.go b/ecc/bls12-378/internal/fptower/e2_test.go
index 327a5e263a..edf4c7425d 100644
--- a/ecc/bls12-378/internal/fptower/e2_test.go
+++ b/ecc/bls12-378/internal/fptower/e2_test.go
@@ -189,12 +189,6 @@ func TestE2ReceiverIsOperand(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
func TestE2MulMaxed(t *testing.T) {
@@ -404,12 +398,6 @@ func TestE2Ops(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
// ------------------------------------------------------------
diff --git a/ecc/bls12-378/internal/fptower/e6.go b/ecc/bls12-378/internal/fptower/e6.go
index 2ed48dc26e..4da093f5f0 100644
--- a/ecc/bls12-378/internal/fptower/e6.go
+++ b/ecc/bls12-378/internal/fptower/e6.go
@@ -242,6 +242,8 @@ func (z *E6) Square(x *E6) *E6 {
}
// Inverse an element in E6
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -270,6 +272,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
diff --git a/ecc/bls12-378/multiexp.go b/ecc/bls12-378/multiexp.go
index 5f00d42231..b42536f1b9 100644
--- a/ecc/bls12-378/multiexp.go
+++ b/ecc/bls12-378/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then we borrow 2^c from the next window and subtract
// 2^{c} from the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates whether the provided scalars are in Montgomery form
// returns smallValues, the number of scalars that meet the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
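A self-contained sketch of the signed-digit idea described in this comment (illustrative recoding on a uint64, not the library's partitionScalars):

package main

import "fmt"

// recode splits s into signed c-bit digits with s = Σ dᵢ·2^(c·i).
// A digit larger than 2^(c-1) borrows 2^c from the next window and becomes negative,
// which halves the number of buckets since -G is cheap to compute.
func recode(s uint64, c uint) []int64 {
	mask := uint64(1)<<c - 1
	var digits []int64
	for s != 0 {
		d := int64(s & mask)
		s >>= c
		if d > 1<<(c-1) {
			d -= 1 << c // make the digit negative...
			s++         // ...and repay the borrowed 2^c in the next window
		}
		digits = append(digits, d)
	}
	return digits
}

func main() {
	s, c := uint64(0xDEAD), uint(4)
	digits := recode(s, c)

	// reconstruct Σ dᵢ·2^(c·i) to check the decomposition
	var acc int64
	for i := len(digits) - 1; i >= 0; i-- {
		acc = acc<<c + digits[i]
	}
	fmt.Println(digits, acc == int64(s)) // [-3 -5 -1 -2 1] true
}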
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
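Caller-side sketch of the stricter config handling: a mismatch between points and scalars, or an NbTasks above 1024, now comes back as an error rather than a panic. This assumes Generators() is available in the package, as elsewhere in the library:

package main

import (
	"fmt"
	"runtime"

	"github.com/consensys/gnark-crypto/ecc"
	bls12378 "github.com/consensys/gnark-crypto/ecc/bls12-378"
	"github.com/consensys/gnark-crypto/ecc/bls12-378/fr"
)

func main() {
	_, _, g1Aff, _ := bls12378.Generators()

	points := make([]bls12378.G1Affine, 4)
	scalars := make([]fr.Element, 4)
	for i := range points {
		points[i] = g1Aff
		scalars[i].SetUint64(uint64(i + 1))
	}

	var res bls12378.G1Affine
	// NbTasks <= 0 falls back to all CPUs; NbTasks > 1024 or len(points) != len(scalars) is an error
	if _, err := res.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil {
		panic(err)
	}
	fmt.Println(res.IsOnCurve()) // true
}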
@@ -333,9 +339,6 @@ func msmInnerG1Jac(p *G1Jac, c int, points []G1Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -1180,59 +1183,9 @@ func (p *G1Jac) msmC21(points []G1Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG1Affine(p, c, chChunks[:])
}
-func (p *G1Jac) msmC22(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g1JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g1JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G1Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g1JacExtended
- msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) {
- var buckets [1 << (c - 1)]g1JacExtended
- msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g1JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG1Affine(p, c, chChunks[:])
-}
-
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -1243,6 +1196,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -1279,13 +1234,15 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
// we split recursively until nbChunks(c) >= nbTasks,
bestC := func(nbPoints int) uint64 {
// implemented msmC methods (the c we use must be in this slice)
- implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22}
+ implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
var C uint64
// approximate cost (in group operations)
// cost = bits/c * (nbPoints + 2^{c})
@@ -1403,9 +1360,6 @@ func msmInnerG2Jac(p *G2Jac, c int, points []G2Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -2249,55 +2203,3 @@ func (p *G2Jac) msmC21(points []G2Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG2Affine(p, c, chChunks[:])
}
-
-func (p *G2Jac) msmC22(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g2JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g2JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G2Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g2JacExtended
- msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) {
- var buckets [1 << (c - 1)]g2JacExtended
- msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g2JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG2Affine(p, c, chChunks[:])
-}
diff --git a/ecc/bls12-378/multiexp_test.go b/ecc/bls12-378/multiexp_test.go
index ff93935d2e..39c7f5dd67 100644
--- a/ecc/bls12-378/multiexp_test.go
+++ b/ecc/bls12-378/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
+ if testing.Short() {
+ // test only "odd" and "even" (i.e. window sizes that divide the word size vs. those that do not)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentiation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ // for G2, CI struggles with large window sizes c since they require allocating a lot of memory for the buckets.
+ // test only "odd" and "even" (i.e. window sizes that divide the word size vs. those that do not)
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentiation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bls12-378/pairing.go b/ecc/bls12-378/pairing.go
index e7ae9af52d..c88db75481 100644
--- a/ecc/bls12-378/pairing.go
+++ b/ecc/bls12-378/pairing.go
@@ -30,7 +30,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -41,6 +43,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
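Because Pair and PairingCheck now document that they skip subgroup membership checks, untrusted points should be validated first; a hedged sketch using the existing IsInSubGroup methods:

package main

import (
	"errors"
	"fmt"

	bls12378 "github.com/consensys/gnark-crypto/ecc/bls12-378"
)

// checkedPair rejects points outside the prime-order subgroups before pairing.
func checkedPair(p bls12378.G1Affine, q bls12378.G2Affine) (bls12378.GT, error) {
	if !p.IsInSubGroup() || !q.IsInSubGroup() {
		return bls12378.GT{}, errors.New("input not in the correct subgroup")
	}
	return bls12378.Pair([]bls12378.G1Affine{p}, []bls12378.G2Affine{q})
}

func main() {
	_, _, g1, g2 := bls12378.Generators()
	if _, err := checkedPair(g1, g2); err != nil {
		panic(err)
	}
	fmt.Println("pairing inputs validated")
}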
diff --git a/ecc/bls12-378/twistededwards/eddsa/eddsa.go b/ecc/bls12-378/twistededwards/eddsa/eddsa.go
index 26d6b47227..8e12207ba9 100644
--- a/ecc/bls12-378/twistededwards/eddsa/eddsa.go
+++ b/ecc/bls12-378/twistededwards/eddsa/eddsa.go
@@ -89,7 +89,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -137,7 +137,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -223,8 +223,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -232,9 +232,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bls12-378/twistededwards/point.go b/ecc/bls12-378/twistededwards/point.go
index 866b689cf9..f7846fb6d6 100644
--- a/ecc/bls12-378/twistededwards/point.go
+++ b/ecc/bls12-378/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
diff --git a/ecc/bls12-378/twistededwards/point_test.go b/ecc/bls12-378/twistededwards/point_test.go
index ab966e7732..b3baa3e279 100644
--- a/ecc/bls12-378/twistededwards/point_test.go
+++ b/ecc/bls12-378/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(&params.Base, &s)
- p2.ScalarMul(&params.Base, &s)
+ p1.ScalarMultiplication(&params.Base, &s)
+ p2.ScalarMultiplication(&params.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(&params.Base, &s1)
- p2.ScalarMul(&params.Base, &s2)
+ p1.ScalarMultiplication(&params.Base, &s1)
+ p2.ScalarMultiplication(&params.Base, &s2)
p3.Set(&params.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(&params.Base, &s1)
+ p3.ScalarMultiplication(&params.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(&params.Base, &s1)
+ p2.ScalarMultiplication(&params.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(&params.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(&params.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(&params.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(&params.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(&params.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(&params.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(&params.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(&params.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bls12-381/bandersnatch/eddsa/eddsa.go b/ecc/bls12-381/bandersnatch/eddsa/eddsa.go
index 6feb5cbbb6..c9e883d76b 100644
--- a/ecc/bls12-381/bandersnatch/eddsa/eddsa.go
+++ b/ecc/bls12-381/bandersnatch/eddsa/eddsa.go
@@ -89,7 +89,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -137,7 +137,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -223,8 +223,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -232,9 +232,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bls12-381/bandersnatch/endomorpism.go b/ecc/bls12-381/bandersnatch/endomorpism.go
index c4d1b1dce6..5c6aa7a0b8 100644
--- a/ecc/bls12-381/bandersnatch/endomorpism.go
+++ b/ecc/bls12-381/bandersnatch/endomorpism.go
@@ -30,7 +30,7 @@ func (p *PointProj) phi(p1 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication (GLV) of a point
+// ScalarMultiplication scalar multiplication (GLV) of a point
// p1 in projective coordinates with a scalar in big.Int
func (p *PointProj) scalarMulGLV(p1 *PointProj, scalar *big.Int) *PointProj {
@@ -121,7 +121,7 @@ func (p *PointExtended) phi(p1 *PointExtended) *PointExtended {
return p
}
-// ScalarMul scalar multiplication (GLV) of a point
+// ScalarMultiplication scalar multiplication (GLV) of a point
// p1 in projective coordinates with a scalar in big.Int
func (p *PointExtended) scalarMulGLV(p1 *PointExtended, scalar *big.Int) *PointExtended {
initOnce.Do(initCurveParams)
diff --git a/ecc/bls12-381/bandersnatch/point.go b/ecc/bls12-381/bandersnatch/point.go
index baa936af8b..4e030bc1f8 100644
--- a/ecc/bls12-381/bandersnatch/point.go
+++ b/ecc/bls12-381/bandersnatch/point.go
@@ -255,13 +255,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -408,9 +408,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
return p.scalarMulGLV(p1, scalar)
}
@@ -597,8 +597,8 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
return p.scalarMulGLV(p1, scalar)
}
diff --git a/ecc/bls12-381/bandersnatch/point_test.go b/ecc/bls12-381/bandersnatch/point_test.go
index d447631e73..d8b8fbc9ca 100644
--- a/ecc/bls12-381/bandersnatch/point_test.go
+++ b/ecc/bls12-381/bandersnatch/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(&params.Base, &s)
- p2.ScalarMul(&params.Base, &s)
+ p1.ScalarMultiplication(&params.Base, &s)
+ p2.ScalarMultiplication(&params.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(&params.Base, &s1)
- p2.ScalarMul(&params.Base, &s2)
+ p1.ScalarMultiplication(&params.Base, &s1)
+ p2.ScalarMultiplication(&params.Base, &s2)
p3.Set(&params.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(&params.Base, &s1)
+ p3.ScalarMultiplication(&params.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(&params.Base, &s1)
+ p2.ScalarMultiplication(&params.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(&params.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(&params.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(&params.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(&params.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(&params.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(&params.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(&params.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(&params.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bls12-381/bls12-381.go b/ecc/bls12-381/bls12-381.go
index 86cd1fe5b0..dbfb11edc2 100644
--- a/ecc/bls12-381/bls12-381.go
+++ b/ecc/bls12-381/bls12-381.go
@@ -1,3 +1,25 @@
+// Package bls12381 provides an efficient elliptic curve, pairing and hash-to-curve implementation for bls12-381.
+//
+// bls12-381: A Barreto--Lynn--Scott curve
+// embedding degree k=12
+// seed x₀=-15132376222941642752
+// 𝔽r: r=52435875175126190479447740508185965837690552500527637822603658699938581184513 (x₀⁴-x₀²+1)
+// 𝔽p: p=4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787 ((x₀-1)² ⋅ r(x₀)/3+x₀)
+// (E/𝔽p): Y²=X³+4
+// (Eₜ/𝔽p²): Y² = X³+4(u+1) (M-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
+// Extension fields tower:
+// 𝔽p²[u] = 𝔽p/u²+1
+// 𝔽p⁶[v] = 𝔽p²/v³-1-u
+// 𝔽p¹²[w] = 𝔽p⁶/w²-v
+// optimal Ate loop size:
+// x₀
+// Security: estimated 126-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 255 bits and p¹² is 4569 bits)
+//
+// Warning
+//
+// This code has been partially audited and is provided as-is. In particular, there are no security guarantees such as constant-time implementation or side-channel attack resistance.
package bls12381
import (
@@ -9,18 +31,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-381/internal/fptower"
)
-// BLS12-381: A Barreto--Lynn--Scott curve of embedding degree k=12 with seed x₀=-15132376222941642752
-// 𝔽r: r=52435875175126190479447740508185965837690552500527637822603658699938581184513 (x₀⁴-x₀²+1)
-// 𝔽p: p=4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787 ((x₀-1)² ⋅ r(x₀)/3+x₀)
-// (E/𝔽p): Y²=X³+4
-// (Eₜ/𝔽p²): Y² = X³+4(u+1) (M-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
-// Extension fields tower:
-// 𝔽p²[u] = 𝔽p/u²+1
-// 𝔽p⁶[v] = 𝔽p²/v³-1-u
-// 𝔽p¹²[w] = 𝔽p⁶/w²-v
-// optimal Ate loop size: x₀
-
// ID bls381 ID
const ID = ecc.BLS12_381
@@ -79,7 +89,7 @@ func init() {
g1Gen.X.SetString("3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507")
g1Gen.Y.SetString("1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
g2Gen.X.SetString("352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160",
"3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758")
diff --git a/ecc/bls12-381/doc.go b/ecc/bls12-381/doc.go
deleted file mode 100644
index 96abdc1258..0000000000
--- a/ecc/bls12-381/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bls12381 efficient elliptic curve and pairing implementation for bls12-381.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bls12381
diff --git a/ecc/bls12-381/fp/element.go b/ecc/bls12-381/fp/element.go
index 7c7bf3b4ce..6c9aad2bcf 100644
--- a/ecc/bls12-381/fp/element.go
+++ b/ecc/bls12-381/fp/element.go
@@ -181,7 +181,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1086,12 +1086,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid, this method leaves z unchanged and returns nil and an error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1099,7 +1100,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bls12-381/fr/element.go b/ecc/bls12-381/fr/element.go
index 2fed77e4c2..f825c87682 100644
--- a/ecc/bls12-381/fr/element.go
+++ b/ecc/bls12-381/fr/element.go
@@ -175,7 +175,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -944,12 +944,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -957,7 +958,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bls12-381/fr/kzg/kzg.go b/ecc/bls12-381/fr/kzg/kzg.go
index d2dfaf6dd9..0a7a712d33 100644
--- a/ecc/bls12-381/fr/kzg/kzg.go
+++ b/ecc/bls12-381/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bls12381.G1Affine
+ var claimedValueG1Aff bls12381.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bls12381.G1Jac
+ var fminusfaG1Jac bls12381.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bls12381.G1Affine
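// Sketch of what the hunk above computes (illustrative, reusing the names from
// the patch): KZG verification checks, in the exponent, that
// f(α) − f(a) = H(α)·(α − a). Having ScalarMultiplicationAffine return a
// Jacobian point lets [f(a)]G₁ be subtracted from the commitment directly,
// without the Affine→Jacobian conversion the old code needed:
//
//	var claimedValueG1Aff bls12381.G1Jac
//	claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt) // [f(a)]G₁
//	fminusfaG1Jac.FromAffine(commitment)
//	fminusfaG1Jac.SubAssign(&claimedValueG1Aff) // [f(α) − f(a)]G₁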
diff --git a/ecc/bls12-381/fr/polynomial/multilin.go b/ecc/bls12-381/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..087ef65e7b
--- /dev/null
+++ b/ecc/bls12-381/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁿ⁻ⁱ bᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
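// Illustrative sketch (hypothetical values; written as if in a _test.go of this
// package): the bookkeeping table of a 2-variable multilinear polynomial holds
// f(0,0), f(0,1), f(1,0), f(1,1); each Fold fixes the first remaining variable
// to r and halves the table, so entry b becomes f(r, b) = f(0,b) + r·(f(1,b) − f(0,b)).
func exampleFold() fr.Element {
	m := MultiLin(make([]fr.Element, 4))
	for i := range m {
		m[i].SetUint64(uint64(i + 1)) // f(0,0)=1, f(0,1)=2, f(1,0)=3, f(1,1)=4
	}
	var r fr.Element
	r.SetUint64(5)
	m.Fold(r) // len(m) == 2: m = (f(r,0), f(r,1)) = (11, 12)
	m.Fold(r) // len(m) == 1: m[0] = f(r,r) = 11 + 5·(12−11) = 16
	return m[0]
}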
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// at the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to q' == h' for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // res <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
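// Illustrative sketch (written as if in a _test.go of this package): on binary
// vectors EvalEq acts as an equality indicator; on arbitrary field elements it
// is the multilinear extension of that indicator.
func exampleEvalEq() (fr.Element, fr.Element) {
	q := make([]fr.Element, 2)
	h := make([]fr.Element, 2)
	q[0].SetOne()         // q = (1, 0)
	h[0].SetOne()         // h = (1, 0)
	same := EvalEq(q, h)  // = 1 since q == h
	h[0].SetZero()        // h = (0, 0)
	other := EvalEq(q, h) // = 0 since q != h
	return same, other
}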
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1<<n {
+ panic("m must have length 2ⁿ")
+ }
+}
+
+// signedBigInt returns the representative of v in (-r/2, r/2)
+func signedBigInt(v *fr.Element) big.Int {
+ var i big.Int
+ v.ToBigIntRegular(&i)
+ var iDouble big.Int
+ iDouble.Lsh(&i, 1)
+ if iDouble.Cmp(fr.Modulus()) > 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
diff --git a/ecc/bls12-381/fr/polynomial/pool.go b/ecc/bls12-381/fr/polynomial/pool.go
new file mode 100644
index 0000000000..a364c152bd
--- /dev/null
+++ b/ecc/bls12-381/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool clears the pool completely and shields against memory leaks.
+// E.g. if we forgot to dump a polynomial at some point, this ensures the value gets dumped eventually.
+// Returns how many polynomials were cleared that way.
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+ rC.Store(ptr, struct{}{}) // register the pointer as in use
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+ // If rC did not register the pointer, then either the array was
+ // allocated somewhere else (and can be ignored) or this is a double
+ // put, which MUST be ignored
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+ panic(fmt.Sprintf("can't cast to large or small array, the put array's is %v it should have capacity %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
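// Illustrative sketch (written as if in a _test.go of this package): the
// intended lifecycle is Make to borrow a slice from the pool, use it, then
// Dump to hand its backing array back once it is no longer needed.
func examplePool() {
	v := Make(100) // backed by a pooled [256]fr.Element since 100 <= maxNForSmallPool
	defer Dump(v)  // returns the backing array to the pool
	for i := range v {
		v[i].SetUint64(uint64(i))
	}
}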
diff --git a/ecc/bls12-381/g1.go b/ecc/bls12-381/g1.go
index fc151cb34e..26e21d85aa 100644
--- a/ecc/bls12-381/g1.go
+++ b/ecc/bls12-381/g1.go
@@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -336,7 +344,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -814,9 +822,9 @@ func (p *g1JacExtended) doubleMixed(q *G1Affine) *g1JacExtended {
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -836,7 +844,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the point at infinity in affine coordinates
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -847,7 +855,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the point at infinity in affine coordinates
continue
}
var a, b fp.Element
@@ -859,6 +867,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
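// Illustrative sketch (hypothetical caller): with the new signature the result
// slice is allocated and returned by the function itself, still at the cost of
// a single field inversion for the whole batch:
//
//	affine := BatchJacobianToAffineG1(jacPoints) // len(affine) == len(jacPoints)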
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -922,8 +931,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take value in the base table, double it c time, voilà.
@@ -965,7 +973,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
diff --git a/ecc/bls12-381/g1_test.go b/ecc/bls12-381/g1_test.go
index 700a4227be..9aa3311f05 100644
--- a/ecc/bls12-381/g1_test.go
+++ b/ecc/bls12-381/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BLS12-381] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every word of the scalars is filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls12-381/g2.go b/ecc/bls12-381/g2.go
index d129abb7fb..d2e3a4b91c 100644
--- a/ecc/bls12-381/g2.go
+++ b/ecc/bls12-381/g2.go
@@ -341,7 +341,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bls12-381/g2_test.go b/ecc/bls12-381/g2_test.go
index e24dffa784..c259606622 100644
--- a/ecc/bls12-381/g2_test.go
+++ b/ecc/bls12-381/g2_test.go
@@ -99,7 +99,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fptower.E2) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -505,7 +505,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every word of the scalars is filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -533,7 +533,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls12-381/hash_to_g1.go b/ecc/bls12-381/hash_to_g1.go
index 6c3dda2056..23c108b946 100644
--- a/ecc/bls12-381/hash_to_g1.go
+++ b/ecc/bls12-381/hash_to_g1.go
@@ -131,43 +131,38 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.2. q = 3 mod 4
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q- (3 mod 4)
var tv1 fp.Element
- tv1.Square(v)
+ tv1.Square(v) // 1. tv1 = v²
var tv2 fp.Element
- tv2.Mul(u, v)
- tv1.Mul(&tv1, &tv2)
+ tv2.Mul(u, v) // 2. tv2 = u * v
+ tv1.Mul(&tv1, &tv2) // 3. tv1 = tv1 * tv2
var y1 fp.Element
{
var c1 big.Int
// c1 = 1000602388805416848354447456433976039139220704984751971333014534031007912622709466110671907282253916009473568139946
- c1.SetBytes([]byte{6, 128, 68, 122, 142, 95, 249, 166, 146, 198, 233, 237, 144, 210, 235, 53, 217, 29, 210, 225, 60, 225, 68, 175, 217, 204, 52, 168, 61, 172, 61, 137, 7, 170, 255, 255, 172, 84, 255, 255, 238, 127, 191, 255, 255, 255, 234, 170})
- y1.Exp(tv1, &c1)
+ c1.SetBytes([]byte{6, 128, 68, 122, 142, 95, 249, 166, 146, 198, 233, 237, 144, 210, 235, 53, 217, 29, 210, 225, 60, 225, 68, 175, 217, 204, 52, 168, 61, 172, 61, 137, 7, 170, 255, 255, 172, 84, 255, 255, 238, 127, 191, 255, 255, 255, 234, 170}) // c1 = (q - 3) / 4 # Integer arithmetic
+
+ y1.Exp(tv1, &c1) // 4. y1 = tv1ᶜ¹
}
- y1.Mul(&y1, &tv2)
+ y1.Mul(&y1, &tv2) // 5. y1 = y1 * tv2
var y2 fp.Element
- y2.Mul(&y1, &fp.Element{17544630987809824292, 17306709551153317753, 8299808889594647786, 5930295261504720397, 675038575008112577, 167386374569371918})
-
- var tv3 fp.Element
- tv3.Square(&y1)
- tv3.Mul(&tv3, v)
-
- isQNr := tv3.NotEqual(u)
- z.Select(int(isQNr), &y1, &y2)
+ // c2 = sqrt(-Z)
+ tv3 := fp.Element{17544630987809824292, 17306709551153317753, 8299808889594647786, 5930295261504720397, 675038575008112577, 167386374569371918}
+ y2.Mul(&y1, &tv3) // 6. y2 = y1 * c2
+ tv3.Square(&y1) // 7. tv3 = y1²
+ tv3.Mul(&tv3, v) // 8. tv3 = tv3 * v
+ isQNr := tv3.NotEqual(u) // 9. isQR = tv3 == u
+ z.Select(int(isQNr), &y1, &y2) // 10. y = CMOV(y2, y1, isQR)
return isQNr
}
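// Illustrative property sketch (written as if in a _test.go; assumes v != 0):
// when g1SqrtRatio returns 0, z²·v == u; otherwise z²·v == Z·u, with Z = 11
// the SSWU non-residue used for this curve (see g1MulByZ below).
func checkG1SqrtRatio(u, v fp.Element) bool {
	var z, lhs, rhs fp.Element
	qnr := g1SqrtRatio(&z, &u, &v)
	lhs.Square(&z).Mul(&lhs, &v)
	rhs.Set(&u)
	if qnr != 0 {
		g1MulByZ(&rhs, &u) // rhs = 11·u
	}
	return lhs.Equal(&rhs)
}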
-/*
-// g1SetZ sets z to [11].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {9830232086645309404, 1112389714365644829, 8603885298299447491, 11361495444721768256, 5788602283869803809, 543934104870762216} )
-}*/
-
// g1MulByZ multiplies x by [11] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -182,30 +177,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{3415322872136444497, 9675504606121301699, 13284745414851768802, 2873609449387478652, 2897906769629812789, 1536947672689614213}
+ var sswuIsoCurveCoeffB = fp.Element{18129637713272545760, 11144507692959411567, 10108153527111632324, 9745270364868568433, 14587922135379007624, 469008097655535723}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{18129637713272545760, 11144507692959411567, 10108153527111632324, 9745270364868568433, 14587922135379007624, 469008097655535723})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -213,48 +207,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{9830232086645309404, 1112389714365644829, 8603885298299447491, 11361495444721768256, 5788602283869803809, 543934104870762216}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{3415322872136444497, 9675504606121301699, 13284745414851768802, 2873609449387478652, 2897906769629812789, 1536947672689614213}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{3415322872136444497, 9675504606121301699, 13284745414851768802, 2873609449387478652, 2897906769629812789, 1536947672689614213})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{18129637713272545760, 11144507692959411567, 10108153527111632324, 9745270364868568433, 14587922135379007624, 469008097655535723})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -297,13 +288,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -320,7 +311,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -340,7 +331,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -350,7 +341,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
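// Illustrative usage sketch (the tag below is made up and application-specific):
// both functions take the message and a domain separation tag; only HashToG1
// is suitable as a random oracle.
func exampleHashToG1(msg []byte) (G1Affine, error) {
	dst := []byte("MYAPP-V01-BLS12381G1-SSWU") // hypothetical dst
	return HashToG1(msg, dst)
}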
diff --git a/ecc/bls12-381/hash_to_g2.go b/ecc/bls12-381/hash_to_g2.go
index 8f371d4172..19f8fbfa8a 100644
--- a/ecc/bls12-381/hash_to_g2.go
+++ b/ecc/bls12-381/hash_to_g2.go
@@ -130,10 +130,11 @@ func g2Isogeny(p *G2Affine) {
// g2SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g2SqrtRatio(z *fptower.E2, u *fptower.E2, v *fptower.E2) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
tv1 := fptower.E2{
A0: fp.Element{8921533702591418330, 15859389534032789116, 3389114680249073393, 15116930867080254631, 3288288975085550621, 1021049300055853010},
@@ -142,59 +143,51 @@ func g2SqrtRatio(z *fptower.E2, u *fptower.E2, v *fptower.E2) uint64 {
var tv2, tv3, tv4, tv5 fptower.E2
var exp big.Int
- // c4 = 7 = 2^3 - 1
+ // c4 = 7 = 2³ - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte{7})
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = 1001205140483106588246484290269935788605945006208159541241399033561623546780709821462541004956387089373434649096260670658193992783731681621012512651314777238193313314641988297376025498093520728838658813979860931248214124593092835
exp.SetBytes([]byte{42, 67, 122, 75, 140, 53, 252, 116, 189, 39, 142, 170, 34, 242, 94, 158, 45, 201, 14, 80, 231, 4, 107, 70, 110, 89, 228, 147, 73, 232, 189, 5, 10, 98, 207, 209, 109, 220, 166, 239, 83, 20, 147, 48, 151, 142, 240, 17, 214, 134, 25, 200, 97, 133, 199, 178, 146, 232, 90, 135, 9, 26, 4, 150, 107, 249, 30, 211, 231, 27, 116, 49, 98, 195, 56, 54, 33, 19, 207, 215, 206, 214, 177, 215, 99, 130, 234, 178, 106, 160, 0, 1, 199, 24, 227})
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = 4
exp.SetBytes([]byte{4})
- tv5.Exp(tv4, &exp)
-
- isQNr := g2NotOne(&tv5)
-
- tv2.Mul(&tv3, &fptower.E2{
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := g2NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := fptower.E2{
A0: fp.Element{1921729236329761493, 9193968980645934504, 9862280504246317678, 6861748847800817560, 10375788487011937166, 4460107375738415},
A1: fp.Element{16821121318233475459, 10183025025229892778, 1779012082459463630, 3442292649700377418, 1061500799026501234, 1352426537312017168},
- })
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh(big.NewInt(1), 3-2)
-
- for i := 3; i >= 2; i-- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := g2NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp, 1)
+ }
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh(big.NewInt(1), 3-2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := 3; i >= 2; i-- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := g2NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
@@ -209,15 +202,6 @@ func g2NotOne(x *fptower.E2) uint64 {
}
-/*
-// g2SetZ sets z to [-2, -1].
-func g2SetZ(z *fptower.E2) {
- z.Set( &fptower.E2 {
-A0: fp.Element{9794203289623549276, 7309342082925068282, 1139538881605221074, 15659550692327388916, 16008355200866287827, 582484205531694093},
-A1: fp.Element{4897101644811774638, 3654671041462534141, 569769440802610537, 17053147383018470266, 17227549637287919721, 291242102765847046},
-} )
-}*/
-
// g2MulByZ multiplies x by [-2, -1] and stores the result in z
func g2MulByZ(z *fptower.E2, x *fptower.E2) {
@@ -228,33 +212,35 @@ func g2MulByZ(z *fptower.E2, x *fptower.E2) {
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve2 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve2(u *fptower.E2) G2Affine {
+ var sswuIsoCurveCoeffA = fptower.E2{
+ A0: fp.Element{0},
+ A1: fp.Element{16517514583386313282, 74322656156451461, 16683759486841714365, 815493829203396097, 204518332920448171, 1306242806803223655},
+ }
+ var sswuIsoCurveCoeffB = fptower.E2{
+ A0: fp.Element{2515823342057463218, 7982686274772798116, 7934098172177393262, 8484566552980779962, 4455086327883106868, 1323173589274087377},
+ A1: fp.Element{2515823342057463218, 7982686274772798116, 7934098172177393262, 8484566552980779962, 4455086327883106868, 1323173589274087377},
+ }
+
var tv1 fptower.E2
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g2MulByZ(&tv1, &tv1)
+ g2MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fptower.E2
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fptower.E2
- //Standard doc line 5
var tv4 fptower.E2
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fptower.E2{
- A0: fp.Element{2515823342057463218, 7982686274772798116, 7934098172177393262, 8484566552980779962, 4455086327883106868, 1323173589274087377},
- A1: fp.Element{2515823342057463218, 7982686274772798116, 7934098172177393262, 8484566552980779962, 4455086327883106868, 1323173589274087377},
- })
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g2NotZero(&tv2)
@@ -265,57 +251,45 @@ func mapToCurve2(u *fptower.E2) G2Affine {
}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fptower.E2{
- A0: fp.Element{0},
- A1: fp.Element{16517514583386313282, 74322656156451461, 16683759486841714365, 815493829203396097, 204518332920448171, 1306242806803223655},
- }
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fptower.E2
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fptower.E2
- tv5.Mul(&tv6, &fptower.E2{
- A0: fp.Element{0},
- A1: fp.Element{16517514583386313282, 74322656156451461, 16683759486841714365, 815493829203396097, 204518332920448171, 1306242806803223655},
- })
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fptower.E2{
- A0: fp.Element{2515823342057463218, 7982686274772798116, 7934098172177393262, 8484566552980779962, 4455086327883106868, 1323173589274087377},
- A1: fp.Element{2515823342057463218, 7982686274772798116, 7934098172177393262, 8484566552980779962, 4455086327883106868, 1323173589274087377},
- })
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fptower.E2
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fptower.E2
- gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fptower.E2
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g2Sgn0(u)^g2Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G2Affine{x, y}
}
@@ -337,28 +311,29 @@ func g2EvalPolynomial(z *fptower.E2, monic bool, coefficients []fptower.E2, x *f
// g2Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g2Sgn0(z *fptower.E2) uint64 {
nonMont := *z
nonMont.FromMont()
- sign := uint64(0)
- zero := uint64(1)
+ sign := uint64(0) // 1. sign = 0
+ zero := uint64(1) // 2. zero = 1
var signI uint64
var zeroI uint64
- signI = nonMont.A0[0] % 2
- sign = sign | (zero & signI)
-
+ // 3. i = 1
+ signI = nonMont.A0[0] % 2 // 4. sign_i = x_i mod 2
zeroI = g1NotZero(&nonMont.A0)
- zeroI = 1 ^ (zeroI|-zeroI)>>63
- zero = zero & zeroI
-
- signI = nonMont.A1[0] % 2
- sign = sign | (zero & signI)
-
+ zeroI = 1 ^ (zeroI|-zeroI)>>63 // 5. zero_i = x_i == 0
+ sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i) # Avoid short-circuit logic ops
+ zero = zero & zeroI // 7. zero = zero AND zero_i
+ // 3. i = 2
+ signI = nonMont.A1[0] % 2 // 4. sign_i = x_i mod 2
+ // 5. zero_i = x_i == 0
+ sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i) # Avoid short-circuit logic ops
+ // 7. zero = zero AND zero_i
return sign
}
@@ -375,7 +350,7 @@ func MapToG2(u fptower.E2) G2Affine {
// EncodeToG2 hashes a message to a point on the G2 curve using the SSWU map.
// It is faster than HashToG2, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG2(msg, dst []byte) (G2Affine, error) {
var res G2Affine
@@ -398,7 +373,7 @@ func EncodeToG2(msg, dst []byte) (G2Affine, error) {
// HashToG2 hashes a message to a point on the G2 curve using the SSWU map.
// Slower than EncodeToG2, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG2(msg, dst []byte) (G2Affine, error) {
u, err := hashToFp(msg, dst, 2*2)
if err != nil {
@@ -414,7 +389,7 @@ func HashToG2(msg, dst []byte) (G2Affine, error) {
A1: u[2+1],
})
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g2Isogeny(&Q0)
g2Isogeny(&Q1)
diff --git a/ecc/bls12-381/internal/fptower/e12.go b/ecc/bls12-381/internal/fptower/e12.go
index 0eaa9f3df4..50bb505e5a 100644
--- a/ecc/bls12-381/internal/fptower/e12.go
+++ b/ecc/bls12-381/internal/fptower/e12.go
@@ -225,28 +225,45 @@ func (z *E12) CyclotomicSquareCompressed(x *E12) *E12 {
}
// DecompressKarabina Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
func (z *E12) DecompressKarabina(x *E12) *E12 {
var t [3]E2
var one E2
one.SetOne()
- // t0 = g1^2
- t[0].Square(&x.C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t[1].Sub(&t[0], &x.C0.B2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5^2 + t1
- t[2].Square(&x.C1.B2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.C1.B0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.C1.B2.IsZero() {
+ t[0].Mul(&x.C0.B1, &x.C1.B2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.C0.B2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.C1.B2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 4 * g3
+ t[1].Double(&x.C1.B0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.C1.B1.Mul(&t[0], &t[1])
+ z.C1.B1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.C0.B2, &x.C0.B1)
@@ -255,7 +272,7 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.C1.B0, &x.C1.B2)
// c_0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -271,6 +288,15 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
}
// BatchDecompressKarabina multiple Karabina's cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using the Montgomery batch inverse
func BatchDecompressKarabina(x []E12) []E12 {
n := len(x)
@@ -286,19 +312,29 @@ func BatchDecompressKarabina(x []E12) []E12 {
one.SetOne()
for i := 0; i < n; i++ {
- // t0 = g1^2
- t0[i].Square(&x[i].C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t1[i].Sub(&t0[i], &x[i].C0.B2).
- Double(&t1[i]).
- Add(&t1[i], &t0[i])
- // t0 = E * g5^2 + t1
- t2[i].Square(&x[i].C1.B2)
- t0[i].MulByNonResidue(&t2[i]).
- Add(&t0[i], &t1[i])
- // t1 = 4 * g3
- t1[i].Double(&x[i].C1.B0).
- Double(&t1[i])
+ // g3 == 0
+ if x[i].C1.B2.IsZero() {
+ t0[i].Mul(&x[i].C0.B1, &x[i].C1.B2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t0[i].Square(&x[i].C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].C0.B2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5^2 + t1
+ t2[i].Square(&x[i].C1.B2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 4 * g3
+ t1[i].Double(&x[i].C1.B0).
+ Double(&t1[i])
+ }
}
t1 = BatchInvertE2(t1) // costs 1 inverse
@@ -315,7 +351,7 @@ func BatchDecompressKarabina(x []E12) []E12 {
t2[i].Double(&t2[i])
t2[i].Sub(&t2[i], &t1[i])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3s can be 0s)
t1[i].Mul(&x[i].C1.B0, &x[i].C1.B2)
// z0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t2[i].Add(&t2[i], &t1[i])
@@ -366,6 +402,8 @@ func (z *E12) CyclotomicSquare(x *E12) *E12 {
}
// Inverse set z to the inverse of x in E12 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E12) Inverse(x *E12) *E12 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -383,6 +421,8 @@ func (z *E12) Inverse(x *E12) *E12 {
// BatchInvertE12 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE12(a []E12) []E12 {
res := make([]E12, len(a))
if len(a) == 0 {
@@ -425,7 +465,7 @@ func (z *E12) Exp(x E12, k *big.Int) *E12 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q¹²) == (x⁻¹)ᵏ (mod q¹²)
x.Inverse(&x)
// we negate k in a temp big.Int since
@@ -778,13 +818,14 @@ func (z *E12) CompressTorus() (E6, error) {
return res, nil
}
-// BatchCompressTorus GT/E12 elements to half their size
-// using a batch inversion
+// BatchCompressTorus GT/E12 elements to half their size using a batch inversion.
+//
+// if len(x) == 0 or if any of the x[i].C1 coordinates is 0, this function returns an error.
func BatchCompressTorus(x []E12) ([]E6, error) {
n := len(x)
if n == 0 {
- return []E6{}, errors.New("invalid input size")
+ return nil, errors.New("invalid input size")
}
var one E6
@@ -793,6 +834,10 @@ func BatchCompressTorus(x []E12) ([]E6, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].C1)
+ // throw an error if any of the x[i].C1 is 0
+ if res[i].IsZero() {
+ return nil, errors.New("invalid input; C1 is 0")
+ }
}
t := BatchInvertE6(res) // costs 1 inverse
diff --git a/ecc/bls12-381/internal/fptower/e12_test.go b/ecc/bls12-381/internal/fptower/e12_test.go
index 0d5f9cd4ae..2855516d6f 100644
--- a/ecc/bls12-381/internal/fptower/e12_test.go
+++ b/ecc/bls12-381/internal/fptower/e12_test.go
@@ -339,13 +339,29 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BLS12-381] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E12) bool {
- var b, c, d E12
+ var _a, b, c, d, _c, _d E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
return c.Equal(&d)
},
genA,
@@ -353,18 +369,26 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BLS12-381] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
func(a *E12) bool {
- var b E12
- // put in the cyclotomic subgroup
+ var _a, b E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
var a2, a4, a17 E12
- a2.Set(a)
+ a2.Set(&_a)
a4.Set(a)
a17.Set(a)
- a2.nSquareCompressed(2)
+ a2.nSquareCompressed(2) // case g3 == 0
a4.nSquareCompressed(4)
a17.nSquareCompressed(17)
batch := BatchDecompressKarabina([]E12{a2, a4, a17})
diff --git a/ecc/bls12-381/internal/fptower/e2.go b/ecc/bls12-381/internal/fptower/e2.go
index 6dcb1aca0e..f15a70d85c 100644
--- a/ecc/bls12-381/internal/fptower/e2.go
+++ b/ecc/bls12-381/internal/fptower/e2.go
@@ -247,6 +247,8 @@ func (z *E2) Sqrt(x *E2) *E2 {
// BatchInvertE2 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE2(a []E2) []E2 {
res := make([]E2, len(a))
if len(a) == 0 {
diff --git a/ecc/bls12-381/internal/fptower/e2_bls381.go b/ecc/bls12-381/internal/fptower/e2_bls381.go
index fd6a6a2ecb..32fb644b69 100644
--- a/ecc/bls12-381/internal/fptower/e2_bls381.go
+++ b/ecc/bls12-381/internal/fptower/e2_bls381.go
@@ -37,7 +37,7 @@ func mulGenericE2(z, x, y *E2) {
// Square sets z to the E2-product of x,x returns z
func squareGenericE2(z, x *E2) *E2 {
- // algo 22 https://eprint.iacr.org/2010/354.pdf
+ // adapted from algo 22 https://eprint.iacr.org/2010/354.pdf
var a, b fp.Element
a.Add(&x.A0, &x.A1)
b.Sub(&x.A0, &x.A1)
@@ -69,6 +69,8 @@ func (z *E2) MulByNonResidueInv(x *E2) *E2 {
}
// Inverse sets z to the E2-inverse of x, returns z
+//
+// if x == 0, sets and returns z = x
func (z *E2) Inverse(x *E2) *E2 {
// Algorithm 8 from https://eprint.iacr.org/2010/354.pdf
var t0, t1 fp.Element
diff --git a/ecc/bls12-381/internal/fptower/e2_test.go b/ecc/bls12-381/internal/fptower/e2_test.go
index 8cf9a5ed17..4d8d882916 100644
--- a/ecc/bls12-381/internal/fptower/e2_test.go
+++ b/ecc/bls12-381/internal/fptower/e2_test.go
@@ -189,12 +189,6 @@ func TestE2ReceiverIsOperand(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
func TestE2MulMaxed(t *testing.T) {
@@ -415,12 +409,6 @@ func TestE2Ops(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
// ------------------------------------------------------------
diff --git a/ecc/bls12-381/internal/fptower/e6.go b/ecc/bls12-381/internal/fptower/e6.go
index 2ed48dc26e..4da093f5f0 100644
--- a/ecc/bls12-381/internal/fptower/e6.go
+++ b/ecc/bls12-381/internal/fptower/e6.go
@@ -242,6 +242,8 @@ func (z *E6) Square(x *E6) *E6 {
}
// Inverse an element in E6
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -270,6 +272,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
diff --git a/ecc/bls12-381/multiexp.go b/ecc/bls12-381/multiexp.go
index 34510edd1c..25a18a9457 100644
--- a/ecc/bls12-381/multiexp.go
+++ b/ecc/bls12-381/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then we borrow 2^c from the next window and subtract
// 2^{c} from the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates whether the provided scalars are in Montgomery form
// returns smallValues, the number of scalars that meet the following condition:
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
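// Illustrative sketch (hypothetical caller): NbTasks left at zero defaults to
// runtime.NumCPU(); values above 1024 are now rejected with an error.
func exampleMultiExp(points []G1Affine, scalars []fr.Element) (*G1Jac, error) {
	var acc G1Jac
	return acc.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: 8})
}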
// here, we compute the best C for nbPoints
@@ -333,9 +339,6 @@ func msmInnerG1Jac(p *G1Jac, c int, points []G1Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -1180,59 +1183,9 @@ func (p *G1Jac) msmC21(points []G1Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG1Affine(p, c, chChunks[:])
}
-func (p *G1Jac) msmC22(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g1JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g1JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G1Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g1JacExtended
- msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) {
- var buckets [1 << (c - 1)]g1JacExtended
- msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g1JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG1Affine(p, c, chChunks[:])
-}
-
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -1243,6 +1196,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -1279,13 +1234,15 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
// we split recursively until nbChunks(c) >= nbTasks,
bestC := func(nbPoints int) uint64 {
// implemented msmC methods (the c we use must be in this slice)
- implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22}
+ implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
var C uint64
// approximate cost (in group operations)
// cost = bits/c * (nbPoints + 2^{c})
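// Worked example of the cost estimate above (illustrative numbers): with
// 256-bit scalars and nbPoints = 2¹⁶,
//	c = 12: 256/12 × (65536 + 4096)  ≈ 21.3 × 69632  ≈ 1.49M group ops
//	c = 16: 256/16 × (65536 + 65536) =  16  × 131072 ≈ 2.10M group ops
// so the heuristic favours the smaller window here (subject to the
// implementedCs list and the nbTasks-driven split).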
@@ -1403,9 +1360,6 @@ func msmInnerG2Jac(p *G2Jac, c int, points []G2Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -2249,55 +2203,3 @@ func (p *G2Jac) msmC21(points []G2Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG2Affine(p, c, chChunks[:])
}
-
-func (p *G2Jac) msmC22(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g2JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g2JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G2Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g2JacExtended
- msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) {
- var buckets [1 << (c - 1)]g2JacExtended
- msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g2JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG2Affine(p, c, chChunks[:])
-}
diff --git a/ecc/bls12-381/multiexp_test.go b/ecc/bls12-381/multiexp_test.go
index 9dd4d40bb8..2c051ce12f 100644
--- a/ecc/bls12-381/multiexp_test.go
+++ b/ecc/bls12-381/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
+ if testing.Short() {
+ // test only "odd" and "even" (ie windows size divide word size vs not)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ // for G2, CI suffers with large c sizes since it needs to allocate a lot of memory for the buckets,
+ // so test only "odd" and "even" (i.e. window sizes that divide the word size vs those that don't)
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bls12-381/pairing.go b/ecc/bls12-381/pairing.go
index 5c585e69ee..9a78d328dc 100644
--- a/ecc/bls12-381/pairing.go
+++ b/ecc/bls12-381/pairing.go
@@ -30,7 +30,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -41,6 +43,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
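Since Pair and PairingCheck deliberately skip subgroup checks, callers handling untrusted points are expected to validate them first. A minimal sketch of such a wrapper, assuming the exported bls12381 API shown in this diff (Pair, G1Affine/G2Affine with IsInSubGroup); the wrapper name is hypothetical.

```go
package main

import (
	"fmt"

	bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381"
)

// pairChecked is a hypothetical helper: it rejects points outside the r-order
// subgroups before calling Pair, which (per the godoc above) does not check.
func pairChecked(P []bls12381.G1Affine, Q []bls12381.G2Affine) (bls12381.GT, error) {
	for i := range P {
		if !P[i].IsInSubGroup() {
			return bls12381.GT{}, fmt.Errorf("P[%d] is not in the correct subgroup", i)
		}
	}
	for i := range Q {
		if !Q[i].IsInSubGroup() {
			return bls12381.GT{}, fmt.Errorf("Q[%d] is not in the correct subgroup", i)
		}
	}
	return bls12381.Pair(P, Q)
}

func main() {
	_, _, g1, g2 := bls12381.Generators()
	if _, err := pairChecked([]bls12381.G1Affine{g1}, []bls12381.G2Affine{g2}); err != nil {
		panic(err)
	}
	fmt.Println("pairing computed on subgroup-checked inputs")
}
```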
diff --git a/ecc/bls12-381/twistededwards/eddsa/eddsa.go b/ecc/bls12-381/twistededwards/eddsa/eddsa.go
index 6feb5cbbb6..c9e883d76b 100644
--- a/ecc/bls12-381/twistededwards/eddsa/eddsa.go
+++ b/ecc/bls12-381/twistededwards/eddsa/eddsa.go
@@ -89,7 +89,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -137,7 +137,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -223,8 +223,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -232,9 +232,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bls12-381/twistededwards/point.go b/ecc/bls12-381/twistededwards/point.go
index 683acc3e11..aa28fa7195 100644
--- a/ecc/bls12-381/twistededwards/point.go
+++ b/ecc/bls12-381/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
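The twisted Edwards rename is purely cosmetic: ScalarMul becomes ScalarMultiplication on PointAffine, PointProj and PointExtended, matching the naming on the SNARK curves, and call sites only change the method name. A small sketch of a migrated call, assuming the bls12-381 twistededwards package diffed here.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/twistededwards"
)

func main() {
	params := twistededwards.GetEdwardsCurve()

	var p twistededwards.PointAffine
	s := big.NewInt(10)

	// before: p.ScalarMul(&params.Base, s) — the behaviour is unchanged
	p.ScalarMultiplication(&params.Base, s)

	fmt.Println("on curve:", p.IsOnCurve())
}
```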
diff --git a/ecc/bls12-381/twistededwards/point_test.go b/ecc/bls12-381/twistededwards/point_test.go
index 903fbeacbf..0d5bf5cb0f 100644
--- a/ecc/bls12-381/twistededwards/point_test.go
+++ b/ecc/bls12-381/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bls24-315/bls24-315.go b/ecc/bls24-315/bls24-315.go
index f23d3a318f..c5270c6721 100644
--- a/ecc/bls24-315/bls24-315.go
+++ b/ecc/bls24-315/bls24-315.go
@@ -1,3 +1,26 @@
+// Package bls24315 efficient elliptic curve, pairing and hash to curve implementation for bls24-315.
+//
+// bls24-315: A Barreto--Lynn--Scott curve
+// embedding degree k=24
+// seed x₀=-3218079743
+// 𝔽r: r=0x196deac24a9da12b25fc7ec9cf927a98c8c480ece644e36419d0c5fd00c00001 (x₀^8-x₀^4+2)
+// 𝔽p: p=0x4c23a02b586d650d3f7498be97c5eafdec1d01aa27a1ae0421ee5da52bde5026fe802ff40300001 ((x₀-1)² ⋅ r(x₀)/3+x₀)
+// (E/𝔽p): Y²=X³+1
+// (Eₜ/𝔽p⁴): Y² = X³+1/v (D-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p⁴)
+// Extension fields tower:
+// 𝔽p²[u] = 𝔽p/u²-13
+// 𝔽p⁴[v] = 𝔽p²/v²-u
+// 𝔽p¹²[w] = 𝔽p⁴/w³-v
+// 𝔽p²⁴[i] = 𝔽p¹²/i²-w
+// optimal Ate loop size:
+// x₀
+// Security: estimated 160-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 253 bits and p²⁴ is 7543 bits)
+//
+// Warning
+//
+// This code has not been audited and is provided as-is. In particular, there are no security guarantees such as constant-time implementation or side-channel attack resistance.
package bls24315
import (
@@ -9,19 +32,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls24-315/internal/fptower"
)
-// BLS24-315: A Barreto--Lynn--Scott curve of embedding degree k=24 with seed x₀=-3218079743
-// 𝔽r: r=0x196deac24a9da12b25fc7ec9cf927a98c8c480ece644e36419d0c5fd00c00001 (x₀^8-x₀^4+2)
-// 𝔽p: p=0x4c23a02b586d650d3f7498be97c5eafdec1d01aa27a1ae0421ee5da52bde5026fe802ff40300001 ((x₀-1)² ⋅ r(x₀)/3+x₀)
-// (E/𝔽p): Y²=X³+1
-// (Eₜ/𝔽p⁴): Y² = X³+1/v (D-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p⁴)
-// Extension fields tower:
-// 𝔽p²[u] = 𝔽p/u²-13
-// 𝔽p⁴[v] = 𝔽p²/v²-u
-// 𝔽p¹²[w] = 𝔽p⁴/w³-v
-// 𝔽p²⁴[i] = 𝔽p¹²/i²-w
-// optimal Ate loop size: x₀
-
// ID bls315 ID
const ID = ecc.BLS24_315
@@ -94,7 +104,7 @@ func init() {
// E(1,y)*c
g1Gen.X.SetString("34223510504517033132712852754388476272837911830964394866541204856091481856889569724484362330263")
g1Gen.Y.SetString("24215295174889464585413596429561903295150472552154479431771837786124301185073987899223459122783")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
// E'(5,y)*c'
g2Gen.X.B0.SetString("24614737899199071964341749845083777103809664018538138889239909664991294445469052467064654073699",
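The curve parameters in the new package documentation are all derived from the seed x₀. As a quick sanity check of the formulas quoted there, the sketch below recomputes r = x₀⁸ − x₀⁴ + 2 with math/big; it should print the 𝔽r modulus given above.

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	x0 := big.NewInt(-3218079743) // seed of bls24-315

	x4 := new(big.Int).Exp(x0, big.NewInt(4), nil) // x₀⁴
	x8 := new(big.Int).Mul(x4, x4)                 // x₀⁸

	// r = x₀⁸ - x₀⁴ + 2
	r := new(big.Int).Sub(x8, x4)
	r.Add(r, big.NewInt(2))

	fmt.Printf("r = %#x\n", r)
}
```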
diff --git a/ecc/bls24-315/doc.go b/ecc/bls24-315/doc.go
deleted file mode 100644
index 059407699c..0000000000
--- a/ecc/bls24-315/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bls24315 efficient elliptic curve and pairing implementation for bls24-315.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bls24315
diff --git a/ecc/bls24-315/fp/element.go b/ecc/bls24-315/fp/element.go
index ef298e8067..a48ffb0b50 100644
--- a/ecc/bls24-315/fp/element.go
+++ b/ecc/bls24-315/fp/element.go
@@ -178,7 +178,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1012,12 +1012,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1025,7 +1026,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
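SetString now reports parse failures as an error instead of panicking, which is a breaking signature change for callers. A minimal sketch of the new call shape, using the bls24-315 fp package diffed above.

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls24-315/fp"
)

func main() {
	var z fp.Element

	// valid input (base is inferred, here hexadecimal): err is nil
	if _, err := z.SetString("0x2a"); err != nil {
		panic(err)
	}

	// invalid input: previously a panic, now an error, and z is left unchanged
	if _, err := z.SetString("not-a-number"); err != nil {
		fmt.Println("SetString:", err)
	}
}
```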
diff --git a/ecc/bls24-315/fr/element.go b/ecc/bls24-315/fr/element.go
index 7814a1414c..12f3caea46 100644
--- a/ecc/bls24-315/fr/element.go
+++ b/ecc/bls24-315/fr/element.go
@@ -175,7 +175,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -944,12 +944,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -957,7 +958,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bls24-315/fr/kzg/kzg.go b/ecc/bls24-315/fr/kzg/kzg.go
index b090802b5d..505083c87c 100644
--- a/ecc/bls24-315/fr/kzg/kzg.go
+++ b/ecc/bls24-315/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bls24315.G1Affine
+ var claimedValueG1Aff bls24315.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bls24315.G1Jac
+ var fminusfaG1Jac bls24315.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bls24315.G1Affine
diff --git a/ecc/bls24-315/fr/polynomial/multilin.go b/ecc/bls24-315/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..17bbf1ed2c
--- /dev/null
+++ b/ecc/bls24-315/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bls24-315/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// on the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + 2xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to q' == h' for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
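To make the Fold/Evaluate semantics above concrete: the bookkeeping table stores the hypercube evaluations with the first variable as the most significant index bit, and each Fold(r) replaces f by f(0,·) + r·(f(1,·) − f(0,·)). A small sketch, under that indexing assumption, checking Evaluate against a manual bilinear interpolation for two variables.

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls24-315/fr"
	"github.com/consensys/gnark-crypto/ecc/bls24-315/fr/polynomial"
)

func main() {
	// evaluations of f on {0,1}², indexed with X₁ as the most significant bit:
	// m[0]=f(0,0), m[1]=f(0,1), m[2]=f(1,0), m[3]=f(1,1)
	m := make(polynomial.MultiLin, 4)
	m[0].SetUint64(3)
	m[1].SetUint64(5)
	m[2].SetUint64(7)
	m[3].SetUint64(11)

	var r1, r2 fr.Element
	r1.SetUint64(2)
	r2.SetUint64(10)

	// Evaluate folds X₁ := r₁, then X₂ := r₂
	got := m.Evaluate([]fr.Element{r1, r2})

	// manual check: f(r₁,b) = f(0,b) + r₁·(f(1,b) − f(0,b)), then interpolate in X₂
	var f0, f1, want fr.Element
	f0.Sub(&m[2], &m[0]).Mul(&f0, &r1).Add(&f0, &m[0]) // f(r₁,0)
	f1.Sub(&m[3], &m[1]).Mul(&f1, &r1).Add(&f1, &m[1]) // f(r₁,1)
	want.Sub(&f1, &f0).Mul(&want, &r2).Add(&want, &f0) // f(r₁,r₂)

	fmt.Println("match:", got.Equal(&want))
}
```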
diff --git a/ecc/bls24-315/fr/polynomial/pool.go b/ecc/bls24-315/fr/polynomial/pool.go
new file mode 100644
index 0000000000..045ba8eb16
--- /dev/null
+++ b/ecc/bls24-315/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bls24-315/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool Clears the pool completely, shields against memory leaks
+// E.g.: if we forgot to dump a polynomial at some point, this ensures the value eventually gets dumped
+// Returns how many polynomials were cleared that way
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool Returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+ // If rC did not register the pointer, then either the array was
+ // allocated somewhere else (and can be ignored), or this is a
+ // double put, which MUST be ignored
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+ panic(fmt.Sprintf("can't cast to large or small array, the put array's is %v it should have capacity %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
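The pool is opt-in: Make hands out slices backed by fixed-size pooled arrays and Dump returns them, while the capacity check in ptr means only slices that actually came from Make should ever be dumped. A short usage sketch:

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls24-315/fr/polynomial"
)

func main() {
	// borrow a 64-coefficient scratch slice from the pool
	scratch := polynomial.Make(64)
	defer polynomial.Dump(scratch) // return it to the pool when done

	for i := range scratch {
		scratch[i].SetUint64(uint64(i))
	}

	fmt.Println("pooled slices in use:", polynomial.CountPool())
}
```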
diff --git a/ecc/bls24-315/g1.go b/ecc/bls24-315/g1.go
index 66bbda7b76..c97b141497 100644
--- a/ecc/bls24-315/g1.go
+++ b/ecc/bls24-315/g1.go
@@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -336,7 +344,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -816,9 +824,9 @@ func (p *g1JacExtended) doubleMixed(q *G1Affine) *g1JacExtended {
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -838,7 +846,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -849,7 +857,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
var a, b fp.Element
@@ -861,6 +869,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -924,8 +933,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take value in the base table, double it c time, voilà.
@@ -967,7 +975,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
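Two API changes to note here: BatchJacobianToAffineG1 now allocates and returns the result slice itself, and G1Jac gains ScalarMultiplicationAffine so callers such as the KZG verifier above can multiply an affine base without a separate FromAffine round trip. A usage sketch, assuming the package's Generators helper:

```go
package main

import (
	"fmt"
	"math/big"

	bls24315 "github.com/consensys/gnark-crypto/ecc/bls24-315"
)

func main() {
	_, _, g1Aff, _ := bls24315.Generators()

	// new: scalar-multiply an affine point and get a Jacobian result directly
	var pJac bls24315.G1Jac
	pJac.ScalarMultiplicationAffine(&g1Aff, big.NewInt(42))

	// new signature: the affine slice is allocated and returned by the call
	affine := bls24315.BatchJacobianToAffineG1([]bls24315.G1Jac{pJac})
	fmt.Println("on curve:", affine[0].IsOnCurve())
}
```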
diff --git a/ecc/bls24-315/g1_test.go b/ecc/bls24-315/g1_test.go
index 0321ddaea3..5eba73ee93 100644
--- a/ecc/bls24-315/g1_test.go
+++ b/ecc/bls24-315/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BLS24-315] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls24-315/g2.go b/ecc/bls24-315/g2.go
index 538191e50e..81fb7f6427 100644
--- a/ecc/bls24-315/g2.go
+++ b/ecc/bls24-315/g2.go
@@ -341,7 +341,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bls24-315/g2_test.go b/ecc/bls24-315/g2_test.go
index 468b425f5a..bab8fbad10 100644
--- a/ecc/bls24-315/g2_test.go
+++ b/ecc/bls24-315/g2_test.go
@@ -99,7 +99,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fptower.E4) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -505,7 +505,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -533,7 +533,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls24-315/hash_to_g1.go b/ecc/bls24-315/hash_to_g1.go
index e5d218377e..faae02c7a7 100644
--- a/ecc/bls24-315/hash_to_g1.go
+++ b/ecc/bls24-315/hash_to_g1.go
@@ -89,65 +89,58 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
tv1 := fp.Element{11195128742969911322, 1359304652430195240, 15267589139354181340, 10518360976114966361, 300769513466036652} //tv1 = c6
var tv2, tv3, tv4, tv5 fp.Element
var exp big.Int
- // c4 = 1048575 = 2^20 - 1
+ // c4 = 1048575 = 2²⁰ - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte{15, 255, 255})
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = 18932887415653914611351818986134037849871398170907377879650252106493894621432467626129921
exp.SetBytes([]byte{38, 17, 208, 21, 172, 54, 178, 134, 159, 186, 76, 95, 75, 226, 245, 126, 246, 14, 128, 213, 19, 208, 215, 2, 16, 247, 46, 210, 149, 239, 40, 19, 127, 64, 23, 250, 1})
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = 524288
exp.SetBytes([]byte{8, 0, 0})
- tv5.Exp(tv4, &exp)
-
- isQNr := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &fp.Element{1141794007209116247, 256324699145650176, 2958838397954514392, 9976887947641032208, 153331829745922234})
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh(big.NewInt(1), 20-2)
-
- for i := 20; i >= 2; i-- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp, 1)
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := g1NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := fp.Element{1141794007209116247, 256324699145650176, 2958838397954514392, 9976887947641032208, 153331829745922234}
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh(big.NewInt(1), 20-2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := 20; i >= 2; i-- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := g1NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
@@ -161,12 +154,6 @@ func g1NotOne(x *fp.Element) uint64 {
}
-/*
-// g1SetZ sets z to [13].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {8178485296672800069, 8476448362227282520, 14180928431697993131, 4308307642551989706, 120359802761433421} )
-}*/
-
// g1MulByZ multiplies x by [13] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -181,30 +168,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{5402807948305211529, 9163880483319140034, 7646126700453841420, 11071466103913358468, 124200740526673728}
+ var sswuIsoCurveCoeffB = fp.Element{16058189711238232929, 8302337653269510588, 11411933349841587630, 8954038365926617417, 177308873523699836}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{16058189711238232929, 8302337653269510588, 11411933349841587630, 8954038365926617417, 177308873523699836})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -212,48 +198,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{8178485296672800069, 8476448362227282520, 14180928431697993131, 4308307642551989706, 120359802761433421}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{5402807948305211529, 9163880483319140034, 7646126700453841420, 11071466103913358468, 124200740526673728}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{5402807948305211529, 9163880483319140034, 7646126700453841420, 11071466103913358468, 124200740526673728})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{16058189711238232929, 8302337653269510588, 11411933349841587630, 8954038365926617417, 177308873523699836})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -296,13 +279,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -319,7 +302,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -339,7 +322,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -349,7 +332,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
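For callers nothing changes in the hash-to-curve entry points; the hunks above mostly align the comments with the draft-16 step numbering and hoist the isogenous curve coefficients into named constants. A usage sketch of the two entry points (the DST below is only an illustrative placeholder, not a recommended tag):

```go
package main

import (
	"fmt"

	bls24315 "github.com/consensys/gnark-crypto/ecc/bls24-315"
)

func main() {
	msg := []byte("hello")
	dst := []byte("EXAMPLE-APP-V01-WITH-BLS24315G1_XMD:SHA-256_SSWU_RO_") // placeholder DST

	// uniform output, usable as a random oracle (two field elements hashed and mapped)
	p, err := bls24315.HashToG1(msg, dst)
	if err != nil {
		panic(err)
	}

	// faster, non-uniform encoding
	q, err := bls24315.EncodeToG1(msg, dst)
	if err != nil {
		panic(err)
	}

	fmt.Println(p.IsInSubGroup(), q.IsInSubGroup())
}
```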
diff --git a/ecc/bls24-315/hash_to_g2.go b/ecc/bls24-315/hash_to_g2.go
index 511ee0f0b6..3686bba0ed 100644
--- a/ecc/bls24-315/hash_to_g2.go
+++ b/ecc/bls24-315/hash_to_g2.go
@@ -28,12 +28,12 @@ func svdwMapG2(u fptower.E4) G2Affine {
// constants
// sage script to find z: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#appendix-E.1
var z, c1, c2, c3, c4 fptower.E4
- z.B0.A0.SetString("1")
+ z.B0.A0.SetOne()
z.B0.A1.SetString("0")
- z.B1.A0.SetString("1")
+ z.B1.A0.SetOne()
z.B1.A1.SetString("0")
- c1.B0.A0.SetString("1")
- c1.B0.A1.SetString("1")
+ c1.B0.A0.SetOne()
+ c1.B0.A1.SetOne()
c1.B1.A0.SetString("2")
c1.B1.A1.SetString("6108483493771298205388567675447533806912846525679192205394505462405828322019437284165171866703")
c2.B0.A0.SetString("19852571354756719167512844945204484872466751208457374667532142752818942046563171173536808566784")
diff --git a/ecc/bls24-315/internal/fptower/e12.go b/ecc/bls24-315/internal/fptower/e12.go
index faa9d387c8..9ddc554ea6 100644
--- a/ecc/bls24-315/internal/fptower/e12.go
+++ b/ecc/bls24-315/internal/fptower/e12.go
@@ -180,6 +180,8 @@ func (z *E12) Square(x *E12) *E12 {
}
// Inverse an element in E12
+//
+// if x == 0, sets and returns z = x
func (z *E12) Inverse(x *E12) *E12 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -208,6 +210,8 @@ func (z *E12) Inverse(x *E12) *E12 {
// BatchInvertE12 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE12(a []E12) []E12 {
res := make([]E12, len(a))
if len(a) == 0 {
@@ -250,7 +254,7 @@ func (z *E12) Exp(x E12, k *big.Int) *E12 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q¹²) == (x⁻¹)ᵏ (mod q¹²)
x.Inverse(&x)
// we negate k in a temp big.Int since
diff --git a/ecc/bls24-315/internal/fptower/e24.go b/ecc/bls24-315/internal/fptower/e24.go
index 043b85e0da..68a8dc3a75 100644
--- a/ecc/bls24-315/internal/fptower/e24.go
+++ b/ecc/bls24-315/internal/fptower/e24.go
@@ -223,28 +223,45 @@ func (z *E24) CyclotomicSquareCompressed(x *E24) *E24 {
}
// DecompressKarabina Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
func (z *E24) DecompressKarabina(x *E24) *E24 {
var t [3]E4
var one E4
one.SetOne()
- // t0 = g1²
- t[0].Square(&x.D0.C1)
- // t1 = 3 * g1² - 2 * g2
- t[1].Sub(&t[0], &x.D0.C2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5² + t1
- t[2].Square(&x.D1.C2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.D1.C0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.D1.C0.IsZero() {
+ t[0].Mul(&x.D0.C1, &x.D1.C2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.D0.C2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.D0.C1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.D0.C2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.D1.C2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 1/(4 * g3)
+ t[1].Double(&x.D1.C0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.D1.C1.Mul(&t[0], &t[1])
+ z.D1.C1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.D0.C2, &x.D0.C1)
@@ -253,7 +270,7 @@ func (z *E24) DecompressKarabina(x *E24) *E24 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.D1.C0, &x.D1.C2)
// c₀ = E * (2 * g4² + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -269,6 +286,15 @@ func (z *E24) DecompressKarabina(x *E24) *E24 {
}
// BatchDecompressKarabina multiple Karabina's cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using the Montgomery batch inverse
func BatchDecompressKarabina(x []E24) []E24 {
n := len(x)
@@ -284,19 +310,29 @@ func BatchDecompressKarabina(x []E24) []E24 {
one.SetOne()
for i := 0; i < n; i++ {
- // t0 = g1²
- t0[i].Square(&x[i].D0.C1)
- // t1 = 3 * g1² - 2 * g2
- t1[i].Sub(&t0[i], &x[i].D0.C2).
- Double(&t1[i]).
- Add(&t1[i], &t0[i])
- // t0 = E * g5² + t1
- t2[i].Square(&x[i].D1.C2)
- t0[i].MulByNonResidue(&t2[i]).
- Add(&t0[i], &t1[i])
- // t1 = 4 * g3
- t1[i].Double(&x[i].D1.C0).
- Double(&t1[i])
+ // g3 == 0
+ if x[i].D1.C0.IsZero() {
+ t0[i].Mul(&x[i].D0.C1, &x[i].D1.C2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].D0.C2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t0[i].Square(&x[i].D0.C1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].D0.C2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5^2 + t1
+ t2[i].Square(&x[i].D1.C2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 4 * g3
+ t1[i].Double(&x[i].D1.C0).
+ Double(&t1[i])
+ }
}
t1 = BatchInvertE4(t1) // costs 1 inverse
@@ -313,7 +349,7 @@ func BatchDecompressKarabina(x []E24) []E24 {
t2[i].Double(&t2[i])
t2[i].Sub(&t2[i], &t1[i])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3s can be 0s)
t1[i].Mul(&x[i].D1.C0, &x[i].D1.C2)
// z0 = E * (2 * g4² + g3 * g5 - 3 * g2 * g1) + 1
t2[i].Add(&t2[i], &t1[i])
@@ -364,6 +400,8 @@ func (z *E24) CyclotomicSquare(x *E24) *E24 {
}
// Inverse set z to the inverse of x in E24 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E24) Inverse(x *E24) *E24 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -381,6 +419,8 @@ func (z *E24) Inverse(x *E24) *E24 {
// BatchInvertE24 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE24(a []E24) []E24 {
res := make([]E24, len(a))
if len(a) == 0 {
@@ -423,7 +463,7 @@ func (z *E24) Exp(x E24, k *big.Int) *E24 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q²⁴) == (x⁻¹)ᵏ (mod q²⁴)
x.Inverse(&x)
// we negate k in a temp big.Int since
@@ -827,6 +867,10 @@ func BatchCompressTorus(x []E24) ([]E12, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].D1)
+ // throw an error if any of the x[i].D1 is 0
+ if res[i].IsZero() {
+ return []E12{}, errors.New("invalid input")
+ }
}
t := BatchInvertE12(res) // costs 1 inverse
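The g3 == 0 branch is the edge case fixed in this release: the old formula divided by 4·g3 unconditionally, which is undefined for compressed squares with g3 = 0 (e.g. the square of the cyclotomic element obtained from x = 2 in the test below). A compact sketch of the round-trip, written as if it lived inside the internal fptower package and mirroring the updated test that follows:

```go
package fptower

// karabinaEdgeCaseOK squares a cyclotomic-subgroup element whose compressed
// form has g3 == 0, once directly and once via compress/decompress, and
// checks the two results agree. Illustrative sketch only.
func karabinaEdgeCaseOK() bool {
	var a, b, c, d E24
	a.SetOne().Double(&a) // a = 2

	// push a into the cyclotomic subgroup (same recipe as the test below);
	// for this particular input the compressed square has g3 == 0
	b.Conjugate(&a)
	a.Inverse(&a)
	b.Mul(&b, &a)
	a.FrobeniusQuad(&b).Mul(&a, &b)

	c.Square(&a)
	d.CyclotomicSquareCompressed(&a).DecompressKarabina(&d)

	return c.Equal(&d)
}
```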
diff --git a/ecc/bls24-315/internal/fptower/e24_test.go b/ecc/bls24-315/internal/fptower/e24_test.go
index ff70344d14..bbe53e4c08 100644
--- a/ecc/bls24-315/internal/fptower/e24_test.go
+++ b/ecc/bls24-315/internal/fptower/e24_test.go
@@ -370,13 +370,29 @@ func TestE24Ops(t *testing.T) {
properties.Property("[BLS24-315] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E24) bool {
- var b, c, d E24
+ var _a, b, c, d, _c, _d E24
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusQuad(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusQuad(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
return c.Equal(&d)
},
genA,
@@ -384,18 +400,26 @@ func TestE24Ops(t *testing.T) {
properties.Property("[BLS24-315] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
func(a *E24) bool {
- var b E24
- // put in the cyclotomic subgroup
+ var _a, b E24
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusQuad(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusQuad(&b).Mul(&_a, &b)
var a2, a4, a17 E24
- a2.Set(a)
+ a2.Set(&_a)
a4.Set(a)
a17.Set(a)
- a2.nSquareCompressed(2)
+ a2.nSquareCompressed(2) // case g3 == 0
a4.nSquareCompressed(4)
a17.nSquareCompressed(17)
batch := BatchDecompressKarabina([]E24{a2, a4, a17})
diff --git a/ecc/bls24-315/internal/fptower/e2_bls315.go b/ecc/bls24-315/internal/fptower/e2_bls315.go
index 93791c10ea..d1f46e4e01 100644
--- a/ecc/bls24-315/internal/fptower/e2_bls315.go
+++ b/ecc/bls24-315/internal/fptower/e2_bls315.go
@@ -67,6 +67,8 @@ func (z *E2) MulByNonResidueInv(x *E2) *E2 {
}
// Inverse sets z to the E2-inverse of x, returns z
+//
+// if x == 0, sets and returns z = x
func (z *E2) Inverse(x *E2) *E2 {
// Algorithm 8 from https://eprint.iacr.org/2010/354.pdf
//var a, b, t0, t1, tmp fp.Element
diff --git a/ecc/bls24-315/internal/fptower/e2_test.go b/ecc/bls24-315/internal/fptower/e2_test.go
index cdadd8b987..8743507943 100644
--- a/ecc/bls24-315/internal/fptower/e2_test.go
+++ b/ecc/bls24-315/internal/fptower/e2_test.go
@@ -177,12 +177,6 @@ func TestE2ReceiverIsOperand(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
func TestE2MulMaxed(t *testing.T) {
@@ -386,12 +380,6 @@ func TestE2Ops(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
// ------------------------------------------------------------
diff --git a/ecc/bls24-315/internal/fptower/e4.go b/ecc/bls24-315/internal/fptower/e4.go
index 34fe0659c4..06b5374156 100644
--- a/ecc/bls24-315/internal/fptower/e4.go
+++ b/ecc/bls24-315/internal/fptower/e4.go
@@ -200,6 +200,8 @@ func (z *E4) Square(x *E4) *E4 {
}
// Inverse set z to the inverse of x in E4 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E4) Inverse(x *E4) *E4 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -322,6 +324,8 @@ func (z *E4) Sqrt(x *E4) *E4 {
// BatchInvertE4 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE4(a []E4) []E4 {
res := make([]E4, len(a))
if len(a) == 0 {
diff --git a/ecc/bls24-315/multiexp.go b/ecc/bls24-315/multiexp.go
index f01ac08d33..0c3d0039b2 100644
--- a/ecc/bls24-315/multiexp.go
+++ b/ecc/bls24-315/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract
// 2^{c} to the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates wheter the provided scalars are in montgomery form
// returns smallValues, which represent the number of scalars which meets the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
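To make the digit recoding described in this comment concrete, here is a small self-contained sketch (illustrative only; the library's `partitionScalars` additionally handles Montgomery form, parallelism and the `smallValues` count):

```go
package main

import "fmt"

// signedDigits recodes a (small, illustrative) scalar into c-bit signed digits in
// [-2^(c-1), 2^(c-1)]: whenever a window digit exceeds 2^(c-1), we subtract 2^c and
// carry 1 into the next window, so the MSM can add -G instead of keeping large buckets.
func signedDigits(scalar uint64, c uint) []int64 {
	var digits []int64
	carry := int64(0)
	for scalar != 0 || carry != 0 {
		d := int64(scalar&((1<<c)-1)) + carry
		scalar >>= c
		carry = 0
		if d > 1<<(c-1) {
			d -= 1 << c // borrow 2^c from the next window
			carry = 1
		}
		digits = append(digits, d)
	}
	return digits
}

func main() {
	// 246 = 0b1111_0110 with c=4: the windows are 6 and 15; 15 > 8, so it becomes -1
	// with a carry, giving [6, -1, 1] and indeed 6 - 1·16 + 1·256 = 246.
	fmt.Println(signedDigits(246, 4))
}
```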
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
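On the caller side the new check surfaces as a regular error; a minimal sketch, assuming `points` and `scalars` of equal length and the `ecc.MultiExpConfig` struct used throughout the library:

```go
var acc G1Jac
cfg := ecc.MultiExpConfig{NbTasks: runtime.NumCPU()} // 0 means "use all CPUs"; values above 1024 are rejected
if _, err := acc.MultiExp(points, scalars, cfg); err != nil {
	// returned when len(scalars) != len(points) or the config is invalid
	return err
}
```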
@@ -333,9 +339,6 @@ func msmInnerG1Jac(p *G1Jac, c int, points []G1Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -1180,59 +1183,9 @@ func (p *G1Jac) msmC21(points []G1Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG1Affine(p, c, chChunks[:])
}
-func (p *G1Jac) msmC22(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g1JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g1JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G1Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g1JacExtended
- msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) {
- var buckets [1 << (c - 1)]g1JacExtended
- msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g1JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG1Affine(p, c, chChunks[:])
-}
-
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -1243,6 +1196,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -1279,13 +1234,15 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
// we split recursively until nbChunks(c) >= nbTasks,
bestC := func(nbPoints int) uint64 {
// implemented msmC methods (the c we use must be in this slice)
- implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22}
+ implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
var C uint64
// approximate cost (in group operations)
// cost = bits/c * (nbPoints + 2^{c})
@@ -1403,9 +1360,6 @@ func msmInnerG2Jac(p *G2Jac, c int, points []G2Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -2249,55 +2203,3 @@ func (p *G2Jac) msmC21(points []G2Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG2Affine(p, c, chChunks[:])
}
-
-func (p *G2Jac) msmC22(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g2JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g2JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G2Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g2JacExtended
- msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) {
- var buckets [1 << (c - 1)]g2JacExtended
- msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g2JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG2Affine(p, c, chChunks[:])
-}
diff --git a/ecc/bls24-315/multiexp_test.go b/ecc/bls24-315/multiexp_test.go
index 45e014ede6..713bedc007 100644
--- a/ecc/bls24-315/multiexp_test.go
+++ b/ecc/bls24-315/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
+ if testing.Short() {
+ // test only one "odd" and one "even" window size (i.e. a c that divides the word size and one that doesn't)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ // for G2, CI suffers with a large c since it needs to allocate a lot of memory for the buckets.
+ // test only one "odd" and one "even" window size (i.e. a c that divides the word size and one that doesn't)
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bls24-315/pairing.go b/ecc/bls24-315/pairing.go
index 0b47b90004..5e1bf1fe3c 100644
--- a/ecc/bls24-315/pairing.go
+++ b/ecc/bls24-315/pairing.go
@@ -30,7 +30,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -41,6 +43,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
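The godoc additions above make explicit that subgroup membership is now the caller's responsibility; a hedged caller-side sketch of the check they point to (`IsInSubGroup` is defined on both `G1Affine` and `G2Affine`):

```go
// validate inputs before pairing
for i := range P {
	if !P[i].IsInSubGroup() || !Q[i].IsInSubGroup() {
		return false, errors.New("pairing input not in the correct subgroup")
	}
}
return PairingCheck(P, Q)
```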
diff --git a/ecc/bls24-315/twistededwards/eddsa/eddsa.go b/ecc/bls24-315/twistededwards/eddsa/eddsa.go
index 24c0c9bb8a..144e3e38b1 100644
--- a/ecc/bls24-315/twistededwards/eddsa/eddsa.go
+++ b/ecc/bls24-315/twistededwards/eddsa/eddsa.go
@@ -89,7 +89,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -137,7 +137,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -223,8 +223,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -232,9 +232,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bls24-315/twistededwards/point.go b/ecc/bls24-315/twistededwards/point.go
index d9fe9add7b..f717b90165 100644
--- a/ecc/bls24-315/twistededwards/point.go
+++ b/ecc/bls24-315/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
diff --git a/ecc/bls24-315/twistededwards/point_test.go b/ecc/bls24-315/twistededwards/point_test.go
index cca492a4da..6c96a37f9f 100644
--- a/ecc/bls24-315/twistededwards/point_test.go
+++ b/ecc/bls24-315/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(&params.Base, &s)
- p2.ScalarMul(&params.Base, &s)
+ p1.ScalarMultiplication(&params.Base, &s)
+ p2.ScalarMultiplication(&params.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(&params.Base, &s1)
- p2.ScalarMul(&params.Base, &s2)
+ p1.ScalarMultiplication(&params.Base, &s1)
+ p2.ScalarMultiplication(&params.Base, &s2)
p3.Set(&params.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(&params.Base, &s1)
+ p3.ScalarMultiplication(&params.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(&params.Base, &s1)
+ p2.ScalarMultiplication(&params.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(&params.Base, &s1)
+ p1.ScalarMultiplication(&params.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(&params.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(&params.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(&params.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(&params.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(&params.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(&params.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(&params.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(&params.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(&params.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(&params.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(&params.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(&params.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bls24-317/bls24-317.go b/ecc/bls24-317/bls24-317.go
index 5e2aa6c33f..ee1cb5325c 100644
--- a/ecc/bls24-317/bls24-317.go
+++ b/ecc/bls24-317/bls24-317.go
@@ -1,3 +1,26 @@
+// Package bls24317 efficient elliptic curve, pairing and hash to curve implementation for bls24-317.
+//
+// bls24-317: A Barreto--Lynn--Scott curve
+// embedding degree k=24
+// seed x₀=3640754176
+// 𝔽r: r=30869589236456844204538189757527902584594726589286811523515204428962673459201 (x₀^8-x₀^4+2)
+// 𝔽p: p=136393071104295911515099765908274057061945112121419593977210139303905973197232025618026156731051 ((x₀-1)² ⋅ r(x₀)/3+x₀)
+// (E/𝔽p): Y²=X³+4
+// (Eₜ/𝔽p⁴): Y² = X³+4v (M-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p⁴)
+// Extension fields tower:
+// 𝔽p²[u] = 𝔽p/u²+1
+// 𝔽p⁴[v] = 𝔽p²/v²-u-1
+// 𝔽p¹²[w] = 𝔽p⁴/w³-v
+// 𝔽p²⁴[i] = 𝔽p¹²/i²-w
+// optimal Ate loop size:
+// x₀
+// Security: estimated 160-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 255 bits and p²⁴ is 7599 bits)
+//
+// Warning
+//
+// This code has not been audited and is provided as-is. In particular, there are no security guarantees such as constant-time implementation or side-channel attack resistance.
package bls24317
import (
@@ -9,19 +32,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls24-317/internal/fptower"
)
-// BLS24-317: A Barreto--Lynn--Scott curve of embedding degree k=24 with seed x₀=3640754176
-// 𝔽r: r=30869589236456844204538189757527902584594726589286811523515204428962673459201 (x₀^8-x₀^4+2)
-// 𝔽p: p=136393071104295911515099765908274057061945112121419593977210139303905973197232025618026156731051 ((x₀-1)² ⋅ r(x₀)/3+x₀)
-// (E/𝔽p): Y²=X³+4
-// (Eₜ/𝔽p⁴): Y² = X³+4v (M-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p⁴)
-// Extension fields tower:
-// 𝔽p²[u] = 𝔽p/u²+1
-// 𝔽p⁴[v] = 𝔽p²/v²-u-1
-// 𝔽p¹²[w] = 𝔽p⁴/w³-v
-// 𝔽p²⁴[i] = 𝔽p¹²/i²-w
-// optimal Ate loop size: x₀
-
// ID bls317 ID
const ID = ecc.BLS24_317
@@ -80,7 +90,7 @@ func init() {
// E(1,y)*c
g1Gen.X.SetString("26261810162995192444253184251590159762050205376519976412461726336843100448942248976252388876791")
g1Gen.Y.SetString("26146603602820658047261036676090398397874822703333117264049387703172159980214065566219085800243")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
// E'(1,y)*c'
g2Gen.X.B0.SetString("28498404142312365002533744693556861244212064443103687717510540998257508853975496760832205123607",
diff --git a/ecc/bls24-317/doc.go b/ecc/bls24-317/doc.go
deleted file mode 100644
index 0b3eb217cc..0000000000
--- a/ecc/bls24-317/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bls24317 efficient elliptic curve and pairing implementation for bls24-317.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bls24317
diff --git a/ecc/bls24-317/fp/element.go b/ecc/bls24-317/fp/element.go
index af48a661c0..a71f578b92 100644
--- a/ecc/bls24-317/fp/element.go
+++ b/ecc/bls24-317/fp/element.go
@@ -178,7 +178,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1012,12 +1012,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1025,7 +1026,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
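With `SetString` now returning an error instead of panicking, callers handle malformed input explicitly; a minimal sketch of the new contract (the fr.Element change below follows the same pattern):

```go
var x fp.Element
if _, err := x.SetString("0x1notanumber"); err != nil {
	// x is left unchanged; the input could not be parsed as a big.Int
	return err
}
```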
diff --git a/ecc/bls24-317/fr/element.go b/ecc/bls24-317/fr/element.go
index 5e22f181ff..3200e0e546 100644
--- a/ecc/bls24-317/fr/element.go
+++ b/ecc/bls24-317/fr/element.go
@@ -175,7 +175,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -944,12 +944,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -957,7 +958,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bls24-317/fr/kzg/kzg.go b/ecc/bls24-317/fr/kzg/kzg.go
index 69c2cd4111..38e7015e88 100644
--- a/ecc/bls24-317/fr/kzg/kzg.go
+++ b/ecc/bls24-317/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bls24317.G1Affine
+ var claimedValueG1Aff bls24317.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bls24317.G1Jac
+ var fminusfaG1Jac bls24317.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bls24317.G1Affine
diff --git a/ecc/bls24-317/fr/polynomial/multilin.go b/ecc/bls24-317/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..891137feaa
--- /dev/null
+++ b/ecc/bls24-317/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bls24-317/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// at the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + 2xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to 1 exactly when q' == h', for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
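A small runnable sketch of the bookkeeping-table convention and the Eq indicator described above, assuming the import path used in this diff (the table index encodes (b₁, …, bₙ) with b₁ as the most significant bit):

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls24-317/fr"
	"github.com/consensys/gnark-crypto/ecc/bls24-317/fr/polynomial"
)

func main() {
	// hypercube evaluations of f(X₁,X₂) = 3·X₁ + 2·X₂, stored as f(0,0), f(0,1), f(1,0), f(1,1)
	m := make(polynomial.MultiLin, 4)
	m[1].SetUint64(2)
	m[2].SetUint64(3)
	m[3].SetUint64(5)

	var r1, r2 fr.Element
	r1.SetUint64(7)
	r2.SetUint64(11)

	// Evaluate folds X₁ := r1 then X₂ := r2 on a copy of the table
	v := m.Evaluate([]fr.Element{r1, r2})
	fmt.Println(v.String()) // 43 = 3·7 + 2·11, the multilinear extension of the table

	// EvalEq acts as an equality indicator on binary vectors
	var zero, one fr.Element
	one.SetOne()
	q := []fr.Element{one, zero}
	same := polynomial.EvalEq(q, []fr.Element{one, zero})
	mismatch := polynomial.EvalEq(q, []fr.Element{zero, zero})
	fmt.Println(same.IsOne(), mismatch.IsZero()) // true true
}
```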
diff --git a/ecc/bls24-317/fr/polynomial/pool.go b/ecc/bls24-317/fr/polynomial/pool.go
new file mode 100644
index 0000000000..8e260ebad5
--- /dev/null
+++ b/ecc/bls24-317/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bls24-317/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool clears the pool completely and shields against memory leaks.
+// E.g. if we forgot to dump a polynomial at some point, this ensures the value eventually gets dumped.
+// Returns how many polynomials were cleared that way
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool Returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+ // If rC did not register the pointer, then either the array was
+ // allocated somewhere else (which can be ignored) or this is a
+ // double put, which MUST be ignored
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+ panic(fmt.Sprintf("can't cast to large or small array, the put array's is %v it should have capacity %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
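A short usage sketch of the contract above: `Make` hands out a pool-backed slice and registers it, `Dump` gives the backing array back (double puts are ignored):

```go
buf := polynomial.Make(64) // len 64, served from the small pool (≤ 256 elements)
defer polynomial.Dump(buf) // return the backing array to the pool when done
for i := range buf {
	buf[i].SetUint64(uint64(i))
}
```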
diff --git a/ecc/bls24-317/g1.go b/ecc/bls24-317/g1.go
index 4b316955a2..24670352e6 100644
--- a/ecc/bls24-317/g1.go
+++ b/ecc/bls24-317/g1.go
@@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -336,7 +344,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -816,9 +824,9 @@ func (p *g1JacExtended) doubleMixed(q *G1Affine) *g1JacExtended {
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -838,7 +846,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -849,7 +857,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
var a, b fp.Element
@@ -861,6 +869,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -924,8 +933,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take value in the base table, double it c time, voilà.
@@ -967,7 +975,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
diff --git a/ecc/bls24-317/g1_test.go b/ecc/bls24-317/g1_test.go
index 2f529cb419..2c08510b14 100644
--- a/ecc/bls24-317/g1_test.go
+++ b/ecc/bls24-317/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BLS24-317] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls24-317/g2.go b/ecc/bls24-317/g2.go
index 683e112cbd..bf9e978856 100644
--- a/ecc/bls24-317/g2.go
+++ b/ecc/bls24-317/g2.go
@@ -341,7 +341,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bls24-317/g2_test.go b/ecc/bls24-317/g2_test.go
index 0fb3b5af2c..376b469347 100644
--- a/ecc/bls24-317/g2_test.go
+++ b/ecc/bls24-317/g2_test.go
@@ -99,7 +99,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fptower.E4) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -505,7 +505,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -533,7 +533,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bls24-317/hash_to_g1.go b/ecc/bls24-317/hash_to_g1.go
index bc5d84f81a..ab839928e1 100644
--- a/ecc/bls24-317/hash_to_g1.go
+++ b/ecc/bls24-317/hash_to_g1.go
@@ -101,43 +101,38 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.2. q = 3 mod 4
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q- (3 mod 4)
var tv1 fp.Element
- tv1.Square(v)
+ tv1.Square(v) // 1. tv1 = v²
var tv2 fp.Element
- tv2.Mul(u, v)
- tv1.Mul(&tv1, &tv2)
+ tv2.Mul(u, v) // 2. tv2 = u * v
+ tv1.Mul(&tv1, &tv2) // 3. tv1 = tv1 * tv2
var y1 fp.Element
{
var c1 big.Int
// c1 = 34098267776073977878774941477068514265486278030354898494302534825976493299308006404506539182762
- c1.SetBytes([]byte{4, 22, 50, 136, 155, 216, 34, 75, 60, 163, 241, 104, 45, 254, 116, 14, 69, 166, 152, 121, 161, 49, 205, 17, 181, 188, 206, 121, 13, 9, 47, 223, 163, 84, 75, 149, 151, 106, 202, 170})
- y1.Exp(tv1, &c1)
+ c1.SetBytes([]byte{4, 22, 50, 136, 155, 216, 34, 75, 60, 163, 241, 104, 45, 254, 116, 14, 69, 166, 152, 121, 161, 49, 205, 17, 181, 188, 206, 121, 13, 9, 47, 223, 163, 84, 75, 149, 151, 106, 202, 170}) // c1 = (q - 3) / 4 # Integer arithmetic
+
+ y1.Exp(tv1, &c1) // 4. y1 = tv1ᶜ¹
}
- y1.Mul(&y1, &tv2)
+ y1.Mul(&y1, &tv2) // 5. y1 = y1 * tv2
var y2 fp.Element
- y2.Mul(&y1, &fp.Element{10652859563586318787, 3643689439157831556, 9236201363192486412, 11781990169133948855, 1044489031832785863})
-
- var tv3 fp.Element
- tv3.Square(&y1)
- tv3.Mul(&tv3, v)
-
- isQNr := tv3.NotEqual(u)
- z.Select(int(isQNr), &y1, &y2)
+ // c2 = sqrt(-Z)
+ tv3 := fp.Element{10652859563586318787, 3643689439157831556, 9236201363192486412, 11781990169133948855, 1044489031832785863}
+ y2.Mul(&y1, &tv3) // 6. y2 = y1 * c2
+ tv3.Square(&y1) // 7. tv3 = y1²
+ tv3.Mul(&tv3, v) // 8. tv3 = tv3 * v
+ isQNr := tv3.NotEqual(u) // 9. isQR = tv3 == u
+ z.Select(int(isQNr), &y1, &y2) // 10. y = CMOV(y2, y1, isQR)
return isQNr
}
-/*
-// g1SetZ sets z to [8].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {18400687542797871745, 809728271075671860, 17770696641280178537, 10361798156408411167, 334758614216279309} )
-}*/
-
// g1MulByZ multiplies x by [8] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -150,30 +145,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{2751493217506761890, 10508083672876982400, 9568653941102734201, 1934905759174260726, 590687129635764257}
+ var sswuIsoCurveCoeffB = fp.Element{14477170886729819615, 1154054877908840441, 13400991584556574205, 3277375072715511934, 979998381373634863}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{14477170886729819615, 1154054877908840441, 13400991584556574205, 3277375072715511934, 979998381373634863})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -181,48 +175,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{18400687542797871745, 809728271075671860, 17770696641280178537, 10361798156408411167, 334758614216279309}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{2751493217506761890, 10508083672876982400, 9568653941102734201, 1934905759174260726, 590687129635764257}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{2751493217506761890, 10508083672876982400, 9568653941102734201, 1934905759174260726, 590687129635764257})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{14477170886729819615, 1154054877908840441, 13400991584556574205, 3277375072715511934, 979998381373634863})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -265,13 +256,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -288,7 +279,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -308,7 +299,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -318,7 +309,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
diff --git a/ecc/bls24-317/hash_to_g2.go b/ecc/bls24-317/hash_to_g2.go
index 32d54c1249..ea3fab2442 100644
--- a/ecc/bls24-317/hash_to_g2.go
+++ b/ecc/bls24-317/hash_to_g2.go
@@ -28,12 +28,12 @@ func svdwMapG2(u fptower.E4) G2Affine {
// constants
// sage script to find z: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#appendix-E.1
var z, c1, c2, c3, c4 fptower.E4
- z.B0.A0.SetString("1")
- z.B1.A0.SetString("1")
+ z.B0.A0.SetOne()
+ z.B1.A0.SetOne()
c1.B0.A0.SetString("4")
c1.B0.A1.SetString("3")
c1.B1.A0.SetString("8")
- c1.B1.A1.SetString("1")
+ c1.B1.A1.SetOne()
c2.B0.A0.SetString("68196535552147955757549882954137028530972556060709796988605069651952986598616012809013078365525")
c2.B1.A0.SetString("68196535552147955757549882954137028530972556060709796988605069651952986598616012809013078365525")
c3.B0.A0.SetString("25710473854271083900266173357439657657737168361084633536126117969329631844210973452609964652920")
diff --git a/ecc/bls24-317/internal/fptower/e12.go b/ecc/bls24-317/internal/fptower/e12.go
index 785fea776d..fc47857646 100644
--- a/ecc/bls24-317/internal/fptower/e12.go
+++ b/ecc/bls24-317/internal/fptower/e12.go
@@ -180,6 +180,8 @@ func (z *E12) Square(x *E12) *E12 {
}
// Inverse an element in E12
+//
+// if x == 0, sets and returns z = x
func (z *E12) Inverse(x *E12) *E12 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -208,6 +210,8 @@ func (z *E12) Inverse(x *E12) *E12 {
// BatchInvertE12 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE12(a []E12) []E12 {
res := make([]E12, len(a))
if len(a) == 0 {
@@ -250,7 +254,7 @@ func (z *E12) Exp(x E12, k *big.Int) *E12 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q¹²) == (x⁻¹)ᵏ (mod q¹²)
x.Inverse(&x)
// we negate k in a temp big.Int since
diff --git a/ecc/bls24-317/internal/fptower/e24.go b/ecc/bls24-317/internal/fptower/e24.go
index c29a23021e..1c8432093c 100644
--- a/ecc/bls24-317/internal/fptower/e24.go
+++ b/ecc/bls24-317/internal/fptower/e24.go
@@ -223,28 +223,45 @@ func (z *E24) CyclotomicSquareCompressed(x *E24) *E24 {
}
// DecompressKarabina Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
func (z *E24) DecompressKarabina(x *E24) *E24 {
var t [3]E4
var one E4
one.SetOne()
- // t0 = g1²
- t[0].Square(&x.D0.C1)
- // t1 = 3 * g1² - 2 * g2
- t[1].Sub(&t[0], &x.D0.C2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5² + t1
- t[2].Square(&x.D1.C2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.D1.C0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.D1.C0.IsZero() {
+ t[0].Mul(&x.D0.C1, &x.D1.C2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.D0.C2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.D0.C1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.D0.C2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.D1.C2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 1/(4 * g3)
+ t[1].Double(&x.D1.C0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.D1.C1.Mul(&t[0], &t[1])
+ z.D1.C1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.D0.C2, &x.D0.C1)
@@ -253,7 +270,7 @@ func (z *E24) DecompressKarabina(x *E24) *E24 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.D1.C0, &x.D1.C2)
// c₀ = E * (2 * g4² + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -269,6 +286,15 @@ func (z *E24) DecompressKarabina(x *E24) *E24 {
}
// BatchDecompressKarabina multiple Karabina's cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// The formula of Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using the Montgomery batch inverse
func BatchDecompressKarabina(x []E24) []E24 {
n := len(x)
@@ -284,19 +310,29 @@ func BatchDecompressKarabina(x []E24) []E24 {
one.SetOne()
for i := 0; i < n; i++ {
- // t0 = g1²
- t0[i].Square(&x[i].D0.C1)
- // t1 = 3 * g1² - 2 * g2
- t1[i].Sub(&t0[i], &x[i].D0.C2).
- Double(&t1[i]).
- Add(&t1[i], &t0[i])
- // t0 = E * g5² + t1
- t2[i].Square(&x[i].D1.C2)
- t0[i].MulByNonResidue(&t2[i]).
- Add(&t0[i], &t1[i])
- // t1 = 4 * g3
- t1[i].Double(&x[i].D1.C0).
- Double(&t1[i])
+ // g3 == 0
+ if x[i].D1.C0.IsZero() {
+ t0[i].Mul(&x[i].D0.C1, &x[i].D1.C2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].D0.C2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t0[i].Square(&x[i].D0.C1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].D0.C2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5^2 + t1
+ t2[i].Square(&x[i].D1.C2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 4 * g3
+ t1[i].Double(&x[i].D1.C0).
+ Double(&t1[i])
+ }
}
t1 = BatchInvertE4(t1) // costs 1 inverse
@@ -313,7 +349,7 @@ func BatchDecompressKarabina(x []E24) []E24 {
t2[i].Double(&t2[i])
t2[i].Sub(&t2[i], &t1[i])
- // t1 = g3 * g5
+		// t1 = g3 * g5 (g3 may be 0 for some entries)
t1[i].Mul(&x[i].D1.C0, &x[i].D1.C2)
// z0 = E * (2 * g4² + g3 * g5 - 3 * g2 * g1) + 1
t2[i].Add(&t2[i], &t1[i])
@@ -364,6 +400,8 @@ func (z *E24) CyclotomicSquare(x *E24) *E24 {
}
// Inverse set z to the inverse of x in E24 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E24) Inverse(x *E24) *E24 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -381,6 +419,8 @@ func (z *E24) Inverse(x *E24) *E24 {
// BatchInvertE24 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE24(a []E24) []E24 {
res := make([]E24, len(a))
if len(a) == 0 {
@@ -423,7 +463,7 @@ func (z *E24) Exp(x E24, k *big.Int) *E24 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q²⁴) == (x⁻¹)ᵏ (mod q²⁴)
x.Inverse(&x)
// we negate k in a temp big.Int since
@@ -827,6 +867,10 @@ func BatchCompressTorus(x []E24) ([]E12, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].D1)
+		// return an error if any of the x[i].D1 is 0
+ if res[i].IsZero() {
+ return []E12{}, errors.New("invalid input")
+ }
}
t := BatchInvertE12(res) // costs 1 inverse
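The g3 == 0 branch added above is the Karabina decompression edge case: when g3 vanishes, g4 is recovered as 2·g1·g5/g2 instead of dividing by 4·g3. Below is a hedged round-trip sketch through the public GT type, assuming (as in the other curve packages) that GT aliases fptower.E24 and therefore exposes these methods:

    package main

    import (
        "fmt"

        bls24317 "github.com/consensys/gnark-crypto/ecc/bls24-317"
    )

    func main() {
        // a pairing output lies in the cyclotomic subgroup, where Karabina compression applies
        _, _, g1, g2 := bls24317.Generators()
        e, err := bls24317.Pair([]bls24317.G1Affine{g1}, []bls24317.G2Affine{g2})
        if err != nil {
            panic(err)
        }

        var sq, csq bls24317.GT
        sq.Square(&e)                                               // plain square
        csq.CyclotomicSquareCompressed(&e).DecompressKarabina(&csq) // compressed square, then decompress

        fmt.Println(sq.Equal(&csq)) // expected: true
    }
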
diff --git a/ecc/bls24-317/internal/fptower/e24_test.go b/ecc/bls24-317/internal/fptower/e24_test.go
index 6f235ca829..7bdbeb1cf2 100644
--- a/ecc/bls24-317/internal/fptower/e24_test.go
+++ b/ecc/bls24-317/internal/fptower/e24_test.go
@@ -370,13 +370,29 @@ func TestE24Ops(t *testing.T) {
properties.Property("[BLS24-317] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E24) bool {
- var b, c, d E24
+ var _a, b, c, d, _c, _d E24
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusQuad(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusQuad(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
-			return c.Equal(&d)
+			return c.Equal(&d) && _c.Equal(&_d)
},
genA,
@@ -384,18 +400,26 @@ func TestE24Ops(t *testing.T) {
properties.Property("[BLS24-317] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
func(a *E24) bool {
- var b E24
- // put in the cyclotomic subgroup
+ var _a, b E24
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusQuad(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusQuad(&b).Mul(&_a, &b)
var a2, a4, a17 E24
- a2.Set(a)
+ a2.Set(&_a)
a4.Set(a)
a17.Set(a)
- a2.nSquareCompressed(2)
+ a2.nSquareCompressed(2) // case g3 == 0
a4.nSquareCompressed(4)
a17.nSquareCompressed(17)
batch := BatchDecompressKarabina([]E24{a2, a4, a17})
diff --git a/ecc/bls24-317/internal/fptower/e2_bls317.go b/ecc/bls24-317/internal/fptower/e2_bls317.go
index 51f5e23ff4..96829d0618 100644
--- a/ecc/bls24-317/internal/fptower/e2_bls317.go
+++ b/ecc/bls24-317/internal/fptower/e2_bls317.go
@@ -65,6 +65,8 @@ func (z *E2) MulByNonResidueInv(x *E2) *E2 {
}
// Inverse sets z to the E2-inverse of x, returns z
+//
+// if x == 0, sets and returns z = x
func (z *E2) Inverse(x *E2) *E2 {
// Algorithm 8 from https://eprint.iacr.org/2010/354.pdf
var t0, t1 fp.Element
diff --git a/ecc/bls24-317/internal/fptower/e2_test.go b/ecc/bls24-317/internal/fptower/e2_test.go
index a63f11c4a6..69a1ec9e61 100644
--- a/ecc/bls24-317/internal/fptower/e2_test.go
+++ b/ecc/bls24-317/internal/fptower/e2_test.go
@@ -189,12 +189,6 @@ func TestE2ReceiverIsOperand(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
func TestE2MulMaxed(t *testing.T) {
@@ -389,12 +383,6 @@ func TestE2Ops(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
// ------------------------------------------------------------
diff --git a/ecc/bls24-317/internal/fptower/e4.go b/ecc/bls24-317/internal/fptower/e4.go
index 63e6b37321..c6319aa9b9 100644
--- a/ecc/bls24-317/internal/fptower/e4.go
+++ b/ecc/bls24-317/internal/fptower/e4.go
@@ -201,6 +201,8 @@ func (z *E4) Square(x *E4) *E4 {
}
// Inverse set z to the inverse of x in E4 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E4) Inverse(x *E4) *E4 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -323,6 +325,8 @@ func (z *E4) Sqrt(x *E4) *E4 {
// BatchInvertE4 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE4(a []E4) []E4 {
res := make([]E4, len(a))
if len(a) == 0 {
diff --git a/ecc/bls24-317/multiexp.go b/ecc/bls24-317/multiexp.go
index 2418530de3..5a1b6797d2 100644
--- a/ecc/bls24-317/multiexp.go
+++ b/ecc/bls24-317/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then we borrow 2^c from the next window and subtract
// 2^{c} from the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates whether the provided scalars are in Montgomery form
// returns smallValues, which represents the number of scalars that meet the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
@@ -333,9 +339,6 @@ func msmInnerG1Jac(p *G1Jac, c int, points []G1Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -1180,59 +1183,9 @@ func (p *G1Jac) msmC21(points []G1Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG1Affine(p, c, chChunks[:])
}
-func (p *G1Jac) msmC22(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g1JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g1JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G1Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g1JacExtended
- msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) {
- var buckets [1 << (c - 1)]g1JacExtended
- msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g1JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG1Affine(p, c, chChunks[:])
-}
-
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -1243,6 +1196,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -1279,13 +1234,15 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
// we split recursively until nbChunks(c) >= nbTasks,
bestC := func(nbPoints int) uint64 {
// implemented msmC methods (the c we use must be in this slice)
- implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22}
+ implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
var C uint64
// approximate cost (in group operations)
// cost = bits/c * (nbPoints + 2^{c})
@@ -1403,9 +1360,6 @@ func msmInnerG2Jac(p *G2Jac, c int, points []G2Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -2249,55 +2203,3 @@ func (p *G2Jac) msmC21(points []G2Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG2Affine(p, c, chChunks[:])
}
-
-func (p *G2Jac) msmC22(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g2JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g2JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G2Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g2JacExtended
- msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) {
- var buckets [1 << (c - 1)]g2JacExtended
- msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g2JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG2Affine(p, c, chChunks[:])
-}
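The hunks above tighten MultiExp's configuration handling: NbTasks is now validated (values above 1024 return an error) and the c=22 window is removed from the implemented set. A minimal usage sketch of the public API, with placeholder points and scalars:

    package main

    import (
        "fmt"
        "runtime"

        "github.com/consensys/gnark-crypto/ecc"
        bls24317 "github.com/consensys/gnark-crypto/ecc/bls24-317"
        "github.com/consensys/gnark-crypto/ecc/bls24-317/fr"
    )

    func main() {
        const n = 1 << 10
        _, _, g1, _ := bls24317.Generators()

        points := make([]bls24317.G1Affine, n)
        scalars := make([]fr.Element, n)
        for i := range points {
            points[i] = g1
            scalars[i].SetRandom()
        }

        // NbTasks <= 0 means "use all CPUs"; values above 1024 now return an error
        var res bls24317.G1Affine
        if _, err := res.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: runtime.NumCPU()}); err != nil {
            panic(err)
        }
        fmt.Println(res.IsOnCurve())
    }
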
diff --git a/ecc/bls24-317/multiexp_test.go b/ecc/bls24-317/multiexp_test.go
index d7906d96d2..d5674abaec 100644
--- a/ecc/bls24-317/multiexp_test.go
+++ b/ecc/bls24-317/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
+ if testing.Short() {
+		// test only "odd" and "even" c (i.e. window sizes that divide the word size vs those that don't)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+	// for G2, CI struggles with large c sizes since it needs to allocate a lot of memory for the buckets.
+	// test only "odd" and "even" c (i.e. window sizes that divide the word size vs those that don't)
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bls24-317/pairing.go b/ecc/bls24-317/pairing.go
index 6f1721d271..1d4cb27300 100644
--- a/ecc/bls24-317/pairing.go
+++ b/ecc/bls24-317/pairing.go
@@ -30,7 +30,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -41,6 +43,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
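The new godoc notes above make it explicit that Pair and PairingCheck trust their inputs; callers receiving untrusted points should check subgroup membership themselves. A short sketch of that pattern:

    package main

    import (
        "fmt"

        bls24317 "github.com/consensys/gnark-crypto/ecc/bls24-317"
    )

    func main() {
        _, _, p, q := bls24317.Generators()

        // Pair does not verify subgroup membership; do it explicitly for untrusted inputs
        if !p.IsInSubGroup() || !q.IsInSubGroup() {
            panic("input not in the prime-order subgroup")
        }

        e, err := bls24317.Pair([]bls24317.G1Affine{p}, []bls24317.G2Affine{q})
        if err != nil {
            panic(err)
        }
        fmt.Println(e) // reduced pairing e(p, q)
    }
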
diff --git a/ecc/bls24-317/twistededwards/eddsa/eddsa.go b/ecc/bls24-317/twistededwards/eddsa/eddsa.go
index 384c7311d5..4bf98697e4 100644
--- a/ecc/bls24-317/twistededwards/eddsa/eddsa.go
+++ b/ecc/bls24-317/twistededwards/eddsa/eddsa.go
@@ -89,7 +89,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -137,7 +137,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -223,8 +223,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -232,9 +232,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bls24-317/twistededwards/point.go b/ecc/bls24-317/twistededwards/point.go
index a0426dc1ee..c2e7d2c584 100644
--- a/ecc/bls24-317/twistededwards/point.go
+++ b/ecc/bls24-317/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
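ScalarMul is renamed to ScalarMultiplication on all three point representations (affine, projective, extended), matching the naming used elsewhere in the library. A minimal sketch of the renamed API:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/consensys/gnark-crypto/ecc/bls24-317/twistededwards"
    )

    func main() {
        params := twistededwards.GetEdwardsCurve()

        // 5·Base, using the renamed method (formerly ScalarMul)
        var p twistededwards.PointAffine
        p.ScalarMultiplication(&params.Base, big.NewInt(5))

        fmt.Println(p.IsOnCurve())
    }
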
diff --git a/ecc/bls24-317/twistededwards/point_test.go b/ecc/bls24-317/twistededwards/point_test.go
index 21a7f04d42..2da327d286 100644
--- a/ecc/bls24-317/twistededwards/point_test.go
+++ b/ecc/bls24-317/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bn254/bn254.go b/ecc/bn254/bn254.go
index 336bfb8c4d..e52e97be45 100644
--- a/ecc/bn254/bn254.go
+++ b/ecc/bn254/bn254.go
@@ -1,3 +1,40 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bn254 efficient elliptic curve, pairing and hash to curve implementation for bn254. This curve appears in
+// Ethereum pre-compiles as altbn128.
+//
+// bn254: A Barreto--Naehrig curve with
+// seed x₀=4965661367192848881
+// 𝔽r: r=21888242871839275222246405745257275088548364400416034343698204186575808495617 (36x₀⁴+36x₀³+18x₀²+6x₀+1)
+// 𝔽p: p=21888242871839275222246405745257275088696311157297823662689037894645226208583 (36x₀⁴+36x₀³+24x₀²+6x₀+1)
+// (E/𝔽p): Y²=X³+3
+// (Eₜ/𝔽p²): Y² = X³+3/(u+9) (D-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
+// Extension fields tower:
+// 𝔽p²[u] = 𝔽p/u²+1
+// 𝔽p⁶[v] = 𝔽p²/v³-9-u
+// 𝔽p¹²[w] = 𝔽p⁶/w²-v
+// optimal Ate loop size:
+// 6x₀+2
+//
+// Security: estimated 103-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 254 bits and p¹² is 3044 bits)
+//
+// Warning
+//
+// This code has been partially audited and is provided as-is. In particular, there are no security guarantees such as a constant-time implementation or side-channel attack resistance.
package bn254
import (
@@ -9,18 +46,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bn254/internal/fptower"
)
-// BN254: A Barreto--Naerig curve with seed x₀=4965661367192848881
-// 𝔽r: r=21888242871839275222246405745257275088548364400416034343698204186575808495617 (36x₀⁴+36x₀³+18x₀²+6x₀+1)
-// 𝔽p: p=21888242871839275222246405745257275088696311157297823662689037894645226208583 (36x₀⁴+36x₀³+24x₀²+6x₀+1)
-// (E/𝔽p): Y²=X³+3
-// (Eₜ/𝔽p²): Y² = X³+3/(u+9) (D-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p²)
-// Extension fields tower:
-// 𝔽p²[u] = 𝔽p/u²+1
-// 𝔽p⁶[v] = 𝔽p²/v³-9-u
-// 𝔽p¹²[w] = 𝔽p⁶/w²-v
-// optimal Ate loop size: 6x₀+2
-
// ID bn254 ID
const ID = ecc.BN254
@@ -80,9 +105,9 @@ func init() {
twist.A1.SetUint64(1)
bTwistCurveCoeff.Inverse(&twist).MulByElement(&bTwistCurveCoeff, &bCurveCoeff)
- g1Gen.X.SetString("1")
- g1Gen.Y.SetString("2")
- g1Gen.Z.SetString("1")
+ g1Gen.X.SetOne()
+ g1Gen.Y.SetUint64(2)
+ g1Gen.Z.SetOne()
g2Gen.X.SetString("10857046999023057135944570762232829481370756359578518086990519993285655852781",
"11559732032986387107991004021392285783925812861821192530917403151452391805634")
diff --git a/ecc/bn254/doc.go b/ecc/bn254/doc.go
deleted file mode 100644
index e7e47ad976..0000000000
--- a/ecc/bn254/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bn254 efficient elliptic curve and pairing implementation for bn254.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bn254
diff --git a/ecc/bn254/fp/element.go b/ecc/bn254/fp/element.go
index 0d258e8984..c60263d4a3 100644
--- a/ecc/bn254/fp/element.go
+++ b/ecc/bn254/fp/element.go
@@ -175,7 +175,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -944,12 +944,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -957,7 +958,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bn254/fr/element.go b/ecc/bn254/fr/element.go
index 112b64ea4d..21cd363523 100644
--- a/ecc/bn254/fr/element.go
+++ b/ecc/bn254/fr/element.go
@@ -175,7 +175,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -944,12 +944,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -957,7 +958,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
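SetString on fp.Element and fr.Element now returns (*Element, error) instead of panicking on malformed input, so call sites must handle the error (SetInterface propagates it). A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/consensys/gnark-crypto/ecc/bn254/fr"
    )

    func main() {
        var x fr.Element

        // valid input: decimal and hex (0x...) strings are accepted
        if _, err := x.SetString("0x2a"); err != nil {
            panic(err)
        }
        fmt.Println(x.String()) // 42

        // invalid input: previously a panic, now an error
        if _, err := x.SetString("not-a-number"); err != nil {
            fmt.Println("rejected:", err)
        }
    }
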
diff --git a/ecc/bn254/fr/kzg/kzg.go b/ecc/bn254/fr/kzg/kzg.go
index a0ba3aed57..0fd45d0a35 100644
--- a/ecc/bn254/fr/kzg/kzg.go
+++ b/ecc/bn254/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bn254.G1Affine
+ var claimedValueG1Aff bn254.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bn254.G1Jac
+ var fminusfaG1Jac bn254.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bn254.G1Affine
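The change above folds the claimed-value multiplication directly into a Jacobian point, removing one affine conversion from Verify. For context, a sketch of the commit/open/verify flow this touches, assuming the Open and Verify signatures shown in this hunk (NewSRS and Commit are assumed from the same package) and using a toy, insecure SRS:

    package main

    import (
        "crypto/rand"

        "github.com/consensys/gnark-crypto/ecc/bn254/fr"
        "github.com/consensys/gnark-crypto/ecc/bn254/fr/kzg"
    )

    func main() {
        // toy trusted setup; a production SRS must come from a real ceremony
        alpha, _ := rand.Int(rand.Reader, fr.Modulus())
        srs, err := kzg.NewSRS(64, alpha)
        if err != nil {
            panic(err)
        }

        // polynomial given by its coefficients
        p := make([]fr.Element, 60)
        for i := range p {
            p[i].SetRandom()
        }
        var point fr.Element
        point.SetUint64(42)

        commitment, err := kzg.Commit(p, srs)
        if err != nil {
            panic(err)
        }
        proof, err := kzg.Open(p, point, srs)
        if err != nil {
            panic(err)
        }
        if err := kzg.Verify(&commitment, &proof, point, srs); err != nil {
            panic(err)
        }
    }
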
diff --git a/ecc/bn254/fr/polynomial/multilin.go b/ecc/bn254/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..2b2fac3a13
--- /dev/null
+++ b/ecc/bn254/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bn254/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is the partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] obtained by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// at the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to q' == h' for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
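MultiLin stores a multilinear polynomial as its table of evaluations on the hypercube, and Fold implements the update f(r, ·) = f(0, ·) + r·(f(1, ·) − f(0, ·)) described in the comments above. A minimal one-variable sketch of that semantics (the values are arbitrary placeholders):

    package main

    import (
        "fmt"

        "github.com/consensys/gnark-crypto/ecc/bn254/fr"
        "github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial"
    )

    func main() {
        // one-variable multilinear polynomial f, stored as [f(0), f(1)]
        var f0, f1, r fr.Element
        f0.SetUint64(3)
        f1.SetUint64(7)
        r.SetUint64(5)

        m := polynomial.MultiLin{f0, f1}

        // Fold sets X₁ = r, shrinking the table to [f(0) + r·(f(1) - f(0))] = [f(r)]
        m.Fold(r)

        var want fr.Element
        want.SetUint64(3 + 5*(7-3))    // 23
        fmt.Println(m[0].Equal(&want)) // true
    }
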
diff --git a/ecc/bn254/fr/polynomial/pool.go b/ecc/bn254/fr/polynomial/pool.go
new file mode 100644
index 0000000000..21f2e5a87d
--- /dev/null
+++ b/ecc/bn254/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bn254/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool Clears the pool completely, shields against memory leaks
+// E.g. if we forgot to dump a polynomial at some point, this ensures the value eventually gets dumped
+// Returns how many polynomials were cleared that way
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool Returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+	rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+		// If rC did not register the pointer, then either
+		// the array was allocated somewhere else (which can be ignored),
+		// or this is a double Put (which MUST be ignored)
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+		panic(fmt.Sprintf("can't cast to large or small array, the put array's capacity is %v but it should be %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
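The pool hands out fixed-capacity backing arrays and tracks live pointers in rC, so Dump only recycles slices that actually came from Make. A minimal sketch of the intended borrow/return cycle:

    package main

    import (
        "fmt"

        "github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial"
    )

    func main() {
        // borrow a scratch polynomial from the pool (n <= 256 uses the small pool)
        scratch := polynomial.Make(128)
        for i := range scratch {
            scratch[i].SetUint64(uint64(i))
        }

        // return it; Dump reports how many of its arguments were pool-allocated
        fmt.Println(polynomial.Dump(scratch)) // 1
    }
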
diff --git a/ecc/bn254/g1.go b/ecc/bn254/g1.go
index ee39596f14..872d8218c9 100644
--- a/ecc/bn254/g1.go
+++ b/ecc/bn254/g1.go
@@ -59,6 +59,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -336,7 +344,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -786,9 +794,9 @@ func (p *g1JacExtended) doubleMixed(q *G1Affine) *g1JacExtended {
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -808,7 +816,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+			// do nothing, (X=0, Y=0) is the point at infinity in affine coordinates
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -819,7 +827,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+				// do nothing, (X=0, Y=0) is the point at infinity in affine coordinates
continue
}
var a, b fp.Element
@@ -831,6 +839,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -894,8 +903,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take value in the base table, double it c time, voilà.
@@ -937,7 +945,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
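BatchJacobianToAffineG1 now allocates and returns the affine slice itself instead of filling a caller-provided one. A minimal sketch of the new call shape:

    package main

    import (
        "fmt"

        "github.com/consensys/gnark-crypto/ecc/bn254"
    )

    func main() {
        g1Jac, _, _, _ := bn254.Generators()

        var twoG bn254.G1Jac
        twoG.Double(&g1Jac)

        // single field inversion for the whole batch; result allocated by the callee
        affine := bn254.BatchJacobianToAffineG1([]bn254.G1Jac{g1Jac, twoG})
        fmt.Println(len(affine), affine[0].IsOnCurve(), affine[1].IsOnCurve())
    }
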
diff --git a/ecc/bn254/g1_test.go b/ecc/bn254/g1_test.go
index 4ab7148379..8ee025d787 100644
--- a/ecc/bn254/g1_test.go
+++ b/ecc/bn254/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BN254] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -447,7 +460,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -475,7 +488,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bn254/g2.go b/ecc/bn254/g2.go
index 1939b3f32e..0a8625caa2 100644
--- a/ecc/bn254/g2.go
+++ b/ecc/bn254/g2.go
@@ -341,7 +341,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bn254/g2_test.go b/ecc/bn254/g2_test.go
index 4f0c11d14f..17c09d95ba 100644
--- a/ecc/bn254/g2_test.go
+++ b/ecc/bn254/g2_test.go
@@ -98,7 +98,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fptower.E2) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -504,7 +504,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -532,7 +532,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bn254/hash_to_g1.go b/ecc/bn254/hash_to_g1.go
index 47e35a7d48..9df840a530 100644
--- a/ecc/bn254/hash_to_g1.go
+++ b/ecc/bn254/hash_to_g1.go
@@ -23,7 +23,7 @@ import (
// mapToCurve1 implements the Shallue and van de Woestijne method, applicable to any elliptic curve in Weierstrass form
// No cofactor clearing or isogeny
-// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#appendix-F.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#straightline-svdw
func mapToCurve1(u *fp.Element) G1Affine {
var tv1, tv2, tv3, tv4 fp.Element
var x1, x2, x3, gx1, gx2, gx, x, y fp.Element
@@ -36,7 +36,6 @@ func mapToCurve1(u *fp.Element) G1Affine {
//c3 = sqrt(-g(Z) * (3 * Z² + 4 * A)) # sgn0(c3) MUST equal 0
//c4 = -4 * g(Z) / (3 * Z² + 4 * A)
- //TODO: Move outside function?
Z := fp.Element{15230403791020821917, 754611498739239741, 7381016538464732716, 1011752739694698287}
c1 := fp.Element{1248766071674976557, 10548065924188627562, 16242874202584236114, 560012691975822483}
c2 := fp.Element{12997850613838968789, 14304628359724097447, 2950087706404981016, 1237622763554136189}
@@ -58,8 +57,7 @@ func mapToCurve1(u *fp.Element) G1Affine {
x1.Sub(&c2, &tv4) // 10. x1 = c2 - tv4
gx1.Square(&x1) // 11. gx1 = x1²
- //TODO: Beware A ≠ 0
- //12. gx1 = gx1 + A
+ //12. gx1 = gx1 + A All curves in gnark-crypto have A=0 (j-invariant=0). It is crucial to include this step if the curve has nonzero A coefficient.
gx1.Mul(&gx1, &x1) // 13. gx1 = gx1 * x1
gx1.Add(&gx1, &bCurveCoeff) // 14. gx1 = gx1 + B
gx1NotSquare = gx1.Legendre() >> 1 // 15. e1 = is_square(gx1)
@@ -67,7 +65,7 @@ func mapToCurve1(u *fp.Element) G1Affine {
x2.Add(&c2, &tv4) // 16. x2 = c2 + tv4
gx2.Square(&x2) // 17. gx2 = x2²
- // 18. gx2 = gx2 + A
+ // 18. gx2 = gx2 + A See line 12
gx2.Mul(&gx2, &x2) // 19. gx2 = gx2 * x2
gx2.Add(&gx2, &bCurveCoeff) // 20. gx2 = gx2 + B
@@ -123,13 +121,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -143,7 +141,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SVDW map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -160,7 +158,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SVDW map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
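EncodeToG1 and HashToG1 both go through the SVDW map above; only HashToG1 is suitable as a random oracle. A minimal sketch (the DST string is an arbitrary example, not a value mandated by the library):

    package main

    import (
        "fmt"

        "github.com/consensys/gnark-crypto/ecc/bn254"
    )

    func main() {
        msg := []byte("message to hash")
        dst := []byte("MYAPP-V01-CS01-with-BN254G1_XMD:SHA-256_SVDW_RO_") // domain separation tag (example value)

        p, err := bn254.HashToG1(msg, dst)
        if err != nil {
            panic(err)
        }
        fmt.Println(p.IsOnCurve(), p.IsInSubGroup())
    }
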
diff --git a/ecc/bn254/hash_to_g2.go b/ecc/bn254/hash_to_g2.go
index 4f19f3db23..4395405049 100644
--- a/ecc/bn254/hash_to_g2.go
+++ b/ecc/bn254/hash_to_g2.go
@@ -23,7 +23,7 @@ import (
// mapToCurve2 implements the Shallue and van de Woestijne method, applicable to any elliptic curve in Weierstrass form
// No cofactor clearing or isogeny
-// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#appendix-F.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#straightline-svdw
func mapToCurve2(u *fptower.E2) G2Affine {
var tv1, tv2, tv3, tv4 fptower.E2
var x1, x2, x3, gx1, gx2, gx, x, y fptower.E2
@@ -36,7 +36,6 @@ func mapToCurve2(u *fptower.E2) G2Affine {
//c3 = sqrt(-g(Z) * (3 * Z² + 4 * A)) # sgn0(c3) MUST equal 0
//c4 = -4 * g(Z) / (3 * Z² + 4 * A)
- //TODO: Move outside function?
Z := fptower.E2{
A0: fp.Element{15230403791020821917, 754611498739239741, 7381016538464732716, 1011752739694698287},
A1: fp.Element{0},
@@ -73,8 +72,7 @@ func mapToCurve2(u *fptower.E2) G2Affine {
x1.Sub(&c2, &tv4) // 10. x1 = c2 - tv4
gx1.Square(&x1) // 11. gx1 = x1²
- //TODO: Beware A ≠ 0
- //12. gx1 = gx1 + A
+ //12. gx1 = gx1 + A  (A=0 for all curves in gnark-crypto, which have j-invariant 0; this step must not be omitted for a curve with a nonzero A coefficient)
gx1.Mul(&gx1, &x1) // 13. gx1 = gx1 * x1
gx1.Add(&gx1, &bTwistCurveCoeff) // 14. gx1 = gx1 + B
gx1NotSquare = gx1.Legendre() >> 1 // 15. e1 = is_square(gx1)
@@ -82,7 +80,7 @@ func mapToCurve2(u *fptower.E2) G2Affine {
x2.Add(&c2, &tv4) // 16. x2 = c2 + tv4
gx2.Square(&x2) // 17. gx2 = x2²
- // 18. gx2 = gx2 + A
+ // 18. gx2 = gx2 + A (see step 12)
gx2.Mul(&gx2, &x2) // 19. gx2 = gx2 * x2
gx2.Add(&gx2, &bTwistCurveCoeff) // 20. gx2 = gx2 + B
@@ -117,28 +115,29 @@ func mapToCurve2(u *fptower.E2) G2Affine {
// g2Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g2Sgn0(z *fptower.E2) uint64 {
nonMont := *z
nonMont.FromMont()
- sign := uint64(0)
- zero := uint64(1)
+ sign := uint64(0) // 1. sign = 0
+ zero := uint64(1) // 2. zero = 1
var signI uint64
var zeroI uint64
- signI = nonMont.A0[0] % 2
- sign = sign | (zero & signI)
-
+ // 3. i = 1
+ signI = nonMont.A0[0] % 2 // 4. sign_i = x_i mod 2
zeroI = g1NotZero(&nonMont.A0)
- zeroI = 1 ^ (zeroI|-zeroI)>>63
- zero = zero & zeroI
-
- signI = nonMont.A1[0] % 2
- sign = sign | (zero & signI)
-
+ zeroI = 1 ^ (zeroI|-zeroI)>>63 // 5. zero_i = x_i == 0
+ sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i) # Avoid short-circuit logic ops
+ zero = zero & zeroI // 7. zero = zero AND zero_i
+ // 3. i = 2
+ signI = nonMont.A1[0] % 2 // 4. sign_i = x_i mod 2
+ // 5. zero_i = x_i == 0
+ sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i) # Avoid short-circuit logic ops
+ // 7. zero = zero AND zero_i
return sign
}
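
The numbered comments above follow the sgn0 algorithm of the hash-to-curve draft. As a standalone sketch (not the library's per-field specialization), the same loop over the m components of an extension-field element, taken as canonical big integers, reads:

```go
package main

import (
	"fmt"
	"math/big"
)

// sgn0 is an illustrative, generic version of the draft's sgn0: it takes the m
// components x_1, ..., x_m of an extension-field element as canonical
// (non-Montgomery) big integers; gnark-crypto specializes this per field.
func sgn0(x []*big.Int) uint64 {
	sign := uint64(0)      // 1. sign = 0
	zero := uint64(1)      // 2. zero = 1
	for _, xi := range x { // 3. for i in (1, 2, ..., m)
		signI := uint64(xi.Bit(0)) // 4. sign_i = x_i mod 2
		zeroI := uint64(0)         // 5. zero_i = x_i == 0
		if xi.Sign() == 0 {
			zeroI = 1
		}
		sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i)
		zero = zero & zeroI          // 7. zero = zero AND zero_i
	}
	return sign // 8. return sign
}

func main() {
	// sign of (0, 1): the first component is zero, so the second one decides
	fmt.Println(sgn0([]*big.Int{big.NewInt(0), big.NewInt(1)})) // 1
}
```
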
@@ -153,7 +152,7 @@ func MapToG2(u fptower.E2) G2Affine {
// EncodeToG2 hashes a message to a point on the G2 curve using the SVDW map.
// It is faster than HashToG2, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG2(msg, dst []byte) (G2Affine, error) {
var res G2Affine
@@ -174,7 +173,7 @@ func EncodeToG2(msg, dst []byte) (G2Affine, error) {
// HashToG2 hashes a message to a point on the G2 curve using the SVDW map.
// Slower than EncodeToG2, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG2(msg, dst []byte) (G2Affine, error) {
u, err := hashToFp(msg, dst, 2*2)
if err != nil {
diff --git a/ecc/bn254/internal/fptower/e12.go b/ecc/bn254/internal/fptower/e12.go
index 950d60de6c..087e81e175 100644
--- a/ecc/bn254/internal/fptower/e12.go
+++ b/ecc/bn254/internal/fptower/e12.go
@@ -225,28 +225,45 @@ func (z *E12) CyclotomicSquareCompressed(x *E12) *E12 {
}
// DecompressKarabina decompresses Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
func (z *E12) DecompressKarabina(x *E12) *E12 {
var t [3]E2
var one E2
one.SetOne()
- // t0 = g1^2
- t[0].Square(&x.C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t[1].Sub(&t[0], &x.C0.B2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5^2 + t1
- t[2].Square(&x.C1.B2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.C1.B0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.C1.B2.IsZero() {
+ t[0].Mul(&x.C0.B1, &x.C1.B2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.C0.B2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.C1.B2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 4 * g3
+ t[1].Double(&x.C1.B0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.C1.B1.Mul(&t[0], &t[1])
+ z.C1.B1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.C0.B2, &x.C0.B1)
@@ -255,7 +272,7 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.C1.B0, &x.C1.B2)
// c_0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -271,6 +288,15 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
}
// BatchDecompressKarabina decompresses multiple Karabina cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using the Montgomery batch inverse
func BatchDecompressKarabina(x []E12) []E12 {
n := len(x)
@@ -286,19 +312,29 @@ func BatchDecompressKarabina(x []E12) []E12 {
one.SetOne()
for i := 0; i < n; i++ {
- // t0 = g1^2
- t0[i].Square(&x[i].C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t1[i].Sub(&t0[i], &x[i].C0.B2).
- Double(&t1[i]).
- Add(&t1[i], &t0[i])
- // t0 = E * g5^2 + t1
- t2[i].Square(&x[i].C1.B2)
- t0[i].MulByNonResidue(&t2[i]).
- Add(&t0[i], &t1[i])
- // t1 = 4 * g3
- t1[i].Double(&x[i].C1.B0).
- Double(&t1[i])
+ // g3 == 0
+ if x[i].C1.B2.IsZero() {
+ t0[i].Mul(&x[i].C0.B1, &x[i].C1.B2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t0[i].Square(&x[i].C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].C0.B2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5^2 + t1
+ t2[i].Square(&x[i].C1.B2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 4 * g3
+ t1[i].Double(&x[i].C1.B0).
+ Double(&t1[i])
+ }
}
t1 = BatchInvertE2(t1) // costs 1 inverse
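
The "costs 1 inverse" above is Montgomery's batch-inversion trick, which the new doc comment refers to. A self-contained sketch of the idea over math/big follows; it is not the library's BatchInvertE2/BatchInvertE6, but it mirrors their inv(0) == 0 convention.

```go
package main

import (
	"fmt"
	"math/big"
)

// batchInvert returns the modular inverses of a[i] mod p using a single
// modular inversion (Montgomery's trick). Zero entries stay zero. Illustration only.
func batchInvert(a []*big.Int, p *big.Int) []*big.Int {
	res := make([]*big.Int, len(a))
	prefix := make([]*big.Int, len(a))
	acc := big.NewInt(1)
	for i, ai := range a {
		prefix[i] = new(big.Int).Set(acc) // product of the non-zero entries before i
		res[i] = new(big.Int)
		if ai.Sign() != 0 {
			acc.Mul(acc, ai).Mod(acc, p)
		}
	}
	accInv := new(big.Int).ModInverse(acc, p) // the only inversion
	for i := len(a) - 1; i >= 0; i-- {
		if a[i].Sign() == 0 {
			continue
		}
		res[i].Mul(accInv, prefix[i]).Mod(res[i], p)
		accInv.Mul(accInv, a[i]).Mod(accInv, p)
	}
	return res
}

func main() {
	p := big.NewInt(101)
	inv := batchInvert([]*big.Int{big.NewInt(2), big.NewInt(0), big.NewInt(7)}, p)
	fmt.Println(inv) // [51 0 29]: 2·51 ≡ 1 and 7·29 ≡ 1 (mod 101), inv(0) == 0
}
```
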
@@ -315,7 +351,7 @@ func BatchDecompressKarabina(x []E12) []E12 {
t2[i].Double(&t2[i])
t2[i].Sub(&t2[i], &t1[i])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3s can be 0s)
t1[i].Mul(&x[i].C1.B0, &x[i].C1.B2)
// z0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t2[i].Add(&t2[i], &t1[i])
@@ -366,6 +402,8 @@ func (z *E12) CyclotomicSquare(x *E12) *E12 {
}
// Inverse set z to the inverse of x in E12 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E12) Inverse(x *E12) *E12 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -383,6 +421,8 @@ func (z *E12) Inverse(x *E12) *E12 {
// BatchInvertE12 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE12(a []E12) []E12 {
res := make([]E12, len(a))
if len(a) == 0 {
@@ -425,7 +465,7 @@ func (z *E12) Exp(x E12, k *big.Int) *E12 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q¹²) == (x⁻¹)⁻ᵏ (mod q¹²)
x.Inverse(&x)
// we negate k in a temp big.Int since
@@ -749,13 +789,14 @@ func (z *E12) CompressTorus() (E6, error) {
return res, nil
}
-// BatchCompressTorus GT/E12 elements to half their size
-// using a batch inversion
+// BatchCompressTorus compresses GT/E12 elements to half their size using a batch inversion.
+//
+// if len(x) == 0 or if any of the x[i].C1 coordinates is 0, this function returns an error.
func BatchCompressTorus(x []E12) ([]E6, error) {
n := len(x)
if n == 0 {
- return []E6{}, errors.New("invalid input size")
+ return nil, errors.New("invalid input size")
}
var one E6
@@ -764,6 +805,10 @@ func BatchCompressTorus(x []E12) ([]E6, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].C1)
+ // return an error if any of the x[i].C1 is 0
+ if res[i].IsZero() {
+ return nil, errors.New("invalid input; C1 is 0")
+ }
}
t := BatchInvertE6(res) // costs 1 inverse
diff --git a/ecc/bn254/internal/fptower/e12_test.go b/ecc/bn254/internal/fptower/e12_test.go
index a503e238cd..db3e38e9b3 100644
--- a/ecc/bn254/internal/fptower/e12_test.go
+++ b/ecc/bn254/internal/fptower/e12_test.go
@@ -339,13 +339,29 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BN254] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E12) bool {
- var b, c, d E12
+ var _a, b, c, d, _c, _d E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
- return c.Equal(&d)
+ return c.Equal(&d) && _c.Equal(&_d)
},
genA,
@@ -353,18 +369,26 @@ func TestE12Ops(t *testing.T) {
properties.Property("[BN254] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
func(a *E12) bool {
- var b E12
- // put in the cyclotomic subgroup
+ var _a, b E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
var a2, a4, a17 E12
- a2.Set(a)
+ a2.Set(&_a)
a4.Set(a)
a17.Set(a)
- a2.nSquareCompressed(2)
+ a2.nSquareCompressed(2) // case g3 == 0
a4.nSquareCompressed(4)
a17.nSquareCompressed(17)
batch := BatchDecompressKarabina([]E12{a2, a4, a17})
diff --git a/ecc/bn254/internal/fptower/e2.go b/ecc/bn254/internal/fptower/e2.go
index 3d12b8b7e8..12be3b4009 100644
--- a/ecc/bn254/internal/fptower/e2.go
+++ b/ecc/bn254/internal/fptower/e2.go
@@ -247,6 +247,8 @@ func (z *E2) Sqrt(x *E2) *E2 {
// BatchInvertE2 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE2(a []E2) []E2 {
res := make([]E2, len(a))
if len(a) == 0 {
diff --git a/ecc/bn254/internal/fptower/e2_bn254.go b/ecc/bn254/internal/fptower/e2_bn254.go
index cd59a2efee..87ab9d1caf 100644
--- a/ecc/bn254/internal/fptower/e2_bn254.go
+++ b/ecc/bn254/internal/fptower/e2_bn254.go
@@ -50,7 +50,7 @@ func mulGenericE2(z, x, y *E2) {
// squareGenericE2 sets z to the E2-product of x,x returns z
// note: do not rename, this is referenced in the x86 assembly impl
func squareGenericE2(z, x *E2) {
- // algo 22 https://eprint.iacr.org/2010/354.pdf
+ // adapted from algo 22 https://eprint.iacr.org/2010/354.pdf
var a, b fp.Element
a.Add(&x.A0, &x.A1)
b.Sub(&x.A0, &x.A1)
@@ -67,6 +67,8 @@ func (z *E2) MulByNonResidueInv(x *E2) *E2 {
}
// Inverse sets z to the E2-inverse of x, returns z
+//
+// if x == 0, sets and returns z = x
func (z *E2) Inverse(x *E2) *E2 {
// Algorithm 8 from https://eprint.iacr.org/2010/354.pdf
var t0, t1 fp.Element
diff --git a/ecc/bn254/internal/fptower/e2_test.go b/ecc/bn254/internal/fptower/e2_test.go
index 788d14b59d..08670985ab 100644
--- a/ecc/bn254/internal/fptower/e2_test.go
+++ b/ecc/bn254/internal/fptower/e2_test.go
@@ -189,12 +189,6 @@ func TestE2ReceiverIsOperand(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
func TestE2MulMaxed(t *testing.T) {
@@ -413,12 +407,6 @@ func TestE2Ops(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
// ------------------------------------------------------------
diff --git a/ecc/bn254/internal/fptower/e6.go b/ecc/bn254/internal/fptower/e6.go
index 2ed48dc26e..4da093f5f0 100644
--- a/ecc/bn254/internal/fptower/e6.go
+++ b/ecc/bn254/internal/fptower/e6.go
@@ -242,6 +242,8 @@ func (z *E6) Square(x *E6) *E6 {
}
// Inverse an element in E6
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -270,6 +272,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
diff --git a/ecc/bn254/multiexp.go b/ecc/bn254/multiexp.go
index d3efdaa423..4c120b4be4 100644
--- a/ecc/bn254/multiexp.go
+++ b/ecc/bn254/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract
// 2^{c} to the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates whether the provided scalars are in Montgomery form
// returns smallValues, which represents the number of scalars that meet the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
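
To make the borrow-and-negate step concrete, here is a standalone sketch of the signed c-bit decomposition described above; it is an illustration only, not the library's partitionScalars (which works limb-wise on fr.Element scalars).

```go
package main

import "fmt"

// signedDigits writes k as Σ dᵢ·2^(c·i) with every digit dᵢ in (-2^(c-1), 2^(c-1)]:
// whenever a c-bit window exceeds 2^(c-1) we subtract 2^c from it and carry 1 into
// the next window, so the negative digit is later processed as adding -G instead of G.
func signedDigits(k uint64, c uint) []int64 {
	var digits []int64
	carry := int64(0)
	for k != 0 || carry != 0 {
		d := int64(k&((1<<c)-1)) + carry
		k >>= c
		carry = 0
		if d > 1<<(c-1) {
			d -= 1 << c
			carry = 1
		}
		digits = append(digits, d)
	}
	return digits
}

func main() {
	fmt.Println(signedDigits(200, 4)) // [8 -4 1]: 8 - 4·16 + 1·256 = 200
}
```
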
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
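
A minimal calling pattern for the public MSM API, with an illustrative NbTasks value (anything above 1024 is now rejected, per the hunk above). ScalarsMont is set because the scalars are ordinary fr.Elements, which are stored in Montgomery form; the helper below is a sketch, not library code.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/consensys/gnark-crypto/ecc"
	"github.com/consensys/gnark-crypto/ecc/bn254"
	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

func main() {
	// tiny illustrative instance: points = [G, G], scalars = [2, 3]
	_, _, g1Aff, _ := bn254.Generators()
	points := []bn254.G1Affine{g1Aff, g1Aff}

	var s0, s1 fr.Element
	s0.SetUint64(2)
	s1.SetUint64(3)
	scalars := []fr.Element{s0, s1}

	var res bn254.G1Affine
	// scalars are plain fr.Elements (internally in Montgomery form), hence ScalarsMont: true
	if _, err := res.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: 8, ScalarsMont: true}); err != nil {
		panic(err) // e.g. len(points) != len(scalars), or NbTasks > 1024
	}

	var expected bn254.G1Affine
	expected.ScalarMultiplication(&g1Aff, big.NewInt(5)) // 2·G + 3·G = 5·G
	fmt.Println(res.Equal(&expected))                    // true
}
```
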
@@ -333,9 +339,6 @@ func msmInnerG1Jac(p *G1Jac, c int, points []G1Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -1180,59 +1183,9 @@ func (p *G1Jac) msmC21(points []G1Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG1Affine(p, c, chChunks[:])
}
-func (p *G1Jac) msmC22(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g1JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g1JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G1Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g1JacExtended
- msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) {
- var buckets [1 << (c - 1)]g1JacExtended
- msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g1JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG1Affine(p, c, chChunks[:])
-}
-
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -1243,6 +1196,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -1279,13 +1234,15 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
// we split recursively until nbChunks(c) >= nbTasks,
bestC := func(nbPoints int) uint64 {
// implemented msmC methods (the c we use must be in this slice)
- implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22}
+ implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
var C uint64
// approximate cost (in group operations)
// cost = bits/c * (nbPoints + 2^{c})
@@ -1403,9 +1360,6 @@ func msmInnerG2Jac(p *G2Jac, c int, points []G2Affine, scalars []fr.Element, spl
case 21:
p.msmC21(points, scalars, splitFirstChunk)
- case 22:
- p.msmC22(points, scalars, splitFirstChunk)
-
default:
panic("not implemented")
}
@@ -2249,55 +2203,3 @@ func (p *G2Jac) msmC21(points []G2Affine, scalars []fr.Element, splitFirstChunk
return msmReduceChunkG2Affine(p, c, chChunks[:])
}
-
-func (p *G2Jac) msmC22(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac {
- const (
- c = 22 // scalars partitioned into c-bit radixes
- nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar
- )
-
- // for each chunk, spawn one go routine that'll loop through all the scalars in the
- // corresponding bit-window
- // note that buckets is an array allocated on the stack (for most sizes of c) and this is
- // critical for performance
-
- // each go routine sends its result in chChunks[i] channel
- var chChunks [nbChunks + 1]chan g2JacExtended
- for i := 0; i < len(chChunks); i++ {
- chChunks[i] = make(chan g2JacExtended, 1)
- }
-
- // c doesn't divide 256, last window is smaller we can allocate less buckets
- const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c))
- go func(j uint64, points []G2Affine, scalars []fr.Element) {
- var buckets [1 << (lastC - 1)]g2JacExtended
- msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars)
- }(uint64(nbChunks), points, scalars)
-
- processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) {
- var buckets [1 << (c - 1)]g2JacExtended
- msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars)
- }
-
- for j := int(nbChunks - 1); j > 0; j-- {
- go processChunk(j, points, scalars, chChunks[j])
- }
-
- if !splitFirstChunk {
- go processChunk(0, points, scalars, chChunks[0])
- } else {
- chSplit := make(chan g2JacExtended, 2)
- split := len(points) / 2
- go processChunk(0, points[:split], scalars[:split], chSplit)
- go processChunk(0, points[split:], scalars[split:], chSplit)
- go func() {
- s1 := <-chSplit
- s2 := <-chSplit
- close(chSplit)
- s1.add(&s2)
- chChunks[0] <- s1
- }()
- }
-
- return msmReduceChunkG2Affine(p, c, chChunks[:])
-}
diff --git a/ecc/bn254/multiexp_test.go b/ecc/bn254/multiexp_test.go
index e6d8d92f08..12055f3ebf 100644
--- a/ecc/bn254/multiexp_test.go
+++ b/ecc/bn254/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
+ if testing.Short() {
+ // test only "odd" and "even" (i.e. window sizes that divide the word size vs those that don't)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentiation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ // for g2, CI suffers with large c size since it needs to allocate a lot of memory for the buckets.
+ // test only "odd" and "even" (i.e. window sizes that divide the word size vs those that don't)
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentiation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bn254/pairing.go b/ecc/bn254/pairing.go
index f9444320f0..53160c9be6 100644
--- a/ecc/bn254/pairing.go
+++ b/ecc/bn254/pairing.go
@@ -30,7 +30,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -41,6 +43,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
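
Since Pair and PairingCheck now document that they skip subgroup checks, a caller that needs those checks can wrap the call; a sketch (the wrapper name is ours, not the library's):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254"
)

// checkedPairingCheck is an illustrative wrapper: it performs the subgroup
// membership checks that Pair/PairingCheck deliberately leave to the caller.
func checkedPairingCheck(P []bn254.G1Affine, Q []bn254.G2Affine) (bool, error) {
	for i := range P {
		if !P[i].IsInSubGroup() {
			return false, errors.New("G1 input is not in the r-order subgroup")
		}
	}
	for i := range Q {
		if !Q[i].IsInSubGroup() {
			return false, errors.New("G2 input is not in the r-order subgroup")
		}
	}
	return bn254.PairingCheck(P, Q)
}

func main() {
	_, _, g1, g2 := bn254.Generators()
	ok, err := checkedPairingCheck([]bn254.G1Affine{g1}, []bn254.G2Affine{g2})
	fmt.Println(ok, err) // false <nil>: e(G₁, G₂) ≠ 1
}
```
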
diff --git a/ecc/bn254/twistededwards/eddsa/eddsa.go b/ecc/bn254/twistededwards/eddsa/eddsa.go
index f95fa5f60b..a25f74e152 100644
--- a/ecc/bn254/twistededwards/eddsa/eddsa.go
+++ b/ecc/bn254/twistededwards/eddsa/eddsa.go
@@ -89,7 +89,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -137,7 +137,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -223,8 +223,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -232,9 +232,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bn254/twistededwards/point.go b/ecc/bn254/twistededwards/point.go
index 37a87925cc..427e5cb274 100644
--- a/ecc/bn254/twistededwards/point.go
+++ b/ecc/bn254/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
diff --git a/ecc/bn254/twistededwards/point_test.go b/ecc/bn254/twistededwards/point_test.go
index 7b59a701d7..c8434cff61 100644
--- a/ecc/bn254/twistededwards/point_test.go
+++ b/ecc/bn254/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bw6-633/bw6-633.go b/ecc/bw6-633/bw6-633.go
index 4e229b4c77..5390374c8c 100644
--- a/ecc/bw6-633/bw6-633.go
+++ b/ecc/bw6-633/bw6-633.go
@@ -1,17 +1,24 @@
-// Copyright 2020 ConsenSys AG
+// Package bw6633 efficient elliptic curve, pairing and hash to curve implementation for bw6-633.
//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
+// bw6-633: A Brezing--Weng curve (2-chain with bls24-315)
+// embedding degree k=6
+// seed x₀=-3218079743
+// 𝔽p: p=20494478644167774678813387386538961497669590920908778075528754551012016751717791778743535050360001387419576570244406805463255765034468441182772056330021723098661967429339971741066259394985997
+// 𝔽r: r=39705142709513438335025689890408969744933502416914749335064285505637884093126342347073617133569
+// (E/𝔽p): Y²=X³+4
+// (Eₜ/𝔽p): Y² = X³+8 (M-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p)
+// Extension fields tower:
+// 𝔽p³[u] = 𝔽p/u³-2
+// 𝔽p⁶[v] = 𝔽p³/v²-u
+// optimal Ate loops:
+// x₀+1, x₀^5-x₀^4-x₀
+// Security: estimated 124-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 315 bits and p⁶ is 3798 bits)
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// Warning
//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
+// This code has not been audited and is provided as-is. In particular, there are no security guarantees such as a constant-time implementation or side-channel attack resistance.
package bw6633
import (
@@ -22,17 +29,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bw6-633/fr"
)
-// BW6-633: A Brezing--Weng curve of embedding degree k=6 with seed x₀=-3218079743. It forms a 2-chain with BLS24-315.
-// 𝔽p: p=20494478644167774678813387386538961497669590920908778075528754551012016751717791778743535050360001387419576570244406805463255765034468441182772056330021723098661967429339971741066259394985997
-// 𝔽r: r=39705142709513438335025689890408969744933502416914749335064285505637884093126342347073617133569
-// (E/𝔽p): Y²=X³+4
-// (Eₜ/𝔽p): Y² = X³+8 (M-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p)
-// Extension fields tower:
-// 𝔽p³[u] = 𝔽p/u³-2
-// 𝔽p⁶[v] = 𝔽p²/v²-u
-// optimal Ate loops: x₀+1, x₀^5-x₀^4-x₀
-
// ID BW6_633 ID
const ID = ecc.BW6_633
@@ -81,12 +77,12 @@ func init() {
// E1(2,y)*cofactor
g1Gen.X.SetString("14087405796052437206213362229855313116771222912153372774869400386285407949123477431442535997951698710614498307938219633856996133201713506830167161540335446217605918678317160130862890417553415")
g1Gen.Y.SetString("5208886161111258314476333487866604447704068601830026647530443033297117148121067806438008469463787158470000157308702133756065259580313172904438248825389121766442385979570644351664733475122746")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
// E2(2,y))*cofactor
g2Gen.X.SetString("13658793733252505713431834233072715040674666715141692574468286839081203251180283741830175712695426047062165811313478642863696265647598838732554425602399576125615559121457137320131899043374497")
g2Gen.Y.SetString("599560264833409786573595720823495699033661029721475252751314180543773745554433461106678360045466656230822473390866244089461950086268801746497554519984580043036179195728559548424763890207250")
- g2Gen.Z.SetString("1")
+ g2Gen.Z.SetOne()
g1GenAff.FromJacobian(&g1Gen)
g2GenAff.FromJacobian(&g2Gen)
diff --git a/ecc/bw6-633/doc.go b/ecc/bw6-633/doc.go
deleted file mode 100644
index b1a5f64cea..0000000000
--- a/ecc/bw6-633/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bw6633 efficient elliptic curve and pairing implementation for bw6-633.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bw6633
diff --git a/ecc/bw6-633/fp/element.go b/ecc/bw6-633/fp/element.go
index d24e4b122e..1f571e1d95 100644
--- a/ecc/bw6-633/fp/element.go
+++ b/ecc/bw6-633/fp/element.go
@@ -193,7 +193,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1442,12 +1442,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1455,7 +1456,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
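
With SetString now reporting failure instead of panicking, call sites are expected to handle the error; a minimal sketch (inputs illustrative):

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bw6-633/fp"
)

func main() {
	var e fp.Element

	// valid input (base detected from the prefix, here hexadecimal 0x2a = 42)
	if _, err := e.SetString("0x2a"); err != nil {
		panic(err)
	}
	fmt.Println(e.String()) // 42

	// invalid input: no panic anymore, the caller decides what to do
	if _, err := e.SetString("not-a-number"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```
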
diff --git a/ecc/bw6-633/fr/element.go b/ecc/bw6-633/fr/element.go
index 700e7c42a9..d8661fd433 100644
--- a/ecc/bw6-633/fr/element.go
+++ b/ecc/bw6-633/fr/element.go
@@ -178,7 +178,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -1012,12 +1012,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid this method leaves z unchanged and returns nil, error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1025,7 +1026,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bw6-633/fr/kzg/kzg.go b/ecc/bw6-633/fr/kzg/kzg.go
index 7bd1bbdd8b..824365a3f9 100644
--- a/ecc/bw6-633/fr/kzg/kzg.go
+++ b/ecc/bw6-633/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bw6633.G1Affine
+ var claimedValueG1Aff bw6633.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bw6633.G1Jac
+ var fminusfaG1Jac bw6633.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bw6633.G1Affine
diff --git a/ecc/bw6-633/fr/polynomial/multilin.go b/ecc/bw6-633/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..8d0a683fec
--- /dev/null
+++ b/ecc/bw6-633/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bw6-633/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// on the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + 2xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words, the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to 1 exactly when q' == h', for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // res <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
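
As a usage sketch of the MultiLin type added above (values illustrative): build the multilinear extension of a 2-variable function from its hypercube evaluations and extrapolate it off the cube, following the indexing convention in the MultiLin godoc.

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bw6-633/fr"
	"github.com/consensys/gnark-crypto/ecc/bw6-633/fr/polynomial"
)

func main() {
	// evaluations of f on {0,1}²: m[0]=f(0,0), m[1]=f(0,1), m[2]=f(1,0), m[3]=f(1,1)
	m := make(polynomial.MultiLin, 4)
	m[0].SetUint64(1)
	m[1].SetUint64(2)
	m[2].SetUint64(3)
	m[3].SetUint64(4)

	var r1, r2 fr.Element
	r1.SetUint64(5)
	r2.SetUint64(7)

	// multilinear extrapolation at (5, 7); on the cube f(x,y) = 1 + 2x + y, so f(5,7) = 18
	v := m.Evaluate([]fr.Element{r1, r2})
	fmt.Println(v.String()) // 18
}
```
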
diff --git a/ecc/bw6-633/fr/polynomial/pool.go b/ecc/bw6-633/fr/polynomial/pool.go
new file mode 100644
index 0000000000..717315fa75
--- /dev/null
+++ b/ecc/bw6-633/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bw6-633/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool clears the pool completely, shielding against memory leaks
+// E.g. if we forgot to dump a polynomial at some point, this will ensure the value gets dumped eventually
+// Returns how many polynomials were cleared that way
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool Returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+ // If rC did not register the pointer, then either
+ // the array was allocated somewhere else and can be ignored,
+ // or this is a double put, which MUST be ignored
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+ panic(fmt.Sprintf("can't cast to large or small array, the put array's is %v it should have capacity %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
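
A usage sketch of the pool above: Make hands out a slice backed by a pooled array and registers it, Dump returns it to the pool once the caller is done; sizes and values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bw6-633/fr/polynomial"
)

func main() {
	v := polynomial.Make(100) // ≤ 256 elements, so it is served from the small pool
	for i := range v {
		v[i].SetUint64(uint64(i))
	}

	// ... use v as a scratch polynomial ...

	fmt.Println(polynomial.Dump(v))     // 1: the backing array went back to the pool
	fmt.Println(polynomial.CountPool()) // 0: nothing left registered
}
```
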
diff --git a/ecc/bw6-633/g1.go b/ecc/bw6-633/g1.go
index 49115911f4..0228f01bbd 100644
--- a/ecc/bw6-633/g1.go
+++ b/ecc/bw6-633/g1.go
@@ -64,6 +64,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -341,7 +349,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -873,9 +881,9 @@ func (p *g1Proj) FromAffine(Q *G1Affine) *g1Proj {
}
// BatchProjectiveToAffineG1 converts points in Projective coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchProjectiveToAffineG1(points []g1Proj) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -895,7 +903,7 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -906,7 +914,7 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
a := result[i].X
@@ -914,12 +922,13 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
result[i].Y.Mul(&points[i].y, &a)
}
})
+ return result
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -939,7 +948,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -950,7 +959,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the infinity point in affine
continue
}
var a, b fp.Element
@@ -962,6 +971,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -1025,8 +1035,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take the value in the base table, double it c times, voilà.
@@ -1068,7 +1077,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
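
Both converters above lean on the Montgomery batch inversion trick: one field inversion plus a few multiplications per element instead of an inversion per point. A self-contained sketch of the trick over a toy prime with math/big (illustrative only; the library does this on fp.Element and tracks zero denominators separately, as the zeroes slice shows):

```go
package main

import (
	"fmt"
	"math/big"
)

// batchInvert inverts every x in xs modulo p with a single modular inversion.
// It assumes all inputs are non-zero.
func batchInvert(p *big.Int, xs []*big.Int) []*big.Int {
	n := len(xs)
	res := make([]*big.Int, n)

	// forward pass: res[i] holds the prefix product x0·…·x_{i-1}
	acc := big.NewInt(1)
	for i := 0; i < n; i++ {
		res[i] = new(big.Int).Set(acc)
		acc.Mul(acc, xs[i]).Mod(acc, p)
	}

	// single inversion of the full product
	accInv := new(big.Int).ModInverse(acc, p)

	// backward pass: res[i] = prefix_i · (x0·…·x_i)^(-1) = x_i^(-1)
	for i := n - 1; i >= 0; i-- {
		res[i].Mul(res[i], accInv).Mod(res[i], p)
		accInv.Mul(accInv, xs[i]).Mod(accInv, p)
	}
	return res
}

func main() {
	p := big.NewInt(1009) // toy prime
	xs := []*big.Int{big.NewInt(3), big.NewInt(7), big.NewInt(11)}
	for i, inv := range batchInvert(p, xs) {
		check := new(big.Int).Mul(xs[i], inv)
		fmt.Println(xs[i], inv, check.Mod(check, p)) // last column is always 1
	}
}
```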
diff --git a/ecc/bw6-633/g1_test.go b/ecc/bw6-633/g1_test.go
index ba715fa7d1..6caf91227c 100644
--- a/ecc/bw6-633/g1_test.go
+++ b/ecc/bw6-633/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BW6-633] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every word of the scalars is filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bw6-633/g2.go b/ecc/bw6-633/g2.go
index 20d3c4b377..33adec5f32 100644
--- a/ecc/bw6-633/g2.go
+++ b/ecc/bw6-633/g2.go
@@ -336,7 +336,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bw6-633/g2_test.go b/ecc/bw6-633/g2_test.go
index dfc232a308..32773e9718 100644
--- a/ecc/bw6-633/g2_test.go
+++ b/ecc/bw6-633/g2_test.go
@@ -85,7 +85,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -486,7 +486,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every word of the scalars is filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +514,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bw6-633/hash_to_g1.go b/ecc/bw6-633/hash_to_g1.go
index a6346311b7..7e6d7f1a5d 100644
--- a/ecc/bw6-633/hash_to_g1.go
+++ b/ecc/bw6-633/hash_to_g1.go
@@ -111,61 +111,51 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless; the output is unspecified and no error is raised.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.3. q = 5 mod 8
- // TODO: Test correct use of Element.Select
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q-5 (mod 8)
var tv1, tv2 fp.Element
- tv1.Square(v)
- tv2.Mul(&tv1, v)
- tv1.Square(&tv1)
- tv2.Mul(&tv2, u)
- tv1.Mul(&tv1, &tv2)
+ tv1.Square(v) // 1. tv1 = v²
+ tv2.Mul(&tv1, v) // 2. tv2 = tv1 * v
+ tv1.Square(&tv1) // 3. tv1 = tv1²
+ tv2.Mul(&tv2, u) // 4. tv2 = tv2 * u
+ tv1.Mul(&tv1, &tv2) // 5. tv1 = tv1 * tv2
var c1 big.Int
- // c1 = 2561809830520971834851673423317370187208698865113597259441094318876502093964723972342941881295000173427447071280550850682906970629308555147846507041252715387332745928667496467633282424373249
+ // c1 = (q - 5) / 8 = 2561809830520971834851673423317370187208698865113597259441094318876502093964723972342941881295000173427447071280550850682906970629308555147846507041252715387332745928667496467633282424373249
c1.SetBytes([]byte{36, 204, 103, 152, 30, 107, 236, 127, 131, 66, 233, 224, 58, 229, 86, 181, 31, 155, 24, 235, 175, 58, 88, 233, 203, 46, 211, 91, 55, 123, 69, 240, 42, 84, 216, 31, 91, 212, 146, 23, 27, 83, 235, 208, 126, 175, 137, 47, 193, 209, 10, 29, 183, 180, 128, 250, 246, 185, 207, 87, 7, 56, 68, 167, 166, 211, 122, 98, 40, 254, 231, 154, 233, 34, 221, 72, 174, 0, 1})
var y1 fp.Element
- y1.Exp(tv1, &c1)
- y1.Mul(&y1, &tv2)
- tv1.Mul(&y1, &fp.Element{7899625277197386435, 5217716493391639390, 7472932469883704682, 7632350077606897049, 9296070723299766388, 14353472371414671016, 14644604696869838127, 11421353192299464576, 237964513547175570, 46667570639865841})
- tv2.Square(&tv1)
-
- //Line 10 in std doc
- tv2.Mul(&tv2, v)
-
- y1.Select(int(tv2.NotEqual(u)), &tv1, &y1)
-
- tv2.Square(&y1)
- tv2.Mul(&tv2, v)
-
- //Line 15
- isQNr := tv2.NotEqual(u)
+ y1.Exp(tv1, &c1) // 6. y1 = tv1ᶜ¹
+ y1.Mul(&y1, &tv2) // 7. y1 = y1 * tv2
+ // c2 = sqrt(-1)
+ c2 := fp.Element{7899625277197386435, 5217716493391639390, 7472932469883704682, 7632350077606897049, 9296070723299766388, 14353472371414671016, 14644604696869838127, 11421353192299464576, 237964513547175570, 46667570639865841}
+ tv1.Mul(&y1, &c2) // 8. tv1 = y1 * c2
+ tv2.Square(&tv1) // 9. tv2 = tv1²
+ tv2.Mul(&tv2, v) // 10. tv2 = tv2 * v
+ // 11. e1 = tv2 == u
+ y1.Select(int(tv2.NotEqual(u)), &tv1, &y1) // 12. y1 = CMOV(y1, tv1, e1)
+ tv2.Square(&y1) // 13. tv2 = y1²
+ tv2.Mul(&tv2, v) // 14. tv2 = tv2 * v
+ isQNr := tv2.NotEqual(u) // 15. isQR = tv2 == u
var y2 fp.Element
- y2.Mul(&y1, &fp.Element{17779787117422825675, 4939796438941267298, 17917495165866445585, 10745325761140663972, 1801762923695319826, 15143865745144854318, 15159144602769454149, 1697730798702809869, 18119907780441139825, 48351383131100009})
- tv1.Mul(&y2, &fp.Element{7899625277197386435, 5217716493391639390, 7472932469883704682, 7632350077606897049, 9296070723299766388, 14353472371414671016, 14644604696869838127, 11421353192299464576, 237964513547175570, 46667570639865841})
- tv2.Square(&tv1)
- tv2.Mul(&tv2, v)
-
+ // c3 = sqrt(Z / c2)
+ y2 = fp.Element{17779787117422825675, 4939796438941267298, 17917495165866445585, 10745325761140663972, 1801762923695319826, 15143865745144854318, 15159144602769454149, 1697730798702809869, 18119907780441139825, 48351383131100009}
+ y2.Mul(&y1, &y2) // 16. y2 = y1 * c3
+ tv1.Mul(&y2, &c2) // 17. tv1 = y2 * c2
+ tv2.Square(&tv1) // 18. tv2 = tv1²
+ tv2.Mul(&tv2, v) // 19. tv2 = tv2 * v
var tv3 fp.Element
- //Line 20
// Z = [11]
- g1MulByZ(&tv3, u)
-
- y2.Select(int(tv2.NotEqual(&tv3)), &tv1, &y2)
-
- z.Select(int(isQNr), &y1, &y2)
+ g1MulByZ(&tv3, u) // 20. tv3 = Z * u
+ // 21. e2 = tv2 == tv3
+ y2.Select(int(tv2.NotEqual(&tv3)), &tv1, &y2) // 22. y2 = CMOV(y2, tv1, e2)
+ z.Select(int(isQNr), &y1, &y2) // 23. y = CMOV(y2, y1, isQR)
return isQNr
}
-/*
-// g1SetZ sets z to [11].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {6130771042861286320, 11947466704102345269, 5006184736040647654, 10738967583325648129, 6155303802163134778, 6459686480506411032, 14448065740527999419, 1019798761927372322, 5080373183861200608, 66158761009468389} )
-}*/
-
// g1MulByZ multiplies x by [11] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -180,30 +170,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{12925890271846221020, 6355149021182850637, 12305199997029221454, 3176370205483940054, 1111744716227392272, 1674946515969267914, 9082444721826297409, 17859522351279563418, 11442187008395780520, 4206825732020662}
+ var sswuIsoCurveCoeffB = fp.Element{1447342806075484185, 5642327672839545870, 16783436050687675045, 2630023864181351186, 5909133526915342434, 1057352115267779153, 1923190814798170064, 13280701548970829092, 3305076617946573429, 29606717104036842}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{1447342806075484185, 5642327672839545870, 16783436050687675045, 2630023864181351186, 5909133526915342434, 1057352115267779153, 1923190814798170064, 13280701548970829092, 3305076617946573429, 29606717104036842})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -211,48 +200,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{6130771042861286320, 11947466704102345269, 5006184736040647654, 10738967583325648129, 6155303802163134778, 6459686480506411032, 14448065740527999419, 1019798761927372322, 5080373183861200608, 66158761009468389}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{12925890271846221020, 6355149021182850637, 12305199997029221454, 3176370205483940054, 1111744716227392272, 1674946515969267914, 9082444721826297409, 17859522351279563418, 11442187008395780520, 4206825732020662}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{12925890271846221020, 6355149021182850637, 12305199997029221454, 3176370205483940054, 1111744716227392272, 1674946515969267914, 9082444721826297409, 17859522351279563418, 11442187008395780520, 4206825732020662})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{1447342806075484185, 5642327672839545870, 16783436050687675045, 2630023864181351186, 5909133526915342434, 1057352115267779153, 1923190814798170064, 13280701548970829092, 3305076617946573429, 29606717104036842})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -295,13 +281,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1: sgn0 is simply the parity of the element in regular (non-Montgomery) form
return nonMont[0] % 2
}
@@ -318,7 +304,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -338,7 +324,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -348,7 +334,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
diff --git a/ecc/bw6-633/hash_to_g2.go b/ecc/bw6-633/hash_to_g2.go
index b838e94a92..9b78724681 100644
--- a/ecc/bw6-633/hash_to_g2.go
+++ b/ecc/bw6-633/hash_to_g2.go
@@ -88,61 +88,51 @@ func g2Isogeny(p *G2Affine) {
// g2SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless; the output is unspecified and no error is raised.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g2SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.3. q = 5 mod 8
- // TODO: Test correct use of Element.Select
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q-5 (mod 8)
var tv1, tv2 fp.Element
- tv1.Square(v)
- tv2.Mul(&tv1, v)
- tv1.Square(&tv1)
- tv2.Mul(&tv2, u)
- tv1.Mul(&tv1, &tv2)
+ tv1.Square(v) // 1. tv1 = v²
+ tv2.Mul(&tv1, v) // 2. tv2 = tv1 * v
+ tv1.Square(&tv1) // 3. tv1 = tv1²
+ tv2.Mul(&tv2, u) // 4. tv2 = tv2 * u
+ tv1.Mul(&tv1, &tv2) // 5. tv1 = tv1 * tv2
var c1 big.Int
- // c1 = 2561809830520971834851673423317370187208698865113597259441094318876502093964723972342941881295000173427447071280550850682906970629308555147846507041252715387332745928667496467633282424373249
+ // c1 = (q - 5) / 8 = 2561809830520971834851673423317370187208698865113597259441094318876502093964723972342941881295000173427447071280550850682906970629308555147846507041252715387332745928667496467633282424373249
c1.SetBytes([]byte{36, 204, 103, 152, 30, 107, 236, 127, 131, 66, 233, 224, 58, 229, 86, 181, 31, 155, 24, 235, 175, 58, 88, 233, 203, 46, 211, 91, 55, 123, 69, 240, 42, 84, 216, 31, 91, 212, 146, 23, 27, 83, 235, 208, 126, 175, 137, 47, 193, 209, 10, 29, 183, 180, 128, 250, 246, 185, 207, 87, 7, 56, 68, 167, 166, 211, 122, 98, 40, 254, 231, 154, 233, 34, 221, 72, 174, 0, 1})
var y1 fp.Element
- y1.Exp(tv1, &c1)
- y1.Mul(&y1, &tv2)
- tv1.Mul(&y1, &fp.Element{7899625277197386435, 5217716493391639390, 7472932469883704682, 7632350077606897049, 9296070723299766388, 14353472371414671016, 14644604696869838127, 11421353192299464576, 237964513547175570, 46667570639865841})
- tv2.Square(&tv1)
-
- //Line 10 in std doc
- tv2.Mul(&tv2, v)
-
- y1.Select(int(tv2.NotEqual(u)), &tv1, &y1)
-
- tv2.Square(&y1)
- tv2.Mul(&tv2, v)
-
- //Line 15
- isQNr := tv2.NotEqual(u)
+ y1.Exp(tv1, &c1) // 6. y1 = tv1ᶜ¹
+ y1.Mul(&y1, &tv2) // 7. y1 = y1 * tv2
+ // c2 = sqrt(-1)
+ c2 := fp.Element{7899625277197386435, 5217716493391639390, 7472932469883704682, 7632350077606897049, 9296070723299766388, 14353472371414671016, 14644604696869838127, 11421353192299464576, 237964513547175570, 46667570639865841}
+ tv1.Mul(&y1, &c2) // 8. tv1 = y1 * c2
+ tv2.Square(&tv1) // 9. tv2 = tv1²
+ tv2.Mul(&tv2, v) // 10. tv2 = tv2 * v
+ // 11. e1 = tv2 == u
+ y1.Select(int(tv2.NotEqual(u)), &tv1, &y1) // 12. y1 = CMOV(y1, tv1, e1)
+ tv2.Square(&y1) // 13. tv2 = y1²
+ tv2.Mul(&tv2, v) // 14. tv2 = tv2 * v
+ isQNr := tv2.NotEqual(u) // 15. isQR = tv2 == u
var y2 fp.Element
- y2.Mul(&y1, &fp.Element{16212120288951005687, 11690167560162600414, 9845362566212292170, 5006379754746321817, 3559960229467473872, 1378556217976105943, 4841104984578141598, 15436992508257808297, 6778583767067406308, 4544728946065242})
- tv1.Mul(&y2, &fp.Element{7899625277197386435, 5217716493391639390, 7472932469883704682, 7632350077606897049, 9296070723299766388, 14353472371414671016, 14644604696869838127, 11421353192299464576, 237964513547175570, 46667570639865841})
- tv2.Square(&tv1)
- tv2.Mul(&tv2, v)
-
+ // c3 = sqrt(Z / c2)
+ y2 = fp.Element{16212120288951005687, 11690167560162600414, 9845362566212292170, 5006379754746321817, 3559960229467473872, 1378556217976105943, 4841104984578141598, 15436992508257808297, 6778583767067406308, 4544728946065242}
+ y2.Mul(&y1, &y2) // 16. y2 = y1 * c3
+ tv1.Mul(&y2, &c2) // 17. tv1 = y2 * c2
+ tv2.Square(&tv1) // 18. tv2 = tv1²
+ tv2.Mul(&tv2, v) // 19. tv2 = tv2 * v
var tv3 fp.Element
- //Line 20
// Z = [2]
- g2MulByZ(&tv3, u)
-
- y2.Select(int(tv2.NotEqual(&tv3)), &tv1, &y2)
-
- z.Select(int(isQNr), &y1, &y2)
+ g2MulByZ(&tv3, u) // 20. tv3 = Z * u
+ // 21. e2 = tv2 == tv3
+ y2.Select(int(tv2.NotEqual(&tv3)), &tv1, &y2) // 22. y2 = CMOV(y2, tv1, e2)
+ z.Select(int(isQNr), &y1, &y2) // 23. y = CMOV(y2, y1, isQR)
return isQNr
}
-/*
-// g2SetZ sets z to [2].
-func g2SetZ(z *fp.Element) {
- z.Set( &fp.Element {14263791471689722215, 10958139817512614717, 646289283071182148, 16194112285086178910, 12391927829343171647, 3698619178316197998, 14879001273850772332, 4646357410414107532, 14313982959885664825, 19561843432566578} )
-}*/
-
// g2MulByZ multiplies x by [2] and stores the result in z
func g2MulByZ(z *fp.Element, x *fp.Element) {
@@ -153,30 +143,29 @@ func g2MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve2 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve2(u *fp.Element) G2Affine {
+ var sswuIsoCurveCoeffA = fp.Element{13503940466125084703, 3000707982748310797, 1529397070312683242, 9240962296298654443, 4577258595340312235, 16046828875439788343, 7236093083337192433, 2860564553402019540, 5160479239841632821, 65394042426465165}
+ var sswuIsoCurveCoeffB = fp.Element{4170590011558214244, 9101648159034903675, 4256739633972552875, 7483080556638609334, 12430228215152656439, 9977400640742476476, 15847011074743951739, 17768582661138350292, 10869631430819016060, 64187107279947172}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g2MulByZ(&tv1, &tv1)
+ g2MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{4170590011558214244, 9101648159034903675, 4256739633972552875, 7483080556638609334, 12430228215152656439, 9977400640742476476, 15847011074743951739, 17768582661138350292, 10869631430819016060, 64187107279947172})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g2NotZero(&tv2)
@@ -184,48 +173,45 @@ func mapToCurve2(u *fp.Element) G2Affine {
tv4 = fp.Element{14263791471689722215, 10958139817512614717, 646289283071182148, 16194112285086178910, 12391927829343171647, 3698619178316197998, 14879001273850772332, 4646357410414107532, 14313982959885664825, 19561843432566578}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{13503940466125084703, 3000707982748310797, 1529397070312683242, 9240962296298654443, 4577258595340312235, 16046828875439788343, 7236093083337192433, 2860564553402019540, 5160479239841632821, 65394042426465165}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{13503940466125084703, 3000707982748310797, 1529397070312683242, 9240962296298654443, 4577258595340312235, 16046828875439788343, 7236093083337192433, 2860564553402019540, 5160479239841632821, 65394042426465165})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{4170590011558214244, 9101648159034903675, 4256739633972552875, 7483080556638609334, 12430228215152656439, 9977400640742476476, 15847011074743951739, 17768582661138350292, 10869631430819016060, 64187107279947172})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g2Sgn0(u)^g2Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G2Affine{x, y}
}
@@ -247,13 +233,13 @@ func g2EvalPolynomial(z *fp.Element, monic bool, coefficients []fp.Element, x *f
// g2Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g2Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1: sgn0 is simply the parity of the element in regular (non-Montgomery) form
return nonMont[0] % 2
}
@@ -270,7 +256,7 @@ func MapToG2(u fp.Element) G2Affine {
// EncodeToG2 hashes a message to a point on the G2 curve using the SSWU map.
// It is faster than HashToG2, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG2(msg, dst []byte) (G2Affine, error) {
var res G2Affine
@@ -290,7 +276,7 @@ func EncodeToG2(msg, dst []byte) (G2Affine, error) {
// HashToG2 hashes a message to a point on the G2 curve using the SSWU map.
// Slower than EncodeToG2, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG2(msg, dst []byte) (G2Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -300,7 +286,7 @@ func HashToG2(msg, dst []byte) (G2Affine, error) {
Q0 := mapToCurve2(&u[0])
Q1 := mapToCurve2(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g2Isogeny(&Q0)
g2Isogeny(&Q1)
diff --git a/ecc/bw6-633/internal/fptower/e3.go b/ecc/bw6-633/internal/fptower/e3.go
index a0a48a5855..8e309811ca 100644
--- a/ecc/bw6-633/internal/fptower/e3.go
+++ b/ecc/bw6-633/internal/fptower/e3.go
@@ -275,6 +275,8 @@ func (z *E3) MulByNonResidue(x *E3) *E3 {
}
// Inverse an element in E3
+//
+// if x == 0, sets and returns z = x
func (z *E3) Inverse(x *E3) *E3 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -303,6 +305,8 @@ func (z *E3) Inverse(x *E3) *E3 {
// BatchInvertE3 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE3(a []E3) []E3 {
res := make([]E3, len(a))
if len(a) == 0 {
diff --git a/ecc/bw6-633/internal/fptower/e6.go b/ecc/bw6-633/internal/fptower/e6.go
index 92f54a1dd3..e6c171b934 100644
--- a/ecc/bw6-633/internal/fptower/e6.go
+++ b/ecc/bw6-633/internal/fptower/e6.go
@@ -224,29 +224,46 @@ func (z *E6) CyclotomicSquareCompressed(x *E6) *E6 {
return z
}
-// Decompress Karabina's cyclotomic square result
-func (z *E6) Decompress(x *E6) *E6 {
+// DecompressKarabina decompresses Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+func (z *E6) DecompressKarabina(x *E6) *E6 {
var t [3]fp.Element
var one fp.Element
one.SetOne()
- // t0 = g1²
- t[0].Square(&x.B0.A1)
- // t1 = 3 * g1² - 2 * g2
- t[1].Sub(&t[0], &x.B0.A2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5² + t1
- t[2].Square(&x.B1.A2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.B1.A0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.B1.A0.IsZero() {
+ t[0].Mul(&x.B0.A1, &x.B1.A2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.B0.A2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.B0.A1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.B0.A2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.B1.A2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 1/(4 * g3)
+ t[1].Double(&x.B1.A0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.B1.A1.Mul(&t[0], &t[1])
+ z.B1.A1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.B0.A2, &x.B0.A1)
@@ -255,7 +272,7 @@ func (z *E6) Decompress(x *E6) *E6 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.B1.A0, &x.B1.A2)
// c₀ = E * (2 * g4² + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -270,6 +287,81 @@ func (z *E6) Decompress(x *E6) *E6 {
return z
}
+// BatchDecompressKarabina decompresses multiple Karabina cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using the Montgomery batch inversion trick
+func BatchDecompressKarabina(x []E6) []E6 {
+
+ n := len(x)
+ if n == 0 {
+ return x
+ }
+
+ t0 := make([]fp.Element, n)
+ t1 := make([]fp.Element, n)
+ t2 := make([]fp.Element, n)
+
+ var one fp.Element
+ one.SetOne()
+
+ for i := 0; i < n; i++ {
+ // g3 == 0
+ if x[i].B1.A0.IsZero() {
+ t0[i].Mul(&x[i].B0.A1, &x[i].B1.A2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].B0.A2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1²
+ t0[i].Square(&x[i].B0.A1)
+ // t1 = 3 * g1² - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].B0.A2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5² + t1
+ t2[i].Square(&x[i].B1.A2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 1/(4 * g3)
+ t1[i].Double(&x[i].B1.A0).
+ Double(&t1[i])
+ }
+ }
+
+ t1 = fp.BatchInvert(t1) // costs 1 inverse
+
+ for i := 0; i < n; i++ {
+ // z4 = g4
+ x[i].B1.A1.Mul(&t0[i], &t1[i])
+
+ // t1 = g2 * g1
+ t1[i].Mul(&x[i].B0.A2, &x[i].B0.A1)
+ // t2 = 2 * g4^2 - 3 * g2 * g1
+ t2[i].Square(&x[i].B1.A1)
+ t2[i].Sub(&t2[i], &t1[i])
+ t2[i].Double(&t2[i])
+ t2[i].Sub(&t2[i], &t1[i])
+
+ // t1 = g3 * g5 (g3s can be 0s)
+ t1[i].Mul(&x[i].B1.A0, &x[i].B1.A2)
+ // z0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
+ t2[i].Add(&t2[i], &t1[i])
+ x[i].B0.A0.MulByNonResidue(&t2[i]).
+ Add(&x[i].B0.A0, &one)
+ }
+
+ return x
+}
+
// Granger-Scott's cyclotomic square
// https://eprint.iacr.org/2009/565.pdf, 3.2
func (z *E6) CyclotomicSquare(x *E6) *E6 {
@@ -309,6 +401,8 @@ func (z *E6) CyclotomicSquare(x *E6) *E6 {
}
// Inverse set z to the inverse of x in E6 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -326,6 +420,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
@@ -727,6 +823,10 @@ func BatchCompressTorus(x []E6) ([]E3, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].B1)
+ // return an error if any of the x[i].B1 is 0
+ if res[i].IsZero() {
+ return []E3{}, errors.New("invalid input")
+ }
}
t := BatchInvertE3(res) // costs 1 inverse
diff --git a/ecc/bw6-633/internal/fptower/e6_pairing.go b/ecc/bw6-633/internal/fptower/e6_pairing.go
index e305f7adbc..691e862bc1 100644
--- a/ecc/bw6-633/internal/fptower/e6_pairing.go
+++ b/ecc/bw6-633/internal/fptower/e6_pairing.go
@@ -2,29 +2,36 @@ package fptower
import "github.com/consensys/gnark-crypto/ecc/bw6-633/fp"
-func (z *E6) nSquare(n int) {
+func (z *E6) nSquareCompressed(n int) {
for i := 0; i < n; i++ {
- z.CyclotomicSquare(z)
+ z.CyclotomicSquareCompressed(z)
}
}
// Expt set z to x^t in E6 and return z (t is the seed of the curve)
+// t = -2**32+2**30+2**22-2**20+1
func (z *E6) Expt(x *E6) *E6 {
- var result, xInv E6
+ var result, x20, x22, x30, x32 E6
result.Set(x)
- xInv.Conjugate(x)
-
- result.nSquare(2)
- result.Mul(&result, &xInv)
- result.nSquare(8)
- result.Mul(&result, &xInv)
- result.nSquare(2)
- result.Mul(&result, x)
- result.nSquare(20)
- result.Mul(&result, &xInv)
-
- z.Conjugate(&result)
+
+ result.nSquareCompressed(20)
+ x20.Conjugate(&result)
+ result.nSquareCompressed(2)
+ x22.Set(&result)
+ result.nSquareCompressed(8)
+ x30.Set(&result)
+
+ batch := BatchDecompressKarabina([]E6{x20, x22, x30})
+
+ x32.CyclotomicSquare(&batch[2]).
+ CyclotomicSquare(&x32).
+ Conjugate(&x32)
+
+ z.Mul(x, &batch[0]).
+ Mul(z, &batch[1]).
+ Mul(z, &batch[2]).
+ Mul(z, &x32)
return z
}
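
The rewritten Expt accumulates x^(2²⁰), x^(2²²), x^(2³⁰) with compressed squarings, decompresses them in one batch, and recombines them (with conjugations) into x^t. A quick sanity sketch, with plain modular exponentiation standing in for the cyclotomic/Karabina arithmetic and a modular inverse standing in for Conjugate, showing that the chain indeed matches the exponent t = -2³²+2³⁰+2²²-2²⁰+1 (toy prime, purely illustrative):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(1_000_000_007) // toy prime
	x := big.NewInt(5)

	sq := func(v *big.Int, n uint) *big.Int { // n repeated squarings
		r := new(big.Int).Set(v)
		for i := uint(0); i < n; i++ {
			r.Mul(r, r).Mod(r, p)
		}
		return r
	}
	inv := func(v *big.Int) *big.Int { return new(big.Int).ModInverse(v, p) }

	x20p := sq(x, 20)      // x^(2^20)
	x20 := inv(x20p)       // x^(-2^20), Conjugate in the cyclotomic subgroup
	x22 := sq(x20p, 2)     // x^(2^22)
	x30 := sq(x22, 8)      // x^(2^30)
	x32 := inv(sq(x30, 2)) // x^(-2^32)

	chain := new(big.Int).Set(x) // x · x^(-2^20) · x^(2^22) · x^(2^30) · x^(-2^32)
	for _, f := range []*big.Int{x20, x22, x30, x32} {
		chain.Mul(chain, f).Mod(chain, p)
	}

	// direct computation of x^t, t = -2^32 + 2^30 + 2^22 - 2^20 + 1
	t := big.NewInt(1)
	t.Sub(t, new(big.Int).Lsh(big.NewInt(1), 20))
	t.Add(t, new(big.Int).Lsh(big.NewInt(1), 22))
	t.Add(t, new(big.Int).Lsh(big.NewInt(1), 30))
	t.Sub(t, new(big.Int).Lsh(big.NewInt(1), 32))
	t.Mod(t, new(big.Int).Sub(p, big.NewInt(1))) // reduce mod the group order
	direct := new(big.Int).Exp(x, t, p)

	fmt.Println(chain.Cmp(direct) == 0) // true
}
```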
diff --git a/ecc/bw6-633/internal/fptower/e6_test.go b/ecc/bw6-633/internal/fptower/e6_test.go
index 8bda2d0922..c87b33ac0d 100644
--- a/ecc/bw6-633/internal/fptower/e6_test.go
+++ b/ecc/bw6-633/internal/fptower/e6_test.go
@@ -308,12 +308,46 @@ func TestE6Ops(t *testing.T) {
b.Mul(&b, a)
a.Frobenius(&b).Mul(a, &b)
c.Square(a)
- d.CyclotomicSquareCompressed(a).Decompress(&d)
+ d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
return c.Equal(&d)
},
genA,
))
+ properties.Property("[BW6-633] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
+ func(a *E6) bool {
+ var _a, b E6
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
+ b.Conjugate(a)
+ a.Inverse(a)
+ b.Mul(&b, a)
+ a.Frobenius(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.Frobenius(&b).Mul(&_a, &b)
+
+ var a2, a4, a17 E6
+ a2.Set(&_a)
+ a4.Set(a)
+ a17.Set(a)
+ a2.nSquareCompressed(2) // case g3 == 0
+ a4.nSquareCompressed(4)
+ a17.nSquareCompressed(17)
+ batch := BatchDecompressKarabina([]E6{a2, a4, a17})
+ a2.DecompressKarabina(&a2)
+ a4.DecompressKarabina(&a4)
+ a17.DecompressKarabina(&a17)
+
+ return a2.Equal(&batch[0]) && a4.Equal(&batch[1]) && a17.Equal(&batch[2])
+ },
+ genA,
+ ))
+
properties.Property("[BW6-633] Exp and CyclotomicExp results must be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E6, e fp.Element) bool {
var b, c, d E6
diff --git a/ecc/bw6-633/multiexp.go b/ecc/bw6-633/multiexp.go
index ce461a3a4e..2006674935 100644
--- a/ecc/bw6-633/multiexp.go
+++ b/ecc/bw6-633/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then we borrow 2^c from the next window and subtract
// 2^{c} from the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates whether the provided scalars are in Montgomery form
// returns smallValues, which represents the number of scalars that meet the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
@@ -566,6 +572,8 @@ func (p *G1Jac) msmC16(points []G1Affine, scalars []fr.Element, splitFirstChunk
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -576,6 +584,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -612,6 +622,8 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
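
A hedged usage sketch of the validated configuration path: NbTasks larger than 1024 is now rejected with an error instead of being accepted silently. It assumes the package's exported Generators() helper and the ecc.MultiExpConfig fields (NbTasks, ScalarsMont) as they appear elsewhere in gnark-crypto.

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc"
	bw6633 "github.com/consensys/gnark-crypto/ecc/bw6-633"
	"github.com/consensys/gnark-crypto/ecc/bw6-633/fr"
)

func main() {
	_, _, g1Aff, _ := bw6633.Generators()
	points := []bw6633.G1Affine{g1Aff, g1Aff}

	var s0, s1 fr.Element
	s0.SetUint64(2)
	s1.SetUint64(3)
	scalars := []fr.Element{s0, s1}

	var res bw6633.G1Jac

	// NbTasks above 1024 is rejected
	if _, err := res.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: 4096}); err != nil {
		fmt.Println("rejected:", err) // invalid config: config.NbTasks > 1024
	}

	// a config within bounds goes through; ScalarsMont is set because fr.Element
	// values are kept in Montgomery form internally
	if _, err := res.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: 2, ScalarsMont: true}); err == nil {
		fmt.Println("computed 2·G + 3·G")
	}
}
```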
diff --git a/ecc/bw6-633/multiexp_test.go b/ecc/bw6-633/multiexp_test.go
index d4e68b19cf..725042a72b 100644
--- a/ecc/bw6-633/multiexp_test.go
+++ b/ecc/bw6-633/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from the template and contains the available parameters for the multiexp window size.
+ cRange := []uint64{4, 5, 8, 16}
+ if testing.Short() {
+ // test only "odd" and "even" (ie windows size divide word size vs not)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from the template and contains the available parameters for the multiexp window size.
+ // For G2, CI suffers with large c sizes since it needs to allocate a lot of memory for the buckets;
+ // test only "odd" and "even" window sizes (i.e. sizes that divide the word size vs. those that do not).
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bw6-633/pairing.go b/ecc/bw6-633/pairing.go
index 0a706b0770..337b104b51 100644
--- a/ecc/bw6-633/pairing.go
+++ b/ecc/bw6-633/pairing.go
@@ -31,7 +31,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -42,6 +44,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
@@ -198,8 +202,6 @@ func MillerLoop(P []G1Affine, Q []G2Affine) (GT, error) {
// precomputations
pProj0 := make([]g1Proj, n)
p1 := make([]G1Affine, n)
- p01 := make([]G1Affine, n)
- p10 := make([]G1Affine, n)
pProj01 := make([]g1Proj, n) // P0+P1
pProj10 := make([]g1Proj, n) // P0-P1
l01 := make([]lineEvaluation, n)
@@ -222,8 +224,8 @@ func MillerLoop(P []G1Affine, Q []G2Affine) (GT, error) {
l10[k].r1.Mul(&l10[k].r1, &q[k].X)
l10[k].r0.Mul(&l10[k].r0, &q[k].Y)
}
- BatchProjectiveToAffineG1(pProj01, p01)
- BatchProjectiveToAffineG1(pProj10, p10)
+ p01 := BatchProjectiveToAffineG1(pProj01)
+ p10 := BatchProjectiveToAffineG1(pProj10)
// f_{a0+\lambda*a1,P}(Q)
var result, ss GT
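
Since Pair and PairingCheck now state explicitly that they do not verify subgroup membership, a caller handling untrusted points is expected to run the check itself. A hedged sketch, assuming the package's exported Generators() and IsInSubGroup helpers:

```go
package main

import (
	"fmt"

	bw6633 "github.com/consensys/gnark-crypto/ecc/bw6-633"
)

func main() {
	_, _, p, q := bw6633.Generators() // affine generators, known to be in the right subgroups

	// the pairing itself no longer guards against rogue points
	if !p.IsInSubGroup() || !q.IsInSubGroup() {
		panic("point outside the prime-order subgroup")
	}

	ok, err := bw6633.PairingCheck([]bw6633.G1Affine{p}, []bw6633.G2Affine{q})
	fmt.Println(ok, err) // false <nil>: e(G1, G2) ≠ 1 for the generators
}
```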
diff --git a/ecc/bw6-633/twistededwards/eddsa/eddsa.go b/ecc/bw6-633/twistededwards/eddsa/eddsa.go
index 91bc243e93..1767ebb298 100644
--- a/ecc/bw6-633/twistededwards/eddsa/eddsa.go
+++ b/ecc/bw6-633/twistededwards/eddsa/eddsa.go
@@ -98,7 +98,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -146,7 +146,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -232,8 +232,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -241,9 +241,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bw6-633/twistededwards/point.go b/ecc/bw6-633/twistededwards/point.go
index b658c41e59..e4055d8d8d 100644
--- a/ecc/bw6-633/twistededwards/point.go
+++ b/ecc/bw6-633/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
diff --git a/ecc/bw6-633/twistededwards/point_test.go b/ecc/bw6-633/twistededwards/point_test.go
index 005b1469b8..0ffa874543 100644
--- a/ecc/bw6-633/twistededwards/point_test.go
+++ b/ecc/bw6-633/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bw6-756/bw6-756.go b/ecc/bw6-756/bw6-756.go
index 72374914eb..ae47133309 100644
--- a/ecc/bw6-756/bw6-756.go
+++ b/ecc/bw6-756/bw6-756.go
@@ -1,17 +1,24 @@
-// Copyright 2020 ConsenSys AG
+// Package bw6756 provides an efficient elliptic curve, pairing and hash-to-curve implementation for bw6-756.
//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
+// bw6-756: A Brezing--Weng curve (2-chain with bls12-378)
+// embedding degree k=6
+// seed x₀=11045256207009841153.
+// 𝔽p: p=366325390957376286590726555727219947825377821289246188278797409783441745356050456327989347160777465284190855125642086860525706497928518803244008749360363712553766506755227344593404398783886857865261088226271336335268413437902849
+// 𝔽r: r=605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417
+// (E/𝔽p): Y²=X³+1
+// (Eₜ/𝔽p): Y² = X³+33 (M-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p)
+// Extension fields tower:
+// 𝔽p³[u] = 𝔽p/u³-33
+// 𝔽p⁶[v] = 𝔽p³/v²-u
+// optimal Ate loops:
+// x₀+1, x₀²-x₀-1
+// Security: estimated 126-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 378 bits and p⁶ is 4536 bits)
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// Warning
//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
+// This code has not been audited and is provided as-is. In particular, there are no security guarantees such as constant-time implementation or side-channel attack resistance.
package bw6756
import (
@@ -22,17 +29,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bw6-756/fr"
)
-// BW6-756: A Brezing--Weng curve of embedding degree k=6 with seed x₀=11045256207009841153. It forms a 2-chain with BLS12-378.
-// 𝔽p: p=366325390957376286590726555727219947825377821289246188278797409783441745356050456327989347160777465284190855125642086860525706497928518803244008749360363712553766506755227344593404398783886857865261088226271336335268413437902849
-// 𝔽r: r=605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417
-// (E/𝔽p): Y²=X³+1
-// (Eₜ/𝔽p): Y² = X³+33 (M-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p)
-// Extension fields tower:
-// 𝔽p³[u] = 𝔽p/u³-33
-// 𝔽p⁶[v] = 𝔽p²/v²-u
-// optimal Ate loops: x₀+1, x₀²-x₀-1
-
// ID BW6_756 ID
const ID = ecc.BW6_756
@@ -81,12 +77,12 @@ func init() {
// E(3,y) * cofactor
g1Gen.X.SetString("286035407532233812057489253822435660910062665263942803649298092690795938518721117964189338863504082781482751182899097859005716378386344565362972291164604792882058761734674709131229927253172681714645554597102571818586966737895501")
g1Gen.Y.SetString("250540671634276190125882738767359258920233951524378923555904955920886135268516617166458911260101792169356480449980342047600821278990712908224386045486820019065641642853528653616206514851361917670279865872746658429844440125628329")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
// E(1,y) * cofactor
g2Gen.X.SetString("270164867145533700243149075881223225204067215320977230235816769808318087164726583740674261721395147407122688542569094772405350936550575160051166652281373572919753182191250641388443572739372443497834910784618354592418817138212395")
g2Gen.Y.SetString("296695446824796322573519291690935001172593568823998954880196613542512471119971074118215403545906873458039024520146929054366200365532511334310660691775675887531695313103875249166779149013653038059140912965769351316868363001510735")
- g2Gen.Z.SetString("1")
+ g2Gen.Z.SetOne()
g1GenAff.FromJacobian(&g1Gen)
g2GenAff.FromJacobian(&g2Gen)
diff --git a/ecc/bw6-756/doc.go b/ecc/bw6-756/doc.go
deleted file mode 100644
index 65afd2ba62..0000000000
--- a/ecc/bw6-756/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bw6756 efficient elliptic curve and pairing implementation for bw6-756.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bw6756
diff --git a/ecc/bw6-756/fp/element.go b/ecc/bw6-756/fp/element.go
index 77848bda10..4ef865aed0 100644
--- a/ecc/bw6-756/fp/element.go
+++ b/ecc/bw6-756/fp/element.go
@@ -199,7 +199,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1656,12 +1656,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid, this method leaves z unchanged and returns nil and an error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1669,7 +1670,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
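For callers of this package, a minimal migration sketch for the new error-returning SetString (not part of the patch; the helper name parseElement is illustrative):

	import "github.com/consensys/gnark-crypto/ecc/bw6-756/fp"

	func parseElement(s string) (fp.Element, error) {
		var x fp.Element
		if _, err := x.SetString(s); err != nil {
			// invalid input now yields an error instead of a panic; x is left unchanged
			return fp.Element{}, err
		}
		return x, nil
	}
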
diff --git a/ecc/bw6-756/fr/element.go b/ecc/bw6-756/fr/element.go
index 2c31968430..6beb3e9c00 100644
--- a/ecc/bw6-756/fr/element.go
+++ b/ecc/bw6-756/fr/element.go
@@ -181,7 +181,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -1086,12 +1086,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid, this method leaves z unchanged and returns nil and an error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1099,7 +1100,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bw6-756/fr/kzg/kzg.go b/ecc/bw6-756/fr/kzg/kzg.go
index 4a5c112601..7796c16b52 100644
--- a/ecc/bw6-756/fr/kzg/kzg.go
+++ b/ecc/bw6-756/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bw6756.G1Affine
+ var claimedValueG1Aff bw6756.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bw6756.G1Jac
+ var fminusfaG1Jac bw6756.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bw6756.G1Affine
diff --git a/ecc/bw6-756/fr/polynomial/multilin.go b/ecc/bw6-756/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..26ba3f26ff
--- /dev/null
+++ b/ecc/bw6-756/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bw6-756/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// at the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+		panic("left and right do not have the same size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words, the polynomial evaluated here is the multilinear extrapolation of
+// the indicator that is 1 iff q' == h', for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
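As a rough usage sketch of the new MultiLin type (not part of the patch; the hypercube values and evaluation point below are arbitrary):

	import (
		"github.com/consensys/gnark-crypto/ecc/bw6-756/fr"
		"github.com/consensys/gnark-crypto/ecc/bw6-756/fr/polynomial"
	)

	func multilinExample() fr.Element {
		// values of f on {0,1}²: f(0,0)=1, f(0,1)=2, f(1,0)=3, f(1,1)=4
		m := make(polynomial.MultiLin, 4)
		for i := range m {
			m[i].SetUint64(uint64(i + 1))
		}
		var r1, r2 fr.Element
		r1.SetUint64(5)
		r2.SetUint64(7)
		// Evaluate clones internally, so m is not mutated; calling Fold directly would mutate it
		return m.Evaluate([]fr.Element{r1, r2})
	}
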
diff --git a/ecc/bw6-756/fr/polynomial/pool.go b/ecc/bw6-756/fr/polynomial/pool.go
new file mode 100644
index 0000000000..3d3f5155ff
--- /dev/null
+++ b/ecc/bw6-756/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bw6-756/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool clears the pool completely, shielding against memory leaks.
+// E.g. if we forgot to dump a polynomial at some point, this ensures the value gets dumped eventually.
+// Returns how many polynomials were cleared that way.
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+	rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+		// If rC did not register the pointer, then either
+		// the array was allocated somewhere else and can be ignored,
+		// or this is a double Put, which MUST be ignored
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+		panic(fmt.Sprintf("can't cast to large or small array, the put array's capacity is %v, it should be %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
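A hedged sketch of the intended Make/Dump lifecycle (not part of the patch; the length 512 is arbitrary and the fr/polynomial imports from the previous sketch are assumed):

	func poolExample() {
		scratch := polynomial.Make(512) // borrowed from the large pool, returned with len == 512
		defer polynomial.Dump(scratch)  // hand it back when done; ClearPool() reclaims anything forgotten
		// ... use scratch as a temporary []fr.Element ...
	}
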
diff --git a/ecc/bw6-756/g1.go b/ecc/bw6-756/g1.go
index 179139b664..899113c8d1 100644
--- a/ecc/bw6-756/g1.go
+++ b/ecc/bw6-756/g1.go
@@ -64,6 +64,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -341,7 +349,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -873,9 +881,9 @@ func (p *g1Proj) FromAffine(Q *G1Affine) *g1Proj {
}
// BatchProjectiveToAffineG1 converts points in Projective coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchProjectiveToAffineG1(points []g1Proj) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -895,7 +903,7 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -906,7 +914,7 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
a := result[i].X
@@ -914,12 +922,13 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
result[i].Y.Mul(&points[i].y, &a)
}
})
+ return result
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -939,7 +948,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -950,7 +959,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is infinity point in affine
continue
}
var a, b fp.Element
@@ -962,6 +971,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -1025,8 +1035,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take value in the base table, double it c time, voilà.
@@ -1068,7 +1077,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
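For illustration only (not from the patch): the batch conversion helpers now allocate and return the affine slice themselves, and the new ScalarMultiplicationAffine keeps the result in Jacobian form:

	import (
		"math/big"

		bw6756 "github.com/consensys/gnark-crypto/ecc/bw6-756"
	)

	func g1Example(a bw6756.G1Affine, s *big.Int, jacs []bw6756.G1Jac) []bw6756.G1Affine {
		var p bw6756.G1Jac
		p.ScalarMultiplicationAffine(&a, s) // affine input, Jacobian output (as used in the KZG Verify change)
		_ = p
		return bw6756.BatchJacobianToAffineG1(jacs) // result slice is allocated by the function
	}
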
diff --git a/ecc/bw6-756/g1_test.go b/ecc/bw6-756/g1_test.go
index 27a3bbbca0..81ecf81553 100644
--- a/ecc/bw6-756/g1_test.go
+++ b/ecc/bw6-756/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BW6-756] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bw6-756/g2.go b/ecc/bw6-756/g2.go
index fbcd9c4eee..3abfa71d5a 100644
--- a/ecc/bw6-756/g2.go
+++ b/ecc/bw6-756/g2.go
@@ -336,7 +336,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bw6-756/g2_test.go b/ecc/bw6-756/g2_test.go
index a0bb5b030e..ecfc973322 100644
--- a/ecc/bw6-756/g2_test.go
+++ b/ecc/bw6-756/g2_test.go
@@ -85,7 +85,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -486,7 +486,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +514,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bw6-756/hash_to_g1.go b/ecc/bw6-756/hash_to_g1.go
index 6a02d3ce8e..03ee750cf2 100644
--- a/ecc/bw6-756/hash_to_g1.go
+++ b/ecc/bw6-756/hash_to_g1.go
@@ -89,65 +89,58 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
tv1 := fp.Element{17302715199413996045, 15077845457253267709, 8842885729139027579, 12189878420705505575, 12380986790262239346, 585111498723936856, 4947215576903759546, 1186632482028566920, 14543050817583235372, 5644943604719368358, 9440830989708189862, 1039766423535362} //tv1 = c6
var tv2, tv3, tv4, tv5 fp.Element
var exp big.Int
- // c4 = 4835703278458516698824703 = 2^82 - 1
+ // c4 = 4835703278458516698824703 = 2⁸² - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte{3, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255})
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = 37877157660731232732990269576663233239936484746509109593426423261538632780449313352717366389444912082695314931794809746268936574949192324351273838279701014606648452884726586254167471840902479876056412368
exp.SetBytes([]byte{1, 238, 213, 183, 107, 119, 49, 92, 85, 130, 79, 195, 198, 173, 25, 235, 146, 241, 154, 95, 88, 89, 209, 63, 126, 70, 68, 40, 170, 44, 116, 217, 152, 213, 206, 120, 133, 72, 219, 61, 96, 89, 2, 93, 64, 159, 85, 65, 79, 214, 57, 103, 160, 220, 200, 220, 82, 89, 162, 189, 182, 200, 212, 168, 96, 85, 71, 132, 177, 188, 251, 218, 22, 208, 189, 13, 10, 73, 216, 6, 120, 252, 199, 240, 208})
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = 2417851639229258349412352
exp.SetBytes([]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
- tv5.Exp(tv4, &exp)
-
- isQNr := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &fp.Element{13990906742184113945, 15879050380504523621, 13768460034940508157, 12337541071329853620, 6296858130192020747, 9289986178217863086, 18403114759403589657, 4546259071787184045, 5504643400205978814, 13830311104669138548, 96107744534255859, 1024735223965534})
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh(big.NewInt(1), 82-2)
-
- for i := 82; i >= 2; i-- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := g1NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp, 1)
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := g1NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := fp.Element{13990906742184113945, 15879050380504523621, 13768460034940508157, 12337541071329853620, 6296858130192020747, 9289986178217863086, 18403114759403589657, 4546259071787184045, 5504643400205978814, 13830311104669138548, 96107744534255859, 1024735223965534}
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh(big.NewInt(1), 82-2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := 82; i >= 2; i-- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := g1NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
@@ -161,12 +154,6 @@ func g1NotOne(x *fp.Element) uint64 {
}
-/*
-// g1SetZ sets z to [11].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {18446744073709504998, 11529623972028612607, 739483395258014634, 5527028560780200701, 11477868704616895891, 15905434021829949368, 2844651761892435780, 17567410508478669002, 4162242322955979641, 15743938111024983262, 11916654042695069468, 4062866236140222} )
-}*/
-
// g1MulByZ multiplies x by [11] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -181,30 +168,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{6087387690755251612, 7643068232434215576, 6195945763281467660, 97569654519975969, 1505434147110560758, 12342644747290341982, 14059794106692380317, 15229664573794943703, 16908793757593141664, 1949816925291208189, 9451095697369482684, 234190359239853}
+ var sswuIsoCurveCoeffB = fp.Element{18446744073709458379, 881299893533802495, 4886355625346099349, 6225448195760991771, 6629400315996169345, 12607886696045185322, 7201730065066775519, 1932403901886200506, 8616600553259348813, 6369175937589644082, 7499857803942196586, 3773119276850162}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{18446744073709458379, 881299893533802495, 4886355625346099349, 6225448195760991771, 6629400315996169345, 12607886696045185322, 7201730065066775519, 1932403901886200506, 8616600553259348813, 6369175937589644082, 7499857803942196586, 3773119276850162})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -212,48 +198,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{18446744073709504998, 11529623972028612607, 739483395258014634, 5527028560780200701, 11477868704616895891, 15905434021829949368, 2844651761892435780, 17567410508478669002, 4162242322955979641, 15743938111024983262, 11916654042695069468, 4062866236140222}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{6087387690755251612, 7643068232434215576, 6195945763281467660, 97569654519975969, 1505434147110560758, 12342644747290341982, 14059794106692380317, 15229664573794943703, 16908793757593141664, 1949816925291208189, 9451095697369482684, 234190359239853}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{6087387690755251612, 7643068232434215576, 6195945763281467660, 97569654519975969, 1505434147110560758, 12342644747290341982, 14059794106692380317, 15229664573794943703, 16908793757593141664, 1949816925291208189, 9451095697369482684, 234190359239853})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{18446744073709458379, 881299893533802495, 4886355625346099349, 6225448195760991771, 6629400315996169345, 12607886696045185322, 7201730065066775519, 1932403901886200506, 8616600553259348813, 6369175937589644082, 7499857803942196586, 3773119276850162})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -296,13 +279,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -319,7 +302,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -339,7 +322,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -349,7 +332,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
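A small calling sketch (assumes the bw6756 import shown earlier; the domain separation tag below is a made-up placeholder, not a value from this patch):

	func hashExample(msg []byte) (bw6756.G1Affine, error) {
		dst := []byte("EXAMPLE-V01-CS01-with-BW6756G1_XMD:SHA-256_SSWU_RO_")
		// HashToG1 is usable as a random oracle; EncodeToG1 is faster but not uniformly distributed
		return bw6756.HashToG1(msg, dst)
	}
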
diff --git a/ecc/bw6-756/hash_to_g2.go b/ecc/bw6-756/hash_to_g2.go
index 7ec07baec5..d84ae9b50c 100644
--- a/ecc/bw6-756/hash_to_g2.go
+++ b/ecc/bw6-756/hash_to_g2.go
@@ -160,65 +160,58 @@ func g2Isogeny(p *G2Affine) {
// g2SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g2SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
tv1 := fp.Element{17302715199413996045, 15077845457253267709, 8842885729139027579, 12189878420705505575, 12380986790262239346, 585111498723936856, 4947215576903759546, 1186632482028566920, 14543050817583235372, 5644943604719368358, 9440830989708189862, 1039766423535362} //tv1 = c6
var tv2, tv3, tv4, tv5 fp.Element
var exp big.Int
- // c4 = 4835703278458516698824703 = 2^82 - 1
+ // c4 = 4835703278458516698824703 = 2⁸² - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte{3, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255})
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = 37877157660731232732990269576663233239936484746509109593426423261538632780449313352717366389444912082695314931794809746268936574949192324351273838279701014606648452884726586254167471840902479876056412368
exp.SetBytes([]byte{1, 238, 213, 183, 107, 119, 49, 92, 85, 130, 79, 195, 198, 173, 25, 235, 146, 241, 154, 95, 88, 89, 209, 63, 126, 70, 68, 40, 170, 44, 116, 217, 152, 213, 206, 120, 133, 72, 219, 61, 96, 89, 2, 93, 64, 159, 85, 65, 79, 214, 57, 103, 160, 220, 200, 220, 82, 89, 162, 189, 182, 200, 212, 168, 96, 85, 71, 132, 177, 188, 251, 218, 22, 208, 189, 13, 10, 73, 216, 6, 120, 252, 199, 240, 208})
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = 2417851639229258349412352
exp.SetBytes([]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
- tv5.Exp(tv4, &exp)
-
- isQNr := g2NotOne(&tv5)
-
- tv2.Mul(&tv3, &fp.Element{13990906742184113945, 15879050380504523621, 13768460034940508157, 12337541071329853620, 6296858130192020747, 9289986178217863086, 18403114759403589657, 4546259071787184045, 5504643400205978814, 13830311104669138548, 96107744534255859, 1024735223965534})
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh(big.NewInt(1), 82-2)
-
- for i := 82; i >= 2; i-- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := g2NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp, 1)
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := g2NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := fp.Element{13990906742184113945, 15879050380504523621, 13768460034940508157, 12337541071329853620, 6296858130192020747, 9289986178217863086, 18403114759403589657, 4546259071787184045, 5504643400205978814, 13830311104669138548, 96107744534255859, 1024735223965534}
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh(big.NewInt(1), 82-2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := 82; i >= 2; i-- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := g2NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
@@ -232,12 +225,6 @@ func g2NotOne(x *fp.Element) uint64 {
}
-/*
-// g2SetZ sets z to [11].
-func g2SetZ(z *fp.Element) {
- z.Set( &fp.Element {18446744073709504998, 11529623972028612607, 739483395258014634, 5527028560780200701, 11477868704616895891, 15905434021829949368, 2844651761892435780, 17567410508478669002, 4162242322955979641, 15743938111024983262, 11916654042695069468, 4062866236140222} )
-}*/
-
// g2MulByZ multiplies x by [11] and stores the result in z
func g2MulByZ(z *fp.Element, x *fp.Element) {
@@ -252,30 +239,29 @@ func g2MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve2 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve2(u *fp.Element) G2Affine {
+ var sswuIsoCurveCoeffA = fp.Element{11188695195863236139, 18339800635248689929, 13644954250665578253, 16122525194076552550, 1985822167495960177, 11021218035968661748, 12951199075167016614, 18080500199774882647, 3065668365127963650, 1810223365641727596, 18249180996905802984, 4351293214471385}
+ var sswuIsoCurveCoeffB = fp.Element{3597427888115195847, 8485485194496420669, 9451115945982544412, 10217463679676360079, 3023875305953960937, 5866766270380139867, 15059909646037855295, 1065687373540957157, 12978541562777068958, 18112033168403904062, 11632286302244735111, 1469792042332206}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g2MulByZ(&tv1, &tv1)
+ g2MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{3597427888115195847, 8485485194496420669, 9451115945982544412, 10217463679676360079, 3023875305953960937, 5866766270380139867, 15059909646037855295, 1065687373540957157, 12978541562777068958, 18112033168403904062, 11632286302244735111, 1469792042332206})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g2NotZero(&tv2)
@@ -283,48 +269,45 @@ func mapToCurve2(u *fp.Element) G2Affine {
tv4 = fp.Element{18446744073709504998, 11529623972028612607, 739483395258014634, 5527028560780200701, 11477868704616895891, 15905434021829949368, 2844651761892435780, 17567410508478669002, 4162242322955979641, 15743938111024983262, 11916654042695069468, 4062866236140222}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{11188695195863236139, 18339800635248689929, 13644954250665578253, 16122525194076552550, 1985822167495960177, 11021218035968661748, 12951199075167016614, 18080500199774882647, 3065668365127963650, 1810223365641727596, 18249180996905802984, 4351293214471385}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{11188695195863236139, 18339800635248689929, 13644954250665578253, 16122525194076552550, 1985822167495960177, 11021218035968661748, 12951199075167016614, 18080500199774882647, 3065668365127963650, 1810223365641727596, 18249180996905802984, 4351293214471385})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{3597427888115195847, 8485485194496420669, 9451115945982544412, 10217463679676360079, 3023875305953960937, 5866766270380139867, 15059909646037855295, 1065687373540957157, 12978541562777068958, 18112033168403904062, 11632286302244735111, 1469792042332206})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g2Sgn0(u)^g2Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G2Affine{x, y}
}
@@ -346,13 +329,13 @@ func g2EvalPolynomial(z *fp.Element, monic bool, coefficients []fp.Element, x *f
// g2Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g2Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -369,7 +352,7 @@ func MapToG2(u fp.Element) G2Affine {
// EncodeToG2 hashes a message to a point on the G2 curve using the SSWU map.
// It is faster than HashToG2, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG2(msg, dst []byte) (G2Affine, error) {
var res G2Affine
@@ -389,7 +372,7 @@ func EncodeToG2(msg, dst []byte) (G2Affine, error) {
// HashToG2 hashes a message to a point on the G2 curve using the SSWU map.
// Slower than EncodeToG2, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG2(msg, dst []byte) (G2Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -399,7 +382,7 @@ func HashToG2(msg, dst []byte) (G2Affine, error) {
Q0 := mapToCurve2(&u[0])
Q1 := mapToCurve2(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g2Isogeny(&Q0)
g2Isogeny(&Q1)
diff --git a/ecc/bw6-756/internal/fptower/e3.go b/ecc/bw6-756/internal/fptower/e3.go
index f41b9da633..afd75d0627 100644
--- a/ecc/bw6-756/internal/fptower/e3.go
+++ b/ecc/bw6-756/internal/fptower/e3.go
@@ -272,6 +272,8 @@ func (z *E3) MulByNonResidue(x *E3) *E3 {
}
// Inverse an element in E3
+//
+// if x == 0, sets and returns z = x
func (z *E3) Inverse(x *E3) *E3 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -300,6 +302,8 @@ func (z *E3) Inverse(x *E3) *E3 {
// BatchInvertE3 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE3(a []E3) []E3 {
res := make([]E3, len(a))
if len(a) == 0 {
diff --git a/ecc/bw6-756/internal/fptower/e6.go b/ecc/bw6-756/internal/fptower/e6.go
index 35d6b82a76..6dadd7270a 100644
--- a/ecc/bw6-756/internal/fptower/e6.go
+++ b/ecc/bw6-756/internal/fptower/e6.go
@@ -223,29 +223,46 @@ func (z *E6) CyclotomicSquareCompressed(x *E6) *E6 {
return z
}
-// Decompress Karabina's cyclotomic square result
-func (z *E6) Decompress(x *E6) *E6 {
+// DecompressKarabina decompresses Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+func (z *E6) DecompressKarabina(x *E6) *E6 {
var t [3]fp.Element
var one fp.Element
one.SetOne()
- // t0 = g1²
- t[0].Square(&x.B0.A1)
- // t1 = 3 * g1² - 2 * g2
- t[1].Sub(&t[0], &x.B0.A2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5² + t1
- t[2].Square(&x.B1.A2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.B1.A0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.B1.A0.IsZero() {
+ t[0].Mul(&x.B0.A1, &x.B1.A2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.B0.A2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.B0.A1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.B0.A2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.B1.A2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 1/(4 * g3)
+ t[1].Double(&x.B1.A0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.B1.A1.Mul(&t[0], &t[1])
+ z.B1.A1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.B0.A2, &x.B0.A1)
@@ -254,7 +271,7 @@ func (z *E6) Decompress(x *E6) *E6 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.B1.A0, &x.B1.A2)
// c₀ = E * (2 * g4² + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -308,6 +325,8 @@ func (z *E6) CyclotomicSquare(x *E6) *E6 {
}
// Inverse set z to the inverse of x in E6 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -325,6 +344,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
@@ -644,6 +665,10 @@ func BatchCompressTorus(x []E6) ([]E3, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].B1)
+ // throw an error if any of the x[i].C1 is 0
+ if res[i].IsZero() {
+ return []E3{}, errors.New("invalid input")
+ }
}
t := BatchInvertE3(res) // costs 1 inverse
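Within this fptower package, a minimal round-trip sketch of the renamed decompression (not part of the patch; a is assumed to already lie in the cyclotomic subgroup):

	func karabinaRoundTrip(a *E6) E6 {
		var b E6
		b.CyclotomicSquareCompressed(a) // Karabina compressed squaring (g0, g4 not computed)
		b.DecompressKarabina(&b)        // recover the full element; the g3 == 0 edge case is handled
		return b
	}
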
diff --git a/ecc/bw6-756/internal/fptower/e6_pairing.go b/ecc/bw6-756/internal/fptower/e6_pairing.go
index c177bfaa7f..60358a0190 100644
--- a/ecc/bw6-756/internal/fptower/e6_pairing.go
+++ b/ecc/bw6-756/internal/fptower/e6_pairing.go
@@ -65,7 +65,7 @@ func (z *E6) Expt(x *E6) *E6 {
// Step 67: result = x^0x9948a20000000000
result.nSquareCompressed(41)
- result.Decompress(&result)
+ result.DecompressKarabina(&result)
// Step 68: result = x^0x9948a20000000001
z.Mul(x, &result)
diff --git a/ecc/bw6-756/internal/fptower/e6_test.go b/ecc/bw6-756/internal/fptower/e6_test.go
index 58048b0e8b..c32630a401 100644
--- a/ecc/bw6-756/internal/fptower/e6_test.go
+++ b/ecc/bw6-756/internal/fptower/e6_test.go
@@ -300,13 +300,29 @@ func TestE6Ops(t *testing.T) {
properties.Property("[BW6-756] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E6) bool {
- var b, c, d E6
+ var _a, b, c, d, _c, _d E6
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.Frobenius(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.Frobenius(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
- d.CyclotomicSquareCompressed(a).Decompress(&d)
+ d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
return c.Equal(&d)
},
genA,
diff --git a/ecc/bw6-756/multiexp.go b/ecc/bw6-756/multiexp.go
index 2a11889a24..698835f344 100644
--- a/ecc/bw6-756/multiexp.go
+++ b/ecc/bw6-756/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract
// 2^{c} to the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates wheter the provided scalars are in montgomery form
// returns smallValues, which represent the number of scalars which meets the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
@@ -573,6 +579,8 @@ func (p *G1Jac) msmC16(points []G1Affine, scalars []fr.Element, splitFirstChunk
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -583,6 +591,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -619,6 +629,8 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
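A hedged calling sketch for the stricter config validation (not part of the patch; points and scalars are assumed to have equal length):

	import (
		"github.com/consensys/gnark-crypto/ecc"
		bw6756 "github.com/consensys/gnark-crypto/ecc/bw6-756"
		"github.com/consensys/gnark-crypto/ecc/bw6-756/fr"
	)

	func msmExample(points []bw6756.G1Affine, scalars []fr.Element) (*bw6756.G1Jac, error) {
		var res bw6756.G1Jac
		// NbTasks <= 0 falls back to runtime.NumCPU(); values above 1024 are now rejected with an error
		return res.MultiExp(points, scalars, ecc.MultiExpConfig{NbTasks: 16})
	}
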
diff --git a/ecc/bw6-756/multiexp_test.go b/ecc/bw6-756/multiexp_test.go
index 584f1d296a..4a6f1b1613 100644
--- a/ecc/bw6-756/multiexp_test.go
+++ b/ecc/bw6-756/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 8, 16}
+ if testing.Short() {
+		// test only "odd" and "even" c (i.e. window sizes that divide the word size vs those that do not)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ // for g2, CI struggles with large c sizes since it needs to allocate a lot of memory for the buckets.
+ // test only "odd" and "even" (i.e. window sizes that divide the word size vs. those that don't)
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentiation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bw6-756/pairing.go b/ecc/bw6-756/pairing.go
index 32b520521f..e4b1f3ad9b 100644
--- a/ecc/bw6-756/pairing.go
+++ b/ecc/bw6-756/pairing.go
@@ -31,7 +31,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -42,6 +44,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
@@ -176,8 +180,6 @@ func MillerLoop(P []G1Affine, Q []G2Affine) (GT, error) {
// precomputations
pProj1 := make([]g1Proj, n)
p1 := make([]G1Affine, n)
- p01 := make([]G1Affine, n)
- p10 := make([]G1Affine, n)
pProj01 := make([]g1Proj, n) // P0+P1
pProj10 := make([]g1Proj, n) // P0-P1
l01 := make([]lineEvaluation, n)
@@ -199,8 +201,8 @@ func MillerLoop(P []G1Affine, Q []G2Affine) (GT, error) {
l10[k].r1.Mul(&l10[k].r1, &q[k].X)
l10[k].r0.Mul(&l10[k].r0, &q[k].Y)
}
- BatchProjectiveToAffineG1(pProj01, p01)
- BatchProjectiveToAffineG1(pProj10, p10)
+ p01 := BatchProjectiveToAffineG1(pProj01)
+ p10 := BatchProjectiveToAffineG1(pProj10)
// f_{a0+lambda*a1,P}(Q)
var result, ss GT
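Since Pair and PairingCheck now document that subgroup membership is not verified, a caller receiving untrusted points might guard the call explicitly. A hypothetical sketch; checkedPairingCheck is not part of the library, only IsInSubGroup and PairingCheck are.

```go
package main

import (
	"fmt"

	bw6756 "github.com/consensys/gnark-crypto/ecc/bw6-756"
)

// checkedPairingCheck is a hypothetical wrapper: it rejects points outside the
// prime-order subgroups before calling PairingCheck, which by convention does
// not perform that check itself.
func checkedPairingCheck(P []bw6756.G1Affine, Q []bw6756.G2Affine) (bool, error) {
	for i := range P {
		if !P[i].IsInSubGroup() {
			return false, fmt.Errorf("P[%d] is not in the G1 subgroup", i)
		}
	}
	for i := range Q {
		if !Q[i].IsInSubGroup() {
			return false, fmt.Errorf("Q[%d] is not in the G2 subgroup", i)
		}
	}
	return bw6756.PairingCheck(P, Q)
}

func main() {
	_, _, g1, g2 := bw6756.Generators()
	ok, err := checkedPairingCheck([]bw6756.G1Affine{g1}, []bw6756.G2Affine{g2})
	fmt.Println(ok, err) // e(g1, g2) != 1, so ok is false and err is nil
}
```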
diff --git a/ecc/bw6-756/twistededwards/eddsa/eddsa.go b/ecc/bw6-756/twistededwards/eddsa/eddsa.go
index 94f8d03782..c09978073d 100644
--- a/ecc/bw6-756/twistededwards/eddsa/eddsa.go
+++ b/ecc/bw6-756/twistededwards/eddsa/eddsa.go
@@ -98,7 +98,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -146,7 +146,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -232,8 +232,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -241,9 +241,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bw6-756/twistededwards/point.go b/ecc/bw6-756/twistededwards/point.go
index 239aca743c..fab8897d98 100644
--- a/ecc/bw6-756/twistededwards/point.go
+++ b/ecc/bw6-756/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
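The ScalarMul → ScalarMultiplication rename is mechanical; existing callers only need the longer method name. A minimal before/after sketch (illustrative):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bw6-756/twistededwards"
)

func main() {
	params := twistededwards.GetEdwardsCurve()

	var p twistededwards.PointAffine
	s := big.NewInt(42)

	// before this release: p.ScalarMul(&params.Base, s)
	p.ScalarMultiplication(&params.Base, s) // new name, same semantics

	fmt.Println(p.IsOnCurve()) // true
}
```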
diff --git a/ecc/bw6-756/twistededwards/point_test.go b/ecc/bw6-756/twistededwards/point_test.go
index ba1b7f3f0a..fc5347fb00 100644
--- a/ecc/bw6-756/twistededwards/point_test.go
+++ b/ecc/bw6-756/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/bw6-761/bw6-761.go b/ecc/bw6-761/bw6-761.go
index 8d1b6dcabb..a115d92dc2 100644
--- a/ecc/bw6-761/bw6-761.go
+++ b/ecc/bw6-761/bw6-761.go
@@ -1,17 +1,26 @@
-// Copyright 2020 ConsenSys AG
+// Package bw6761 efficient elliptic curve, pairing and hash to curve implementation for bw6-761.
//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
+// bw6-761: A Brezing--Weng curve (2-chain with bls12-377)
+// embedding degree k=6
+// seed x₀=9586122913090633729
+// 𝔽p: p=6891450384315732539396789682275657542479668912536150109513790160209623422243491736087683183289411687640864567753786613451161759120554247759349511699125301598951605099378508850372543631423596795951899700429969112842764913119068299
+// 𝔽r: r=258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177
+// (E/𝔽p): Y²=X³-1
+// (Eₜ/𝔽p): Y² = X³+4 (M-type twist)
+// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p)
+// Extension fields tower:
+// 𝔽p³[u] = 𝔽p/u³+4
+// 𝔽p⁶[v] = 𝔽p³/v²-u
+// optimal Ate loops:
+// x₀+1, x₀²-x₀-1
+// Security: estimated 126-bit level following [https://eprint.iacr.org/2019/885.pdf]
+// (r is 377 bits and p⁶ is 4566 bits)
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// https://eprint.iacr.org/2020/351.pdf
//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
+// Warning
+//
+// This code has not been audited and is provided as-is. In particular, there are no security guarantees such as constant-time implementation or side-channel attack resistance.
package bw6761
import (
@@ -22,19 +31,6 @@ import (
"github.com/consensys/gnark-crypto/ecc/bw6-761/fr"
)
-// https://eprint.iacr.org/2020/351.pdf
-
-// BW6-761: A Brezing--Weng curve of embedding degree k=6 with seed x₀=9586122913090633729. It forms a 2-chain with BLS12-377.
-// 𝔽p: p=6891450384315732539396789682275657542479668912536150109513790160209623422243491736087683183289411687640864567753786613451161759120554247759349511699125301598951605099378508850372543631423596795951899700429969112842764913119068299
-// 𝔽r: r=258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177
-// (E/𝔽p): Y²=X³-1
-// (Eₜ/𝔽p): Y² = X³+4 (M-type twist)
-// r ∣ #E(Fp) and r ∣ #Eₜ(𝔽p)
-// Extension fields tower:
-// 𝔽p³[u] = 𝔽p/u³+4
-// 𝔽p⁶[v] = 𝔽p²/v²-u
-// optimal Ate loops: x₀+1, x₀²-x₀-1
-
// ID BW6_761 ID
const ID = ecc.BW6_761
@@ -83,11 +79,11 @@ func init() {
g1Gen.X.SetString("6238772257594679368032145693622812838779005809760824733138787810501188623461307351759238099287535516224314149266511977132140828635950940021790489507611754366317801811090811367945064510304504157188661901055903167026722666149426237")
g1Gen.Y.SetString("2101735126520897423911504562215834951148127555913367997162789335052900271653517958562461315794228241561913734371411178226936527683203879553093934185950470971848972085321797958124416462268292467002957525517188485984766314758624099")
- g1Gen.Z.SetString("1")
+ g1Gen.Z.SetOne()
g2Gen.X.SetString("6445332910596979336035888152774071626898886139774101364933948236926875073754470830732273879639675437155036544153105017729592600560631678554299562762294743927912429096636156401171909259073181112518725201388196280039960074422214428")
g2Gen.Y.SetString("562923658089539719386922163444547387757586534741080263946953401595155211934630598999300396317104182598044793758153214972605680357108252243146746187917218885078195819486220416605630144001533548163105316661692978285266378674355041")
- g2Gen.Z.SetString("1")
+ g2Gen.Z.SetOne()
g1GenAff.FromJacobian(&g1Gen)
g2GenAff.FromJacobian(&g2Gen)
diff --git a/ecc/bw6-761/doc.go b/ecc/bw6-761/doc.go
deleted file mode 100644
index b0a80ef624..0000000000
--- a/ecc/bw6-761/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 ConsenSys Software Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by consensys/gnark-crypto DO NOT EDIT
-
-// Package bw6761 efficient elliptic curve and pairing implementation for bw6-761.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package bw6761
diff --git a/ecc/bw6-761/fp/element.go b/ecc/bw6-761/fp/element.go
index 4627505b73..a410b94f8c 100644
--- a/ecc/bw6-761/fp/element.go
+++ b/ecc/bw6-761/fp/element.go
@@ -199,7 +199,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fp.Element with ")
@@ -1656,12 +1656,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid, this method leaves z unchanged and returns nil and an error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1669,7 +1670,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
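With SetString now returning (*Element, error), parse failures can be handled instead of recovered from a panic; this is also why the tests earlier in this diff switch fluent chains like SetString("9455").Mul(...) to SetUint64(9455). A minimal sketch (illustrative):

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bw6-761/fp"
)

func main() {
	var x fp.Element

	// valid input: base prefixes are handled via big.Int.SetString(s, 0)
	if _, err := x.SetString("0x2a"); err != nil {
		panic(err)
	}

	// invalid input: previously a panic inside SetString, now an ordinary error
	if _, err := x.SetString("not-a-number"); err != nil {
		fmt.Println("rejected:", err)
	}

	fmt.Println(x.String()) // still 42; x was left unchanged by the failed call
}
```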
diff --git a/ecc/bw6-761/fr/element.go b/ecc/bw6-761/fr/element.go
index 49535cd73c..dd4c184df8 100644
--- a/ecc/bw6-761/fr/element.go
+++ b/ecc/bw6-761/fr/element.go
@@ -181,7 +181,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set fr.Element with ")
@@ -1086,12 +1086,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid, this method leaves z unchanged and returns nil and an error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -1099,7 +1100,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
diff --git a/ecc/bw6-761/fr/kzg/kzg.go b/ecc/bw6-761/fr/kzg/kzg.go
index 4d8b8e850d..58c3c23fb3 100644
--- a/ecc/bw6-761/fr/kzg/kzg.go
+++ b/ecc/bw6-761/fr/kzg/kzg.go
@@ -169,16 +169,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff bw6761.G1Affine
+ var claimedValueG1Aff bw6761.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac bw6761.G1Jac
+ var fminusfaG1Jac bw6761.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH bw6761.G1Affine
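The new ScalarMultiplicationAffine removes the affine round-trip that Verify previously performed. A standalone sketch of the same pattern; the helper below is hypothetical, only the bw6761 methods come from the library.

```go
package main

import (
	"fmt"
	"math/big"

	bw6761 "github.com/consensys/gnark-crypto/ecc/bw6-761"
	"github.com/consensys/gnark-crypto/ecc/bw6-761/fr"
)

// commitMinusValue computes commitment - [v]·base directly in Jacobian
// coordinates, mirroring the Verify hunk above: ScalarMultiplicationAffine
// takes an affine input and writes a Jacobian output, so no intermediate
// affine point (and no extra FromAffine) is needed before SubAssign.
func commitMinusValue(commitment, base *bw6761.G1Affine, v fr.Element) bw6761.G1Jac {
	var vBig big.Int
	v.ToBigIntRegular(&vBig)

	var vBase bw6761.G1Jac
	vBase.ScalarMultiplicationAffine(base, &vBig) // [v]·base, Jacobian result

	var res bw6761.G1Jac
	res.FromAffine(commitment)
	res.SubAssign(&vBase)
	return res
}

func main() {
	_, _, g1Aff, _ := bw6761.Generators()
	var v fr.Element
	v.SetUint64(3)
	res := commitMinusValue(&g1Aff, &g1Aff, v) // g - [3]g = [-2]g
	fmt.Println(res.String())
}
```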
diff --git a/ecc/bw6-761/fr/polynomial/multilin.go b/ecc/bw6-761/fr/polynomial/multilin.go
new file mode 100644
index 0000000000..c20d53b13a
--- /dev/null
+++ b/ecc/bw6-761/fr/polynomial/multilin.go
@@ -0,0 +1,250 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "github.com/consensys/gnark-crypto/ecc/bw6-761/fr"
+)
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁱ⁻¹ bₙ₋ᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// on the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+ panic("Left and right do not have the right size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to q' == h' for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+ if len(*m) != 1< 0 {
+ i.Sub(fr.Modulus(), &i)
+ i.Neg(&i)
+ }
+ return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
+}
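To make the MultiLin bookkeeping-table semantics above concrete, here is a small usage sketch of Fold and Evaluate (illustrative; the values and variable names are made up):

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bw6-761/fr"
	"github.com/consensys/gnark-crypto/ecc/bw6-761/fr/polynomial"
)

func main() {
	// a 2-variable multilinear polynomial, stored as its 2² hypercube evaluations
	m := polynomial.MultiLin(make([]fr.Element, 4))
	for i := range m {
		m[i].SetUint64(uint64(i + 1))
	}

	var r1, r2 fr.Element
	r1.SetUint64(5)
	r2.SetUint64(7)

	// Evaluate clones the table and folds once per coordinate
	v := m.Evaluate([]fr.Element{r1, r2})

	// equivalent, but mutating: fold X₁ := r1, then X₂ := r2
	c := m.Clone()
	c.Fold(r1)
	c.Fold(r2)

	fmt.Println(v.Equal(&c[0])) // true
}
```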
diff --git a/ecc/bw6-761/fr/polynomial/pool.go b/ecc/bw6-761/fr/polynomial/pool.go
new file mode 100644
index 0000000000..1f57a87ce5
--- /dev/null
+++ b/ecc/bw6-761/fr/polynomial/pool.go
@@ -0,0 +1,130 @@
+// Copyright 2020 ConsenSys Software Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by consensys/gnark-crypto DO NOT EDIT
+
+package polynomial
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/bw6-761/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool clears the pool completely, shielding against memory leaks.
+// E.g. if we forgot to dump a polynomial at some point, this ensures the value gets dumped eventually.
+// Returns how many polynomials were cleared that way.
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool Returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+ rC.Store(ptr, struct{}{}) // registers that the pointer is being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+ // If rC did not register the pointer, then either the array was
+ // allocated somewhere else (which can be ignored), or this is a
+ // double put (which MUST be ignored)
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+ panic(fmt.Sprintf("can't cast to large or small array, the put array's is %v it should have capacity %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
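A minimal lifecycle sketch for the pool (illustrative): Make borrows a slice backed by a pooled array, Dump hands it back.

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bw6-761/fr"
	"github.com/consensys/gnark-crypto/ecc/bw6-761/fr/polynomial"
)

func main() {
	// 128 ≤ 256, so this is served from the small pool; the backing array keeps
	// the small-pool capacity, which is what Dump relies on later
	scratch := polynomial.Make(128)
	for i := range scratch {
		scratch[i].SetUint64(uint64(i))
	}

	// ... use scratch ...

	// hand it back; Dump reports how many slices were actually recycled
	// (dumping the same slice twice is a no-op the second time)
	recycled := polynomial.Dump(scratch)
	fmt.Println("recycled:", recycled, "still registered:", polynomial.CountPool())
}
```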
diff --git a/ecc/bw6-761/g1.go b/ecc/bw6-761/g1.go
index d2d3ca9a44..0263285ab7 100644
--- a/ecc/bw6-761/g1.go
+++ b/ecc/bw6-761/g1.go
@@ -64,6 +64,14 @@ func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {
return p
}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *G1Jac) ScalarMultiplicationAffine(a *G1Affine, s *big.Int) *G1Jac {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *G1Affine) Add(a, b *G1Affine) *G1Affine {
@@ -341,7 +349,7 @@ func (p *G1Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -884,9 +892,9 @@ func (p *g1Proj) FromAffine(Q *G1Affine) *g1Proj {
}
// BatchProjectiveToAffineG1 converts points in Projective coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchProjectiveToAffineG1(points []g1Proj) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -906,7 +914,7 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the point at infinity in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -917,7 +925,7 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the point at infinity in affine
continue
}
a := result[i].X
@@ -925,12 +933,13 @@ func BatchProjectiveToAffineG1(points []g1Proj, result []G1Affine) {
result[i].Y.Mul(&points[i].y, &a)
}
})
+ return result
}
// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffineG1(points []G1Jac) []G1Affine {
+ result := make([]G1Affine, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -950,7 +959,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the point at infinity in affine
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -961,7 +970,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the point at infinity in affine
continue
}
var a, b fp.Element
@@ -973,6 +982,7 @@ func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) {
}
})
+ return result
}
// BatchScalarMultiplicationG1 multiplies the same base by all scalars
@@ -1036,8 +1046,7 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
selectors[chunk] = d
}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]G1Affine, (1 << (c - 1)))
- BatchJacobianToAffineG1(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffineG1(baseTable)
toReturn := make([]G1Jac, len(scalars))
// for each digit, take value in the base table, double it c time, voilà.
@@ -1079,7 +1088,6 @@ func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affin
}
})
- toReturnAff := make([]G1Affine, len(scalars))
- BatchJacobianToAffineG1(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffineG1(toReturn)
return toReturnAff
}
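The batch-conversion helpers now allocate and return their result, so callers drop the preallocated output slice. A before/after sketch (illustrative):

```go
package main

import (
	"fmt"

	bw6761 "github.com/consensys/gnark-crypto/ecc/bw6-761"
)

func main() {
	g1Jac, _, _, _ := bw6761.Generators()

	var twoG bw6761.G1Jac
	twoG.Set(&g1Jac)
	twoG.AddAssign(&g1Jac) // 2·g

	jacs := []bw6761.G1Jac{g1Jac, twoG}

	// before this release:
	//   affs := make([]bw6761.G1Affine, len(jacs))
	//   bw6761.BatchJacobianToAffineG1(jacs, affs)
	affs := bw6761.BatchJacobianToAffineG1(jacs) // now allocates and returns the slice

	fmt.Println(len(affs), affs[0].IsOnCurve(), affs[1].IsOnCurve())
}
```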
diff --git a/ecc/bw6-761/g1_test.go b/ecc/bw6-761/g1_test.go
index 0deac0bf2b..6ace718ac2 100644
--- a/ecc/bw6-761/g1_test.go
+++ b/ecc/bw6-761/g1_test.go
@@ -85,7 +85,7 @@ func TestG1AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G1Affine
op1.FromJacobian(&g1Gen)
- op2.FromJacobian(&g1Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -220,6 +220,19 @@ func TestG1AffineConversions(t *testing.T) {
GenFp(),
GenFp(),
))
+ properties.Property("[BW6-761] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b fp.Element) bool {
+ g1 := fuzzG1Jac(&g1Gen, a)
+ g2 := fuzzG1Jac(&g1Gen, b)
+ var op1, op2 G1Affine
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -486,7 +499,7 @@ func BenchmarkG1JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG1AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +527,7 @@ func BenchmarkG1AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG1JacScalarMul(b *testing.B) {
+func BenchmarkG1JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bw6-761/g2.go b/ecc/bw6-761/g2.go
index a15998b959..a56f3dfeb7 100644
--- a/ecc/bw6-761/g2.go
+++ b/ecc/bw6-761/g2.go
@@ -336,7 +336,7 @@ func (p *G2Jac) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac {
if Q.IsInfinity() {
p.Z.SetZero()
diff --git a/ecc/bw6-761/g2_test.go b/ecc/bw6-761/g2_test.go
index 3f1c02bd0b..9630dbf178 100644
--- a/ecc/bw6-761/g2_test.go
+++ b/ecc/bw6-761/g2_test.go
@@ -85,7 +85,7 @@ func TestG2AffineIsOnCurve(t *testing.T) {
func(a fp.Element) bool {
var op1, op2 G2Affine
op1.FromJacobian(&g2Gen)
- op2.FromJacobian(&g2Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -486,7 +486,7 @@ func BenchmarkG2JacIsInSubGroup(b *testing.B) {
}
-func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
+func BenchmarkG2AffineBatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -514,7 +514,7 @@ func BenchmarkG2AffineBatchScalarMul(b *testing.B) {
}
}
-func BenchmarkG2JacScalarMul(b *testing.B) {
+func BenchmarkG2JacScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
diff --git a/ecc/bw6-761/hash_to_g1.go b/ecc/bw6-761/hash_to_g1.go
index ac6fa56242..716a516b96 100644
--- a/ecc/bw6-761/hash_to_g1.go
+++ b/ecc/bw6-761/hash_to_g1.go
@@ -89,43 +89,38 @@ func g1Isogeny(p *G1Affine) {
// g1SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g1SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.2. q = 3 mod 4
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q- (3 mod 4)
var tv1 fp.Element
- tv1.Square(v)
+ tv1.Square(v) // 1. tv1 = v²
var tv2 fp.Element
- tv2.Mul(u, v)
- tv1.Mul(&tv1, &tv2)
+ tv2.Mul(u, v) // 2. tv2 = u * v
+ tv1.Mul(&tv1, &tv2) // 3. tv1 = tv1 * tv2
var y1 fp.Element
{
var c1 big.Int
// c1 = 1722862596078933134849197420568914385619917228134037527378447540052405855560872934021920795822352921910216141938446653362790439780138561939837377924781325399737901274844627212593135907855899198987974925107492278210691228279767074
- c1.SetBytes([]byte{72, 186, 9, 62, 224, 243, 130, 180, 97, 242, 80, 1, 62, 191, 207, 174, 73, 134, 26, 160, 116, 81, 162, 20, 160, 157, 123, 224, 33, 239, 144, 92, 30, 233, 142, 57, 97, 58, 70, 64, 243, 174, 191, 201, 109, 8, 193, 33, 162, 114, 59, 68, 190, 127, 100, 28, 119, 52, 247, 28, 250, 255, 203, 166, 40, 69, 176, 149, 153, 234, 62, 5, 131, 62, 43, 186, 188, 41, 13, 249, 164, 79, 154, 28, 0, 0, 32, 189, 39, 64, 0, 0, 0, 0, 34})
- y1.Exp(tv1, &c1)
+ c1.SetBytes([]byte{72, 186, 9, 62, 224, 243, 130, 180, 97, 242, 80, 1, 62, 191, 207, 174, 73, 134, 26, 160, 116, 81, 162, 20, 160, 157, 123, 224, 33, 239, 144, 92, 30, 233, 142, 57, 97, 58, 70, 64, 243, 174, 191, 201, 109, 8, 193, 33, 162, 114, 59, 68, 190, 127, 100, 28, 119, 52, 247, 28, 250, 255, 203, 166, 40, 69, 176, 149, 153, 234, 62, 5, 131, 62, 43, 186, 188, 41, 13, 249, 164, 79, 154, 28, 0, 0, 32, 189, 39, 64, 0, 0, 0, 0, 34}) // c1 = (q - 3) / 4 # Integer arithmetic
+
+ y1.Exp(tv1, &c1) // 4. y1 = tv1ᶜ¹
}
- y1.Mul(&y1, &tv2)
+ y1.Mul(&y1, &tv2) // 5. y1 = y1 * tv2
var y2 fp.Element
- y2.Mul(&y1, &fp.Element{10289215067249928212, 13987875627487618797, 10154775028297877632, 5892581882377791321, 12835424790914788634, 14963278386355512102, 10283221901563449361, 9868336211540881409, 7345304935218488881, 6998778443322886180, 9453359982570584357, 56775348355244645})
-
- var tv3 fp.Element
- tv3.Square(&y1)
- tv3.Mul(&tv3, v)
-
- isQNr := tv3.NotEqual(u)
- z.Select(int(isQNr), &y1, &y2)
+ // c2 = sqrt(-Z)
+ tv3 := fp.Element{10289215067249928212, 13987875627487618797, 10154775028297877632, 5892581882377791321, 12835424790914788634, 14963278386355512102, 10283221901563449361, 9868336211540881409, 7345304935218488881, 6998778443322886180, 9453359982570584357, 56775348355244645}
+ y2.Mul(&y1, &tv3) // 6. y2 = y1 * c2
+ tv3.Square(&y1) // 7. tv3 = y1²
+ tv3.Mul(&tv3, v) // 8. tv3 = tv3 * v
+ isQNr := tv3.NotEqual(u) // 9. isQR = tv3 == u
+ z.Select(int(isQNr), &y1, &y2) // 10. y = CMOV(y2, y1, isQR)
return isQNr
}
-/*
-// g1SetZ sets z to [2].
-func g1SetZ(z *fp.Element) {
- z.Set( &fp.Element {289919226011913130, 13019990545710127566, 4409829457611675068, 13030600802816293865, 15696054586628993047, 9353078419867322391, 5664203968291172875, 5090703637405909511, 17774776443174359288, 10018561694451762270, 12632664537138156478, 46143195394855163} )
-}*/
-
// g1MulByZ multiplies x by [2] and stores the result in z
func g1MulByZ(z *fp.Element, x *fp.Element) {
@@ -136,30 +131,29 @@ func g1MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve1 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve1(u *fp.Element) G1Affine {
+ var sswuIsoCurveCoeffA = fp.Element{12169852093062392636, 3867460573998792965, 2540986171999662608, 3377838107874487171, 6313266756742099767, 5994530928773814047, 5007141583730923456, 2345996307867737670, 7096861766432061441, 10014420324597579745, 8416419844935780388, 63340978449966806}
+ var sswuIsoCurveCoeffB = fp.Element{9514135687797572479, 9972495974968977338, 17954535578332286571, 7437044986470910914, 13903267017721129281, 1871129682978723308, 13401268269932482209, 739043012311877982, 12116264695643437343, 1632209977726909861, 3621981106970059143, 65605772132525947}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g1MulByZ(&tv1, &tv1)
+ g1MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{9514135687797572479, 9972495974968977338, 17954535578332286571, 7437044986470910914, 13903267017721129281, 1871129682978723308, 13401268269932482209, 739043012311877982, 12116264695643437343, 1632209977726909861, 3621981106970059143, 65605772132525947})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g1NotZero(&tv2)
@@ -167,48 +161,45 @@ func mapToCurve1(u *fp.Element) G1Affine {
tv4 = fp.Element{289919226011913130, 13019990545710127566, 4409829457611675068, 13030600802816293865, 15696054586628993047, 9353078419867322391, 5664203968291172875, 5090703637405909511, 17774776443174359288, 10018561694451762270, 12632664537138156478, 46143195394855163}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{12169852093062392636, 3867460573998792965, 2540986171999662608, 3377838107874487171, 6313266756742099767, 5994530928773814047, 5007141583730923456, 2345996307867737670, 7096861766432061441, 10014420324597579745, 8416419844935780388, 63340978449966806}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{12169852093062392636, 3867460573998792965, 2540986171999662608, 3377838107874487171, 6313266756742099767, 5994530928773814047, 5007141583730923456, 2345996307867737670, 7096861766432061441, 10014420324597579745, 8416419844935780388, 63340978449966806})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{9514135687797572479, 9972495974968977338, 17954535578332286571, 7437044986470910914, 13903267017721129281, 1871129682978723308, 13401268269932482209, 739043012311877982, 12116264695643437343, 1632209977726909861, 3621981106970059143, 65605772132525947})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g1SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g1Sgn0(u)^g1Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G1Affine{x, y}
}
@@ -251,13 +242,13 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// g1Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g1Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
@@ -274,7 +265,7 @@ func MapToG1(u fp.Element) G1Affine {
// EncodeToG1 hashes a message to a point on the G1 curve using the SSWU map.
// It is faster than HashToG1, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG1(msg, dst []byte) (G1Affine, error) {
var res G1Affine
@@ -294,7 +285,7 @@ func EncodeToG1(msg, dst []byte) (G1Affine, error) {
// HashToG1 hashes a message to a point on the G1 curve using the SSWU map.
// Slower than EncodeToG1, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG1(msg, dst []byte) (G1Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -304,7 +295,7 @@ func HashToG1(msg, dst []byte) (G1Affine, error) {
Q0 := mapToCurve1(&u[0])
Q1 := mapToCurve1(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g1Isogeny(&Q0)
g1Isogeny(&Q1)
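For reference, the two entry points above are used as follows (illustrative; the domain separation tag is an arbitrary example string):

```go
package main

import (
	"fmt"

	bw6761 "github.com/consensys/gnark-crypto/ecc/bw6-761"
)

func main() {
	msg := []byte("message to hash")
	dst := []byte("EXAMPLE-APP-V01-CS01-with-BW6761G1_XMD:SHA-256_SSWU_RO_") // example tag only

	// uniform variant, usable as a random oracle
	p, err := bw6761.HashToG1(msg, dst)
	if err != nil {
		panic(err)
	}

	// cheaper, non-uniform variant
	q, err := bw6761.EncodeToG1(msg, dst)
	if err != nil {
		panic(err)
	}

	fmt.Println(p.IsInSubGroup(), q.IsInSubGroup()) // both true after cofactor clearing
}
```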
diff --git a/ecc/bw6-761/hash_to_g2.go b/ecc/bw6-761/hash_to_g2.go
index 076ad977e5..bacb36d661 100644
--- a/ecc/bw6-761/hash_to_g2.go
+++ b/ecc/bw6-761/hash_to_g2.go
@@ -260,43 +260,38 @@ func g2Isogeny(p *G2Affine) {
// g2SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v = 0, u/v is meaningless and the output is unspecified, without raising an error.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func g2SqrtRatio(z *fp.Element, u *fp.Element, v *fp.Element) uint64 {
- // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.2. q = 3 mod 4
+ // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q- (3 mod 4)
var tv1 fp.Element
- tv1.Square(v)
+ tv1.Square(v) // 1. tv1 = v²
var tv2 fp.Element
- tv2.Mul(u, v)
- tv1.Mul(&tv1, &tv2)
+ tv2.Mul(u, v) // 2. tv2 = u * v
+ tv1.Mul(&tv1, &tv2) // 3. tv1 = tv1 * tv2
var y1 fp.Element
{
var c1 big.Int
// c1 = 1722862596078933134849197420568914385619917228134037527378447540052405855560872934021920795822352921910216141938446653362790439780138561939837377924781325399737901274844627212593135907855899198987974925107492278210691228279767074
- c1.SetBytes([]byte{72, 186, 9, 62, 224, 243, 130, 180, 97, 242, 80, 1, 62, 191, 207, 174, 73, 134, 26, 160, 116, 81, 162, 20, 160, 157, 123, 224, 33, 239, 144, 92, 30, 233, 142, 57, 97, 58, 70, 64, 243, 174, 191, 201, 109, 8, 193, 33, 162, 114, 59, 68, 190, 127, 100, 28, 119, 52, 247, 28, 250, 255, 203, 166, 40, 69, 176, 149, 153, 234, 62, 5, 131, 62, 43, 186, 188, 41, 13, 249, 164, 79, 154, 28, 0, 0, 32, 189, 39, 64, 0, 0, 0, 0, 34})
- y1.Exp(tv1, &c1)
+ c1.SetBytes([]byte{72, 186, 9, 62, 224, 243, 130, 180, 97, 242, 80, 1, 62, 191, 207, 174, 73, 134, 26, 160, 116, 81, 162, 20, 160, 157, 123, 224, 33, 239, 144, 92, 30, 233, 142, 57, 97, 58, 70, 64, 243, 174, 191, 201, 109, 8, 193, 33, 162, 114, 59, 68, 190, 127, 100, 28, 119, 52, 247, 28, 250, 255, 203, 166, 40, 69, 176, 149, 153, 234, 62, 5, 131, 62, 43, 186, 188, 41, 13, 249, 164, 79, 154, 28, 0, 0, 32, 189, 39, 64, 0, 0, 0, 0, 34}) // c1 = (q - 3) / 4 # Integer arithmetic
+
+ y1.Exp(tv1, &c1) // 4. y1 = tv1ᶜ¹
}
- y1.Mul(&y1, &tv2)
+ y1.Mul(&y1, &tv2) // 5. y1 = y1 * tv2
var y2 fp.Element
- y2.Mul(&y1, &fp.Element{10751254254539175147, 9766094991671066833, 2332062865126794245, 278658983607570155, 16958907618386923993, 3828006658569666757, 13622760089646566549, 10723972911493525927, 12307000153990793747, 14543371959135298146, 6497680552248674778, 41056765998425020})
-
- var tv3 fp.Element
- tv3.Square(&y1)
- tv3.Mul(&tv3, v)
-
- isQNr := tv3.NotEqual(u)
- z.Select(int(isQNr), &y1, &y2)
+ // c2 = sqrt(-Z)
+ tv3 := fp.Element{10751254254539175147, 9766094991671066833, 2332062865126794245, 278658983607570155, 16958907618386923993, 3828006658569666757, 13622760089646566549, 10723972911493525927, 12307000153990793747, 14543371959135298146, 6497680552248674778, 41056765998425020}
+ y2.Mul(&y1, &tv3) // 6. y2 = y1 * c2
+ tv3.Square(&y1) // 7. tv3 = y1²
+ tv3.Mul(&tv3, v) // 8. tv3 = tv3 * v
+ isQNr := tv3.NotEqual(u) // 9. isQR = tv3 == u
+ z.Select(int(isQNr), &y1, &y2) // 10. y = CMOV(y2, y1, isQR)
return isQNr
}
-/*
-// g2SetZ sets z to [11].
-func g2SetZ(z *fp.Element) {
- z.Set( &fp.Element {4056054414400208518, 3320816571827031140, 10263935383895698150, 11003897938091601562, 15597443347325643510, 13135057492086854609, 2659919018052618801, 3683105852685266909, 6137961753831301777, 15077955943918945393, 14961510259660508891, 8138608324875079} )
-}*/
-
// g2MulByZ multiplies x by [11] and stores the result in z
func g2MulByZ(z *fp.Element, x *fp.Element) {
@@ -311,30 +306,29 @@ func g2MulByZ(z *fp.Element, x *fp.Element) {
*z = res
}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve2 implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve2(u *fp.Element) G2Affine {
+ var sswuIsoCurveCoeffA = fp.Element{13704010396169241312, 14330175345318364589, 4449492585807198633, 9884564993510771995, 16507506367033405761, 12171409358426895620, 3759742122315801393, 6972450370136308820, 13649992927502603798, 15742083997009939515, 4062268800652448528, 42571325818609943}
+ var sswuIsoCurveCoeffB = fp.Element{17251063859315847117, 13422534455279952781, 15626212001505409941, 8548929388122544483, 12216093319907597521, 15761783579263790289, 10925761432004348632, 8228665107915194054, 13147767302058909808, 5735540302608306489, 5152863309501448410, 45595036249636616}
+
var tv1 fp.Element
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- g2MulByZ(&tv1, &tv1)
+ g2MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 fp.Element
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 fp.Element
- //Standard doc line 5
var tv4 fp.Element
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &fp.Element{17251063859315847117, 13422534455279952781, 15626212001505409941, 8548929388122544483, 12216093319907597521, 15761783579263790289, 10925761432004348632, 8228665107915194054, 13147767302058909808, 5735540302608306489, 5152863309501448410, 45595036249636616})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &sswuIsoCurveCoeffB) // 6. tv3 = B * tv3
tv2NZero := g2NotZero(&tv2)
@@ -342,48 +336,45 @@ func mapToCurve2(u *fp.Element) G2Affine {
tv4 = fp.Element{4056054414400208518, 3320816571827031140, 10263935383895698150, 11003897938091601562, 15597443347325643510, 13135057492086854609, 2659919018052618801, 3683105852685266909, 6137961753831301777, 15077955943918945393, 14961510259660508891, 8138608324875079}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = fp.Element{13704010396169241312, 14330175345318364589, 4449492585807198633, 9884564993510771995, 16507506367033405761, 12171409358426895620, 3759742122315801393, 6972450370136308820, 13649992927502603798, 15742083997009939515, 4062268800652448528, 42571325818609943}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &sswuIsoCurveCoeffA) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 fp.Element
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 fp.Element
- tv5.Mul(&tv6, &fp.Element{13704010396169241312, 14330175345318364589, 4449492585807198633, 9884564993510771995, 16507506367033405761, 12171409358426895620, 3759742122315801393, 6972450370136308820, 13649992927502603798, 15742083997009939515, 4062268800652448528, 42571325818609943})
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffA) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &fp.Element{17251063859315847117, 13422534455279952781, 15626212001505409941, 8548929388122544483, 12216093319907597521, 15761783579263790289, 10925761432004348632, 8228665107915194054, 13147767302058909808, 5735540302608306489, 5152863309501448410, 45595036249636616})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &sswuIsoCurveCoeffB) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x fp.Element
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 fp.Element
- gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := g2SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y fp.Element
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int(g2Sgn0(u)^g2Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return G2Affine{x, y}
}
@@ -405,13 +396,13 @@ func g2EvalPolynomial(z *fp.Element, monic bool, coefficients []fp.Element, x *f
// g2Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func g2Sgn0(z *fp.Element) uint64 {
nonMont := *z
nonMont.FromMont()
-
+ // m == 1
return nonMont[0] % 2
}
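A quick illustrative sketch of the m == 1 branch (assuming bw6761 package scope, since g2Sgn0 is unexported): the sign is the parity of the canonical, non-Montgomery representation, which is why FromMont is called first.

	var e fp.Element
	e.SetUint64(5)
	odd := g2Sgn0(&e)  // 1: 5 is odd in canonical form
	e.SetUint64(4)
	even := g2Sgn0(&e) // 0: 4 is even
	_, _ = odd, even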
@@ -428,7 +419,7 @@ func MapToG2(u fp.Element) G2Affine {
// EncodeToG2 hashes a message to a point on the G2 curve using the SSWU map.
// It is faster than HashToG2, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeToG2(msg, dst []byte) (G2Affine, error) {
var res G2Affine
@@ -448,7 +439,7 @@ func EncodeToG2(msg, dst []byte) (G2Affine, error) {
// HashToG2 hashes a message to a point on the G2 curve using the SSWU map.
// Slower than EncodeToG2, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashToG2(msg, dst []byte) (G2Affine, error) {
u, err := hashToFp(msg, dst, 2*1)
if err != nil {
@@ -458,7 +449,7 @@ func HashToG2(msg, dst []byte) (G2Affine, error) {
Q0 := mapToCurve2(&u[0])
Q1 := mapToCurve2(&u[1])
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
g2Isogeny(&Q0)
g2Isogeny(&Q1)
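For reference, a hedged usage sketch of the two entry points documented above; the dst value is an arbitrary example tag, not a standardized one.

	msg := []byte("some message")
	dst := []byte("EXAMPLE-DST_BW6761G2_XMD:SHA-256_SSWU_RO_")
	Q, err := HashToG2(msg, dst) // uniform over G2, usable as a random oracle
	if err != nil {
		// hashToFp failed
	}
	P, _ := EncodeToG2(msg, dst) // faster, but not uniformly distributed
	_, _ = Q, P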
diff --git a/ecc/bw6-761/internal/fptower/e3.go b/ecc/bw6-761/internal/fptower/e3.go
index d30ca85df9..c8e1b9a76f 100644
--- a/ecc/bw6-761/internal/fptower/e3.go
+++ b/ecc/bw6-761/internal/fptower/e3.go
@@ -272,6 +272,8 @@ func (z *E3) MulByNonResidue(x *E3) *E3 {
}
// Inverse an element in E3
+//
+// if x == 0, sets and returns z = x
func (z *E3) Inverse(x *E3) *E3 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -300,6 +302,8 @@ func (z *E3) Inverse(x *E3) *E3 {
// BatchInvertE3 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE3(a []E3) []E3 {
res := make([]E3, len(a))
if len(a) == 0 {
diff --git a/ecc/bw6-761/internal/fptower/e6.go b/ecc/bw6-761/internal/fptower/e6.go
index f211c4a0c7..a23797234b 100644
--- a/ecc/bw6-761/internal/fptower/e6.go
+++ b/ecc/bw6-761/internal/fptower/e6.go
@@ -223,29 +223,46 @@ func (z *E6) CyclotomicSquareCompressed(x *E6) *E6 {
return z
}
-// Decompress Karabina's cyclotomic square result
-func (z *E6) Decompress(x *E6) *E6 {
+// DecompressKarabina decompresses Karabina's cyclotomic square result
+// if g3 != 0
+//   g4 = (E * g5² + 3 * g1² - 2 * g2) / (4 * g3)
+// if g3 == 0
+//   g4 = (2 * g1 * g5) / g2
+//
+// if g3 == g2 == 0, then g4 = g5 = g1 = 0 and g0 = 1 (i.e. x = 1)
+// the formulas in Theorem 3.1 are well-defined for all x in Gϕₙ \ {1}
+func (z *E6) DecompressKarabina(x *E6) *E6 {
var t [3]fp.Element
var one fp.Element
one.SetOne()
- // t0 = g1²
- t[0].Square(&x.B0.A1)
- // t1 = 3 * g1² - 2 * g2
- t[1].Sub(&t[0], &x.B0.A2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5² + t1
- t[2].Square(&x.B1.A2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.B1.A0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+ if x.B1.A0.IsZero() {
+ t[0].Mul(&x.B0.A1, &x.B1.A2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.B0.A2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.B0.A1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.B0.A2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.B1.A2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 1/(4 * g3)
+ t[1].Double(&x.B1.A0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.B1.A1.Mul(&t[0], &t[1])
+ z.B1.A1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.B0.A2, &x.B0.A1)
@@ -254,7 +271,7 @@ func (z *E6) Decompress(x *E6) *E6 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.B1.A0, &x.B1.A2)
// c₀ = E * (2 * g4² + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
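A minimal sketch of how the renamed decompression is meant to be chained with the compressed squaring, assuming a is an E6 element already in the cyclotomic subgroup:

	var sq, full E6
	sq.CyclotomicSquareCompressed(&a) // only g1, g2, g3, g5 of a² are meaningful here
	full.DecompressKarabina(&sq)      // recovers g4 and g0, so full == a²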
@@ -308,6 +325,8 @@ func (z *E6) CyclotomicSquare(x *E6) *E6 {
}
// Inverse set z to the inverse of x in E6 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -325,6 +344,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
@@ -697,6 +718,10 @@ func BatchCompressTorus(x []E6) ([]E3, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].B1)
+ // return an error if any of the x[i].B1 is 0
+ if res[i].IsZero() {
+ return []E3{}, errors.New("invalid input")
+ }
}
t := BatchInvertE3(res) // costs 1 inverse
diff --git a/ecc/bw6-761/internal/fptower/e6_pairing.go b/ecc/bw6-761/internal/fptower/e6_pairing.go
index 94421c3f3e..b8d2ca40fe 100644
--- a/ecc/bw6-761/internal/fptower/e6_pairing.go
+++ b/ecc/bw6-761/internal/fptower/e6_pairing.go
@@ -2,9 +2,21 @@ package fptower
import "github.com/consensys/gnark-crypto/ecc/bw6-761/fp"
+func (z *E6) nSquare(n int) {
+ for i := 0; i < n; i++ {
+ z.CyclotomicSquare(z)
+ }
+}
+
+func (z *E6) nSquareCompressed(n int) {
+ for i := 0; i < n; i++ {
+ z.CyclotomicSquareCompressed(z)
+ }
+}
+
// Expt set z to x^t in E6 and return z
func (z *E6) Expt(x *E6) *E6 {
-
+ // const tAbsVal uint64 = 9586122913090633729
// tAbsVal in binary: 1000010100001000110000000000000000000000000000000000000000000001
// drop the low 46 bits (all 0 except the least significant bit): 100001010000100011 = 136227
// Shortest addition chains can be found at https://wwwhomes.uni-bielefeld.de/achim/addition_chain.html
@@ -12,34 +24,20 @@ func (z *E6) Expt(x *E6) *E6 {
var result, x33 E6
// a shortest addition chain for 136227
- result.Set(x) // 0 1
- result.CyclotomicSquare(&result) // 1( 0) 2
- result.CyclotomicSquare(&result) // 2( 1) 4
- result.CyclotomicSquare(&result) // 3( 2) 8
- result.CyclotomicSquare(&result) // 4( 3) 16
- result.CyclotomicSquare(&result) // 5( 4) 32
- result.Mul(&result, x) // 6( 5, 0) 33
- x33.Set(&result) // save x33 for step 14
- result.CyclotomicSquare(&result) // 7( 6) 66
- result.CyclotomicSquare(&result) // 8( 7) 132
- result.CyclotomicSquare(&result) // 9( 8) 264
- result.CyclotomicSquare(&result) // 10( 9) 528
- result.CyclotomicSquare(&result) // 11(10) 1056
- result.CyclotomicSquare(&result) // 12(11) 2112
- result.CyclotomicSquare(&result) // 13(12) 4224
- result.Mul(&result, &x33) // 14(13, 6) 4257
- result.CyclotomicSquare(&result) // 15(14) 8514
- result.CyclotomicSquare(&result) // 16(15) 17028
- result.CyclotomicSquare(&result) // 17(16) 34056
- result.CyclotomicSquare(&result) // 18(17) 68112
- result.Mul(&result, x) // 19(18, 0) 68113
- result.CyclotomicSquare(&result) // 20(19) 136226
- result.Mul(&result, x) // 21(20, 0) 136227
+ result.Set(x)
+ result.nSquare(5)
+ result.Mul(&result, x)
+ x33.Set(&result)
+ result.nSquare(7)
+ result.Mul(&result, &x33)
+ result.nSquare(4)
+ result.Mul(&result, x)
+ result.CyclotomicSquare(&result)
+ result.Mul(&result, x)
// the remaining 46 bits
- for i := 0; i < 46; i++ {
- result.CyclotomicSquare(&result)
- }
+ result.nSquareCompressed(46)
+ result.DecompressKarabina(&result)
result.Mul(&result, x)
z.Set(&result)
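To keep the condensed chain auditable, the arithmetic it relies on is, informally: tAbsVal = 136227·2⁴⁶ + 1, and the multiplications above reach 136227 exactly as the removed step-by-step comments did (32+1 = 33, 33·2⁷+33 = 4257, 4257·2⁴+1 = 68113, 68113·2+1 = 136227). A sanity-check sketch:

	const tAbsVal uint64 = 9586122913090633729
	const head uint64 = 136227
	var _ = (head<<46 + 1) == tAbsVal // true: the low 46 bits of t are all 0 except bit 0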
diff --git a/ecc/bw6-761/internal/fptower/e6_test.go b/ecc/bw6-761/internal/fptower/e6_test.go
index 4841bb4564..398f8fdea1 100644
--- a/ecc/bw6-761/internal/fptower/e6_test.go
+++ b/ecc/bw6-761/internal/fptower/e6_test.go
@@ -302,13 +302,29 @@ func TestE6Ops(t *testing.T) {
properties.Property("[BW6-761] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E6) bool {
- var b, c, d E6
+ var _a, b, c, d, _c, _d E6
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.Frobenius(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.Frobenius(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
- d.CyclotomicSquareCompressed(a).Decompress(&d)
+ d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
- return c.Equal(&d)
+ return c.Equal(&d) && _c.Equal(&_d)
},
genA,
diff --git a/ecc/bw6-761/multiexp.go b/ecc/bw6-761/multiexp.go
index edc0a7371b..e9cef54ee0 100644
--- a/ecc/bw6-761/multiexp.go
+++ b/ecc/bw6-761/multiexp.go
@@ -41,7 +41,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract
// 2^{c} to the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates wheter the provided scalars are in montgomery form
// returns smallValues, which represent the number of scalars which meets the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
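As a toy illustration of the borrow described above (values are made up), with c = 4 so a raw window digit lives in [0, 2^c):

	c := uint64(4)
	digit, carry := int64(13), int64(0) // 13 > 2^(c-1) = 8
	if digit > int64(1)<<(c-1) {
		digit -= int64(1) << c // digit becomes -3
		carry = 1              // 2^c borrowed from the next window
	}
	// the bucket for |digit| = 3 then accumulates -G instead of G
	_, _ = digit, carry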
@@ -163,6 +163,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) {
var _p G1Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -173,6 +175,8 @@ func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -209,6 +213,8 @@ func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
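A hedged caller-side sketch of the new guard; points and scalars are placeholder slices:

	var acc G1Jac
	cfg := ecc.MultiExpConfig{NbTasks: 2048} // deliberately above the new 1024 bound
	if _, err := acc.MultiExp(points, scalars, cfg); err != nil {
		// err: "invalid config: config.NbTasks > 1024"
	}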
@@ -573,6 +579,8 @@ func (p *G1Jac) msmC16(points []G1Affine, scalars []fr.Element, splitFirstChunk
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) {
var _p G2Jac
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -583,6 +591,8 @@ func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -619,6 +629,8 @@ func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.Mul
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
diff --git a/ecc/bw6-761/multiexp_test.go b/ecc/bw6-761/multiexp_test.go
index d6901edc88..33a90ba3ad 100644
--- a/ecc/bw6-761/multiexp_test.go
+++ b/ecc/bw6-761/multiexp_test.go
@@ -92,7 +92,14 @@ func TestMultiExpG1(t *testing.T) {
genScalar,
))
- properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from a template and contains the available parameters for the multiexp window size
+ cRange := []uint64{4, 5, 8, 16}
+ if testing.Short() {
+ // test only "odd" and "even" window sizes (i.e. sizes that divide the word size vs. sizes that do not)
+ cRange = []uint64{5, 16}
+ }
+
+ properties.Property(fmt.Sprintf("[G1] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G1Jac
@@ -111,13 +118,21 @@ func TestMultiExpG1(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G1Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G1Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG1Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG1Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -148,7 +163,7 @@ func TestMultiExpG1(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G1Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi)
@@ -322,7 +337,12 @@ func TestMultiExpG2(t *testing.T) {
genScalar,
))
- properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ // cRange is generated from a template and contains the available parameters for the multiexp window size.
+ // For G2, CI suffers with large c sizes since it needs to allocate a lot of memory for the buckets,
+ // so test only "odd" and "even" window sizes (i.e. sizes that divide the word size vs. sizes that do not).
+ cRange := []uint64{5, 16}
+
+ properties.Property(fmt.Sprintf("[G2] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected G2Jac
@@ -341,13 +361,21 @@ func TestMultiExpG2(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 G2Jac
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]G2Jac, len(cRange)+1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInnerG2Jac(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInnerG2Jac(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i := 1; i < len(results); i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -378,7 +406,7 @@ func TestMultiExpG2(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul G2Affine
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi)
diff --git a/ecc/bw6-761/pairing.go b/ecc/bw6-761/pairing.go
index 4cba6ae442..4e5dcfa8d0 100644
--- a/ecc/bw6-761/pairing.go
+++ b/ecc/bw6-761/pairing.go
@@ -31,7 +31,9 @@ type lineEvaluation struct {
}
// Pair calculates the reduced pairing for a set of points
-// ∏ᵢ e(Pᵢ, Qᵢ)
+// ∏ᵢ e(Pᵢ, Qᵢ).
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
f, err := MillerLoop(P, Q)
if err != nil {
@@ -42,6 +44,8 @@ func Pair(P []G1Affine, Q []G2Affine) (GT, error) {
// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One
// ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+//
+// This function doesn't check that the inputs are in the correct subgroup. See IsInSubGroup.
func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) {
f, err := Pair(P, Q)
if err != nil {
@@ -175,8 +179,6 @@ func MillerLoop(P []G1Affine, Q []G2Affine) (GT, error) {
// precomputations
pProj1 := make([]g1Proj, n)
p1 := make([]G1Affine, n)
- p01 := make([]G1Affine, n)
- p10 := make([]G1Affine, n)
pProj01 := make([]g1Proj, n) // P0+P1
pProj10 := make([]g1Proj, n) // P0-P1
l01 := make([]lineEvaluation, n)
@@ -198,8 +200,8 @@ func MillerLoop(P []G1Affine, Q []G2Affine) (GT, error) {
l10[k].r1.Mul(&l10[k].r1, &q[k].X)
l10[k].r0.Mul(&l10[k].r0, &q[k].Y)
}
- BatchProjectiveToAffineG1(pProj01, p01)
- BatchProjectiveToAffineG1(pProj10, p10)
+ p01 := BatchProjectiveToAffineG1(pProj01)
+ p10 := BatchProjectiveToAffineG1(pProj10)
// f_{a0+\lambda*a1,P}(Q)
var result, ss GT
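Since Pair and PairingCheck now document that they do not validate subgroup membership, a hedged sketch of the caller-side check they expect for untrusted points (p and q are placeholders):

	if !p.IsInSubGroup() || !q.IsInSubGroup() {
		// reject the inputs before pairing
	}
	ok, err := PairingCheck([]G1Affine{p}, []G2Affine{q})
	_, _ = ok, err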
diff --git a/ecc/bw6-761/twistededwards/eddsa/eddsa.go b/ecc/bw6-761/twistededwards/eddsa/eddsa.go
index d475671752..cf0574e10e 100644
--- a/ecc/bw6-761/twistededwards/eddsa/eddsa.go
+++ b/ecc/bw6-761/twistededwards/eddsa/eddsa.go
@@ -98,7 +98,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -146,7 +146,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -232,8 +232,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -241,9 +241,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
diff --git a/ecc/bw6-761/twistededwards/point.go b/ecc/bw6-761/twistededwards/point.go
index 34a7e3429f..aa332d2aa4 100644
--- a/ecc/bw6-761/twistededwards/point.go
+++ b/ecc/bw6-761/twistededwards/point.go
@@ -256,13 +256,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -409,9 +409,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
@@ -622,9 +622,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
var _scalar big.Int
_scalar.Set(scalar)
p.Set(p1)
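A short sketch of the renamed API in use (the former ScalarMul spelling no longer exists):

	curve := GetEdwardsCurve()
	var P PointAffine
	P.ScalarMultiplication(&curve.Base, big.NewInt(42))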
diff --git a/ecc/bw6-761/twistededwards/point_test.go b/ecc/bw6-761/twistededwards/point_test.go
index a75c5b24be..56225dc1bd 100644
--- a/ecc/bw6-761/twistededwards/point_test.go
+++ b/ecc/bw6-761/twistededwards/point_test.go
@@ -124,8 +124,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -336,7 +336,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -352,7 +352,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -371,8 +371,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -390,14 +390,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -413,9 +413,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -430,11 +430,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -463,7 +463,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -498,7 +498,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -515,11 +515,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -547,7 +547,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -582,7 +582,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -599,11 +599,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -619,8 +619,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -638,8 +638,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -658,8 +658,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -677,8 +677,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -697,9 +697,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -760,7 +760,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -776,6 +776,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/ecc/utils.go b/ecc/utils.go
index 78da3a8197..f02b3c3d74 100644
--- a/ecc/utils.go
+++ b/ecc/utils.go
@@ -147,7 +147,7 @@ func SplitScalar(s *big.Int, l *Lattice) [2]big.Int {
k2.Mul(s, &l.b2).Neg(&k2)
// right-shift instead of division by lattice determinant
// this increases the bounds on k1 and k2 by 1
- // but we check this ScalarMul alg. (not constant-time)
+ // but this is checked in the ScalarMultiplication alg. (not constant-time)
n := 2 * uint(((l.Det.BitLen()+32)>>6)<<6)
k1.Rsh(&k1, n)
k2.Rsh(&k2, n)
diff --git a/field/goff/cmd/root.go b/field/goff/cmd/root.go
index 37b07864ec..2d5aedfbe0 100644
--- a/field/goff/cmd/root.go
+++ b/field/goff/cmd/root.go
@@ -13,8 +13,6 @@
// limitations under the License.
// Package cmd is the CLI interface for goff
-// goff -m 21888242871...94645226208583 -o ./bn256/ -p bn256 -e Element
-// will generate field arithmetic code for given modulus
package cmd
import (
diff --git a/field/goff/main.go b/field/goff/main.go
index b2bcbc2bda..54ff21a9c6 100644
--- a/field/goff/main.go
+++ b/field/goff/main.go
@@ -12,8 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package goff (go finite field) is a library that generates fast field arithmetic code for a given modulus
-// see github.com/consensys/goff/cmd for CLI usage
+// Package goff (go finite field) is a library that generates fast field arithmetic code for a given modulus.
+//
+// Generated code is optimized for x86 (amd64) targets, and most methods do not allocate memory on the heap.
+//
+// Example usage:
+// goff -m 0xffffffff00000001 -o ./goldilocks/ -p goldilocks -e Element
+//
+// Warning
+//
+// The generated code has not been audited for all moduli (only for bn254 and bls12-381) and is provided as-is. In particular, there are no security guarantees such as a constant-time implementation or side-channel attack resistance.
package main
import "github.com/consensys/gnark-crypto/field/goff/cmd"
diff --git a/field/goldilocks/element.go b/field/goldilocks/element.go
index a074d0f81e..5f8df411a7 100644
--- a/field/goldilocks/element.go
+++ b/field/goldilocks/element.go
@@ -167,7 +167,7 @@ func (z *Element) SetInterface(i1 interface{}) (*Element, error) {
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set goldilocks.Element with ")
@@ -778,12 +778,13 @@ func (z *Element) setBigInt(v *big.Int) *Element {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *Element) SetString(number string) *Element {
+// If the number is invalid, this method leaves z unchanged and returns nil and an error.
+func (z *Element) SetString(number string) (*Element, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("Element.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("Element.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -791,7 +792,7 @@ func (z *Element) SetString(number string) *Element {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
// MarshalJSON returns json encoding of z (z.Text(10))
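A minimal sketch of the new error-returning contract (SetString previously panicked on a malformed input):

	var z Element
	if _, err := z.SetString("not-a-number"); err != nil {
		// z is left unchanged; handle the parse error
	}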
diff --git a/go.mod b/go.mod
index f4b9cd36c7..583ea31428 100644
--- a/go.mod
+++ b/go.mod
@@ -3,13 +3,13 @@ module github.com/consensys/gnark-crypto
go 1.17
require (
- github.com/consensys/bavard v0.1.12
+ github.com/consensys/bavard v0.1.13
github.com/leanovate/gopter v0.2.9
github.com/mmcloughlin/addchain v0.4.0
- github.com/spf13/cobra v1.4.0
- github.com/stretchr/testify v1.7.1
- golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
- golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8
+ github.com/spf13/cobra v1.5.0
+ github.com/stretchr/testify v1.8.0
+ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
+ golang.org/x/sys v0.0.0-20220727055044-e65921a090b8
)
require (
@@ -17,6 +17,6 @@ require (
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
diff --git a/go.sum b/go.sum
index 19e320387d..24019a2212 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,8 @@
github.com/consensys/bavard v0.1.12 h1:rApQlUvBg5FeW/fnigtVnAs0sBrgDN2pEuHNdWElSUE=
github.com/consensys/bavard v0.1.12/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -15,21 +17,23 @@ github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFV
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 h1:S25/rfnfsMVgORT4/J61MJ7rdyseOZOyvLIrZEZ7s6s=
-golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220727055044-e65921a090b8 h1:dyU22nBWzrmTQxtNrr4dzVOvaw35nUYE279vF9UmsI8=
+golang.org/x/sys v0.0.0-20220727055044-e65921a090b8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -37,7 +41,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
diff --git a/internal/field/field.go b/internal/field/field.go
index 0d22e2a333..eafca73357 100644
--- a/internal/field/field.go
+++ b/internal/field/field.go
@@ -431,9 +431,7 @@ func CoordNameForExtensionDegree(degree uint8) string {
}
func (f *FieldConfig) WriteElement(element Element) string {
- builder := bavard.StringBuilderPool.Get().(*strings.Builder)
- builder.Reset()
- defer bavard.StringBuilderPool.Put(builder)
+ var builder strings.Builder
builder.WriteString("{")
length := len(element)
@@ -449,7 +447,7 @@ func (f *FieldConfig) WriteElement(element Element) string {
builder.WriteString(": fp.Element{")
}
mont := f.ToMont(e)
- bavard.WriteBigIntAsUint64Slice(builder, &mont)
+ bavard.WriteBigIntAsUint64Slice(&builder, &mont)
if length > 1 {
builder.WriteString("},\n")
}
diff --git a/internal/field/internal/templates/element/base.go b/internal/field/internal/templates/element/base.go
index 17b6047340..f0388acf2f 100644
--- a/internal/field/internal/templates/element/base.go
+++ b/internal/field/internal/templates/element/base.go
@@ -161,7 +161,7 @@ func (z *{{.ElementName}}) SetInterface(i1 interface{}) (*{{.ElementName}}, erro
case int:
return z.SetInt64(int64(c1)), nil
case string:
- return z.SetString(c1), nil
+ return z.SetString(c1)
case *big.Int:
if c1 == nil {
return nil, errors.New("can't set {{.PackageName}}.{{.ElementName}} with ")
diff --git a/internal/field/internal/templates/element/conv.go b/internal/field/internal/templates/element/conv.go
index a04a311e93..2633f448a0 100644
--- a/internal/field/internal/templates/element/conv.go
+++ b/internal/field/internal/templates/element/conv.go
@@ -207,12 +207,13 @@ func (z *{{.ElementName}}) setBigInt(v *big.Int) *{{.ElementName}} {
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
//
-func (z *{{.ElementName}}) SetString(number string) *{{.ElementName}} {
+// If the number is invalid, this method leaves z unchanged and returns nil and an error.
+func (z *{{.ElementName}}) SetString(number string) (*{{.ElementName}}, error) {
// get temporary big int from the pool
vv := bigIntPool.Get().(*big.Int)
if _, ok := vv.SetString(number, 0); !ok {
- panic("{{.ElementName}}.SetString failed -> can't parse number into a big.Int " + number)
+ return nil, errors.New("{{.ElementName}}.SetString failed -> can't parse number into a big.Int " + number)
}
z.SetBigInt(vv)
@@ -220,7 +221,7 @@ func (z *{{.ElementName}}) SetString(number string) *{{.ElementName}} {
// release object into pool
bigIntPool.Put(vv)
- return z
+ return z, nil
}
diff --git a/internal/generator/config/curve.go b/internal/generator/config/curve.go
index 6fcb954c94..0d387a7cf2 100644
--- a/internal/generator/config/curve.go
+++ b/internal/generator/config/curve.go
@@ -68,7 +68,7 @@ var TwistedEdwardsCurves []TwistedEdwardsCurve
func defaultCRange() []int {
// default range for C values in the multiExp
- return []int{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22}
+ return []int{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21}
}
func addCurve(c *Curve) {
diff --git a/internal/generator/ecc/generate.go b/internal/generator/ecc/generate.go
index 587e865b6b..594e1343cd 100644
--- a/internal/generator/ecc/generate.go
+++ b/internal/generator/ecc/generate.go
@@ -14,7 +14,6 @@ func Generate(conf config.Curve, baseDir string, bgen *bavard.BatchGenerator) er
packageName := strings.ReplaceAll(conf.Name, "-", "")
entries := []bavard.Entry{
- {File: filepath.Join(baseDir, "doc.go"), Templates: []string{"doc.go.tmpl"}},
{File: filepath.Join(baseDir, "multiexp.go"), Templates: []string{"multiexp.go.tmpl"}},
{File: filepath.Join(baseDir, "multiexp_test.go"), Templates: []string{"tests/multiexp.go.tmpl"}},
{File: filepath.Join(baseDir, "marshal.go"), Templates: []string{"marshal.go.tmpl"}},
diff --git a/internal/generator/ecc/template/doc.go.tmpl b/internal/generator/ecc/template/doc.go.tmpl
deleted file mode 100644
index 58fc8ef640..0000000000
--- a/internal/generator/ecc/template/doc.go.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-// Package {{.Package}} efficient elliptic curve and pairing implementation for {{.Name}}.
-//
-// Warning
-//
-// This code has not been audited and is provided as-is. In particular, there is no security guarantees such as constant time implementation or side-channel attack resistance.
-package {{.Package}}
\ No newline at end of file
diff --git a/internal/generator/ecc/template/hash_to_curve.go.tmpl b/internal/generator/ecc/template/hash_to_curve.go.tmpl
index 7a6e87575f..d4e720bf57 100644
--- a/internal/generator/ecc/template/hash_to_curve.go.tmpl
+++ b/internal/generator/ecc/template/hash_to_curve.go.tmpl
@@ -50,28 +50,36 @@ func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) {
// {{$CurveName}}Sgn0 is an algebraic substitute for the notion of sign in ordered fields
// Namely, every non-zero quadratic residue in a finite field of characteristic =/= 2 has exactly two square roots, one of each sign
-// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/ section 4.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-the-sgn0-function
// The sign of an element is not obviously related to that of its Montgomery form
func {{$CurveName}}Sgn0(z *{{$CoordType}}) uint64 {
nonMont := *z
nonMont.FromMont()
- {{if eq $TowerDegree 1}}
+ {{if eq $TowerDegree 1}} // m == 1
return nonMont[0]%2
{{else}}
- sign := uint64(0)
- zero := uint64(1)
+ sign := uint64(0) // 1. sign = 0
+ zero := uint64(1) // 2. zero = 1
var signI uint64
var zeroI uint64
- {{range $i := interval 0 $TowerDegree}}
- signI = nonMont.{{$.FieldCoordName}}{{$i}}[0] % 2
- sign = sign | (zero & signI)
- {{if not (eq $i (sub $TowerDegree 1))}}
+ {{ range $i := interval 0 $TowerDegree}}
+ // 3. i = {{add $i 1}}
+ signI = nonMont.{{$.FieldCoordName}}{{$i}}[0] % 2 // 4. sign_i = x_i mod 2
+ {{- $notLast := not (eq $i (sub $TowerDegree 1))}}
+ {{- if $notLast}}
zeroI = g1NotZero(&nonMont.{{$.FieldCoordName}}{{$i}})
- zeroI = 1 ^ (zeroI|-zeroI)>>63
- zero = zero & zeroI
- {{end}}
- {{end}}
+ zeroI = 1 ^ (zeroI|-zeroI)>>63 // 5. zero_i = x_i == 0
+ {{- else}}
+ // 5. zero_i = x_i == 0
+ {{- end}}
+ sign = sign | (zero & signI) // 6. sign = sign OR (zero AND sign_i) # Avoid short-circuit logic ops
+ {{- if $notLast}}
+ zero = zero & zeroI // 7. zero = zero AND zero_i
+ {{- else}}
+ // 7. zero = zero AND zero_i
+ {{- end}}
+ {{- end}}
return sign
{{end}}
}
@@ -93,7 +101,7 @@ func MapTo{{$CurveTitle}}(u {{$CoordType}}) {{$AffineType}} {
// EncodeTo{{$CurveTitle}} hashes a message to a point on the {{$CurveTitle}} curve using the {{.MappingAlgorithm}} map.
// It is faster than HashTo{{$CurveTitle}}, but the result is not uniformly distributed. Unsuitable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-//https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/#section-6.6.3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func EncodeTo{{$CurveTitle}}(msg, dst []byte) ({{$AffineType}}, error) {
var res {{$AffineType}}
@@ -123,7 +131,7 @@ func EncodeTo{{$CurveTitle}}(msg, dst []byte) ({{$AffineType}}, error) {
// HashTo{{$CurveTitle}} hashes a message to a point on the {{$CurveTitle}} curve using the {{.MappingAlgorithm}} map.
// Slower than EncodeTo{{$CurveTitle}}, but usable as a random oracle.
// dst stands for "domain separation tag", a string unique to the construction using the hash function
-// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3
+//https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#roadmap
func HashTo{{$CurveTitle}}(msg, dst []byte) ({{$AffineType}}, error) {
u, err := hashToFp(msg, dst, 2 * {{$TowerDegree}})
if err != nil {
@@ -143,7 +151,7 @@ func HashTo{{$CurveTitle}}(msg, dst []byte) ({{$AffineType}}, error) {
{{end}}
{{ if $isogenyNeeded }}
- //TODO: Add in E' first, then apply isogeny
+ //TODO (perf): Add in E' first, then apply isogeny
{{$CurveName}}Isogeny(&Q0)
{{$CurveName}}Isogeny(&Q1)
{{ end }}
diff --git a/internal/generator/ecc/template/multiexp.go.tmpl b/internal/generator/ecc/template/multiexp.go.tmpl
index 8571c7c79d..e476f889b7 100644
--- a/internal/generator/ecc/template/multiexp.go.tmpl
+++ b/internal/generator/ecc/template/multiexp.go.tmpl
@@ -32,7 +32,7 @@ type selector struct {
// if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract
// 2^{c} to the current digit, making it negative.
// negative digits can be processed in a later step as adding -G into the bucket instead of G
-// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul)
+// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMultiplication)
// scalarsMont indicates wheter the provided scalars are in montgomery form
// returns smallValues, which represent the number of scalars which meets the following condition
// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero)
@@ -165,6 +165,8 @@ func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *{{ $.TAffine }}) MultiExp(points []{{ $.TAffine }}, scalars []fr.Element, config ecc.MultiExpConfig) (*{{ $.TAffine }}, error) {
var _p {{$.TJacobian}}
if _, err := _p.MultiExp(points, scalars, config); err != nil {
@@ -175,6 +177,8 @@ func (p *{{ $.TAffine }}) MultiExp(points []{{ $.TAffine }}, scalars []fr.Elemen
}
// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf
+//
+// This call returns an error if len(scalars) != len(points) or if the provided config is invalid.
func (p *{{ $.TJacobian }}) MultiExp(points []{{ $.TAffine }}, scalars []fr.Element, config ecc.MultiExpConfig) (*{{ $.TJacobian }}, error) {
// note:
// each of the msmCX method is the same, except for the c constant it declares
@@ -211,6 +215,8 @@ func (p *{{ $.TJacobian }}) MultiExp(points []{{ $.TAffine }}, scalars []fr.Elem
// if nbTasks is not set, use all available CPUs
if config.NbTasks <= 0 {
config.NbTasks = runtime.NumCPU()
+ } else if config.NbTasks > 1024 {
+ return nil, errors.New("invalid config: config.NbTasks > 1024")
}
// here, we compute the best C for nbPoints
diff --git a/internal/generator/ecc/template/point.go.tmpl b/internal/generator/ecc/template/point.go.tmpl
index 1274d5708c..e54abcd566 100644
--- a/internal/generator/ecc/template/point.go.tmpl
+++ b/internal/generator/ecc/template/point.go.tmpl
@@ -61,6 +61,16 @@ func (p *{{ $TAffine }}) ScalarMultiplication(a *{{ $TAffine }}, s *big.Int) *{{
return p
}
+{{- if eq .PointName "g1"}}
+// ScalarMultiplicationAffine computes and returns p = a ⋅ s
+// Takes an affine point and returns a Jacobian point (useful for KZG)
+func (p *{{ $TJacobian }}) ScalarMultiplicationAffine(a *{{ $TAffine }}, s *big.Int) *{{ $TJacobian }} {
+ p.FromAffine(a)
+ p.mulGLV(p, s)
+ return p
+}
+{{- end}}
+
// Add adds two point in affine coordinates.
// This should rarely be used as it is very inefficient compared to Jacobian
func (p *{{ $TAffine }}) Add(a, b *{{ $TAffine }}) *{{ $TAffine }} {
@@ -352,7 +362,7 @@ func (p *{{ $TJacobian }}) String() string {
return _p.String()
}
-// FromAffine sets p = Q, p in Jacboian, Q in affine
+// FromAffine sets p = Q, p in Jacobian, Q in affine
func (p *{{ $TJacobian }}) FromAffine(Q *{{ $TAffine }}) *{{ $TJacobian }} {
if Q.IsInfinity() {
p.Z.SetZero()
@@ -1313,9 +1323,9 @@ func (p *{{ $TProjective }}) FromAffine(Q *{{ $TAffine }}) *{{ $TProjective }} {
{{- if eq .PointName "g1"}}
// BatchProjectiveToAffine{{ toUpper .PointName }} converts points in Projective coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchProjectiveToAffine{{ toUpper .PointName }}(points []{{ $TProjective }}, result []{{ $TAffine }}) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchProjectiveToAffine{{ toUpper .PointName }}(points []{{ $TProjective }}) []{{ $TAffine }} {
+ result := make([]{{ $TAffine }}, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -1335,7 +1345,7 @@ func BatchProjectiveToAffine{{ toUpper .PointName }}(points []{{ $TProjective }}
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the infinity point in affine coordinates
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -1346,7 +1356,7 @@ func BatchProjectiveToAffine{{ toUpper .PointName }}(points []{{ $TProjective }}
parallel.Execute(len(points), func(start, end int) {
for i := start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the infinity point in affine coordinates
continue
}
a := result[i].X
@@ -1354,6 +1364,7 @@ func BatchProjectiveToAffine{{ toUpper .PointName }}(points []{{ $TProjective }}
result[i].Y.Mul(&points[i].y, &a)
}
})
+ return result
}
{{end }}
{{end }}
@@ -1363,9 +1374,9 @@ func BatchProjectiveToAffine{{ toUpper .PointName }}(points []{{ $TProjective }}
{{- if eq .PointName "g1"}}
// BatchJacobianToAffine{{ toUpper .PointName }} converts points in Jacobian coordinates to Affine coordinates
-// performing a single field inversion (Montgomery batch inversion trick)
-// result must be allocated with len(result) == len(points)
-func BatchJacobianToAffine{{ toUpper .PointName }}(points []{{ $TJacobian }}, result []{{ $TAffine }}) {
+// performing a single field inversion (Montgomery batch inversion trick).
+func BatchJacobianToAffine{{ toUpper .PointName }}(points []{{ $TJacobian }}) []{{ $TAffine }} {
+ result := make([]{{ $TAffine }}, len(points))
zeroes := make([]bool, len(points))
accumulator := fp.One()
@@ -1385,7 +1396,7 @@ func BatchJacobianToAffine{{ toUpper .PointName }}(points []{{ $TJacobian }}, re
for i := len(points) - 1; i >= 0; i-- {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the infinity point in affine coordinates
continue
}
result[i].X.Mul(&result[i].X, &accInverse)
@@ -1396,7 +1407,7 @@ func BatchJacobianToAffine{{ toUpper .PointName }}(points []{{ $TJacobian }}, re
parallel.Execute( len(points), func(start, end int) {
for i:=start; i < end; i++ {
if zeroes[i] {
- // do nothing, X and Y are zeroes in affine.
+ // do nothing, (X=0, Y=0) is the infinity point in affine coordinates
continue
}
var a, b fp.Element
@@ -1408,6 +1419,7 @@ func BatchJacobianToAffine{{ toUpper .PointName }}(points []{{ $TJacobian }}, re
}
})
+ return result
}
{{- end}}
@@ -1475,8 +1487,7 @@ func BatchScalarMultiplication{{ toUpper .PointName }}(base *{{ $TAffine }}, sca
{{- if eq .PointName "g1"}}
// convert our base exp table into affine to use AddMixed
- baseTableAff := make([]{{ $TAffine }}, (1<<(c-1)))
- BatchJacobianToAffine{{ toUpper .PointName}}(baseTable, baseTableAff)
+ baseTableAff := BatchJacobianToAffine{{ toUpper .PointName}}(baseTable)
toReturn := make([]{{ $TJacobian }}, len(scalars))
{{- else}}
toReturn := make([]{{ $TAffine }}, len(scalars))
@@ -1537,8 +1548,7 @@ func BatchScalarMultiplication{{ toUpper .PointName }}(base *{{ $TAffine }}, sca
})
{{- if eq .PointName "g1"}}
- toReturnAff := make([]{{ $TAffine }}, len(scalars))
- BatchJacobianToAffine{{ toUpper .PointName}}(toReturn, toReturnAff)
+ toReturnAff := BatchJacobianToAffine{{ toUpper .PointName}}(toReturn)
return toReturnAff
{{- else}}
return toReturn
diff --git a/internal/generator/ecc/template/sswu.go.tmpl b/internal/generator/ecc/template/sswu.go.tmpl
index f996717a76..50afc2acf5 100644
--- a/internal/generator/ecc/template/sswu.go.tmpl
+++ b/internal/generator/ecc/template/sswu.go.tmpl
@@ -9,11 +9,10 @@
{{$AffineType := print $CurveTitle "Affine"}}
{{$JacType := print $CurveTitle "Jac"}}
{{$IsG1 := eq $CurveTitle "G1"}}
-{{$CurveIndex := "2"}}
-{{$package := "fptower"}}
-{{if eq $TowerDegree 1}}{{$package = "fp"}}{{end}}
-{{if $IsG1}}{{$CurveIndex = "1"}}{{end}}
-
+{{$CurveIndex := select $IsG1 "2" "1"}}
+{{$package := select (eq $TowerDegree 1) "fptower" "fp"}}
+{{$sswuCurveACoeff := select $isogenyNeeded "This is meant to produce an error. Since most likely A = 0, there is opportunity for optimizations that need to be looked at." "sswuIsoCurveCoeffA"}}
+{{$sswuCurveBCoeff := select $isogenyNeeded "bCurveConf" "sswuIsoCurveCoeffB"}}
//Note: This only works for simple extensions
@@ -98,88 +97,84 @@ func {{$CurveName}}Isogeny(p *{{$AffineType}}) {
// {{$CurveName}}SqrtRatio computes the square root of u/v and returns 0 iff u/v was indeed a quadratic residue
// if not, we get sqrt(Z * u / v). Recall that Z is non-residue
+// If v == 0, u/v is meaningless; the output is then unspecified and no error is raised.
// The main idea is that since the computation of the square root involves taking large powers of u/v, the inversion of v can be avoided
func {{$CurveName}}SqrtRatio(z *{{$CoordType}}, u *{{$CoordType}}, v *{{$CoordType}}) uint64 {
-{{ if eq (mod .FieldSizeMod256 4) 3 }} // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.2. q = 3 mod 4
+{{ if eq (mod .FieldSizeMod256 4) 3 }} // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q- (3 mod 4)
var tv1 {{$CoordType}}
- tv1.Square(v)
+ tv1.Square(v) // 1. tv1 = v²
var tv2 {{$CoordType}}
- tv2.Mul(u, v)
- tv1.Mul(&tv1, &tv2)
+ tv2.Mul(u, v) // 2. tv2 = u * v
+ tv1.Mul(&tv1, &tv2) // 3. tv1 = tv1 * tv2
var y1 {{$CoordType}}
{
var c1 big.Int
// c1 = {{ $c1Int }}
- c1.SetBytes([]byte{ {{ $c1IntBytes }} })
- y1.Exp(tv1, &c1)
+ c1.SetBytes([]byte{ {{ $c1IntBytes }} }) // c1 = (q - 3) / 4 # Integer arithmetic
+
+ y1.Exp(tv1, &c1) // 4. y1 = tv1ᶜ¹
}
- y1.Mul(&y1, &tv2)
+ y1.Mul(&y1, &tv2) // 5. y1 = y1 * tv2
var y2 {{$CoordType}}
- y2.Mul(&y1, &{{$CoordType}} {{ asElement (index .PrecomputedParams 1)}})
-
- var tv3 {{$CoordType}}
- tv3.Square(&y1)
- tv3.Mul(&tv3, v)
-
- isQNr := tv3.NotEqual(u)
- z.Select(int(isQNr), &y1, &y2)
+ // c2 = sqrt(-Z)
+ tv3 := {{$CoordType}} {{ asElement (index .PrecomputedParams 1)}}
+ y2.Mul(&y1, &tv3) // 6. y2 = y1 * c2
+ tv3.Square(&y1) // 7. tv3 = y1²
+ tv3.Mul(&tv3, v) // 8. tv3 = tv3 * v
+ isQNr := tv3.NotEqual(u) // 9. isQR = tv3 == u
+ z.Select(int(isQNr), &y1, &y2) // 10. y = CMOV(y2, y1, isQR)
return isQNr
}
{{ end }}
-{{ if eq (mod .FieldSizeMod256 8) 5 }} // Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.3. q = 5 mod 8
-// TODO: Test correct use of Element.Select
+{{ if eq (mod .FieldSizeMod256 8) 5 }} // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-optimized-sqrt_ratio-for-q-5 (mod 8)
var tv1, tv2 {{$CoordType}}
- tv1.Square(v)
- tv2.Mul(&tv1, v)
- tv1.Square(&tv1)
- tv2.Mul(&tv2, u)
- tv1.Mul(&tv1, &tv2)
+ tv1.Square(v) // 1. tv1 = v²
+ tv2.Mul(&tv1, v) // 2. tv2 = tv1 * v
+ tv1.Square(&tv1) // 3. tv1 = tv1²
+ tv2.Mul(&tv2, u) // 4. tv2 = tv2 * u
+ tv1.Mul(&tv1, &tv2) // 5. tv1 = tv1 * tv2
var c1 big.Int
- // c1 = {{ $c1Int }}
+ // c1 = (q - 5) / 8 = {{ $c1Int }}
c1.SetBytes([]byte { {{ $c1IntBytes }} })
var y1 {{$CoordType}}
- y1.Exp(tv1, &c1)
- y1.Mul(&y1, &tv2)
- tv1.Mul(&y1, &{{$CoordType}} {{asElement (index .PrecomputedParams 1)}} )
- tv2.Square(&tv1)
-
- //Line 10 in std doc
- tv2.Mul(&tv2, v)
-
- y1.Select(int(tv2.NotEqual(u)), &tv1, &y1)
-
- tv2.Square(&y1)
- tv2.Mul(&tv2, v)
-
- //Line 15
- isQNr := tv2.NotEqual(u)
+ y1.Exp(tv1, &c1) // 6. y1 = tv1ᶜ¹
+ y1.Mul(&y1, &tv2) // 7. y1 = y1 * tv2
+ // c2 = sqrt(-1)
+ c2 := {{$CoordType}} {{asElement (index .PrecomputedParams 1)}}
+ tv1.Mul(&y1, &c2) // 8. tv1 = y1 * c2
+ tv2.Square(&tv1) // 9. tv2 = tv1²
+ tv2.Mul(&tv2, v) // 10. tv2 = tv2 * v
+ // 11. e1 = tv2 == u
+ y1.Select(int(tv2.NotEqual(u)), &tv1, &y1) // 12. y1 = CMOV(y1, tv1, e1)
+ tv2.Square(&y1) // 13. tv2 = y1²
+ tv2.Mul(&tv2, v) // 14. tv2 = tv2 * v
+ isQNr := tv2.NotEqual(u) // 15. isQR = tv2 == u
var y2 {{$CoordType}}
- y2.Mul(&y1, &{{$CoordType}} {{asElement (index .PrecomputedParams 2)}} )
- tv1.Mul(&y2, &{{$CoordType}} {{asElement (index .PrecomputedParams 1)}})
- tv2.Square(&tv1)
- tv2.Mul(&tv2, v)
-
+ // c3 = sqrt(Z / c2)
+ y2 = {{$CoordType}} {{asElement (index .PrecomputedParams 2)}}
+ y2.Mul(&y1, &y2) // 16. y2 = y1 * c3
+ tv1.Mul(&y2, &c2) // 17. tv1 = y2 * c2
+ tv2.Square(&tv1) // 18. tv2 = tv1²
+ tv2.Mul(&tv2, v) // 19. tv2 = tv2 * v
var tv3 {{$CoordType}}
- //Line 20
// Z = [{{printList .Z}}]
- {{$CurveName}}MulByZ(&tv3, u)
-
- y2.Select(int(tv2.NotEqual(&tv3)), &tv1, &y2)
-
- z.Select(int(isQNr), &y1, &y2)
+ {{$CurveName}}MulByZ(&tv3, u) // 20. tv3 = Z * u
+ // 21. e2 = tv2 == tv3
+ y2.Select(int(tv2.NotEqual(&tv3)), &tv1, &y2) // 22. y2 = CMOV(y2, tv1, e2)
+ z.Select(int(isQNr), &y1, &y2) // 23. y = CMOV(y2, y1, isQR)
return isQNr
}
{{ end }}
-{{ if eq (mod .FieldSizeMod256 8) 1 }}// Taken from https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ F.2.1.1. for any field
+{{ if eq (mod .FieldSizeMod256 8) 1 }}// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-sqrt_ratio-for-any-field
{{ $c2Int := index $cInts 1}}
{{ $c2IntBytes := printList (bytes $c2Int ) }}
@@ -195,58 +190,50 @@ func {{$CurveName}}SqrtRatio(z *{{$CoordType}}, u *{{$CoordType}}, v *{{$CoordTy
tv1 := {{$CoordType}} {{asElement (index .PrecomputedParams 1) }} //tv1 = c6
- var tv2, tv3, tv4, tv5 {{$CoordType}}
+ var tv2, tv3, tv4, tv5 {{$CoordType}}
var exp big.Int
- // c4 = {{ $c4Int }} = 2^{{$c1Int}} - 1
+ // c4 = {{ $c4Int }} = 2{{supScr $c1Int}} - 1
// q is odd so c1 is at least 1.
exp.SetBytes([]byte { {{ $c4IntBytes }} })
- tv2.Exp(*v, &exp)
- tv3.Mul(&tv2, &tv2)
- tv3.Mul(&tv3, v)
-
- // line 5
- tv5.Mul(u, &tv3)
+ tv2.Exp(*v, &exp) // 2. tv2 = vᶜ⁴
+ tv3.Square(&tv2) // 3. tv3 = tv2²
+ tv3.Mul(&tv3, v) // 4. tv3 = tv3 * v
+ tv5.Mul(u, &tv3) // 5. tv5 = u * tv3
// c3 = {{ $c3Int }}
exp.SetBytes([]byte { {{ $c3IntBytes }} })
- tv5.Exp(tv5, &exp)
- tv5.Mul(&tv5, &tv2)
- tv2.Mul(&tv5, v)
- tv3.Mul(&tv5, u)
- // line 10
- tv4.Mul(&tv3, &tv2)
+ tv5.Exp(tv5, &exp) // 6. tv5 = tv5ᶜ³
+ tv5.Mul(&tv5, &tv2) // 7. tv5 = tv5 * tv2
+ tv2.Mul(&tv5, v) // 8. tv2 = tv5 * v
+ tv3.Mul(&tv5, u) // 9. tv3 = tv5 * u
+ tv4.Mul(&tv3, &tv2) // 10. tv4 = tv3 * tv2
// c5 = {{ $c5Int }}
exp.SetBytes([]byte { {{ $c5IntBytes }} })
- tv5.Exp(tv4, &exp)
-
- isQNr := {{$CurveName}}NotOne(&tv5)
-
- tv2.Mul(&tv3, &{{$CoordType}} {{asElement (index .PrecomputedParams 2) }} )
- tv5.Mul(&tv4, &tv1)
-
- // line 15
-
- tv3.Select(int(isQNr), &tv3, &tv2)
- tv4.Select(int(isQNr), &tv4, &tv5)
-
- exp.Lsh( big.NewInt(1), {{ $c1Int }} - 2)
-
- for i := {{ $c1Int }}; i >= 2; i -- {
- //line 20
- tv5.Exp(tv4, &exp)
- nE1 := {{$CurveName}}NotOne(&tv5)
-
- tv2.Mul(&tv3, &tv1)
- tv1.Mul(&tv1, &tv1)
- tv5.Mul(&tv4, &tv1)
-
- tv3.Select(int(nE1), &tv3, &tv2)
- tv4.Select(int(nE1), &tv4, &tv5)
-
- exp.Rsh(&exp,1)
+ tv5.Exp(tv4, &exp) // 11. tv5 = tv4ᶜ⁵
+ isQNr := {{$CurveName}}NotOne(&tv5) // 12. isQR = tv5 == 1
+ c7 := {{$CoordType}} {{asElement (index .PrecomputedParams 2) }}
+ tv2.Mul(&tv3, &c7) // 13. tv2 = tv3 * c7
+ tv5.Mul(&tv4, &tv1) // 14. tv5 = tv4 * tv1
+ tv3.Select(int(isQNr), &tv3, &tv2) // 15. tv3 = CMOV(tv2, tv3, isQR)
+ tv4.Select(int(isQNr), &tv4, &tv5) // 16. tv4 = CMOV(tv5, tv4, isQR)
+ exp.Lsh( big.NewInt(1), {{ $c1Int }} - 2) // 18, 19: tv5 = 2ⁱ⁻² for i = c1
+
+ for i := {{ $c1Int }}; i >= 2; i -- { // 17. for i in (c1, c1 - 1, ..., 2):
+
+ tv5.Exp(tv4, &exp) // 20. tv5 = tv4ᵗᵛ⁵
+ nE1 := {{$CurveName}}NotOne(&tv5) // 21. e1 = tv5 == 1
+ tv2.Mul(&tv3, &tv1) // 22. tv2 = tv3 * tv1
+ tv1.Mul(&tv1, &tv1) // 23. tv1 = tv1 * tv1 Why not write square?
+ tv5.Mul(&tv4, &tv1) // 24. tv5 = tv4 * tv1
+ tv3.Select(int(nE1), &tv3, &tv2) // 25. tv3 = CMOV(tv2, tv3, e1)
+ tv4.Select(int(nE1), &tv4, &tv5) // 26. tv4 = CMOV(tv5, tv4, e1)
+
+ if i > 2 {
+ exp.Rsh(&exp, 1) // 18, 19. tv5 = 2ⁱ⁻²
+ }
}
*z = tv3
@@ -265,12 +252,6 @@ func {{$CurveName}}NotOne(x *{{$CoordType}}) uint64 {
}
{{ end }}
-/*
-// {{$CurveName}}SetZ sets z to [{{printList .Z}}].
-func {{$CurveName}}SetZ(z *{{$CoordType}}) {
- z.Set( &{{$CoordType}} {{asElement .Z }} )
-}*/
-
// {{$CurveName}}MulByZ multiplies x by [{{printList .Z}}] and stores the result in z
func {{$CurveName}}MulByZ(z *{{$CoordType}}, x *{{$CoordType}}) {
@@ -310,30 +291,31 @@ func {{$CurveName}}MulByZ(z *{{$CoordType}}, x *{{$CoordType}}) {
{{ end }}}
-//TODO: Define A,B here
-
-// From https://datatracker.ietf.org/doc/draft-irtf-cfrg-hash-to-curve/13/ Pg 80
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#name-simplified-swu-method
// mapToCurve{{$CurveIndex}} implements the SSWU map
// No cofactor clearing or isogeny
func mapToCurve{{$CurveIndex}}(u *{{$CoordType}}) {{$AffineType}} {
+ {{if $isogenyNeeded}}
+ var {{$sswuCurveACoeff}} = {{$CoordType}} {{asElement .A}}
+ var {{$sswuCurveBCoeff}} = {{$CoordType}} {{asElement .B}}
+ {{end}}
+
var tv1 {{$CoordType}}
- tv1.Square(u)
+ tv1.Square(u) // 1. tv1 = u²
//mul tv1 by Z
- {{$CurveName}}MulByZ(&tv1, &tv1)
+ {{$CurveName}}MulByZ(&tv1, &tv1) // 2. tv1 = Z * tv1
var tv2 {{$CoordType}}
- tv2.Square(&tv1)
- tv2.Add(&tv2, &tv1)
+ tv2.Square(&tv1) // 3. tv2 = tv1²
+ tv2.Add(&tv2, &tv1) // 4. tv2 = tv2 + tv1
var tv3 {{$CoordType}}
- //Standard doc line 5
var tv4 {{$CoordType}}
tv4.SetOne()
- tv3.Add(&tv2, &tv4)
- //TODO: Use bCurveConf when no isogeny
- tv3.Mul(&tv3, &{{$CoordType}}{{ asElement .B }})
+ tv3.Add(&tv2, &tv4) // 5. tv3 = tv2 + 1
+ tv3.Mul(&tv3, &{{$sswuCurveBCoeff}}) // 6. tv3 = B * tv3
tv2NZero := {{$CurveName}}NotZero(&tv2)
@@ -341,48 +323,45 @@ func mapToCurve{{$CurveIndex}}(u *{{$CoordType}}) {{$AffineType}} {
tv4 = {{$CoordType}}{{ asElement .Z }}
tv2.Neg(&tv2)
- tv4.Select(int(tv2NZero), &tv4, &tv2)
- //TODO: When no isogeny use curve constants
- tv2 = {{$CoordType}}{{ asElement .A }}
- tv4.Mul(&tv4, &tv2)
+ tv4.Select(int(tv2NZero), &tv4, &tv2) // 7. tv4 = CMOV(Z, -tv2, tv2 != 0)
+ tv4.Mul(&tv4, &{{$sswuCurveACoeff}}) // 8. tv4 = A * tv4
- tv2.Square(&tv3)
+ tv2.Square(&tv3) // 9. tv2 = tv3²
var tv6 {{$CoordType}}
- //Standard doc line 10
- tv6.Square(&tv4)
+ tv6.Square(&tv4) // 10. tv6 = tv4²
var tv5 {{$CoordType}}
- tv5.Mul(&tv6, &{{$CoordType}}{{ asElement .A }})
+ tv5.Mul(&tv6, &{{$sswuCurveACoeff}}) // 11. tv5 = A * tv6
- tv2.Add(&tv2, &tv5)
- tv2.Mul(&tv2, &tv3)
- tv6.Mul(&tv6, &tv4)
+ tv2.Add(&tv2, &tv5) // 12. tv2 = tv2 + tv5
+ tv2.Mul(&tv2, &tv3) // 13. tv2 = tv2 * tv3
+ tv6.Mul(&tv6, &tv4) // 14. tv6 = tv6 * tv4
- //Standards doc line 15
- tv5.Mul(&tv6, &{{$CoordType}}{{asElement .B}})
- tv2.Add(&tv2, &tv5)
+ tv5.Mul(&tv6, &{{$sswuCurveBCoeff}}) // 15. tv5 = B * tv6
+ tv2.Add(&tv2, &tv5) // 16. tv2 = tv2 + tv5
var x {{$CoordType}}
- x.Mul(&tv1, &tv3)
+ x.Mul(&tv1, &tv3) // 17. x = tv1 * tv3
var y1 {{$CoordType}}
- gx1NSquare := {{$CurveName}}SqrtRatio(&y1, &tv2, &tv6)
+ gx1NSquare := {{$CurveName}}SqrtRatio(&y1, &tv2, &tv6) // 18. (is_gx1_square, y1) = sqrt_ratio(tv2, tv6)
var y {{$CoordType}}
- y.Mul(&tv1, u)
+ y.Mul(&tv1, u) // 19. y = tv1 * u
- //Standards doc line 20
- y.Mul(&y, &y1)
+ y.Mul(&y, &y1) // 20. y = y * y1
- x.Select(int(gx1NSquare), &tv3, &x)
- y.Select(int(gx1NSquare), &y1, &y)
+ x.Select(int(gx1NSquare), &tv3, &x) // 21. x = CMOV(x, tv3, is_gx1_square)
+ y.Select(int(gx1NSquare), &y1, &y) // 22. y = CMOV(y, y1, is_gx1_square)
y1.Neg(&y)
y.Select(int({{$CurveName}}Sgn0(u)^{{$CurveName}}Sgn0(&y)), &y, &y1)
- //Standards doc line 25
- x.Div(&x, &tv4)
+ // 23. e1 = sgn0(u) == sgn0(y)
+ // 24. y = CMOV(-y, y, e1)
+
+ x.Div(&x, &tv4) // 25. x = x / tv4
return {{$AffineType}}{x, y}
}
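
A note on the `q ≡ 3 (mod 4)` branch above: `SqrtRatio` never inverts `v`; the candidate root is `y₁ = (u·v)·(u·v³)^((q−3)/4)`, and a single multiplication by `c2 = sqrt(-Z)` fixes up the non-residue case. A minimal standalone sketch of that identity over a toy prime (the helper name, the toy modulus and the sample values are ours, not the library's):

```go
package main

import (
	"fmt"
	"math/big"
)

// sqrtRatio3Mod4 returns y with y² ≡ u/v (mod q) when u/v is a square,
// and y² ≡ z·u/v otherwise, for q ≡ 3 (mod 4) and z a non-residue,
// without ever inverting v.
func sqrtRatio3Mod4(u, v, z, q *big.Int) (*big.Int, bool) {
	c1 := new(big.Int).Sub(q, big.NewInt(3))
	c1.Div(c1, big.NewInt(4)) // c1 = (q-3)/4

	tv1 := new(big.Int).Mul(v, v) // v²
	tv2 := new(big.Int).Mul(u, v) // u·v
	tv1.Mul(tv1, tv2)             // u·v³
	y := new(big.Int).Exp(tv1, c1, q)
	y.Mul(y, tv2).Mod(y, q) // y = (u·v)·(u·v³)^((q-3)/4)

	// y²·v ≡ u (mod q) iff u/v is a quadratic residue
	chk := new(big.Int).Mul(y, y)
	chk.Mul(chk, v).Mod(chk, q)
	if chk.Cmp(new(big.Int).Mod(u, q)) == 0 {
		return y, false
	}
	// non-residue case: multiply by c2 = sqrt(-z)
	c2 := new(big.Int).Neg(z)
	c2.Mod(c2, q)
	e := new(big.Int).Add(q, big.NewInt(1))
	e.Div(e, big.NewInt(4))
	c2.Exp(c2, e, q)
	y.Mul(y, c2).Mod(y, q)
	return y, true
}

func main() {
	q := big.NewInt(23) // toy prime, 23 ≡ 3 (mod 4)
	z := big.NewInt(5)  // non-residue mod 23, playing the role of Z
	y, isQNR := sqrtRatio3Mod4(big.NewInt(3), big.NewInt(4), z, q)
	fmt.Println(y, isQNR) // 8 false: 8²·4 ≡ 3 (mod 23)
}
```

The template's `Select`-based CMOV steps perform the same correction in constant time over the actual field.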
diff --git a/internal/generator/ecc/template/svdw.go.tmpl b/internal/generator/ecc/template/svdw.go.tmpl
index 111899008e..7056bf5ade 100644
--- a/internal/generator/ecc/template/svdw.go.tmpl
+++ b/internal/generator/ecc/template/svdw.go.tmpl
@@ -13,7 +13,7 @@
// mapToCurve{{$CurveIndex}} implements the Shallue and van de Woestijne method, applicable to any elliptic curve in Weierstrass form
// No cofactor clearing or isogeny
-// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#appendix-F.1
+// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html#straightline-svdw
func mapToCurve{{$CurveIndex}}(u *{{$CoordType}}) {{$AffineType}} {
var tv1, tv2, tv3, tv4 {{$CoordType}}
var x1, x2, x3, gx1, gx2, gx, x, y {{$CoordType}}
@@ -26,7 +26,6 @@ func mapToCurve{{$CurveIndex}}(u *{{$CoordType}}) {{$AffineType}} {
//c3 = sqrt(-g(Z) * (3 * Z² + 4 * A)) # sgn0(c3) MUST equal 0
//c4 = -4 * g(Z) / (3 * Z² + 4 * A)
- //TODO: Move outside function?
Z := {{$CoordType}}{{asElement (index $.PrecomputedParams 0)}}
c1 := {{$CoordType}}{{asElement (index $.PrecomputedParams 1)}}
c2 := {{$CoordType}}{{asElement (index $.PrecomputedParams 2)}}
@@ -48,8 +47,7 @@ func mapToCurve{{$CurveIndex}}(u *{{$CoordType}}) {{$AffineType}} {
x1.Sub(&c2, &tv4) // 10. x1 = c2 - tv4
gx1.Square(&x1) // 11. gx1 = x1²
- //TODO: Beware A ≠ 0
- //12. gx1 = gx1 + A
+	//12. gx1 = gx1 + A  (skipped here: all gnark-crypto curves have A=0, i.e. j-invariant 0; this step is required for curves with a nonzero A coefficient)
gx1.Mul(&gx1, &x1) // 13. gx1 = gx1 * x1
gx1.Add(&gx1, &{{$B}}) // 14. gx1 = gx1 + B
gx1NotSquare = gx1.Legendre() >> 1 // 15. e1 = is_square(gx1)
@@ -57,7 +55,7 @@ func mapToCurve{{$CurveIndex}}(u *{{$CoordType}}) {{$AffineType}} {
x2.Add(&c2, &tv4) // 16. x2 = c2 + tv4
gx2.Square(&x2) // 17. gx2 = x2²
- // 18. gx2 = gx2 + A
+ // 18. gx2 = gx2 + A See line 12
gx2.Mul(&gx2, &x2) // 19. gx2 = gx2 * x2
gx2.Add(&gx2, &{{$B}}) // 20. gx2 = gx2 + B
diff --git a/internal/generator/ecc/template/tests/multiexp.go.tmpl b/internal/generator/ecc/template/tests/multiexp.go.tmpl
index 20b9210524..dd8751733f 100644
--- a/internal/generator/ecc/template/tests/multiexp.go.tmpl
+++ b/internal/generator/ecc/template/tests/multiexp.go.tmpl
@@ -91,8 +91,22 @@ func TestMultiExp{{toUpper $.PointName}}(t *testing.T) {
genScalar,
))
+ // cRange is generated from template and contains the available parameters for the multiexp window size
+ {{- if eq $.PointName "g1" }}
+ cRange := []uint64{
+ {{- range $c := $.CRange}} {{- if and (eq $.PointName "g1") (gt $c 21)}}{{- else}} {{$c}},{{- end}}{{- end}}
+ }
+ if testing.Short() {
+		// test only an "odd" and an "even" window size (i.e. one that divides the word size and one that doesn't)
+ cRange = []uint64{5, 16}
+ }
+ {{- else }}
+	// for g2, CI struggles with large window sizes (c), since the buckets require allocating a lot of memory.
+	// test only an "odd" and an "even" window size (i.e. one that divides the word size and one that doesn't)
+ cRange := []uint64{5, 16}
+ {{- end}}
- properties.Property("[{{ toUpper $.PointName }}] Multi exponentation (c=5, c=16) should be consistent with sum of square", prop.ForAll(
+ properties.Property(fmt.Sprintf("[{{ toUpper $.PointName }}] Multi exponentation (c in %v) should be consistent with sum of square", cRange), prop.ForAll(
func(mixer fr.Element) bool {
var expected {{ $.TJacobian }}
@@ -111,14 +125,22 @@ func TestMultiExp{{toUpper $.PointName}}(t *testing.T) {
FromMont()
}
- scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU())
- scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU())
-
- var r5, r16 {{ $.TJacobian }}
- r5.msmC5(samplePoints[:], scalars5, false)
- r16.msmC16(samplePoints[:], scalars16, true)
- return (r5.Equal(&expected) && r16.Equal(&expected))
+ results := make([]{{ $.TJacobian }}, len(cRange) + 1)
+ for i, c := range cRange {
+ scalars, _ := partitionScalars(sampleScalars[:], c, false, runtime.NumCPU())
+ msmInner{{ $.TJacobian }}(&results[i], int(c), samplePoints[:], scalars, false)
+ if c == 16 {
+ // split the first chunk
+ msmInner{{ $.TJacobian }}(&results[len(results)-1], 16, samplePoints[:], scalars, true)
+ }
+ }
+ for i:=1; i < len(results);i++ {
+ if !results[i].Equal(&results[i-1]) {
+ return false
+ }
+ }
+ return true
},
genScalar,
))
@@ -150,7 +172,7 @@ func TestMultiExp{{toUpper $.PointName}}(t *testing.T) {
var finalBigScalar fr.Element
var finalBigScalarBi big.Int
var op1ScalarMul {{ $.TAffine }}
- finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer)
+ finalBigScalar.SetUint64(9455).Mul(&finalBigScalar, &mixer)
finalBigScalar.ToBigIntRegular(&finalBigScalarBi)
op1ScalarMul.ScalarMultiplication(&{{ toLower .PointName}}GenAff, &finalBigScalarBi)
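
To make the bucket-memory remark above concrete: a window size `c` splits a roughly 256-bit scalar into ⌈256/c⌉ chunks, and the bucket method keeps on the order of 2^(c−1) points per chunk. A quick back-of-the-envelope sketch (the 256-bit figure is an illustrative assumption, not a value read from the library):

```go
package main

import "fmt"

func main() {
	const scalarBits = 256 // fr moduli used here are roughly 254-256 bits
	for _, c := range []int{5, 16, 21} {
		chunks := (scalarBits + c - 1) / c
		buckets := 1 << (c - 1)
		fmt.Printf("c=%2d: %2d chunks, %7d buckets per chunk\n", c, chunks, buckets)
	}
}
```

That is why the generated `cRange` for G1 is capped at c ≤ 21, and why G2 and `testing.Short()` fall back to c ∈ {5, 16}.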
diff --git a/internal/generator/ecc/template/tests/point.go.tmpl b/internal/generator/ecc/template/tests/point.go.tmpl
index 3727be2917..556d9befc3 100644
--- a/internal/generator/ecc/template/tests/point.go.tmpl
+++ b/internal/generator/ecc/template/tests/point.go.tmpl
@@ -111,7 +111,7 @@ func Test{{ $TAffine }}IsOnCurve(t *testing.T) {
func(a {{ .CoordType}}) bool {
var op1, op2 {{ $TAffine }}
op1.FromJacobian(&{{.PointName}}Gen)
- op2.FromJacobian(&{{.PointName}}Gen)
+ op2.Set(&op1)
op2.Y.Mul(&op2.Y, &a)
return op1.IsOnCurve() && !op2.IsOnCurve()
},
@@ -250,6 +250,22 @@ func Test{{ $TAffine }}Conversions(t *testing.T) {
{{$fuzzer}},
))
+ {{- if eq .PointName "g1" }}
+ properties.Property("[{{ toUpper .Name }}] BatchJacobianToAffineG1 and FromJacobian should output the same result", prop.ForAll(
+ func(a, b {{ .CoordType}}) bool {
+ g1 := fuzz{{ $TJacobian }}(&{{ toLower .PointName }}Gen, a)
+ g2 := fuzz{{ $TJacobian }}(&{{ toLower .PointName }}Gen, b)
+ var op1, op2 {{ $TAffine }}
+ op1.FromJacobian(&g1)
+ op2.FromJacobian(&g2)
+ baseTableAff := BatchJacobianToAffineG1([]G1Jac{g1, g2})
+ return op1.Equal(&baseTableAff[0]) && op2.Equal(&baseTableAff[1])
+ },
+ GenFp(),
+ GenFp(),
+ ))
+ {{- end }}
+
properties.TestingRun(t, gopter.ConsoleReporter(false))
}
@@ -543,7 +559,7 @@ func Benchmark{{ $TJacobian }}IsInSubGroup(b *testing.B) {
}
-func Benchmark{{ $TAffine }}BatchScalarMul(b *testing.B) {
+func Benchmark{{ $TAffine }}BatchScalarMultiplication(b *testing.B) {
// ensure every words of the scalars are filled
var mixer fr.Element
mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487")
@@ -571,7 +587,7 @@ func Benchmark{{ $TAffine }}BatchScalarMul(b *testing.B) {
}
}
-func Benchmark{{ $TJacobian }}ScalarMul(b *testing.B) {
+func Benchmark{{ $TJacobian }}ScalarMultiplication(b *testing.B) {
var scalar big.Int
r := fr.Modulus()
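
`BatchJacobianToAffineG1`, whose signature changes earlier in this diff and which the new property test above exercises, is built on Montgomery's batch-inversion idea: all Z coordinates are inverted with a single field inversion. A minimal sketch of that trick over the bn254 base field (`batchInvert` is our illustrative helper, not the library function):

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fp"
)

// batchInvert inverts every element of zs using a single field inversion
// (Montgomery's trick). Zero entries stay zero, mirroring the library
// convention that inv(0) == 0.
func batchInvert(zs []fp.Element) []fp.Element {
	res := make([]fp.Element, len(zs))
	if len(zs) == 0 {
		return res
	}
	// prefix[i] = product of the nonzero entries before index i
	prefix := make([]fp.Element, len(zs))
	var acc fp.Element
	acc.SetOne()
	for i := range zs {
		prefix[i].Set(&acc)
		if !zs[i].IsZero() {
			acc.Mul(&acc, &zs[i])
		}
	}
	acc.Inverse(&acc) // the single inversion
	// walk back: res[i] = prefix[i] * inverse of the product up to and including i
	for i := len(zs) - 1; i >= 0; i-- {
		if zs[i].IsZero() {
			continue
		}
		res[i].Mul(&prefix[i], &acc)
		acc.Mul(&acc, &zs[i])
	}
	return res
}

func main() {
	var a, b, zero fp.Element
	a.SetUint64(3)
	b.SetUint64(7)
	inv := batchInvert([]fp.Element{a, zero, b})

	var check fp.Element
	check.Mul(&a, &inv[0])
	fmt.Println(check.IsOne(), inv[1].IsZero()) // true true
}
```

For Jacobian to affine conversion one would then compute x = X·inv(Z)² and y = Y·inv(Z)³ per point, paying one inversion for the whole batch.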
diff --git a/internal/generator/edwards/eddsa/template/eddsa.go.tmpl b/internal/generator/edwards/eddsa/template/eddsa.go.tmpl
index efb18cedc2..61708430fd 100644
--- a/internal/generator/edwards/eddsa/template/eddsa.go.tmpl
+++ b/internal/generator/edwards/eddsa/template/eddsa.go.tmpl
@@ -105,7 +105,7 @@ func GenerateKey(r io.Reader) (*PrivateKey, error) {
var bScalar big.Int
bScalar.SetBytes(priv.scalar[:])
- pub.A.ScalarMul(&c.Base, &bScalar)
+ pub.A.ScalarMultiplication(&c.Base, &bScalar)
priv.PublicKey = pub
@@ -154,7 +154,7 @@ func (privKey *PrivateKey) Sign(message []byte, hFunc hash.Hash) ([]byte, error)
blindingFactorBigInt.SetBytes(blindingFactorBytes[:sizeFr])
// compute R = randScalar*Base
- res.R.ScalarMul(&curveParams.Base, &blindingFactorBigInt)
+ res.R.ScalarMultiplication(&curveParams.Base, &blindingFactorBigInt)
if !res.R.IsOnCurve() {
return nil, errNotOnCurve
}
@@ -240,8 +240,8 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
var bCofactor, bs big.Int
curveParams.Cofactor.ToBigIntRegular(&bCofactor)
bs.SetBytes(sig.S[:])
- lhs.ScalarMul(&curveParams.Base, &bs).
- ScalarMul(&lhs, &bCofactor)
+ lhs.ScalarMultiplication(&curveParams.Base, &bs).
+ ScalarMultiplication(&lhs, &bCofactor)
if !lhs.IsOnCurve() {
return false, errNotOnCurve
@@ -249,9 +249,9 @@ func (pub *PublicKey) Verify(sigBin, message []byte, hFunc hash.Hash) (bool, err
// rhs = cofactor*(R + H(R,A,M)*A)
var rhs twistededwards.PointAffine
- rhs.ScalarMul(&pub.A, &hramInt).
+ rhs.ScalarMultiplication(&pub.A, &hramInt).
Add(&rhs, &sig.R).
- ScalarMul(&rhs, &bCofactor)
+ ScalarMultiplication(&rhs, &bCofactor)
if !rhs.IsOnCurve() {
return false, errNotOnCurve
}
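
The rename to `ScalarMultiplication` does not change the verification equation used above: both sides are multiplied by the cofactor and the check is cofactor·S·B == cofactor·(R + H(R,A,M)·A). A compact sketch of that final comparison on the bn254 twisted Edwards curve, assuming the usual generated package path; `A`, `R`, `s` and `hram` stand in for values the real `Verify` derives from the signature and message:

```go
package example

import (
	"math/big"

	"github.com/consensys/gnark-crypto/ecc/bn254/twistededwards"
)

// eddsaCheck returns true iff cofactor·s·B == cofactor·(R + hram·A).
func eddsaCheck(A, R twistededwards.PointAffine, s, hram *big.Int) bool {
	params := twistededwards.GetEdwardsCurve()
	var bCofactor big.Int
	params.Cofactor.ToBigIntRegular(&bCofactor)

	var lhs, rhs twistededwards.PointAffine
	// lhs = cofactor * (s * Base)
	lhs.ScalarMultiplication(&params.Base, s).
		ScalarMultiplication(&lhs, &bCofactor)
	// rhs = cofactor * (R + hram * A)
	rhs.ScalarMultiplication(&A, hram).
		Add(&rhs, &R).
		ScalarMultiplication(&rhs, &bCofactor)
	return lhs.IsOnCurve() && rhs.IsOnCurve() && lhs.Equal(&rhs)
}
```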
diff --git a/internal/generator/edwards/template/point.go.tmpl b/internal/generator/edwards/template/point.go.tmpl
index 9beadb2064..9bb9a7244e 100644
--- a/internal/generator/edwards/template/point.go.tmpl
+++ b/internal/generator/edwards/template/point.go.tmpl
@@ -240,13 +240,13 @@ func (p *PointAffine) FromExtended(p1 *PointExtended) *PointAffine {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in affine coordinates with a scalar in big.Int
-func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {
+func (p *PointAffine) ScalarMultiplication(p1 *PointAffine, scalar *big.Int) *PointAffine {
var p1Extended, resExtended PointExtended
p1Extended.FromAffine(p1)
- resExtended.ScalarMul(&p1Extended, scalar)
+ resExtended.ScalarMultiplication(&p1Extended, scalar)
p.FromExtended(&resExtended)
return p
@@ -393,9 +393,9 @@ func (p *PointProj) Add(p1, p2 *PointProj) *PointProj {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in projective coordinates with a scalar in big.Int
-func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {
+func (p *PointProj) ScalarMultiplication(p1 *PointProj, scalar *big.Int) *PointProj {
{{- if .HasEndomorphism}}
return p.scalarMulGLV(p1, scalar)
{{- else }}
@@ -618,9 +618,9 @@ func (p *PointExtended) setInfinity() *PointExtended {
return p
}
-// ScalarMul scalar multiplication of a point
+// ScalarMultiplication scalar multiplication of a point
// p1 in extended coordinates with a scalar in big.Int
-func (p *PointExtended) ScalarMul(p1 *PointExtended, scalar *big.Int) *PointExtended {
+func (p *PointExtended) ScalarMultiplication(p1 *PointExtended, scalar *big.Int) *PointExtended {
{{- if .HasEndomorphism}}
return p.scalarMulGLV(p1, scalar)
{{- else }}
diff --git a/internal/generator/edwards/template/tests/point.go.tmpl b/internal/generator/edwards/template/tests/point.go.tmpl
index 1c498fae6c..650dd93d4a 100644
--- a/internal/generator/edwards/template/tests/point.go.tmpl
+++ b/internal/generator/edwards/template/tests/point.go.tmpl
@@ -106,8 +106,8 @@ func TestReceiverIsOperand(t *testing.T) {
var s big.Int
s.SetUint64(10)
- p2.ScalarMul(&p1, &s)
- p1.ScalarMul(&p1, &s)
+ p2.ScalarMultiplication(&p1, &s)
+ p1.ScalarMultiplication(&p1, &s)
return p2.Equal(&p1)
},
@@ -318,7 +318,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, zero PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -334,7 +334,7 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
p2.Neg(&p1)
p1.Add(&p1, &p2)
@@ -353,8 +353,8 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2, inf PointAffine
- p1.ScalarMul(¶ms.Base, &s)
- p2.ScalarMul(¶ms.Base, &s)
+ p1.ScalarMultiplication(¶ms.Base, &s)
+ p2.ScalarMultiplication(¶ms.Base, &s)
p1.Add(&p1, &p2)
p2.Double(&p2)
@@ -372,14 +372,14 @@ func TestOps(t *testing.T) {
var p1, p2, p3, inf PointAffine
inf.X.SetZero()
inf.Y.SetZero()
- p1.ScalarMul(¶ms.Base, &s1)
- p2.ScalarMul(¶ms.Base, &s2)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s2)
p3.Set(¶ms.Base)
p2.Add(&p1, &p2)
s1.Add(&s1, &s2)
- p3.ScalarMul(¶ms.Base, &s1)
+ p3.ScalarMultiplication(¶ms.Base, &s1)
return p2.IsOnCurve() && p3.Equal(&p2) && !p3.Equal(&inf)
},
@@ -395,9 +395,9 @@ func TestOps(t *testing.T) {
var p1, p2, inf PointAffine
inf.X.SetZero()
inf.Y.SetOne()
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
s1.Neg(&s1)
- p2.ScalarMul(¶ms.Base, &s1)
+ p2.ScalarMultiplication(¶ms.Base, &s1)
p2.Add(&p1, &p2)
@@ -412,11 +412,11 @@ func TestOps(t *testing.T) {
params := GetEdwardsCurve()
var p1, p2 PointAffine
- p1.ScalarMul(¶ms.Base, &s1)
+ p1.ScalarMultiplication(¶ms.Base, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.IsOnCurve() && p2.Equal(&p1)
},
@@ -445,7 +445,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, zero PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -462,7 +462,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -480,7 +480,7 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2, p PointProj
baseProj.FromAffine(¶ms.Base)
- p.ScalarMul(&baseProj, &s)
+ p.ScalarMultiplication(&baseProj, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -497,11 +497,11 @@ func TestOps(t *testing.T) {
var baseProj, p1, p2 PointProj
baseProj.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseProj, &s1)
+ p1.ScalarMultiplication(&baseProj, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -529,7 +529,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, zero PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
zero.setInfinity()
p2.Add(&p1, &zero)
@@ -546,7 +546,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
p2.Neg(&p1)
p.Add(&p1, &p2)
@@ -564,7 +564,7 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2, p PointExtended
baseExtended.FromAffine(¶ms.Base)
- p.ScalarMul(&baseExtended, &s)
+ p.ScalarMultiplication(&baseExtended, &s)
p1.Add(&p, &p)
p2.Double(&p)
@@ -581,11 +581,11 @@ func TestOps(t *testing.T) {
var baseExtended, p1, p2 PointExtended
baseExtended.FromAffine(¶ms.Base)
- p1.ScalarMul(&baseExtended, &s1)
+ p1.ScalarMultiplication(&baseExtended, &s1)
five := big.NewInt(5)
p2.Double(&p1).Double(&p2).Add(&p2, &p1)
- p1.ScalarMul(&p1, five)
+ p1.ScalarMultiplication(&p1, five)
return p2.Equal(&p1)
},
@@ -601,8 +601,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pExtended, &pAffine)
@@ -620,8 +620,8 @@ func TestOps(t *testing.T) {
var baseExtended, pExtended, p, p2 PointExtended
var pAffine PointAffine
baseExtended.FromAffine(¶ms.Base)
- pExtended.ScalarMul(&baseExtended, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pExtended.ScalarMultiplication(&baseExtended, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pExtended, &pAffine)
p2.MixedDouble(&pExtended)
@@ -640,8 +640,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
pAffine.Neg(&pAffine)
p.MixedAdd(&pProj, &pAffine)
@@ -659,8 +659,8 @@ func TestOps(t *testing.T) {
var baseProj, pProj, p, p2 PointProj
var pAffine PointAffine
baseProj.FromAffine(¶ms.Base)
- pProj.ScalarMul(&baseProj, &s)
- pAffine.ScalarMul(¶ms.Base, &s)
+ pProj.ScalarMultiplication(&baseProj, &s)
+ pAffine.ScalarMultiplication(¶ms.Base, &s)
p.MixedAdd(&pProj, &pAffine)
p2.Double(&pProj)
@@ -679,9 +679,9 @@ func TestOps(t *testing.T) {
var baseExt PointExtended
var p1, p2 PointAffine
baseProj.FromAffine(¶ms.Base)
- baseProj.ScalarMul(&baseProj, &s)
+ baseProj.ScalarMultiplication(&baseProj, &s)
baseExt.FromAffine(¶ms.Base)
- baseExt.ScalarMul(&baseExt, &s)
+ baseExt.ScalarMultiplication(&baseExt, &s)
p1.FromProj(&baseProj)
p2.FromExtended(&baseExt)
@@ -744,7 +744,7 @@ func BenchmarkScalarMulExtended(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
@@ -760,6 +760,6 @@ func BenchmarkScalarMulProjective(b *testing.B) {
b.ResetTimer()
for j := 0; j < b.N; j++ {
- doubleAndAdd.ScalarMul(&a, &s)
+ doubleAndAdd.ScalarMultiplication(&a, &s)
}
}
diff --git a/internal/generator/kzg/template/kzg.go.tmpl b/internal/generator/kzg/template/kzg.go.tmpl
index 81c8d87f97..b387b4df52 100644
--- a/internal/generator/kzg/template/kzg.go.tmpl
+++ b/internal/generator/kzg/template/kzg.go.tmpl
@@ -151,16 +151,15 @@ func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) {
func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error {
// [f(a)]G₁
- var claimedValueG1Aff {{ .CurvePackage }}.G1Affine
+ var claimedValueG1Aff {{ .CurvePackage }}.G1Jac
var claimedValueBigInt big.Int
proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt)
- claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt)
+ claimedValueG1Aff.ScalarMultiplicationAffine(&srs.G1[0], &claimedValueBigInt)
// [f(α) - f(a)]G₁
- var fminusfaG1Jac, tmpG1Jac {{ .CurvePackage }}.G1Jac
+ var fminusfaG1Jac {{ .CurvePackage }}.G1Jac
fminusfaG1Jac.FromAffine(commitment)
- tmpG1Jac.FromAffine(&claimedValueG1Aff)
- fminusfaG1Jac.SubAssign(&tmpG1Jac)
+ fminusfaG1Jac.SubAssign(&claimedValueG1Aff)
// [-H(α)]G₁
var negH {{ .CurvePackage }}.G1Affine
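
For context, the pairing check in `Verify` is the identity f(X) − f(a) = H(X)·(X − a) evaluated "in the exponent" at the secret α: the verifier only needs [f(a)]G₁ (computed above), the commitment and the proof. A toy check of the underlying polynomial identity over the bn254 scalar field (the polynomial, `a` and `alpha` below are made-up example values):

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

// eval evaluates p(x) = p[0] + p[1]x + ... with Horner's rule.
func eval(p []fr.Element, x fr.Element) fr.Element {
	var res fr.Element
	for i := len(p) - 1; i >= 0; i-- {
		res.Mul(&res, &x)
		res.Add(&res, &p[i])
	}
	return res
}

func main() {
	// f(X) = 3 + 2X + X²
	f := make([]fr.Element, 3)
	f[0].SetUint64(3)
	f[1].SetUint64(2)
	f[2].SetUint64(1)

	var a, alpha fr.Element
	a.SetUint64(7)
	alpha.SetUint64(11)

	// For this f, (f(X) - f(a)) / (X - a) = X + a + 2, so H(X) = (a+2) + X.
	h := make([]fr.Element, 2)
	h[0].SetUint64(2)
	h[0].Add(&h[0], &a)
	h[1].SetOne()

	// Verify's pairing check asserts exactly this relation at the secret alpha:
	// f(alpha) - f(a) == H(alpha) * (alpha - a)
	fAlpha, fA, hAlpha := eval(f, alpha), eval(f, a), eval(h, alpha)
	var lhs, rhs fr.Element
	lhs.Sub(&fAlpha, &fA)
	rhs.Sub(&alpha, &a)
	rhs.Mul(&rhs, &hAlpha)
	fmt.Println(lhs.Equal(&rhs)) // true
}
```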
diff --git a/internal/generator/polynomial/generate.go b/internal/generator/polynomial/generate.go
index 685742b6db..a9a55993cd 100644
--- a/internal/generator/polynomial/generate.go
+++ b/internal/generator/polynomial/generate.go
@@ -13,7 +13,10 @@ func Generate(conf config.Curve, baseDir string, bgen *bavard.BatchGenerator) er
entries := []bavard.Entry{
{File: filepath.Join(baseDir, "doc.go"), Templates: []string{"doc.go.tmpl"}},
{File: filepath.Join(baseDir, "polynomial.go"), Templates: []string{"polynomial.go.tmpl"}},
+ {File: filepath.Join(baseDir, "multilin.go"), Templates: []string{"multilin.go.tmpl"}},
+ {File: filepath.Join(baseDir, "pool.go"), Templates: []string{"pool.go.tmpl"}},
{File: filepath.Join(baseDir, "polynomial_test.go"), Templates: []string{"polynomial.test.go.tmpl"}},
+ {File: filepath.Join(baseDir, "multilin_test.go"), Templates: []string{"multilin.test.go.tmpl"}},
}
return bgen.Generate(conf, conf.Package, "./polynomial/template/", entries...)
}
diff --git a/internal/generator/polynomial/template/multilin.go.tmpl b/internal/generator/polynomial/template/multilin.go.tmpl
new file mode 100644
index 0000000000..4a1880aef8
--- /dev/null
+++ b/internal/generator/polynomial/template/multilin.go.tmpl
@@ -0,0 +1,235 @@
+import (
+ "github.com/consensys/gnark-crypto/ecc/{{ .Name }}/fr"
+)
+
+
+// MultiLin tracks the values of a (dense i.e. not sparse) multilinear polynomial
+// The variables are X₁ through Xₙ where n = log(len(.))
+// .[∑ᵢ 2ⁿ⁻ⁱ bᵢ] = the polynomial evaluated at (b₁, b₂, ..., bₙ)
+// It is understood that any hypercube evaluation can be extrapolated to a multilinear polynomial
+type MultiLin []fr.Element
+
+// Fold is partial evaluation function k[X₁, X₂, ..., Xₙ] → k[X₂, ..., Xₙ] by setting X₁=r
+func (m *MultiLin) Fold(r fr.Element) {
+ mid := len(*m) / 2
+
+ bottom, top := (*m)[:mid], (*m)[mid:]
+
+ // updating bookkeeping table
+ // knowing that the polynomial f ∈ (k[X₂, ..., Xₙ])[X₁] is linear, we would get f(r) = f(0) + r(f(1) - f(0))
+ // the following loop computes the evaluations of f(r) accordingly:
+ // f(r, b₂, ..., bₙ) = f(0, b₂, ..., bₙ) + r(f(1, b₂, ..., bₙ) - f(0, b₂, ..., bₙ))
+ for i := 0; i < mid; i++ {
+ // table[i] ← table[i] + r (table[i + mid] - table[i])
+ top[i].Sub(&top[i], &bottom[i])
+ top[i].Mul(&top[i], &r)
+ bottom[i].Add(&bottom[i], &top[i])
+ }
+
+ *m = (*m)[:mid]
+}
+
+
+// Evaluate extrapolates the value of the multilinear polynomial corresponding to m
+// on the given coordinates
+func (m MultiLin) Evaluate(coordinates []fr.Element) fr.Element {
+ // Folding is a mutating operation
+ bkCopy := m.Clone()
+
+ // Evaluate step by step through repeated folding (i.e. evaluation at the first remaining variable)
+ for _, r := range coordinates {
+ bkCopy.Fold(r)
+ }
+
+ return bkCopy[0]
+}
+
+// Clone creates a deep copy of a book-keeping table.
+// Both multilinear interpolation and sumcheck require folding an underlying
+// array, but folding changes the array. To do both one requires a deep copy
+// of the book-keeping table.
+func (m MultiLin) Clone() MultiLin {
+ tableDeepCopy := Make(len(m))
+ copy(tableDeepCopy, m)
+ return tableDeepCopy
+}
+
+// Add two bookKeepingTables
+func (m *MultiLin) Add(left, right MultiLin) {
+ size := len(left)
+ // Check that left and right have the same size
+ if len(right) != size {
+		panic("left and right do not have the same size")
+ }
+ // Reallocate the table if necessary
+ if cap(*m) < size {
+ *m = make([]fr.Element, size)
+ }
+
+ // Resize the destination table
+ *m = (*m)[:size]
+
+ // Add elementwise
+ for i := 0; i < size; i++ {
+ (*m)[i].Add(&left[i], &right[i])
+ }
+}
+
+
+// EvalEq computes Eq(q₁, ... , qₙ, h₁, ... , hₙ) = Π₁ⁿ Eq(qᵢ, hᵢ)
+// where Eq(x,y) = xy + (1-x)(1-y) = 1 - x - y + xy + xy interpolates
+// _________________
+// | | |
+// | 0 | 1 |
+// |_______|_______|
+// y | | |
+// | 1 | 0 |
+// |_______|_______|
+//
+// x
+// In other words the polynomial evaluated here is the multilinear extrapolation of
+// one that evaluates to q' == h' for vectors q', h' of binary values
+func EvalEq(q, h []fr.Element) fr.Element {
+ var res, nxt, one, sum fr.Element
+ one.SetOne()
+ for i := 0; i < len(q); i++ {
+ nxt.Mul(&q[i], &h[i]) // nxt <- qᵢ * hᵢ
+ nxt.Double(&nxt) // nxt <- 2 * qᵢ * hᵢ
+ nxt.Add(&nxt, &one) // nxt <- 1 + 2 * qᵢ * hᵢ
+ sum.Add(&q[i], &h[i]) // sum <- qᵢ + hᵢ TODO: Why not subtract one by one from nxt? More parallel?
+
+ if i == 0 {
+ res.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ } else {
+ nxt.Sub(&nxt, &sum) // nxt <- 1 + 2 * qᵢ * hᵢ - qᵢ - hᵢ
+ res.Mul(&res, &nxt) // res <- res * nxt
+ }
+ }
+ return res
+}
+
+// Eq sets m to the representation of the polynomial Eq(q₁, ..., qₙ, *, ..., *) × m[0]
+func (m *MultiLin) Eq(q []fr.Element) {
+ n := len(q)
+
+	if len(*m) != 1<<n {
+		panic("m must have length 2ⁿ")
+	}
+}
+
+// signedBigInt returns the representative of v with the smallest absolute value
+func signedBigInt(v *fr.Element) big.Int {
+	var i, iDbl big.Int
+	v.ToBigIntRegular(&i)
+	iDbl.Lsh(&i, 1)
+	if iDbl.Cmp(fr.Modulus()) > 0 {
+		i.Sub(fr.Modulus(), &i)
+		i.Neg(&i)
+	}
+	return i
+}
+
+func (p Polynomial) Text(base int) string {
+
+ var builder strings.Builder
+
+ first := true
+ for d := len(p) - 1; d >= 0; d-- {
+ if p[d].IsZero() {
+ continue
+ }
+
+ i := signedBigInt(&p[d])
+
+ initialLen := builder.Len()
+
+ if i.Sign() < 1 {
+ i.Neg(&i)
+ if first {
+ builder.WriteString("-")
+ } else {
+ builder.WriteString(" - ")
+ }
+ } else if !first {
+ builder.WriteString(" + ")
+ }
+
+ first = false
+
+ asInt64 := int64(0)
+ if i.IsInt64() {
+ asInt64 = i.Int64()
+ }
+
+ if asInt64 != 1 || d == 0 {
+ builder.WriteString(i.Text(base))
+ }
+
+ if builder.Len()-initialLen > 10 {
+ builder.WriteString("×")
+ }
+
+ if d != 0 {
+ builder.WriteString("X")
+ }
+ if d > 1 {
+ builder.WriteString(
+ utils.ToSuperscript(strconv.Itoa(d)),
+ )
+ }
+
+ }
+
+ if first {
+ return "0"
+ }
+
+ return builder.String()
}
\ No newline at end of file
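
`MultiLin.Fold` above relies on the table being linear in X₁ once the other variables are fixed: f(r) = f(0) + r·(f(1) − f(0)). A small standalone check of that rule for a 2-variable table over the bn254 scalar field; the inline `fold` helper mirrors the template's loop and is our own illustration, not the generated code:

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr"
)

// fold sets X₁ := r on a table of evaluations indexed with X₁ as the top bit,
// exactly like MultiLin.Fold: bottom[i] += r * (top[i] - bottom[i]).
func fold(m []fr.Element, r fr.Element) []fr.Element {
	mid := len(m) / 2
	bottom, top := m[:mid], m[mid:]
	for i := 0; i < mid; i++ {
		top[i].Sub(&top[i], &bottom[i])
		top[i].Mul(&top[i], &r)
		bottom[i].Add(&bottom[i], &top[i])
	}
	return m[:mid]
}

func main() {
	// table of f on {0,1}², index = 2·b₁ + b₂:
	// f(0,0)=10, f(0,1)=11, f(1,0)=12, f(1,1)=13, i.e. f(b₁,b₂) = 10 + 2b₁ + b₂
	m := make([]fr.Element, 4)
	for i := range m {
		m[i].SetUint64(uint64(10 + i))
	}
	var r1, r2 fr.Element
	r1.SetUint64(5)
	r2.SetUint64(8)

	got := fold(fold(m, r1), r2)[0] // f(r1, r2) by repeated folding

	var want fr.Element
	want.SetUint64(10 + 2*5 + 8) // f is already multilinear, so just plug in r1, r2
	fmt.Println(got.Equal(&want)) // true
}
```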
diff --git a/internal/generator/polynomial/template/pool.go.tmpl b/internal/generator/polynomial/template/pool.go.tmpl
new file mode 100644
index 0000000000..9a532ab424
--- /dev/null
+++ b/internal/generator/polynomial/template/pool.go.tmpl
@@ -0,0 +1,114 @@
+
+
+import (
+ "fmt"
+ "github.com/consensys/gnark-crypto/ecc/{{.Name}}/fr"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// Memory management for polynomials
+// Copied verbatim from gkr repo
+
+// Sets a maximum for the array size we keep in pool
+const maxNForLargePool int = 1 << 24
+const maxNForSmallPool int = 256
+
+// Aliases because it is annoying to use arrays in all the places
+type largeArr = [maxNForLargePool]fr.Element
+type smallArr = [maxNForSmallPool]fr.Element
+
+var rC = sync.Map{}
+
+var (
+ largePool = sync.Pool{
+ New: func() interface{} {
+ var res largeArr
+ return &res
+ },
+ }
+ smallPool = sync.Pool{
+ New: func() interface{} {
+ var res smallArr
+ return &res
+ },
+ }
+)
+
+// ClearPool clears the pool completely, shielding against memory leaks.
+// E.g. if we forgot to dump a polynomial at some point, this ensures the value eventually gets released.
+// Returns how many polynomials were cleared that way.
+func ClearPool() int {
+ res := 0
+ rC.Range(func(k, _ interface{}) bool {
+ switch ptr := k.(type) {
+ case *largeArr:
+ largePool.Put(ptr)
+ case *smallArr:
+ smallPool.Put(ptr)
+ default:
+ panic(fmt.Sprintf("tried to clear %v", reflect.TypeOf(ptr)))
+ }
+ res++
+ return true
+ })
+ return res
+}
+
+// CountPool Returns the number of elements in the pool without mutating it
+func CountPool() int {
+ res := 0
+ rC.Range(func(_, _ interface{}) bool {
+ res++
+ return true
+ })
+ return res
+}
+
+// Make tries to find a reusable polynomial or allocates a new one
+func Make(n int) []fr.Element {
+ if n > maxNForLargePool {
+ panic(fmt.Sprintf("been provided with size of %v but the maximum is %v", n, maxNForLargePool))
+ }
+
+ if n <= maxNForSmallPool {
+ ptr := smallPool.Get().(*smallArr)
+ rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+ }
+
+ ptr := largePool.Get().(*largeArr)
+	rC.Store(ptr, struct{}{}) // registers the pointer being used
+ return (*ptr)[:n]
+}
+
+// Dump dumps a set of polynomials into the pool
+// Returns the number of deallocated polys
+func Dump(arrs ...[]fr.Element) int {
+ cnt := 0
+ for _, arr := range arrs {
+ ptr := ptr(arr)
+ pool := &smallPool
+ if len(arr) > maxNForSmallPool {
+ pool = &largePool
+ }
+		// If the pointer was not registered in rC, then either the array was
+		// allocated somewhere else (and can be ignored), or this is a double Put
+		// (which MUST be ignored).
+ if _, ok := rC.Load(ptr); ok {
+ pool.Put(ptr)
+ // And deregisters the ptr
+ rC.Delete(ptr)
+ cnt++
+ }
+ }
+ return cnt
+}
+
+func ptr(m []fr.Element) unsafe.Pointer {
+ if cap(m) != maxNForSmallPool && cap(m) != maxNForLargePool {
+		panic(fmt.Sprintf("can't cast to large or small array: the array's capacity is %v, it should be %v or %v", cap(m), maxNForLargePool, maxNForSmallPool))
+ }
+ return unsafe.Pointer(&m[0])
+}
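
The pool above is meant to be used with a strict Make/Dump discipline, with `ClearPool` as a safety net for forgotten dumps. A short usage sketch, assuming the generated per-curve package (here bn254's `fr/polynomial`) exposes these helpers as written in the template:

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254/fr/polynomial"
)

func main() {
	// borrow a slice backed by a pooled array
	buf := polynomial.Make(100)
	for i := range buf {
		buf[i].SetUint64(uint64(i))
	}

	// ... use buf ...

	// return it once done; forgetting this is what ClearPool guards against
	freed := polynomial.Dump(buf)
	fmt.Println("returned", freed, "slice(s) to the pool, still registered:", polynomial.CountPool())
}
```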
diff --git a/internal/generator/tower/asm/amd64/e2_bls381.go b/internal/generator/tower/asm/amd64/e2_bls381.go
index 57abf58a05..d67663746e 100644
--- a/internal/generator/tower/asm/amd64/e2_bls381.go
+++ b/internal/generator/tower/asm/amd64/e2_bls381.go
@@ -56,7 +56,7 @@ func (fq2 *Fq2Amd64) generateMulByNonResidueE2BLS381() {
func (fq2 *Fq2Amd64) generateSquareE2BLS381(forceCheck bool) {
// // Square sets z to the E2-product of x,x returns z
// func (z *E2) Square(x *E2) *E2 {
- // // algo 22 https://eprint.iacr.org/2010/354.pdf
+ // // adapted from algo 22 https://eprint.iacr.org/2010/354.pdf
// var a, b fp.Element
// a.Add(&x.A0, &x.A1)
// b.Sub(&x.A0, &x.A1)
diff --git a/internal/generator/tower/template/fq12over6over2/fq12.go.tmpl b/internal/generator/tower/template/fq12over6over2/fq12.go.tmpl
index 8fdcabdd00..c37fa97962 100644
--- a/internal/generator/tower/template/fq12over6over2/fq12.go.tmpl
+++ b/internal/generator/tower/template/fq12over6over2/fq12.go.tmpl
@@ -207,28 +207,45 @@ func (z *E12) CyclotomicSquareCompressed(x *E12) *E12 {
}
// DecompressKarabina Karabina's cyclotomic square result
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
func (z *E12) DecompressKarabina(x *E12) *E12 {
var t [3]E2
var one E2
one.SetOne()
- // t0 = g1^2
- t[0].Square(&x.C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t[1].Sub(&t[0], &x.C0.B2).
- Double(&t[1]).
- Add(&t[1], &t[0])
- // t0 = E * g5^2 + t1
- t[2].Square(&x.C1.B2)
- t[0].MulByNonResidue(&t[2]).
- Add(&t[0], &t[1])
- // t1 = 1/(4 * g3)
- t[1].Double(&x.C1.B0).
- Double(&t[1]).
- Inverse(&t[1]) // costly
+ // g3 == 0
+	if x.C1.B0.IsZero() {
+ t[0].Mul(&x.C0.B1, &x.C1.B2).
+ Double(&t[0])
+ // t1 = g2
+ t[1].Set(&x.C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t[0].Square(&x.C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t[1].Sub(&t[0], &x.C0.B2).
+ Double(&t[1]).
+ Add(&t[1], &t[0])
+ // t0 = E * g5^2 + t1
+ t[2].Square(&x.C1.B2)
+ t[0].MulByNonResidue(&t[2]).
+ Add(&t[0], &t[1])
+ // t1 = 4 * g3
+ t[1].Double(&x.C1.B0).
+ Double(&t[1])
+ }
+
// z4 = g4
- z.C1.B1.Mul(&t[0], &t[1])
+ z.C1.B1.Div(&t[0], &t[1]) // costly
// t1 = g2 * g1
t[1].Mul(&x.C0.B2, &x.C0.B1);
@@ -237,7 +254,7 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
Sub(&t[2], &t[1]).
Double(&t[2]).
Sub(&t[2], &t[1])
- // t1 = g3 * g5
+ // t1 = g3 * g5 (g3 can be 0)
t[1].Mul(&x.C1.B0, &x.C1.B2)
// c_0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t[2].Add(&t[2], &t[1])
@@ -253,6 +270,15 @@ func (z *E12) DecompressKarabina(x *E12) *E12 {
}
// BatchDecompressKarabina multiple Karabina's cyclotomic square results
+// if g3 != 0
+// g4 = (E * g5^2 + 3 * g1^2 - 2 * g2)/4g3
+// if g3 == 0
+// g4 = 2g1g5/g2
+//
+// if g3=g2=0 then g4=g5=g1=0 and g0=1 (x=1)
+// Theorem 3.1 is well-defined for all x in Gϕₙ\{1}
+//
+// Divisions by 4g3 or g2 are batched using Montgomery batch inverse
func BatchDecompressKarabina(x []E12) []E12 {
n := len(x)
@@ -268,19 +294,29 @@ func BatchDecompressKarabina(x []E12) []E12 {
one.SetOne()
for i := 0; i < n; i++ {
- // t0 = g1^2
- t0[i].Square(&x[i].C0.B1)
- // t1 = 3 * g1^2 - 2 * g2
- t1[i].Sub(&t0[i], &x[i].C0.B2).
- Double(&t1[i]).
- Add(&t1[i], &t0[i])
- // t0 = E * g5^2 + t1
- t2[i].Square(&x[i].C1.B2)
- t0[i].MulByNonResidue(&t2[i]).
- Add(&t0[i], &t1[i])
- // t1 = 4 * g3
- t1[i].Double(&x[i].C1.B0).
- Double(&t1[i])
+ // g3 == 0
+		if x[i].C1.B0.IsZero() {
+ t0[i].Mul(&x[i].C0.B1, &x[i].C1.B2).
+ Double(&t0[i])
+ // t1 = g2
+ t1[i].Set(&x[i].C0.B2)
+
+ // g3 != 0
+ } else {
+ // t0 = g1^2
+ t0[i].Square(&x[i].C0.B1)
+ // t1 = 3 * g1^2 - 2 * g2
+ t1[i].Sub(&t0[i], &x[i].C0.B2).
+ Double(&t1[i]).
+ Add(&t1[i], &t0[i])
+ // t0 = E * g5^2 + t1
+ t2[i].Square(&x[i].C1.B2)
+ t0[i].MulByNonResidue(&t2[i]).
+ Add(&t0[i], &t1[i])
+ // t1 = 4 * g3
+ t1[i].Double(&x[i].C1.B0).
+ Double(&t1[i])
+ }
}
t1 = BatchInvertE2(t1) // costs 1 inverse
@@ -297,7 +333,7 @@ func BatchDecompressKarabina(x []E12) []E12 {
t2[i].Double(&t2[i])
t2[i].Sub(&t2[i], &t1[i])
- // t1 = g3 * g5
+		// t1 = g3 * g5 (g3 can be 0 for some entries)
t1[i].Mul(&x[i].C1.B0, &x[i].C1.B2)
// z0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1
t2[i].Add(&t2[i], &t1[i])
@@ -349,6 +385,8 @@ func (z *E12) CyclotomicSquare(x *E12) *E12 {
// Inverse set z to the inverse of x in E12 and return z
+//
+// if x == 0, sets and returns z = x
func (z *E12) Inverse(x *E12) *E12 {
// Algorithm 23 from https://eprint.iacr.org/2010/354.pdf
@@ -366,6 +404,8 @@ func (z *E12) Inverse(x *E12) *E12 {
// BatchInvertE12 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE12(a []E12) []E12 {
res := make([]E12, len(a))
if len(a) == 0 {
@@ -408,7 +448,7 @@ func (z *E12) Exp(x E12, k *big.Int) *E12 {
e := k
if k.Sign() == -1 {
// negative k, we invert
- // if k < 0: xᵏ (mod q) == (x⁻¹)ᵏ (mod q)
+ // if k < 0: xᵏ (mod q¹²) == (x⁻¹)ᵏ (mod q¹²)
x.Inverse(&x)
// we negate k in a temp big.Int since
@@ -757,13 +797,14 @@ func (z *E12) CompressTorus() (E6, error) {
return res, nil
}
-// BatchCompressTorus GT/E12 elements to half their size
-// using a batch inversion
+// BatchCompressTorus compresses GT/E12 elements to half their size using a batch inversion.
+//
+// if len(x) == 0 or if any x[i].C1 coordinate is 0, this function returns an error.
func BatchCompressTorus(x []E12) ([]E6, error) {
n := len(x)
if n == 0 {
- return []E6{}, errors.New("invalid input size")
+ return nil, errors.New("invalid input size")
}
var one E6
@@ -772,6 +813,10 @@ func BatchCompressTorus(x []E12) ([]E6, error) {
for i := 0; i < n; i++ {
res[i].Set(&x[i].C1)
+		// return an error if any of the x[i].C1 is 0
+ if res[i].IsZero() {
+ return nil, errors.New("invalid input; C1 is 0")
+ }
}
t := BatchInvertE6(res) // costs 1 inverse
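
The Karabina edge case handled above is easy to exercise end to end: compressed squaring followed by decompression must agree with a plain cyclotomic square. A sketch against the generated bn254 package, where `GT` is the exported `E12`; using a pairing output to land in the cyclotomic subgroup is our choice of setup, not part of the fix:

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254"
)

func main() {
	_, _, g1, g2 := bn254.Generators()
	gt, err := bn254.Pair([]bn254.G1Affine{g1}, []bn254.G2Affine{g2})
	if err != nil {
		panic(err)
	}

	// gt lives in the cyclotomic subgroup, so the compressed square applies
	var sq, viaKarabina bn254.GT
	sq.CyclotomicSquare(&gt)
	viaKarabina.CyclotomicSquareCompressed(&gt)
	viaKarabina.DecompressKarabina(&viaKarabina)

	fmt.Println(sq.Equal(&viaKarabina)) // true
}
```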
diff --git a/internal/generator/tower/template/fq12over6over2/fq2.go.tmpl b/internal/generator/tower/template/fq12over6over2/fq2.go.tmpl
index 11612bc060..471d6af8f6 100644
--- a/internal/generator/tower/template/fq12over6over2/fq2.go.tmpl
+++ b/internal/generator/tower/template/fq12over6over2/fq2.go.tmpl
@@ -274,6 +274,8 @@ func (z *E2) Exp(x E2, k *big.Int) *E2 {
// BatchInvertE2 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE2(a []E2) []E2 {
res := make([]E2, len(a))
if len(a) == 0 {
diff --git a/internal/generator/tower/template/fq12over6over2/fq6.go.tmpl b/internal/generator/tower/template/fq12over6over2/fq6.go.tmpl
index dc4c1bca45..71337e9f6c 100644
--- a/internal/generator/tower/template/fq12over6over2/fq6.go.tmpl
+++ b/internal/generator/tower/template/fq12over6over2/fq6.go.tmpl
@@ -224,6 +224,8 @@ func (z *E6) Square(x *E6) *E6 {
}
// Inverse an element in E6
+//
+// if x == 0, sets and returns z = x
func (z *E6) Inverse(x *E6) *E6 {
// Algorithm 17 from https://eprint.iacr.org/2010/354.pdf
// step 9 is wrong in the paper it's t1-t4
@@ -252,6 +254,8 @@ func (z *E6) Inverse(x *E6) *E6 {
// BatchInvertE6 returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick
+//
+// if a[i] == 0, returns result[i] = a[i]
func BatchInvertE6(a []E6) []E6 {
res := make([]E6, len(a))
if len(a) == 0 {
diff --git a/internal/generator/tower/template/fq12over6over2/tests/fq12.go.tmpl b/internal/generator/tower/template/fq12over6over2/tests/fq12.go.tmpl
index e94dc8988a..d7131bd297 100644
--- a/internal/generator/tower/template/fq12over6over2/tests/fq12.go.tmpl
+++ b/internal/generator/tower/template/fq12over6over2/tests/fq12.go.tmpl
@@ -324,13 +324,29 @@ func TestE12Ops(t *testing.T) {
properties.Property("[{{ toUpper $Name }}] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll(
func(a *E12) bool {
- var b, c, d E12
+ var _a, b, c, d, _c, _d E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 != 0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
+
+ // case g3 != 0
c.Square(a)
d.CyclotomicSquareCompressed(a).DecompressKarabina(&d)
+
+ // case g3 == 0
+ _c.Square(&_a)
+ _d.CyclotomicSquareCompressed(&_a).DecompressKarabina(&_d)
+
-			return c.Equal(&d)
+			return c.Equal(&d) && _c.Equal(&_d)
},
genA,
@@ -338,18 +354,26 @@ func TestE12Ops(t *testing.T) {
properties.Property("[{{ toUpper $Name }}] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll(
func(a *E12) bool {
- var b E12
- // put in the cyclotomic subgroup
+ var _a, b E12
+ _a.SetOne().Double(&_a)
+
+ // put a and _a in the cyclotomic subgroup
+ // a (g3 !=0 probably)
b.Conjugate(a)
a.Inverse(a)
b.Mul(&b, a)
a.FrobeniusSquare(&b).Mul(a, &b)
-
+ // _a (g3 == 0)
+ b.Conjugate(&_a)
+ _a.Inverse(&_a)
+ b.Mul(&b, &_a)
+ _a.FrobeniusSquare(&b).Mul(&_a, &b)
+
var a2, a4, a17 E12
- a2.Set(a)
+ a2.Set(&_a)
a4.Set(a)
a17.Set(a)
- a2.nSquareCompressed(2)
+ a2.nSquareCompressed(2) // case g3 == 0
a4.nSquareCompressed(4)
a17.nSquareCompressed(17)
batch := BatchDecompressKarabina([]E12{a2, a4, a17})
diff --git a/internal/generator/tower/template/fq12over6over2/tests/fq2.go.tmpl b/internal/generator/tower/template/fq12over6over2/tests/fq2.go.tmpl
index eaadf32efb..057acab5ef 100644
--- a/internal/generator/tower/template/fq12over6over2/tests/fq2.go.tmpl
+++ b/internal/generator/tower/template/fq12over6over2/tests/fq2.go.tmpl
@@ -173,12 +173,7 @@ func TestE2ReceiverIsOperand(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
+
}
func TestE2MulMaxed(t *testing.T) {
@@ -401,12 +396,6 @@ func TestE2Ops(t *testing.T) {
properties.TestingRun(t, gopter.ConsoleReporter(false))
- if supportAdx {
- t.Log("disabling ADX")
- supportAdx = false
- properties.TestingRun(t, gopter.ConsoleReporter(false))
- supportAdx = true
- }
}
// ------------------------------------------------------------
diff --git a/utils/supsub.go b/utils/supsub.go
new file mode 100644
index 0000000000..759d4e394f
--- /dev/null
+++ b/utils/supsub.go
@@ -0,0 +1,251 @@
+package utils
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// The following is taken from https://github.com/lynn9388/supsub
+// developed by lynn9388@gmail.com under Apache 2.0.
+// It provides a mapping from normal unicode text to superscript
+// and subscript.
+
+var superscripts = map[rune]rune{
+ // Superscripts - Superscripts and Subscripts
+ // https://www.unicode.org/charts/PDF/U2070.pdf
+ '\u0030': '\u2070',
+ '\u0069': '\u2071',
+ '\u0034': '\u2074',
+ '\u0035': '\u2075',
+ '\u0036': '\u2076',
+ '\u0037': '\u2077',
+ '\u0038': '\u2078',
+ '\u0039': '\u2079',
+ '\u002b': '\u207a',
+ '\u2212': '\u207b',
+ '\u003d': '\u207c',
+ '\u0028': '\u207d',
+ '\u0029': '\u207e',
+ '\u006e': '\u207f',
+
+ // Latin superscript modifier letters - Spacing Modifier Letters
+ // https://www.unicode.org/charts/PDF/U02B0.pdf
+ '\u0068': '\u02b0',
+ '\u0266': '\u02b1',
+ '\u006a': '\u02b2',
+ '\u0072': '\u02b3',
+ '\u0279': '\u02b4',
+ '\u027b': '\u02b5',
+ '\u0281': '\u02b6',
+ '\u0077': '\u02b7',
+ '\u0079': '\u02b8',
+
+ // Additions based on 1989 IPA - Spacing Modifier Letters
+ // https://www.unicode.org/charts/PDF/U02B0.pdf
+ '\u0263': '\u02e0',
+ '\u006c': '\u02e1',
+ '\u0073': '\u02e2',
+ '\u0078': '\u02e3',
+ '\u0295': '\u02e4',
+
+ // Latin superscript modifier letters - Phonetic Extensions
+ // https://www.unicode.org/charts/PDF/U1D00.pdf
+ '\u0041': '\u1d2c',
+ '\u00c6': '\u1d2d',
+ '\u0042': '\u1d2e',
+ '\u0044': '\u1d30',
+ '\u0045': '\u1d31',
+ '\u018e': '\u1d32',
+ '\u0047': '\u1d33',
+ '\u0048': '\u1d34',
+ '\u0049': '\u1d35',
+ '\u004a': '\u1d36',
+ '\u004b': '\u1d37',
+ '\u004c': '\u1d38',
+ '\u004d': '\u1d39',
+ '\u004e': '\u1d3a',
+ '\u004f': '\u1d3c',
+ '\u0222': '\u1d3d',
+ '\u0050': '\u1d3e',
+ '\u0052': '\u1d3f',
+ '\u0054': '\u1d40',
+ '\u0055': '\u1d41',
+ '\u0057': '\u1d42',
+ '\u0061': '\u1d43', // '\u0061'(a) '\u1d43'(ᵃ) '\u00aa'(ª)
+ '\u0250': '\u1d44',
+ '\u0251': '\u1d45',
+ '\u1d02': '\u1d46',
+ '\u0062': '\u1d47',
+ '\u0064': '\u1d48',
+ '\u0065': '\u1d49',
+ '\u0259': '\u1d4a',
+ '\u025b': '\u1d4b',
+ '\u1d08': '\u1d4c',
+ '\u0067': '\u1d4d',
+ '\u006b': '\u1d4f',
+ '\u006d': '\u1d50',
+ '\u014b': '\u1d51',
+ '\u006f': '\u1d52', // '\u006f'(o) '\u1d52'(ᵒ) '\u00ba'(º)
+ '\u0254': '\u1d53',
+ '\u1d16': '\u1d54',
+ '\u1d17': '\u1d55',
+ '\u0070': '\u1d56',
+ '\u0074': '\u1d57',
+ '\u0075': '\u1d58',
+ '\u1d1d': '\u1d59',
+ '\u026f': '\u1d5a',
+ '\u0076': '\u1d5b',
+ '\u1d25': '\u1d5c',
+
+ // Greek superscript modifier letters - Phonetic Extensions
+ // https://www.unicode.org/charts/PDF/U1D00.pdf
+ '\u03b2': '\u1d5d',
+ '\u03b3': '\u1d5e',
+ '\u03b4': '\u1d5f',
+ '\u03c6': '\u1d60',
+ '\u03c7': '\u1d61',
+
+ // Caucasian linguistics - Phonetic Extensions
+ // https://www.unicode.org/charts/PDF/U1D00.pdf
+ '\u043d': '\u1d78',
+
+ // Modifier letters - Phonetic Extensions Supplement
+ // https://www.unicode.org/charts/PDF/U1D80.pdf
+ '\u0252': '\u1d9b',
+ '\u0063': '\u1d9c',
+ '\u0255': '\u1d9d',
+ '\u00f0': '\u1d9e',
+ '\u025c': '\u1d9f',
+ '\u0066': '\u1da0',
+ '\u025f': '\u1da1',
+ '\u0261': '\u1da2',
+ '\u0265': '\u1da3',
+ '\u0268': '\u1da4',
+ '\u0269': '\u1da5',
+ '\u026a': '\u1da6',
+ '\u1d7b': '\u1da7',
+ '\u029d': '\u1da8',
+ '\u026d': '\u1da9',
+ '\u1d85': '\u1daa',
+ '\u029f': '\u1dab',
+ '\u0271': '\u1dac',
+ '\u0270': '\u1dad',
+ '\u0272': '\u1dae',
+ '\u0273': '\u1daf',
+ '\u0274': '\u1db0',
+ '\u0275': '\u1db1',
+ '\u0278': '\u1db2',
+ '\u0282': '\u1db3',
+ '\u0283': '\u1db4',
+ '\u01ab': '\u1db5',
+ '\u0289': '\u1db6',
+ '\u028a': '\u1db7',
+ '\u1d1c': '\u1db8',
+ '\u028b': '\u1db9',
+ '\u028c': '\u1dba',
+ '\u007a': '\u1dbb',
+ '\u0290': '\u1dbc',
+ '\u0291': '\u1dbd',
+ '\u0292': '\u1dbe',
+ '\u03b8': '\u1dbf',
+
+ // Latin-1 punctuation and symbols - C1 Controls and Latin-1 Supplement
+ // https://www.unicode.org/charts/PDF/U0080.pdf
+ //'\u0061': '\u00aa', // '\u0061'(a) '\u1d43'(ᵃ) '\u00aa'(ª)
+ '\u0032': '\u00b2',
+ '\u0033': '\u00b3',
+ '\u0031': '\u00b9',
+ //'\u006f': '\u00ba', // '\u006f'(o) '\u1d52'(ᵒ) '\u00ba'(º)
+}
+var subscripts = map[rune]rune{
+ // Subscripts - Superscripts and Subscripts
+ // https://www.unicode.org/charts/PDF/U2070.pdf
+ '\u0030': '\u2080',
+ '\u0031': '\u2081',
+ '\u0032': '\u2082',
+ '\u0033': '\u2083',
+ '\u0034': '\u2084',
+ '\u0035': '\u2085',
+ '\u0036': '\u2086',
+ '\u0037': '\u2087',
+ '\u0038': '\u2088',
+ '\u0039': '\u2089',
+ '\u002b': '\u208a',
+ '\u2212': '\u208b',
+ '\u003d': '\u208c',
+ '\u0028': '\u208d',
+ '\u0029': '\u208e',
+ '\u0061': '\u2090',
+ '\u0065': '\u2091',
+ '\u006f': '\u2092',
+ '\u0078': '\u2093',
+ '\u0259': '\u2094',
+
+ // Subscripts for UPA - Superscripts and Subscripts
+ // https://www.unicode.org/charts/PDF/U2070.pdf
+ '\u0068': '\u2095',
+ '\u006b': '\u2096',
+ '\u006c': '\u2097',
+ '\u006d': '\u2098',
+ '\u006e': '\u2099',
+ '\u0070': '\u209a',
+ '\u0073': '\u209b',
+ '\u0074': '\u209c',
+
+ // Latin subscript modifier letters - Phonetic Extensions
+ // https://www.unicode.org/charts/PDF/U1D00.pdf
+ '\u0069': '\u1d62',
+ '\u0072': '\u1d63',
+ '\u0075': '\u1d64',
+ '\u0076': '\u1d65',
+
+ // Greek subscript modifier letters - Phonetic Extensions
+ // https://www.unicode.org/charts/PDF/U1D00.pdf
+ '\u03b2': '\u1d66',
+ '\u03b3': '\u1d67',
+ '\u03c1': '\u1d68',
+ '\u03c6': '\u1d69',
+ '\u03c7': '\u1d6a',
+}
+
+// sup converts a rune to superscript. It returns the superscript, or the
+// original rune and an error if there is no corresponding superscript.
+func sup(r rune) (rune, error) {
+ s, ok := superscripts[r]
+ if !ok {
+ return r, fmt.Errorf("no corresponding superscript: %c", r)
+ }
+ return s, nil
+}
+
+// ToSuperscript converts a string to superscript as far as possible, keeping the
+// original rune wherever there is no corresponding superscript for a letter.
+func ToSuperscript(s string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, len(s)*3))
+ for _, r := range s {
+ sup, _ := sup(r)
+ buf.WriteRune(sup)
+ }
+ return buf.String()
+}
+
+// sub converts a rune to subscript. It returns the subscript, or the original
+// rune and an error if there is no corresponding subscript.
+func sub(r rune) (rune, error) {
+ s, ok := subscripts[r]
+ if !ok {
+ return r, fmt.Errorf("no corresponding subscript: %c", r)
+ }
+ return s, nil
+}
+
+// ToSubscript converts a string to subscript as far as possible, keeping the
+// original rune wherever there is no corresponding subscript for a letter.
+func ToSubscript(s string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, len(s)*3))
+ for _, r := range s {
+ sub, _ := sub(r)
+ buf.WriteRune(sub)
+ }
+ return buf.String()
+}
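
These tables back the pretty-printing used elsewhere in this change (e.g. the `supScr` template helper and `Polynomial.Text`). A one-line usage sketch, assuming the package is importable as `github.com/consensys/gnark-crypto/utils`:

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/utils"
)

func main() {
	fmt.Println("x" + utils.ToSuperscript("42")) // x⁴²
	fmt.Println("b" + utils.ToSubscript("1"))    // b₁
}
```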