2 changes: 1 addition & 1 deletion go.mod
@@ -62,6 +62,7 @@ require (
github.com/lib/pq v1.10.9
github.com/redis/go-redis/v9 v9.0.4
github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0
golang.org/x/crypto v0.41.0
)

require (
@@ -308,7 +309,6 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/ratelimit v0.3.1 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/crypto v0.41.0 // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/term v0.34.0 // indirect
29 changes: 24 additions & 5 deletions protocol/hashing.go
@@ -1,11 +1,30 @@
package protocol

import "github.com/ethereum/go-ethereum/crypto"
import (
	"hash"
	"sync"

	"golang.org/x/crypto/sha3"
)

var hasherPool = sync.Pool{
	New: func() any {
		return sha3.NewLegacyKeccak256()
	},
}
Comment on lines +10 to +14
Collaborator

Why is this needed?

Collaborator Author

It lets you reuse the keccak objects instead of reallocating them every time. I'll add a benchmark; now I'm curious how impactful it is.
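
For readers new to sync.Pool, here is a minimal standalone sketch of the Get/Reset/Put lifecycle the change relies on (illustrative only, not part of this PR; the Reset call matters because Get can hand back a hasher that still holds state from its previous user):

package main

import (
	"fmt"
	"hash"
	"sync"

	"golang.org/x/crypto/sha3"
)

var pool = sync.Pool{
	// New is only called when the pool has nothing to hand out.
	New: func() any { return sha3.NewLegacyKeccak256() },
}

func main() {
	h := pool.Get().(hash.Hash) // a reused hasher, or a fresh one from New
	h.Reset()                   // discard any state left by a previous caller
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
	pool.Put(h) // return it so the next Get can skip the allocation
}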

Collaborator Author

Looks like object pooling improves things from 378ns -> 251.2ns. I tested with different inputs, and it seems to be a fixed ~100ns overhead for object allocation with the total duration going up based on the input data size.

goos: linux
goarch: amd64
pkg: github.com/smartcontractkit/chainlink-ccv/protocol
cpu: 13th Gen Intel(R) Core(TM) i9-13900HX
BenchmarkHashing
BenchmarkHashing-32            	 4575128	       251.2 ns/op
BenchmarkHashingBaseline
BenchmarkHashingBaseline-32    	 2765898	       378.0 ns/op
PASS
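
(For reference, that is 378.0 - 251.2 ≈ 127 ns/op saved on the 258-byte benchmark input, in line with the roughly fixed per-call allocation cost described above.)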


// Keccak256 computes the Keccak256 hash of the input.
func Keccak256(data []byte) [32]byte {
	hash := crypto.Keccak256(data)
	var result [32]byte
	copy(result[:], hash)
	return result
	h, ok := hasherPool.Get().(hash.Hash)
	if !ok {
		// This should never happen, but just in case.
		h = sha3.NewLegacyKeccak256()
	}

	h.Reset()
	h.Write(data) //nolint // keccak256 never returns an error
	var out [32]byte
	copy(out[:], h.Sum(nil))
	hasherPool.Put(h)
	return out
}
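
Since the change swaps the github.com/ethereum/go-ethereum/crypto helper for golang.org/x/crypto/sha3, a cross-check along these lines can confirm the two paths agree (a sketch assuming go-ethereum remains available as a dependency; TestKeccak256MatchesGoEthereum is a hypothetical test, not part of this PR):

package protocol

import (
	"bytes"
	"testing"

	"github.com/ethereum/go-ethereum/crypto"
)

// TestKeccak256MatchesGoEthereum cross-checks the new pooled implementation
// against go-ethereum's Keccak256 helper; both compute legacy Keccak-256.
func TestKeccak256MatchesGoEthereum(t *testing.T) {
	input := []byte("cross-check input")
	got := Keccak256(input)
	want := crypto.Keccak256(input)
	if !bytes.Equal(got[:], want) {
		t.Fatalf("digest mismatch: got %x, want %x", got, want)
	}
}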
25 changes: 25 additions & 0 deletions protocol/hashing_test.go
@@ -0,0 +1,25 @@
package protocol

import (
	"testing"

	"golang.org/x/crypto/sha3"
)

var data = []byte("The quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy dogThe quick brown fox jumps over the lazy dog")

func BenchmarkHashing(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Keccak256(data)
	}
}

func BenchmarkHashingBaseline(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// Baseline: allocate a fresh hasher on every iteration instead of pooling it.
		h := sha3.NewLegacyKeccak256()
		h.Write(data)
		var out [32]byte
		copy(out[:], h.Sum(nil))
	}
}
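
To see the allocation difference directly, alongside the timings quoted above, the benchmarks can be run with the allocation counter enabled, e.g. go test -bench=Hashing -benchmem ./protocol; the pooled path should report fewer allocs/op than the baseline.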