From a4ee944cbfb395a3a27b5b070446fe80b7ce5809 Mon Sep 17 00:00:00 2001
From: Zsolt Felfoldi
Date: Tue, 20 Sep 2022 21:36:31 +0200
Subject: [PATCH 1/2] cmd/blsync, beacon/light: standalone beacon light sync
 tool

---
 beacon/light/api/light_api.go               | 675 ++++++++++++++++++++
 beacon/light/api/syncer.go                  | 264 ++++++++
 beacon/light/sync/checkpoint.go             | 234 +++++++
 beacon/light/sync/committee_signature.go    | 290 +++++++++
 beacon/light/sync/committee_tracker.go      | 513 +++++++++++++++
 beacon/light/sync/committee_tracker_test.go | 439 +++++++++++++
 beacon/light/sync/committee_update.go       | 465 ++++++++++++++
 beacon/light/sync/signed_head.go            | 211 ++++++
 beacon/light/types/header.go                | 103 +++
 beacon/light/types/protocol.go              | 194 ++++++
 beacon/merkle/binary_merkle.go              | 494 ++++++++++++++
 beacon/merkle/binary_merkle_test.go         | 259 ++++++++
 beacon/params/constants.go                  |  29 +
 beacon/params/tree_indices.go               |  46 ++
 cmd/blsync/config.go                        | 177 +++++
 cmd/blsync/main.go                          | 246 +++++++
 cmd/blsync/test.go                          | 351 ++++++++++
 cmd/utils/flags.go                          |  63 ++
 common/types.go                             |  20 +
 go.mod                                      |   7 +
 go.sum                                      |  18 +
 internal/flags/categories.go                |   1 +
 node/node.go                                |  24 +-
 23 files changed, 5114 insertions(+), 9 deletions(-)
 create mode 100644 beacon/light/api/light_api.go
 create mode 100644 beacon/light/api/syncer.go
 create mode 100644 beacon/light/sync/checkpoint.go
 create mode 100644 beacon/light/sync/committee_signature.go
 create mode 100644 beacon/light/sync/committee_tracker.go
 create mode 100644 beacon/light/sync/committee_tracker_test.go
 create mode 100644 beacon/light/sync/committee_update.go
 create mode 100644 beacon/light/sync/signed_head.go
 create mode 100644 beacon/light/types/header.go
 create mode 100644 beacon/light/types/protocol.go
 create mode 100644 beacon/merkle/binary_merkle.go
 create mode 100644 beacon/merkle/binary_merkle_test.go
 create mode 100644 beacon/params/constants.go
 create mode 100644 beacon/params/tree_indices.go
 create mode 100644 cmd/blsync/config.go
 create mode 100644 cmd/blsync/main.go
 create mode 100644 cmd/blsync/test.go

diff --git a/beacon/light/api/light_api.go b/beacon/light/api/light_api.go
new file mode 100644
index 000000000000..2c3e6de9f9df
--- /dev/null
+++ b/beacon/light/api/light_api.go
@@ -0,0 +1,675 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package api + +import ( + "context" + "encoding/binary" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math/big" + "net/http" + "strconv" + "time" + + "github.com/donovanhide/eventsource" + + "github.com/ethereum/go-ethereum/beacon/light/sync" + "github.com/ethereum/go-ethereum/beacon/light/types" + "github.com/ethereum/go-ethereum/beacon/merkle" + "github.com/ethereum/go-ethereum/beacon/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + ctypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie" + "github.com/holiman/uint256" + "github.com/protolambda/zrnt/eth2/beacon/bellatrix" + "github.com/protolambda/zrnt/eth2/configs" + "github.com/protolambda/ztyp/tree" +) + +// BeaconLightApi requests light client information from a beacon node REST API. +// Note: all required API endpoints are currently only implemented by Lodestar. +type BeaconLightApi struct { + url string + client *http.Client + customHeaders map[string]string + stateProofVersion int +} + +func NewBeaconLightApi(url string, customHeaders map[string]string, stateProofVersion int) *BeaconLightApi { + return &BeaconLightApi{ + url: url, + client: &http.Client{ + Timeout: time.Second * 10, + }, + customHeaders: customHeaders, + stateProofVersion: stateProofVersion, + } +} + +func (api *BeaconLightApi) httpGet(path string) ([]byte, error) { + req, err := http.NewRequest("GET", api.url+path, nil) + if err != nil { + return nil, err + } + for k, v := range api.customHeaders { + req.Header.Set(k, v) + } + resp, err := api.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Error from API endpoint \"%s\": status code %d", path, resp.StatusCode) + } + return io.ReadAll(resp.Body) +} + +// Header defines a beacon header and supports JSON encoding according to the +// standard beacon API format +// +// See data structure definition here: +// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#beaconblockheader +type jsonHeader struct { + Slot common.Decimal `json:"slot"` + ProposerIndex common.Decimal `json:"proposer_index"` + ParentRoot common.Hash `json:"parent_root"` + StateRoot common.Hash `json:"state_root"` + BodyRoot common.Hash `json:"body_root"` +} + +type jsonBeaconHeader struct { + Beacon jsonHeader `json:"beacon"` +} + +func (h *jsonHeader) header() types.Header { + return types.Header{ + Slot: uint64(h.Slot), + ProposerIndex: uint64(h.ProposerIndex), + ParentRoot: h.ParentRoot, + StateRoot: h.StateRoot, + BodyRoot: h.BodyRoot, + } +} + +func (h *jsonBeaconHeader) header() types.Header { + return h.Beacon.header() +} + +// GetBestUpdateAndCommittee fetches and validates LightClientUpdate for given +// period and full serialized committee for the next period (committee root hash +// equals update.NextSyncCommitteeRoot). +// Note that the results are validated but the update signature should be verified +// by the caller as its validity depends on the update chain. 
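+//
+// A minimal usage sketch (illustrative only; the URL is an assumption, 9596
+// being the default Lodestar REST port):
+//
+//	api := NewBeaconLightApi("http://localhost:9596", nil, 0)
+//	update, committee, err := api.GetBestUpdateAndCommittee(period)
+//	if err == nil {
+//		// committee is the serialized committee of period+1; its root equals
+//		// update.NextSyncCommitteeRoot. The signature is not verified here.
+//	}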
+func (api *BeaconLightApi) GetBestUpdateAndCommittee(period uint64) (types.LightClientUpdate, []byte, error) { + resp, err := api.httpGet("/eth/v1/beacon/light_client/updates?start_period=" + strconv.Itoa(int(period)) + "&count=1") + if err != nil { + return types.LightClientUpdate{}, nil, err + } + + // See data structure definition here: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate + type committeeUpdate struct { + Header jsonBeaconHeader `json:"attested_header"` + NextSyncCommittee syncCommitteeJson `json:"next_sync_committee"` + NextSyncCommitteeBranch merkle.Values `json:"next_sync_committee_branch"` + FinalizedHeader jsonBeaconHeader `json:"finalized_header"` + FinalityBranch merkle.Values `json:"finality_branch"` + Aggregate syncAggregate `json:"sync_aggregate"` + SignatureSlot common.Decimal `json:"signature_slot"` + } + + var data []struct { + Data committeeUpdate `json:"data"` + } + if err := json.Unmarshal(resp, &data); err != nil { + return types.LightClientUpdate{}, nil, err + } + if len(data) != 1 { + return types.LightClientUpdate{}, nil, errors.New("invalid number of committee updates") + } + c := data[0].Data + header := c.Header.header() + if header.SyncPeriod() != period { + return types.LightClientUpdate{}, nil, errors.New("wrong committee update header period") + } + if types.PeriodOfSlot(uint64(c.SignatureSlot)) != period { + return types.LightClientUpdate{}, nil, errors.New("wrong committee update signature period") + } + if len(c.NextSyncCommittee.Pubkeys) != params.SyncCommitteeSize { + return types.LightClientUpdate{}, nil, errors.New("invalid number of pubkeys in next_sync_committee") + } + + committee, ok := c.NextSyncCommittee.serialize() + if !ok { + return types.LightClientUpdate{}, nil, errors.New("invalid sync committee") + } + update := types.LightClientUpdate{ + Header: header, + NextSyncCommitteeRoot: sync.SerializedCommitteeRoot(committee), + NextSyncCommitteeBranch: c.NextSyncCommitteeBranch, + FinalizedHeader: c.FinalizedHeader.header(), + FinalityBranch: c.FinalityBranch, + SyncCommitteeBits: c.Aggregate.BitMask, + SyncCommitteeSignature: c.Aggregate.Signature, + } + if err := update.Validate(); err != nil { + return types.LightClientUpdate{}, nil, err + } + return update, committee, nil +} + +// syncAggregate represents an aggregated BLS signature with BitMask referring +// to a subset of the corresponding sync committee +// +// See data structure definition here: +// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#syncaggregate +type syncAggregate struct { + BitMask hexutil.Bytes `json:"sync_committee_bits"` + Signature hexutil.Bytes `json:"sync_committee_signature"` +} + +// GetOptimisticHeadUpdate fetches a signed header based on the latest available +// optimistic update. Note that the signature should be verified by the caller +// as its validity depends on the update chain. 
+//
+// See data structure definition here:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
+func (api *BeaconLightApi) GetOptimisticHeadUpdate() (sync.SignedHead, error) {
+	resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update")
+	if err != nil {
+		return sync.SignedHead{}, err
+	}
+	return decodeOptimisticHeadUpdate(resp)
+}
+
+func decodeOptimisticHeadUpdate(enc []byte) (sync.SignedHead, error) {
+	var data struct {
+		Data struct {
+			Header        jsonBeaconHeader `json:"attested_header"`
+			Aggregate     syncAggregate    `json:"sync_aggregate"`
+			SignatureSlot common.Decimal   `json:"signature_slot"`
+		} `json:"data"`
+	}
+	if err := json.Unmarshal(enc, &data); err != nil {
+		return sync.SignedHead{}, err
+	}
+	if data.Data.Header.Beacon.StateRoot == (common.Hash{}) {
+		// workaround for different event encoding format in Lodestar
+		if err := json.Unmarshal(enc, &data.Data); err != nil {
+			return sync.SignedHead{}, err
+		}
+	}
+
+	if len(data.Data.Aggregate.BitMask) != params.SyncCommitteeBitmaskSize {
+		return sync.SignedHead{}, errors.New("invalid sync_committee_bits length")
+	}
+	if len(data.Data.Aggregate.Signature) != params.BlsSignatureSize {
+		return sync.SignedHead{}, errors.New("invalid sync_committee_signature length")
+	}
+	return sync.SignedHead{
+		Header:        data.Data.Header.header(),
+		BitMask:       data.Data.Aggregate.BitMask,
+		Signature:     data.Data.Aggregate.Signature,
+		SignatureSlot: uint64(data.Data.SignatureSlot),
+	}, nil
+}
+
+// syncCommitteeJson is the JSON representation of a sync committee
+//
+// See data structure definition here:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#synccommittee
+type syncCommitteeJson struct {
+	Pubkeys   []hexutil.Bytes `json:"pubkeys"`
+	Aggregate hexutil.Bytes   `json:"aggregate_pubkey"`
+}
+
+// serialize returns the serialized version of the committee
+func (s *syncCommitteeJson) serialize() ([]byte, bool) {
+	if len(s.Pubkeys) != params.SyncCommitteeSize {
+		return nil, false
+	}
+	sk := make([]byte, sync.SerializedCommitteeSize)
+	for i, key := range s.Pubkeys {
+		if len(key) != params.BlsPubkeySize {
+			return nil, false
+		}
+		copy(sk[i*params.BlsPubkeySize:(i+1)*params.BlsPubkeySize], key[:])
+	}
+	if len(s.Aggregate) != params.BlsPubkeySize {
+		return nil, false
+	}
+	copy(sk[params.SyncCommitteeSize*params.BlsPubkeySize:], s.Aggregate[:])
+	return sk, true
+}
+
+// GetHeader fetches and validates the beacon header with the given blockRoot.
+// If blockRoot is null hash then the latest head header is fetched.
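+//
+// A short usage sketch (illustrative):
+//
+//	head, err := api.GetHeader(common.Hash{}) // zero root requests the current head
+//	if err == nil {
+//		fmt.Println("head slot:", head.Slot)
+//	}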
+func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, error) { + path := "/eth/v1/beacon/headers/" + if blockRoot == (common.Hash{}) { + path += "head" + } else { + path += blockRoot.Hex() + } + resp, err := api.httpGet(path) + if err != nil { + return types.Header{}, err + } + + var data struct { + Data struct { + Root common.Hash `json:"root"` + Canonical bool `json:"canonical"` + Header struct { + Message jsonHeader `json:"message"` + Signature hexutil.Bytes `json:"signature"` + } `json:"header"` + } `json:"data"` + } + if err := json.Unmarshal(resp, &data); err != nil { + return types.Header{}, err + } + header := data.Data.Header.Message.header() + if blockRoot == (common.Hash{}) { + blockRoot = data.Data.Root + } + if header.Hash() != blockRoot { + return types.Header{}, errors.New("retrieved beacon header root does not match") + } + return header, nil +} + +// does not verify state root +func (api *BeaconLightApi) GetHeadStateProof(format merkle.ProofFormat, paths []string) (merkle.MultiProof, error) { + if api.stateProofVersion >= 2 { + encFormat, bitLength := EncodeCompactProofFormat(format) + return api.getStateProof("head", format, encFormat, bitLength) + } else { + proof, _, err := api.getOldStateProof("head", format, paths) + return proof, err + } +} + +type StateProofSub struct { + api *BeaconLightApi + format merkle.ProofFormat + paths []string + encFormat []byte + bitLength int +} + +func (api *BeaconLightApi) SubscribeStateProof(format merkle.ProofFormat, paths []string, first, period int) (*StateProofSub, error) { + if api.stateProofVersion == 0 { + return nil, errors.New("State proof API disabled") + } + encFormat, bitLength := EncodeCompactProofFormat(format) + if api.stateProofVersion >= 2 { + _, err := api.httpGet("/eth/v0/beacon/proof/subscribe/states?format=0x" + hex.EncodeToString(encFormat) + "&first=" + strconv.Itoa(first) + "&period=" + strconv.Itoa(period)) + if err != nil { + return nil, err + } + } + return &StateProofSub{ + api: api, + format: format, + encFormat: encFormat, + bitLength: bitLength, + paths: paths, + }, nil +} + +// verifies state root +func (sub *StateProofSub) Get(stateRoot common.Hash) (merkle.MultiProof, error) { + var ( + proof merkle.MultiProof + rootHash common.Hash + err error + ) + if sub.api.stateProofVersion >= 2 { + proof, err = sub.api.getStateProof(stateRoot.Hex(), sub.format, sub.encFormat, sub.bitLength) + if err == nil { + rootHash = proof.RootHash() + } + } else { + proof, rootHash, err = sub.api.getOldStateProof(stateRoot.Hex(), sub.format, sub.paths) + } + if err != nil { + return merkle.MultiProof{}, err + } + if rootHash != stateRoot { + return merkle.MultiProof{}, errors.New("Received proof has incorrect state root") + } + return proof, nil +} + +func (api *BeaconLightApi) getStateProof(stateId string, format merkle.ProofFormat, encFormat []byte, bitLength int) (merkle.MultiProof, error) { + resp, err := api.httpGet("/eth/v0/beacon/proof/state/" + stateId + "?format=0x" + hex.EncodeToString(encFormat)) + if err != nil { + return merkle.MultiProof{}, err + } + valueCount := (bitLength + 1) / 2 + if len(resp) != valueCount*32 { + return merkle.MultiProof{}, errors.New("Invalid state proof length") + } + values := make(merkle.Values, valueCount) + for i := range values { + copy(values[i][:], resp[i*32:(i+1)*32]) + } + return merkle.MultiProof{Format: format, Values: values}, nil +} + +// getOldStateProof fetches and validates a Merkle proof for the specified parts of +// the recent beacon state 
referenced by stateRoot. If successful the returned +// multiproof has the format specified by expFormat. The state subset specified by +// the list of string keys (paths) should cover the subset specified by expFormat. +func (api *BeaconLightApi) getOldStateProof(stateId string, expFormat merkle.ProofFormat, paths []string) (merkle.MultiProof, common.Hash, error) { + path := "/eth/v0/beacon/proof/state/" + stateId + "?paths=" + paths[0] + for i := 1; i < len(paths); i++ { + path += "&paths=" + paths[i] + } + resp, err := api.httpGet(path) + if err != nil { + return merkle.MultiProof{}, common.Hash{}, err + } + proof, err := parseSSZMultiProof(resp) + if err != nil { + return merkle.MultiProof{}, common.Hash{}, err + } + var values merkle.Values + reader := proof.Reader(nil) + root, ok := merkle.TraverseProof(reader, merkle.NewMultiProofWriter(expFormat, &values, nil)) + if !ok || !reader.Finished() { + return merkle.MultiProof{}, common.Hash{}, errors.New("invalid state proof") + } + return merkle.MultiProof{Format: expFormat, Values: values}, root, nil +} + +// parseSSZMultiProof creates a MultiProof from a serialized format: +// +// 1 byte: always 1 +// 2 bytes: leafCount +// (leafCount-1) * 2 bytes: as the tree is traversed in depth-first, left-to-right +// order, the number of leaves on the left branch of each traversed non-leaf +// subtree are listed here +// leafCount * 32 bytes: leaf values and internal sibling hashes in the same traversal order +// +// Note: this is the format generated by the /eth/v1/beacon/light_client/proof/ +// beacon light client API endpoint which is currently only supported by Lodestar. +// A different format is proposed to be standardized so this function will +// probably be replaced later, see here: +// +// https://github.com/ethereum/consensus-specs/pull/3148 +// https://github.com/ethereum/beacon-APIs/pull/267 +func parseSSZMultiProof(proof []byte) (merkle.MultiProof, error) { + if len(proof) < 3 || proof[0] != 1 { + return merkle.MultiProof{}, errors.New("invalid proof length") + } + var ( + leafCount = int(binary.LittleEndian.Uint16(proof[1:3])) + format = merkle.NewIndexMapFormat() + values = make(merkle.Values, leafCount) + valuesStart = leafCount*2 + 1 + ) + if len(proof) != leafCount*34+1 { + return merkle.MultiProof{}, errors.New("invalid proof length") + } + if err := parseMultiProofFormat(format, 1, proof[3:valuesStart]); err != nil { + return merkle.MultiProof{}, err + } + for i := range values { + copy(values[i][:], proof[valuesStart+i*32:valuesStart+(i+1)*32]) + } + return merkle.MultiProof{Format: format, Values: values}, nil +} + +// parseMultiProofFormat recursively parses the SSZ serialized proof format +func parseMultiProofFormat(indexMap merkle.IndexMapFormat, index uint64, format []byte) error { + indexMap.AddLeaf(index, nil) + if len(format) == 0 { + return nil + } + boundary := int(binary.LittleEndian.Uint16(format[:2])) * 2 + if boundary > len(format) { + return errors.New("invalid proof format") + } + if err := parseMultiProofFormat(indexMap, index*2, format[2:boundary]); err != nil { + return err + } + if err := parseMultiProofFormat(indexMap, index*2+1, format[boundary:]); err != nil { + return err + } + return nil +} + +// EncodeCompactProofFormat encodes a merkle.ProofFormat into a binary compact +// proof format. 
See description here:
+// https://github.com/ChainSafe/consensus-specs/blob/feat/multiproof/ssz/merkle-proofs.md#compact-multiproofs
+func EncodeCompactProofFormat(format merkle.ProofFormat) ([]byte, int) {
+	target := make([]byte, 0, 64)
+	var bitLength int
+	encodeProofFormatSubtree(format, &target, &bitLength)
+	return target, bitLength
+}
+
+// encodeProofFormatSubtree recursively encodes a subtree of a proof format into
+// binary compact format.
+func encodeProofFormatSubtree(format merkle.ProofFormat, target *[]byte, bitLength *int) {
+	bytePtr, bitMask := *bitLength>>3, byte(128)>>(*bitLength&7)
+	*bitLength++
+	if bytePtr == len(*target) {
+		*target = append(*target, byte(0))
+	}
+	if left, right := format.Children(); left != nil {
+		(*target)[bytePtr] += bitMask
+		encodeProofFormatSubtree(left, target, bitLength)
+		encodeProofFormatSubtree(right, target, bitLength)
+	}
+}
+
+// GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint.
+func (api *BeaconLightApi) GetCheckpointData(ctx context.Context, checkpoint common.Hash) (types.Header, sync.CheckpointData, []byte, error) {
+	resp, err := api.httpGet("/eth/v1/beacon/light_client/bootstrap/" + checkpoint.String())
+	if err != nil {
+		return types.Header{}, sync.CheckpointData{}, nil, err
+	}
+
+	// See data structure definition here:
+	// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
+	type bootstrapData struct {
+		Data struct {
+			Header          jsonBeaconHeader  `json:"header"`
+			Committee       syncCommitteeJson `json:"current_sync_committee"`
+			CommitteeBranch merkle.Values     `json:"current_sync_committee_branch"`
+		} `json:"data"`
+	}
+
+	var data bootstrapData
+	if err := json.Unmarshal(resp, &data); err != nil {
+		return types.Header{}, sync.CheckpointData{}, nil, err
+	}
+	header := data.Data.Header.header()
+	if header.Hash() != checkpoint {
+		return types.Header{}, sync.CheckpointData{}, nil, errors.New("invalid checkpoint block header")
+	}
+	committee, ok := data.Data.Committee.serialize()
+	if !ok {
+		return types.Header{}, sync.CheckpointData{}, nil, errors.New("invalid sync committee JSON")
+	}
+	committeeRoot := sync.SerializedCommitteeRoot(committee)
+	expStateRoot, ok := merkle.VerifySingleProof(data.Data.CommitteeBranch, params.BsiSyncCommittee, merkle.Value(committeeRoot))
+	if !ok || expStateRoot != header.StateRoot {
+		return types.Header{}, sync.CheckpointData{}, nil, errors.New("invalid sync committee Merkle proof")
+	}
+	nextCommitteeRoot := common.Hash(data.Data.CommitteeBranch[0])
+	checkpointData := sync.CheckpointData{
+		Checkpoint:     checkpoint,
+		Period:         header.SyncPeriod(),
+		CommitteeRoots: [2]common.Hash{committeeRoot, nextCommitteeRoot},
+	}
+	return header, checkpointData, committee, nil
+}
+
+// GetExecutionPayload fetches the beacon block belonging to the given beacon
+// header, reconstructs the execution block from its execution payload and
+// validates the result against the beacon body root and the payload block hash.
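+//
+// A short usage sketch (illustrative; the header is expected to come from an
+// already validated source such as GetHeader or a verified signed head):
+//
+//	header, err := api.GetHeader(common.Hash{})
+//	if err == nil {
+//		if block, err := api.GetExecutionPayload(header); err == nil {
+//			// block is the reconstructed execution layer block; its hash has
+//			// been checked against the payload's block_hash field
+//			_ = block.NumberU64()
+//		}
+//	}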
+func (api *BeaconLightApi) GetExecutionPayload(header types.Header) (*ctypes.Block, error) { + resp, err := api.httpGet("/eth/v2/beacon/blocks/" + header.Hash().Hex()) + if err != nil { + return nil, err + } + + spec := configs.Mainnet + // note: eth2 api endpoints serve bellatrix.SignedBeaconBlock instead + // also try github.com/protolambda/eth2api for api bindings + var beaconBlock bellatrix.BeaconBlock + myJSONBlockData := resp + var beaconBlockMessage struct { + Data struct { + Message bellatrix.BeaconBlock `json:"message"` + } `json:"data"` + } + if err := json.Unmarshal(myJSONBlockData, &beaconBlockMessage); err != nil { + return nil, fmt.Errorf("invalid block json data: %v", err) + } + beaconBlock = beaconBlockMessage.Data.Message + beaconBodyRoot := common.Hash(beaconBlock.Body.HashTreeRoot(spec, tree.GetHashFn())) + if beaconBodyRoot != header.BodyRoot { + return nil, fmt.Errorf("Beacon body root hash mismatch (expected: %x, got: %x)", header.BodyRoot.Bytes(), beaconBodyRoot.Bytes()) + } + + payload := &beaconBlock.Body.ExecutionPayload + txs := make([]*ctypes.Transaction, len(payload.Transactions)) + for i, opaqueTx := range payload.Transactions { + var tx ctypes.Transaction + if err := tx.UnmarshalBinary(opaqueTx); err != nil { + return nil, fmt.Errorf("failed to parse tx %d: %v", i, err) + } + txs[i] = &tx + } + execBlock := ctypes.NewBlockWithHeader(&ctypes.Header{ + ParentHash: common.Hash(payload.ParentHash), + UncleHash: ctypes.EmptyUncleHash, + Coinbase: common.Address(payload.FeeRecipient), + Root: common.Hash(payload.StateRoot), + TxHash: ctypes.DeriveSha(ctypes.Transactions(txs), trie.NewStackTrie(nil)), + ReceiptHash: common.Hash(payload.ReceiptsRoot), + Bloom: ctypes.Bloom(payload.LogsBloom), + Difficulty: big.NewInt(0), // constant + Number: new(big.Int).SetUint64(uint64(payload.BlockNumber)), + GasLimit: uint64(payload.GasLimit), + GasUsed: uint64(payload.GasUsed), + Time: uint64(payload.Timestamp), + Extra: []byte(payload.ExtraData), + MixDigest: common.Hash(payload.PrevRandao), // reused in merge + Nonce: ctypes.BlockNonce{}, // zero + BaseFee: (*uint256.Int)(&payload.BaseFeePerGas).ToBig(), + }).WithBody(txs, nil) + if execBlock.Hash() != common.Hash(payload.BlockHash) { + return nil, fmt.Errorf("sanity check failed, payload hash does not match.\nPayload: %v\nGeth: %v", payload, execBlock) + } + return execBlock, nil +} + +func decodeHeadEvent(enc []byte) (uint64, common.Hash, error) { + var data struct { + Slot common.Decimal `json:"slot"` + Block common.Hash `json:"block"` + } + if err := json.Unmarshal(enc, &data); err != nil { + return 0, common.Hash{}, err + } + return uint64(data.Slot), data.Block, nil +} + +// StartHeadListener creates an event subscription for heads and signed (optimistic) +// head updates and calls the specified callback functions when they are received. +// The callbacks are also called for the current head and optimistic head at startup. +// They are never called concurrently. 
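+//
+// A minimal subscription sketch (illustrative; the callback bodies are
+// assumptions of the example):
+//
+//	stop := api.StartHeadListener(
+//		func(slot uint64, blockRoot common.Hash) { /* new canonical head */ },
+//		func(head sync.SignedHead) { /* signed head; verify before trusting */ },
+//		func(err error) { /* log or handle stream errors */ },
+//	)
+//	defer stop() // closes the event stream and waits for the handler to finish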
+func (api *BeaconLightApi) StartHeadListener(headFn func(slot uint64, blockRoot common.Hash), signedFn func(head sync.SignedHead), errFn func(err error)) func() {
+	var (
+		stream *eventsource.Stream
+		err    error
+	)
+	streamCh := make(chan struct{})
+	closedCh := make(chan struct{})
+	go func() {
+		// when connected to a Lodestar node the subscription blocks until the
+		// first actual event arrives; therefore we create the subscription in
+		// a separate goroutine while letting the main goroutine sync up to the
+		// current head
+		stream, err = eventsource.Subscribe(api.url+"/eth/v1/events?topics=head&topics=light_client_optimistic_update", "")
+		if err != nil {
+			errFn(fmt.Errorf("Error creating event subscription: %v", err))
+		}
+		// streamCh is closed even on failure so that the handler goroutine and
+		// the returned stop function cannot block forever
+		close(streamCh)
+	}()
+	go func() {
+		defer close(closedCh)
+
+		if head, err := api.GetHeader(common.Hash{}); err == nil {
+			headFn(head.Slot, head.Hash())
+		}
+		if signedHead, err := api.GetOptimisticHeadUpdate(); err == nil {
+			signedFn(signedHead)
+		}
+		<-streamCh
+		if stream == nil {
+			return
+		}
+	loop:
+		for {
+			select {
+			case event, ok := <-stream.Events:
+				if !ok {
+					break loop // labeled break; a plain break would only exit the select
+				}
+				switch event.Event() {
+				case "head":
+					if slot, blockRoot, err := decodeHeadEvent([]byte(event.Data())); err == nil {
+						headFn(slot, blockRoot)
+					} else {
+						errFn(fmt.Errorf("Error decoding head event: %v", err))
+					}
+				case "light_client_optimistic_update":
+					if signedHead, err := decodeOptimisticHeadUpdate([]byte(event.Data())); err == nil {
+						signedFn(signedHead)
+					} else {
+						errFn(fmt.Errorf("Error decoding optimistic update event: %v", err))
+					}
+				default:
+					errFn(fmt.Errorf("Unexpected event: %s", event.Event()))
+				}
+			case err, ok := <-stream.Errors:
+				if !ok {
+					break loop
+				}
+				errFn(err)
+			}
+		}
+	}()
+	return func() {
+		<-streamCh
+		if stream != nil {
+			stream.Close()
+		}
+		<-closedCh
+	}
+}
diff --git a/beacon/light/api/syncer.go b/beacon/light/api/syncer.go
new file mode 100644
index 000000000000..94d72c203802
--- /dev/null
+++ b/beacon/light/api/syncer.go
@@ -0,0 +1,264 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package api
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"github.com/ethereum/go-ethereum/beacon/light/sync"
+	"github.com/ethereum/go-ethereum/beacon/light/types"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/lru"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+const (
+	headPollFrequency = time.Millisecond * 200
+	headPollCount     = 50
+	maxRequest        = 8
+)
+
+// committee update syncing is initiated in each period for each syncPeriodOffsets[i]
+// when slot (period+1)*params.SyncPeriodLength+syncPeriodOffsets[i] has been reached.
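+// For example, with the mainnet value of params.SyncPeriodLength (8192 slots)
+// the offsets below trigger syncs for period p at slots (p+1)*8192-256,
+// (p+1)*8192-16 and (p+1)*8192+64 (worked example, illustrative only).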
+// This ensures that a close-to-best update for each period can be synced and
+// propagated well in advance before the next period begins but later (when it's
+// very unlikely that even a reorg could change the given period) the absolute
+// best update will also be propagated if it's different from the previous one.
var syncPeriodOffsets = []int{-256, -16, 64}

// CommitteeSyncer syncs committee updates and signed heads from BeaconLightApi
// to CommitteeTracker
type CommitteeSyncer struct {
	api *BeaconLightApi

	genesisData         sync.GenesisData
	checkpointPeriod    uint64
	checkpointCommittee []byte
	committeeTracker    *sync.CommitteeTracker

	lastAdvertisedPeriod uint64
	lastPeriodOffset     int

	updateCache    *lru.Cache[uint64, types.LightClientUpdate]
	committeeCache *lru.Cache[uint64, []byte]
	closeCh        chan struct{}
	stopFn         func()
}

// NewCommitteeSyncer creates a new CommitteeSyncer
// Note: genesisData is only needed when light syncing (using GetInitData for bootstrap)
func NewCommitteeSyncer(api *BeaconLightApi, genesisData sync.GenesisData) *CommitteeSyncer {
	return &CommitteeSyncer{
		api:            api,
		genesisData:    genesisData,
		closeCh:        make(chan struct{}),
		updateCache:    lru.NewCache[uint64, types.LightClientUpdate](maxRequest),
		committeeCache: lru.NewCache[uint64, []byte](maxRequest),
	}
}

// Start starts the syncing of the given CommitteeTracker
func (cs *CommitteeSyncer) Start(committeeTracker *sync.CommitteeTracker) {
	cs.committeeTracker = committeeTracker
	committeeTracker.SyncWithPeer(cs, nil)
	stopFn := cs.api.StartHeadListener(
		func(slot uint64, blockRoot common.Hash) {
			cs.updateCache.Purge()
			cs.committeeCache.Purge()
			cs.syncUpdates(slot, false)
		}, func(signedHead sync.SignedHead) {
			if cs.committeeTracker.AddSignedHeads(cs, []sync.SignedHead{signedHead}) != nil {
				cs.syncUpdates(signedHead.Header.Slot, true)
				if err := cs.committeeTracker.AddSignedHeads(cs, []sync.SignedHead{signedHead}); err != nil {
					log.Error("Error adding new signed head", "error", err)
				}
			}
		}, func(err error) {
			log.Warn("Head event stream error", "err", err)
		})
	cs.stopFn = stopFn
}

// Stop stops the syncing process
func (cs *CommitteeSyncer) Stop() {
	cs.committeeTracker.Disconnect(cs)
	close(cs.closeCh)
	if cs.stopFn != nil {
		cs.stopFn()
	}
}

// syncUpdates checks whether one of the syncPeriodOffsets for the latest period
// has been reached by the current head and initiates an update sync if necessary.
// If retry is true then syncing is tried again even if no new syncing offset
// point has been reached.
func (cs *CommitteeSyncer) syncUpdates(slot uint64, retry bool) {
	nextPeriod := types.PeriodOfSlot(slot + uint64(-syncPeriodOffsets[0]))
	if nextPeriod == 0 {
		return
	}
	var (
		nextPeriodStart = types.PeriodStart(nextPeriod)
		lastPeriod      = nextPeriod - 1
		offset          = 1
	)
	for offset < len(syncPeriodOffsets) && slot >= nextPeriodStart+uint64(syncPeriodOffsets[offset]) {
		offset++
	}
	if (retry || lastPeriod != cs.lastAdvertisedPeriod || offset != cs.lastPeriodOffset) && cs.syncUpdatesUntil(lastPeriod) {
		cs.lastAdvertisedPeriod, cs.lastPeriodOffset = lastPeriod, offset
	}
}

// syncUpdatesUntil queries committee updates that the tracker does not have or
// might have improved since the last query and advertises them to the tracker.
// The tracker can then fetch the actual updates and committees via GetBestCommitteeProofs.
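//
// The advertisement is a types.UpdateInfo whose Scores cover the periods just
// before AfterLastPeriod, oldest first; for example (illustrative values):
//
//	updateInfo := &types.UpdateInfo{
//		AfterLastPeriod: 101,
//		Scores:          types.UpdateScores{score98, score99, score100},
//	}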
+func (cs *CommitteeSyncer) syncUpdatesUntil(lastPeriod uint64) bool {
	ptr := int(types.MaxUpdateInfoLength)
	if lastPeriod+1 < uint64(ptr) {
		ptr = int(lastPeriod + 1)
	}
	var (
		updateInfo = &types.UpdateInfo{
			AfterLastPeriod: lastPeriod + 1,
			Scores:          make(types.UpdateScores, ptr),
		}
		localNextPeriod = cs.committeeTracker.NextPeriod()
		period          = lastPeriod
	)
	for {
		remoteUpdate, err := cs.getBestUpdate(period)
		if err != nil {
			break
		}
		ptr--
		updateInfo.Scores[ptr] = remoteUpdate.Score()
		if ptr == 0 || period == 0 {
			break
		}
		if period < localNextPeriod {
			localUpdate := cs.committeeTracker.GetBestUpdate(period)
			if localUpdate == nil || localUpdate.NextSyncCommitteeRoot == remoteUpdate.NextSyncCommitteeRoot {
				break
			}
		}
		period--
	}
	updateInfo.Scores = updateInfo.Scores[ptr:]
	log.Info("Fetched committee updates", "localNext", localNextPeriod, "count", len(updateInfo.Scores))
	if len(updateInfo.Scores) == 0 {
		log.Error("Could not fetch last committee update")
		return false
	}
	select {
	case <-cs.committeeTracker.SyncWithPeer(cs, updateInfo):
		localNextPeriod = cs.committeeTracker.NextPeriod()
		if localNextPeriod <= lastPeriod {
			log.Error("Failed to sync all API committee updates", "local next period", localNextPeriod, "remote next period", lastPeriod+1)
		}
	case <-cs.closeCh:
		return false
	}
	return true
}

// GetBestCommitteeProofs fetches updates and committees for the specified periods
func (cs *CommitteeSyncer) GetBestCommitteeProofs(ctx context.Context, req types.CommitteeRequest) (types.CommitteeReply, error) {
	reply := types.CommitteeReply{
		Updates:    make([]types.LightClientUpdate, len(req.UpdatePeriods)),
		Committees: make([][]byte, len(req.CommitteePeriods)),
	}
	var err error
	for i, period := range req.UpdatePeriods {
		if reply.Updates[i], err = cs.getBestUpdate(period); err != nil {
			return types.CommitteeReply{}, err
		}
	}
	for i, period := range req.CommitteePeriods {
		if reply.Committees[i], err = cs.getCommittee(period); err != nil {
			return types.CommitteeReply{}, err
		}
	}
	return reply, nil
}

// CanRequest returns true if a request for the given amount of items can be processed
func (cs *CommitteeSyncer) CanRequest(updateCount, committeeCount int) bool {
	return updateCount <= maxRequest && committeeCount <= maxRequest
}

// getBestUpdate returns the best update for the given period
func (cs *CommitteeSyncer) getBestUpdate(period uint64) (types.LightClientUpdate, error) {
	if c, ok := cs.updateCache.Get(period); ok {
		return c, nil
	}
	update, _, err := cs.getBestUpdateAndCommittee(period)
	return update, err
}

// getCommittee returns the committee for the given period
// Note: cannot return the committee of the altair fork period; it is always the
// same as the committee of the next period
func (cs *CommitteeSyncer) getCommittee(period uint64) ([]byte, error) {
	if period == 0 {
		return nil, errors.New("no committee available for period 0")
	}
	if cs.checkpointCommittee != nil && period == cs.checkpointPeriod {
		return cs.checkpointCommittee, nil
	}
	if c, ok := cs.committeeCache.Get(period); ok {
		return c, nil
	}
	_, committee, err := cs.getBestUpdateAndCommittee(period - 1)
	return committee, err
}

// getBestUpdateAndCommittee fetches the best update for period and the
// corresponding committee for period+1 and caches the results until a new head
// is received by the head event listener
func (cs *CommitteeSyncer) getBestUpdateAndCommittee(period uint64)
(types.LightClientUpdate, []byte, error) { + update, committee, err := cs.api.GetBestUpdateAndCommittee(period) + if err != nil { + return types.LightClientUpdate{}, nil, err + } + cs.updateCache.Add(period, update) + cs.committeeCache.Add(period+1, committee) + return update, committee, nil +} + +// GetInitData fetches the bootstrap data and returns LightClientInitData (the +// corresponding committee is stored so that a subsequent GetBestCommitteeProofs +// can return it when requested) +func (cs *CommitteeSyncer) GetInitData(ctx context.Context, checkpoint common.Hash) (types.Header, sync.LightClientInitData, error) { + if cs.genesisData == (sync.GenesisData{}) { + return types.Header{}, sync.LightClientInitData{}, errors.New("missing genesis data") + } + header, checkpointData, committee, err := cs.api.GetCheckpointData(ctx, checkpoint) + if err != nil { + return types.Header{}, sync.LightClientInitData{}, err + } + cs.checkpointPeriod, cs.checkpointCommittee = checkpointData.Period, committee + return header, sync.LightClientInitData{GenesisData: cs.genesisData, CheckpointData: checkpointData}, nil +} + +// ProtocolError is called by the tracker when the BeaconLightApi has provided +// wrong committee updates or signed heads +func (cs *CommitteeSyncer) ProtocolError(description string) { + log.Error("Beacon node API data source delivered wrong reply", "error", description) +} diff --git a/beacon/light/sync/checkpoint.go b/beacon/light/sync/checkpoint.go new file mode 100644 index 000000000000..7ba98c6068da --- /dev/null +++ b/beacon/light/sync/checkpoint.go @@ -0,0 +1,234 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package sync + +import ( + "context" + "math" + "sync" + "time" + + "github.com/ethereum/go-ethereum/beacon/light/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +// CheckpointData contains known committee roots based on a weak subjectivity checkpoint +// +// Note: this structure is the result of a successfully validated light client +// bootstrap data structure which proves CommitteeRoot and NextCommitteeRoot of +// the checkpoint header (committee roots for Period and Period+1): +// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientbootstrap +type CheckpointData struct { + Checkpoint common.Hash // block root of the weak subjectivity checkpoint + Period uint64 // period in which the checkpoint is located + CommitteeRoots [2]common.Hash // committee roots fixed by the checkpoint (CommitteeRoots[i] belongs to period Period+i) +} + +// LightClientInitData contains light sync initialization data based on a +// weak subjectivity checkpoint +type LightClientInitData struct { + GenesisData + CheckpointData +} + +// ctInitBackend retrieves light sync initialization data based on a weak subjectivity checkpoint hash +type ctInitBackend interface { + GetInitData(ctx context.Context, checkpoint common.Hash) (types.Header, LightClientInitData, error) +} + +// WeakSubjectivityCheckpoint implements Constraints in a way that it fixes +// the committee belonging to the checkpoint and allows forward extending the +// committee chain indefinitely. If a parent constraint is specified then it is +// applied for committee periods older than the checkpoint period, also allowing +// backward syncing the committees. +// Note that light clients typically do not need to backward sync, this feature is +// intended for nodes serving other clients that might have an earlier checkpoint. +type WeakSubjectivityCheckpoint struct { + lock sync.RWMutex + + parent Constraints // constraints applied to pre-checkpoint history (no old committees synced if nil) + db ethdb.KeyValueStore + initData LightClientInitData + initialized bool + initTriggerCh, parentInitCh, stopCh chan struct{} + initCallback func(GenesisData) + updateCallback func() +} + +// NewWeakSubjectivityCheckpoint creates a WeakSubjectivityCheckpoint that either +// initializes itself from the specified ctInitBackend based on the given +// checkpoint or from the database if the same checkpoint has been fetched before. 
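+//
+// A minimal construction sketch (illustrative; db, backend and checkpoint are
+// assumed to be supplied by the caller):
+//
+//	if wsc := NewWeakSubjectivityCheckpoint(db, backend, checkpoint, nil); wsc != nil {
+//		wsc.TriggerFetch() // ask the backend for the bootstrap data when ready
+//	}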
+func NewWeakSubjectivityCheckpoint(db ethdb.KeyValueStore, backend ctInitBackend, checkpoint common.Hash, parent Constraints) *WeakSubjectivityCheckpoint {
	wsc := &WeakSubjectivityCheckpoint{
		parent:        parent,
		db:            db,
		initTriggerCh: make(chan struct{}, 1),
		stopCh:        make(chan struct{}),
	}
	if parent != nil {
		wsc.parentInitCh = make(chan struct{})
	}

	var storedInitData bool
	if enc, err := db.Get(initDataKey); err == nil {
		var initData LightClientInitData
		if err := rlp.DecodeBytes(enc, &initData); err == nil {
			if initData.Checkpoint == checkpoint || initData.Checkpoint == (common.Hash{}) {
				log.Info("Beacon chain initialized with stored checkpoint", "checkpoint", initData.Checkpoint)
				wsc.initData = initData
				storedInitData = true
			}
		} else {
			log.Error("Error decoding stored beacon checkpoint", "error", err)
		}
	}
	if !storedInitData && checkpoint == (common.Hash{}) {
		return nil
	}
	go func() {
		var initData LightClientInitData
		if !storedInitData {
		loop:
			for {
				select {
				case <-wsc.stopCh:
					return
				case <-wsc.initTriggerCh:
					ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
					log.Info("Requesting beacon init data", "checkpoint", checkpoint)
					var (
						header types.Header
						err    error
					)
					header, initData, err = backend.GetInitData(ctx, checkpoint)
					cancel()
					if err == nil {
						log.Info("Successfully fetched checkpoint data", "checkpoint", checkpoint, "slot", header.Slot)
						break loop
					} else {
						log.Warn("Failed to retrieve beacon init data", "error", err)
					}
				}
			}
		}
		if wsc.parentInitCh != nil {
			select {
			case <-wsc.stopCh:
				return
			case <-wsc.parentInitCh:
			}
		}
		log.Info("Initialized beacon chain", "checkpoint", checkpoint, "period", initData.CheckpointData.Period)
		wsc.init(initData, !storedInitData)
	}()
	return wsc
}

// init initializes the checkpoint with the given init data
func (wsc *WeakSubjectivityCheckpoint) init(initData LightClientInitData, store bool) {
	wsc.lock.Lock()
	if store {
		if enc, err := rlp.EncodeToBytes(&initData); err == nil {
			wsc.db.Put(initDataKey, enc)
		} else {
			log.Error("Error encoding initData", "error", err)
		}
	}
	wsc.initData, wsc.initialized = initData, true
	updateCallback, initCallback := wsc.updateCallback, wsc.initCallback
	wsc.lock.Unlock()
	if initCallback != nil {
		initCallback(initData.GenesisData)
	}
	updateCallback()
}

// SyncRange implements Constraints
func (wsc *WeakSubjectivityCheckpoint) SyncRange() (syncRange types.UpdateRange, lastFixed uint64) {
	wsc.lock.RLock()
	defer wsc.lock.RUnlock()

	if !wsc.initialized {
		return
	}
	if wsc.parent != nil {
		syncRange, lastFixed = wsc.parent.SyncRange()
	}
	if lastFixed+1 < wsc.initData.Period {
		syncRange.First = wsc.initData.Period
	}
	lf := wsc.initData.Period + uint64(len(wsc.initData.CommitteeRoots)-1)
	if lf > lastFixed {
		lastFixed = lf
	}
	syncRange.AfterLast = math.MaxUint64 // no constraints on valid committee updates after the checkpoint
	return
}

// CommitteeRoot implements Constraints
func (wsc *WeakSubjectivityCheckpoint) CommitteeRoot(period uint64) (root common.Hash, matchAll bool) {
	wsc.lock.RLock()
	defer wsc.lock.RUnlock()

	if !wsc.initialized {
		return common.Hash{}, false
	}
	switch {
	case period < wsc.initData.Period:
		if wsc.parent != nil {
			return wsc.parent.CommitteeRoot(period)
		}
		return common.Hash{}, false
	case period >= wsc.initData.Period && period <
wsc.initData.Period+uint64(len(wsc.initData.CommitteeRoots)): + return wsc.initData.CommitteeRoots[int(period-wsc.initData.Period)], false + default: + return common.Hash{}, true // match all, no constraints on valid committee updates after the checkpoint + } +} + +// SetCallbacks implements Constraints +func (wsc *WeakSubjectivityCheckpoint) SetCallbacks(initCallback func(GenesisData), updateCallback func()) { + wsc.lock.Lock() + if wsc.initialized { + wsc.lock.Unlock() + initCallback(wsc.initData.GenesisData) + } else { + wsc.initCallback = initCallback + wsc.updateCallback = updateCallback + wsc.lock.Unlock() + } + if wsc.parent != nil { + wsc.parent.SetCallbacks(func(GenesisData) { close(wsc.parentInitCh) }, updateCallback) + } +} + +// TriggerFetch triggers fetching the init data from the backend +func (wsc *WeakSubjectivityCheckpoint) TriggerFetch() { + select { + case wsc.initTriggerCh <- struct{}{}: + default: + } +} + +// Stop should be called after ODR backend shutdown to ensure that init request +// does not get stuck +func (wsc *WeakSubjectivityCheckpoint) Stop() { + close(wsc.stopCh) +} diff --git a/beacon/light/sync/committee_signature.go b/beacon/light/sync/committee_signature.go new file mode 100644 index 000000000000..0bd981bfd0cb --- /dev/null +++ b/beacon/light/sync/committee_signature.go @@ -0,0 +1,290 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package sync + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math/rand" + "os" + "sort" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/beacon/light/types" + "github.com/ethereum/go-ethereum/beacon/merkle" + "github.com/ethereum/go-ethereum/beacon/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/minio/sha256-simd" + bls "github.com/protolambda/bls12-381-util" +) + +// syncCommittee holds either a blsSyncCommittee or a fake dummySyncCommittee used for testing +type syncCommittee interface{} + +// committeeSigVerifier verifies sync committee signatures (either proper BLS +// signatures or fake signatures used for testing) +type committeeSigVerifier interface { + deserializeSyncCommittee(enc []byte) syncCommittee + verifySignature(committee syncCommittee, signedRoot common.Hash, bitmask, signature []byte) bool +} + +// blsSyncCommittee is a set of sync committee signer pubkeys +// +// See data structure definition here: +// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#syncaggregate +type blsSyncCommittee struct { + keys [params.SyncCommitteeSize]*bls.Pubkey + aggregate *bls.Pubkey +} + +// BLSVerifier implements committeeSigVerifier +type BLSVerifier struct{} + +// deserializeSyncCommittee implements committeeSigVerifier +func (BLSVerifier) deserializeSyncCommittee(enc []byte) syncCommittee { + if len(enc) != SerializedCommitteeSize { + log.Error("Wrong input size for deserializeSyncCommittee", "expected", SerializedCommitteeSize, "got", len(enc)) + return nil + } + sc := new(blsSyncCommittee) + for i := 0; i <= params.SyncCommitteeSize; i++ { + pk := new(bls.Pubkey) + var sk [params.BlsPubkeySize]byte + copy(sk[:], enc[i*params.BlsPubkeySize:(i+1)*params.BlsPubkeySize]) + if err := pk.Deserialize(&sk); err != nil { + log.Error("bls.Pubkey.Deserialize failed", "error", err, "data", sk) + return nil + } + if i < params.SyncCommitteeSize { + sc.keys[i] = pk + } else { + sc.aggregate = pk + } + } + return sc +} + +// verifySignature implements committeeSigVerifier +func (BLSVerifier) verifySignature(committee syncCommittee, signingRoot common.Hash, bitmask, signature []byte) bool { + if len(signature) != params.BlsSignatureSize || len(bitmask) != params.SyncCommitteeSize/8 { + return false + } + var ( + sig bls.Signature + sigBytes [params.BlsSignatureSize]byte + signerKeys [params.SyncCommitteeSize]*bls.Pubkey + signerCount int + blsCommittee = committee.(*blsSyncCommittee) + ) + copy(sigBytes[:], signature) + if err := sig.Deserialize(&sigBytes); err != nil { + return false + } + for i, key := range blsCommittee.keys { + if bitmask[i/8]&(byte(1)<<(i%8)) != 0 { + signerKeys[signerCount] = key + signerCount++ + } + } + return bls.FastAggregateVerify(signerKeys[:signerCount], signingRoot[:], &sig) +} + +type dummySyncCommittee [32]byte + +// dummyVerifier implements committeeSigVerifier +type dummyVerifier struct{} + +// deserializeSyncCommittee implements committeeSigVerifier +func (dummyVerifier) deserializeSyncCommittee(enc []byte) syncCommittee { + if len(enc) != SerializedCommitteeSize { + log.Error("Wrong input size for deserializeSyncCommittee", "expected", SerializedCommitteeSize, "got", len(enc)) + return nil + } + var sc dummySyncCommittee + 
copy(sc[:], enc[:32]) + return sc +} + +// verifySignature implements committeeSigVerifier +func (dummyVerifier) verifySignature(committee syncCommittee, signingRoot common.Hash, bitmask, signature []byte) bool { + return bytes.Equal(signature, makeDummySignature(committee.(dummySyncCommittee), signingRoot, bitmask)) +} + +func randomDummySyncCommittee() dummySyncCommittee { + var sc dummySyncCommittee + rand.Read(sc[:]) + return sc +} + +func serializeDummySyncCommittee(sc dummySyncCommittee) []byte { + enc := make([]byte, SerializedCommitteeSize) + copy(enc[:32], sc[:]) + return enc +} + +func makeDummySignature(committee dummySyncCommittee, signingRoot common.Hash, bitmask []byte) []byte { + sig := make([]byte, params.BlsSignatureSize) + for i, b := range committee[:] { + sig[i] = b ^ signingRoot[i] + } + copy(sig[32:], bitmask) + return sig +} + +// Fork describes a single beacon chain fork and also stores the calculated +// signature domain used after this fork. +type Fork struct { + Epoch uint64 // epoch when given fork version is activated + Name string // name of the fork in the chain config (config.yaml) file + // See fork version definition here: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types + Version []byte // fork version + domain merkle.Value // calculated by computeDomain, based on fork version and genesis validators root +} + +// Forks is the list of all beacon chain forks in the chain configuration. +type Forks []Fork + +// domain returns the signature domain for the given epoch (assumes that domains +// have already been calculated). +func (bf Forks) domain(epoch uint64) merkle.Value { + for i := len(bf) - 1; i >= 0; i-- { + if epoch >= bf[i].Epoch { + return bf[i].domain + } + } + log.Error("Fork domain unknown", "epoch", epoch) + return merkle.Value{} +} + +// computeDomain returns the signature domain based on the given fork version +// and genesis validator set root +func computeDomain(forkVersion []byte, genesisValidatorsRoot common.Hash) merkle.Value { + var ( + hasher = sha256.New() + forkVersion32 merkle.Value + forkDataRoot merkle.Value + domain merkle.Value + ) + copy(forkVersion32[:len(forkVersion)], forkVersion) + hasher.Write(forkVersion32[:]) + hasher.Write(genesisValidatorsRoot[:]) + hasher.Sum(forkDataRoot[:0]) + domain[0] = 7 + copy(domain[4:], forkDataRoot[:28]) + return domain +} + +// computeDomains calculates and stores signature domains for each fork in the list. +func (bf Forks) computeDomains(genesisValidatorsRoot common.Hash) { + for i := range bf { + bf[i].domain = computeDomain(bf[i].Version, genesisValidatorsRoot) + } +} + +// signingRoot calculates the signing root of the given header. +func (bf Forks) signingRoot(header types.Header) common.Hash { + var ( + signingRoot common.Hash + headerHash = header.Hash() + hasher = sha256.New() + domain = bf.domain(header.Epoch()) + ) + hasher.Write(headerHash[:]) + hasher.Write(domain[:]) + hasher.Sum(signingRoot[:0]) + return signingRoot +} + +func (f Forks) Len() int { return len(f) } +func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f Forks) Less(i, j int) bool { return f[i].Epoch < f[j].Epoch } + +// fieldValue checks if the given fork parameter field is present in the given line +// and if it is then returns the field value and the name of the fork it belongs to. 
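+//
+// For example (worked example on an illustrative config line):
+//
+//	name, value, ok := fieldValue("ALTAIR_FORK_VERSION: 0x01000000 # comment", "_FORK_VERSION:")
+//	// name == "ALTAIR", value == "0x01000000", ok == true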
+func fieldValue(line, field string) (name, value string, ok bool) { + if pos := strings.Index(line, field); pos >= 0 { + cutFrom := strings.Index(line, "#") // cut in-line comments + if cutFrom < 0 { + cutFrom = len(line) + } + return line[:pos], strings.TrimSpace(line[pos+len(field) : cutFrom]), true + } + return "", "", false +} + +// LoadForks parses the beacon chain configuration file (config.yaml) and extracts +// the list of forks +func LoadForks(fileName string) (Forks, error) { + file, err := os.Open(fileName) + if err != nil { + return nil, fmt.Errorf("Error opening beacon chain config file: %v", err) + } + defer file.Close() + var ( + forks Forks + forkVersions = make(map[string][]byte) + forkEpochs = make(map[string]uint64) + reader = bufio.NewReader(file) + ) + forkEpochs["GENESIS"] = 0 + + for { + l, _, err := reader.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return nil, fmt.Errorf("Error reading beacon chain config file: %v", err) + } + line := string(l) + if name, value, ok := fieldValue(line, "_FORK_VERSION:"); ok { + if v, err := hexutil.Decode(value); err == nil { + forkVersions[name] = v + } else { + return nil, fmt.Errorf("Error decoding hex fork id \"%s\" in beacon chain config file: %v", value, err) + } + } + if name, value, ok := fieldValue(line, "_FORK_EPOCH:"); ok { + if v, err := strconv.ParseUint(value, 10, 64); err == nil { + forkEpochs[name] = v + } else { + return nil, fmt.Errorf("Error parsing epoch number \"%s\" in beacon chain config file: %v", value, err) + } + } + } + + for name, epoch := range forkEpochs { + if version, ok := forkVersions[name]; ok { + delete(forkVersions, name) + forks = append(forks, Fork{Epoch: epoch, Name: name, Version: version}) + } else { + return nil, fmt.Errorf("Fork id missing for \"%s\" in beacon chain config file", name) + } + } + + for name := range forkVersions { + return nil, fmt.Errorf("Epoch number missing for fork \"%s\" in beacon chain config file", name) + } + sort.Sort(forks) + return forks, nil +} diff --git a/beacon/light/sync/committee_tracker.go b/beacon/light/sync/committee_tracker.go new file mode 100644 index 000000000000..aeed489d1b6b --- /dev/null +++ b/beacon/light/sync/committee_tracker.go @@ -0,0 +1,513 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+
+package sync
+
+import (
+	"encoding/binary"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/beacon/light/types"
+	"github.com/ethereum/go-ethereum/beacon/params"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/lru"
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/minio/sha256-simd"
+)
+
+var (
+	initDataKey      = []byte("ct.init") // RLP(LightClientInitData)
+	bestUpdateKey    = []byte("ct.bu-")  // bigEndian64(syncPeriod) -> RLP(types.LightClientUpdate) (nextCommittee only referenced by root hash)
+	syncCommitteeKey = []byte("ct.sc-")  // bigEndian64(syncPeriod) + committee root hash -> serialized committee
+)
+
+const SerializedCommitteeSize = (params.SyncCommitteeSize + 1) * params.BlsPubkeySize
+
+// ChainConfig contains built-in chain configuration presets for certain networks
+type ChainConfig struct {
+	GenesisData
+	Forks      Forks
+	Checkpoint common.Hash
+}
+
+// GenesisData is required for signature verification and is set by the
+// CommitteeTracker.Init function.
+type GenesisData struct {
+	GenesisTime           uint64      // unix time (in seconds) of slot 0
+	GenesisValidatorsRoot common.Hash // root hash of the genesis validator set, used for signature domain calculation
+}
+
+// Constraints defines constraints on the synced update chain. These constraints
+// include the GenesisData, a range of periods (first <= period < afterFixed)
+// where committee roots are fixed and another "free" range (afterFixed <= period < afterLast)
+// where committee roots are determined by the best known update chain.
+// An implementation of Constraints should call initCallback to pass
+// GenesisData whenever it is available (either during SetCallbacks or later).
+// If the constraints are changed then it should call updateCallback.
+//
+// Note: this interface can be used either for light syncing mode (in which case
+// only the checkpoint is fixed and any valid update chain can be synced starting
+// from there) or full syncing light service mode (in which case a full beacon
+// header chain is synced based on the externally driven consensus and the update
+// chain is fully restricted based on that).
+type Constraints interface {
+	SyncRange() (syncRange types.UpdateRange, lastFixed uint64)
+	CommitteeRoot(period uint64) (root common.Hash, matchAll bool) // matchAll is true in the free range where any committee root matches the constraints
+	SetCallbacks(initCallback func(GenesisData), updateCallback func())
+}
+
+// CommitteeTracker maintains a chain of sync committee updates and a small
+// set of best known signed heads. It is used in all client configurations
+// operating on a beacon chain. It can sync its update chain and receive signed
+// heads from either an ODR or beacon node API backend and propagate/serve this
+// data to subscribed peers. Received signed heads are validated based on the
+// known sync committee chain and added to the local set if valid or placed in a
+// deferred queue if the committees are not synced up to the period of the new
+// head yet.
+// Sync committee chain is either initialized from a weak subjectivity checkpoint
+// or controlled by a BeaconChain that is driven by a trusted source (beacon node API).
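+//
+// A rough wiring sketch (illustrative; the database, fork list, constraint and
+// threshold values are assumptions of the example, 342 being roughly two
+// thirds of the 512-member committee):
+//
+//	tracker := NewCommitteeTracker(db, forks, wsc, 342, true, BLSVerifier{},
+//		mclock.System{}, func() int64 { return time.Now().UnixNano() })
+//	<-tracker.GetInitChannel() // closed once genesis data is known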
+type CommitteeTracker struct { + lock sync.RWMutex + db ethdb.KeyValueStore + sigVerifier committeeSigVerifier + clock mclock.Clock + bestUpdateCache *lru.Cache[uint64, *types.LightClientUpdate] + serializedCommitteeCache *lru.Cache[string, []byte] + syncCommitteeCache *lru.Cache[string, syncCommittee] + committeeRootCache *lru.Cache[uint64, common.Hash] + unixNano func() int64 + + forks Forks + constraints Constraints + signerThreshold int + minimumUpdateScore types.UpdateScore + enforceTime bool + + genesisInit bool // genesis data initialized (signature check possible) + genesisTime uint64 // unix time (seconds) + chainInit bool // update and committee chain initialized + // if chain is initialized then best updates for periods between firstPeriod to nextPeriod-1 + // and committees for periods between firstPeriod to nextPeriod are available + firstPeriod, nextPeriod uint64 + + updateInfo *types.UpdateInfo + connected map[ctServer]*ctPeerInfo + requestQueue []*ctPeerInfo + broadcastTo, advertisedTo map[ctClient]struct{} + advertiseScheduled bool + triggerCh, initCh, stopCh chan struct{} + acceptedList headList + + headSubs []func(types.Header) +} + +// NewCommitteeTracker creates a new CommitteeTracker +func NewCommitteeTracker(db ethdb.KeyValueStore, forks Forks, constraints Constraints, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeTracker { + s := &CommitteeTracker{ + bestUpdateCache: lru.NewCache[uint64, *types.LightClientUpdate](1000), + serializedCommitteeCache: lru.NewCache[string, []byte](100), + syncCommitteeCache: lru.NewCache[string, syncCommittee](100), + committeeRootCache: lru.NewCache[uint64, common.Hash](100), + db: db, + sigVerifier: sigVerifier, + clock: clock, + unixNano: unixNano, + forks: forks, + constraints: constraints, + signerThreshold: signerThreshold, + enforceTime: enforceTime, + minimumUpdateScore: types.UpdateScore{ + SignerCount: uint32(signerThreshold), + SubPeriodIndex: params.SyncPeriodLength / 16, + }, + connected: make(map[ctServer]*ctPeerInfo), + broadcastTo: make(map[ctClient]struct{}), + triggerCh: make(chan struct{}, 1), + initCh: make(chan struct{}), + stopCh: make(chan struct{}), + acceptedList: newHeadList(4), + } + var ( + iter = s.db.NewIterator(bestUpdateKey, nil) + kl = len(bestUpdateKey) + ) + // iterate through them all for simplicity; at most a few hundred items + for iter.Next() { + period := binary.BigEndian.Uint64(iter.Key()[kl : kl+8]) + if !s.chainInit { + s.chainInit = true + s.firstPeriod = period + } else if s.nextPeriod != period { + break // continuity guaranteed + } + s.nextPeriod = period + 1 + } + iter.Release() + constraints.SetCallbacks(s.Init, s.EnforceForksAndConstraints) + return s +} + +// Init initializes the tracker with the given GenesisData and starts the update +// syncing process. +// Note that Init may be called either at startup or later if it has to be +// fetched from the network based on a checkpoint hash. 
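+//
+// A hypothetical Constraints implementation that has fetched the genesis
+// fields from a trusted source would call it like this (the values shown are
+// placeholders, not real chain parameters):
+//
+//	ct.Init(GenesisData{
+//		GenesisTime:           1606824023, // unix time (seconds) of slot 0
+//		GenesisValidatorsRoot: common.HexToHash("0x..."),
+//	})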
+func (s *CommitteeTracker) Init(genesisData GenesisData) {
+	if s.genesisInit {
+		log.Error("CommitteeTracker already initialized")
+		return
+	}
+	s.lock.Lock()
+	s.forks.computeDomains(genesisData.GenesisValidatorsRoot)
+	s.genesisTime = genesisData.GenesisTime
+	s.genesisInit = true
+	s.enforceForksAndConstraints()
+	s.lock.Unlock()
+	close(s.initCh)
+
+	go s.syncLoop()
+}
+
+// GetInitChannel returns a channel that gets closed when the tracker has been initialized
+func (s *CommitteeTracker) GetInitChannel() chan struct{} {
+	return s.initCh
+}
+
+const (
+	sciSuccess = iota
+	sciNeedCommittee
+	sciWrongUpdate
+	sciUnexpectedError
+)
+
+// insertUpdate verifies the update and stores it in the update chain if possible.
+// The serialized version of the next committee should also be supplied if it is
+// not already stored in the database.
+func (s *CommitteeTracker) insertUpdate(update *types.LightClientUpdate, nextCommittee []byte) int {
+	var (
+		period   = update.Header.SyncPeriod()
+		rollback bool
+	)
+	if !s.verifyUpdate(update) {
+		return sciWrongUpdate
+	}
+
+	if !s.chainInit || period > s.nextPeriod || period+1 < s.firstPeriod {
+		log.Error("Unexpected insertUpdate", "period", period, "firstPeriod", s.firstPeriod, "nextPeriod", s.nextPeriod)
+		return sciUnexpectedError
+	}
+	if period+1 == s.firstPeriod {
+		if update.NextSyncCommitteeRoot != s.getSyncCommitteeRoot(period+1) {
+			return sciWrongUpdate
+		}
+	} else if period < s.nextPeriod {
+		// update should already exist
+		oldUpdate := s.GetBestUpdate(period)
+		if oldUpdate == nil {
+			log.Error("Update expected to exist but missing from db")
+			return sciUnexpectedError
+		}
+		if !update.Score().BetterThan(oldUpdate.Score()) {
+			// not better than the existing one, nothing to do
+			return sciSuccess
+		}
+		rollback = update.NextSyncCommitteeRoot != oldUpdate.NextSyncCommitteeRoot
+	}
+
+	if (period == s.nextPeriod || rollback) && s.GetSerializedSyncCommittee(period+1, update.NextSyncCommitteeRoot) == nil {
+		// committee is not yet stored in db
+		if nextCommittee == nil {
+			return sciNeedCommittee
+		}
+		if SerializedCommitteeRoot(nextCommittee) != update.NextSyncCommitteeRoot {
+			return sciWrongUpdate
+		}
+		s.storeSerializedSyncCommittee(period+1, update.NextSyncCommitteeRoot, nextCommittee)
+	}
+
+	if rollback {
+		for p := s.nextPeriod - 1; p >= period; p-- {
+			s.deleteBestUpdate(p)
+		}
+		s.nextPeriod = period
+	}
+	s.storeBestUpdate(update)
+	if period == s.nextPeriod {
+		s.nextPeriod++
+	}
+	if period+1 == s.firstPeriod {
+		s.firstPeriod--
+	}
+	log.Info("Synced new committee update", "period", period, "nextCommitteeRoot", update.NextSyncCommitteeRoot)
+	return sciSuccess
+}
+
+// verifyUpdate checks whether the header signature is correct and the update
+// fits into the specified constraints (assumes that the update has been
+// successfully validated previously)
+func (s *CommitteeTracker) verifyUpdate(update *types.LightClientUpdate) bool {
+	if !s.checkConstraints(update) {
+		return false
+	}
+	ok, age := s.verifySignature(SignedHead{Header: update.Header, Signature: update.SyncCommitteeSignature, BitMask: update.SyncCommitteeBits, SignatureSlot: update.Header.Slot})
+	if age < 0 {
+		log.Warn("Future committee update received", "age", age)
+	}
+	return ok
+}
+
+// getBestUpdateKey returns the database key for the canonical sync committee
+// update at the given period
+func getBestUpdateKey(period uint64) []byte {
+	var (
+		kl  = len(bestUpdateKey)
+		key = make([]byte, kl+8)
+	)
+	copy(key[:kl], bestUpdateKey)
+	binary.BigEndian.PutUint64(key[kl:], period)
+	return key
+}
+
+// GetBestUpdate returns the best known canonical sync committee update at the given period
+func (s *CommitteeTracker) GetBestUpdate(period uint64) *types.LightClientUpdate {
+	if update, ok := s.bestUpdateCache.Get(period); ok {
+		if update != nil {
+			if update.Header.SyncPeriod() != period {
+				log.Error("Best update from wrong period found in cache")
+			}
+		}
+		return update
+	}
+	if updateEnc, err := s.db.Get(getBestUpdateKey(period)); err == nil {
+		update := new(types.LightClientUpdate)
+		if err := rlp.DecodeBytes(updateEnc, update); err == nil {
+			update.Score() // ensure that canonical updates in memory always have their score calculated and therefore are thread safe
+			s.bestUpdateCache.Add(period, update)
+			if update.Header.SyncPeriod() != period {
+				log.Error("Best update from wrong period found in database")
+			}
+			return update
+		} else {
+			log.Error("Error decoding best update", "error", err)
+		}
+	}
+	s.bestUpdateCache.Add(period, nil)
+	return nil
+}
+
+// storeBestUpdate stores a sync committee update in the canonical update chain
+func (s *CommitteeTracker) storeBestUpdate(update *types.LightClientUpdate) {
+	period := update.Header.SyncPeriod()
+	updateEnc, err := rlp.EncodeToBytes(update)
+	if err != nil {
+		log.Error("Error encoding types.LightClientUpdate", "error", err)
+		return
+	}
+	s.bestUpdateCache.Add(period, update)
+	s.db.Put(getBestUpdateKey(period), updateEnc)
+	s.committeeRootCache.Remove(period + 1)
+	s.updateInfoChanged()
+}
+
+// deleteBestUpdate deletes a sync committee update from the canonical update chain
+func (s *CommitteeTracker) deleteBestUpdate(period uint64) {
+	s.db.Delete(getBestUpdateKey(period))
+	s.bestUpdateCache.Remove(period)
+	s.committeeRootCache.Remove(period + 1)
+	s.updateInfoChanged()
+}
+
+// getSyncCommitteeKey returns the database key for the specified sync committee
+func getSyncCommitteeKey(period uint64, committeeRoot common.Hash) []byte {
+	var (
+		kl  = len(syncCommitteeKey)
+		key = make([]byte, kl+8+32)
+	)
+	copy(key[:kl], syncCommitteeKey)
+	binary.BigEndian.PutUint64(key[kl:kl+8], period)
+	copy(key[kl+8:], committeeRoot[:])
+	return key
+}
+
+// GetSerializedSyncCommittee fetches the serialized version of a sync committee
+// from cache or database
+func (s *CommitteeTracker) GetSerializedSyncCommittee(period uint64, committeeRoot common.Hash) []byte {
+	key := getSyncCommitteeKey(period, committeeRoot)
+	if committee, ok := s.serializedCommitteeCache.Get(string(key)); ok {
+		if len(committee) == SerializedCommitteeSize {
+			return committee
+		} else {
+			log.Error("Serialized committee with invalid size found in cache")
+		}
+	}
+	if committee, err := s.db.Get(key); err == nil {
+		if len(committee) == SerializedCommitteeSize {
+			s.serializedCommitteeCache.Add(string(key), committee)
+			return committee
+		} else {
+			log.Error("Serialized committee with invalid size found in database")
+		}
+	}
+	return nil
+}
+
+// storeSerializedSyncCommittee stores the serialized version of a sync committee
+// to cache and database
+func (s *CommitteeTracker) storeSerializedSyncCommittee(period uint64, committeeRoot common.Hash, committee []byte) {
+	key := getSyncCommitteeKey(period, committeeRoot)
+	s.serializedCommitteeCache.Add(string(key), committee)
+	s.syncCommitteeCache.Remove(string(key)) // a nil entry for "not found" might have been stored here earlier
+	s.db.Put(key, committee)
+}
+
+// SerializedCommitteeRoot calculates the root hash of the binary tree representation
+// of a sync committee provided in serialized format
+func SerializedCommitteeRoot(enc []byte) common.Hash {
+	if len(enc) != SerializedCommitteeSize {
+		return common.Hash{}
+	}
+	var (
+		hasher  = sha256.New()
+		padding [64 - params.BlsPubkeySize]byte
+		data    [params.SyncCommitteeSize]common.Hash
+		l       = params.SyncCommitteeSize
+	)
+	for i := range data {
+		hasher.Reset()
+		hasher.Write(enc[i*params.BlsPubkeySize : (i+1)*params.BlsPubkeySize])
+		hasher.Write(padding[:])
+		hasher.Sum(data[i][:0])
+	}
+	for l > 1 {
+		for i := 0; i < l/2; i++ {
+			hasher.Reset()
+			hasher.Write(data[i*2][:])
+			hasher.Write(data[i*2+1][:])
+			hasher.Sum(data[i][:0])
+		}
+		l /= 2
+	}
+	hasher.Reset()
+	hasher.Write(enc[SerializedCommitteeSize-params.BlsPubkeySize : SerializedCommitteeSize])
+	hasher.Write(padding[:])
+	hasher.Sum(data[1][:0])
+	hasher.Reset()
+	hasher.Write(data[0][:])
+	hasher.Write(data[1][:])
+	hasher.Sum(data[0][:0])
+	return data[0]
+}
+
+// getSyncCommitteeRoot returns the sync committee root at the given period of
+// the current local committee root constraints or update chain (tracker mutex
+// lock expected).
+func (s *CommitteeTracker) getSyncCommitteeRoot(period uint64) (root common.Hash) {
+	if r, ok := s.committeeRootCache.Get(period); ok {
+		return r
+	}
+	defer func() {
+		s.committeeRootCache.Add(period, root)
+	}()
+
+	if r, matchAll := s.constraints.CommitteeRoot(period); !matchAll {
+		return r
+	}
+	if !s.chainInit || period <= s.firstPeriod || period > s.nextPeriod {
+		return common.Hash{}
+	}
+	if update := s.GetBestUpdate(period - 1); update != nil {
+		return update.NextSyncCommitteeRoot
+	}
+	return common.Hash{}
+}
+
+// GetSyncCommitteeRoot returns the sync committee root at the given period of the
+// current local committee root constraints or update chain (tracker mutex locked).
+func (s *CommitteeTracker) GetSyncCommitteeRoot(period uint64) common.Hash {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	return s.getSyncCommitteeRoot(period)
+}
+
+// getSyncCommittee returns the deserialized sync committee at the given period
+// of the current local committee chain (tracker mutex lock expected).
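+//
+// The lookup below amounts to the following steps (a sketch of the logic that
+// follows, not additional behavior):
+//
+//	root := s.getSyncCommitteeRoot(period)            // constraints or update chain
+//	enc := s.GetSerializedSyncCommittee(period, root) // cache or database
+//	committee := s.sigVerifier.deserializeSyncCommittee(enc)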
+func (s *CommitteeTracker) getSyncCommittee(period uint64) syncCommittee {
+	if committeeRoot := s.getSyncCommitteeRoot(period); committeeRoot != (common.Hash{}) {
+		key := string(getSyncCommitteeKey(period, committeeRoot))
+		if sc, ok := s.syncCommitteeCache.Get(key); ok {
+			return sc
+		}
+		if sc := s.GetSerializedSyncCommittee(period, committeeRoot); sc != nil {
+			c := s.sigVerifier.deserializeSyncCommittee(sc)
+			s.syncCommitteeCache.Add(key, c)
+			return c
+		} else {
+			log.Error("Missing serialized sync committee", "period", period, "committeeRoot", committeeRoot)
+		}
+	}
+	return nil
+}
+
+// EnforceForksAndConstraints rolls back committee updates that do not match the
+// tracker's forks and constraints and also starts new requests if possible
+// (tracker mutex locked)
+func (s *CommitteeTracker) EnforceForksAndConstraints() {
+	s.lock.Lock()
+	s.enforceForksAndConstraints()
+	s.lock.Unlock()
+}
+
+// enforceForksAndConstraints rolls back committee updates that do not match the
+// tracker's forks and constraints and also starts new requests if possible
+// (tracker mutex expected)
+func (s *CommitteeTracker) enforceForksAndConstraints() {
+	if !s.genesisInit || !s.chainInit {
+		return
+	}
+	s.committeeRootCache.Purge()
+	for s.nextPeriod > s.firstPeriod {
+		if update := s.GetBestUpdate(s.nextPeriod - 1); update == nil || s.verifyUpdate(update) { // check constraints and signature
+			if update == nil {
+				log.Error("Sync committee update missing", "period", s.nextPeriod-1)
+			}
+			break
+		}
+		s.nextPeriod--
+		s.deleteBestUpdate(s.nextPeriod)
+	}
+	if s.nextPeriod == s.firstPeriod {
+		if root, matchAll := s.constraints.CommitteeRoot(s.firstPeriod); matchAll || s.getSyncCommitteeRoot(s.firstPeriod) != root || s.getSyncCommittee(s.firstPeriod) == nil {
+			s.nextPeriod, s.firstPeriod, s.chainInit = 0, 0, false
+		}
+	}
+
+	s.retrySyncAllPeers()
+}
+
+// checkConstraints checks whether the signed header of the given committee
+// update is on the right fork and the proven NextSyncCommitteeRoot matches the
+// update chain constraints.
+func (s *CommitteeTracker) checkConstraints(update *types.LightClientUpdate) bool {
+	if !s.genesisInit {
+		log.Error("CommitteeTracker not initialized")
+		return false
+	}
+	root, matchAll := s.constraints.CommitteeRoot(update.Header.SyncPeriod() + 1)
+	return matchAll || root == update.NextSyncCommitteeRoot
+}
diff --git a/beacon/light/sync/committee_tracker_test.go b/beacon/light/sync/committee_tracker_test.go
new file mode 100644
index 000000000000..1dfd8b12bf24
--- /dev/null
+++ b/beacon/light/sync/committee_tracker_test.go
@@ -0,0 +1,439 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+	"context"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/beacon/light/types"
+	"github.com/ethereum/go-ethereum/beacon/merkle"
+	"github.com/ethereum/go-ethereum/beacon/params"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/minio/sha256-simd"
+)
+
+var (
+	testGenesis  = newTestGenesis()
+	testGenesis2 = newTestGenesis()
+
+	tfNormal = newTestForks(testGenesis, Forks{
+		Fork{Epoch: 0, Version: []byte{0}},
+	})
+	tfAlternative = newTestForks(testGenesis, Forks{
+		Fork{Epoch: 0, Version: []byte{0}},
+		Fork{Epoch: 0x700, Version: []byte{1}},
+	})
+	tfAnotherGenesis = newTestForks(testGenesis2, Forks{
+		Fork{Epoch: 0, Version: []byte{0}},
+	})
+
+	tcBase             = newTestChain(nil, testGenesis, tfNormal, true, 0, 9, 200, 400)
+	tcLowParticipation = newTestChain(newTestChain(tcBase, testGenesis, tfNormal, true, 8, 14, 1000, 257), testGenesis, tfNormal, true, 15, 19, 1000, 100)
+	tcFork             = newTestChain(tcBase, testGenesis, tfAlternative, true, 7, 9, 200, 400)
+	tcAnotherGenesis   = newTestChain(nil, testGenesis2, tfAnotherGenesis, true, 0, 9, 200, 450)
+	tcBetterUpdates2   = newTestChain(tcBase, testGenesis, tfNormal, false, 5, 7, 1000, 450)                // better signer participation from period 5 to 7
+	tcBetterUpdates    = newTestChain(tcBase, testGenesis, tfNormal, false, 5, 7, finalizedTestUpdate, 400) // finalized updates from period 5 to 7 (stronger than the one above)
+)
+
+type ctTestCase []ctTestStep
+
+type ctTestStep struct {
+	periodTime float64 // slotTime uint64
+	trackers   []ctTestTrackerStep
+	sync       []ctTestTrackerSync
+}
+
+type ctTestTrackerSync struct {
+	sourceTc       *testChain // nil if target is synced from another source tracker
+	source, target int        // tracker index in the test setup; source is -1 if the target is synced from a testChain
+	expFail        bool
+}
+
+type ctTestTrackerStep struct {
+	forks           Forks
+	signerThreshold int
+	newTracker      bool // should always be true at first step and whenever forks/signerThreshold is changed
+	// constraint
+	constraintsTc                                                 *testChain
+	constraintsFirst, constraintsAfterFixed, constraintsAfterLast uint64
+	// exp result
+	expTc                  *testChain
+	expFirst, expAfterLast uint64
+}
+
+func TestCommitteeTrackerConstraints(t *testing.T) {
+	runCtTest(t, ctTestCase{
+		{7.5, []ctTestTrackerStep{{tfNormal, 257, true, tcBase, 0, 9, 9, tcBase, 0, 8}, {tfNormal, 257, true, tcBase, 5, 6, 1000, tcBase, 5, 8}}, []ctTestTrackerSync{{tcBase, -1, 0, false}, {nil, 0, 1, false}}},
+		{8.5, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 10, 10, tcBase, 0, 9}, {tfNormal, 257, false, tcBase, 5, 6, 1000, tcBase, 5, 9}}, []ctTestTrackerSync{{tcBase, -1, 0, false}, {nil, 0, 1, false}}},
+		{9.5, []ctTestTrackerStep{{tfNormal, 257, true, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfNormal, 257, true, tcBase, 5, 6, 1000, tcBase, 5, 10}}, []ctTestTrackerSync{{tcBase, -1, 0, false}, {nil, 0, 1, false}}},
+		{9.6, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfNormal, 257, false, tcBase, 0, 6, 1000, tcBase, 0, 10}}, []ctTestTrackerSync{{nil, 0, 1, false}}},
+	})
+}
+
+func TestCommitteeTrackerLowParticipation(t *testing.T) {
+	runCtTest(t, ctTestCase{
+		{9.5, []ctTestTrackerStep{{tfNormal, 257, true, tcLowParticipation, 0, 9, 9, tcLowParticipation, 0, 8}, {tfNormal, 300, true, tcBase, 5, 6, 1000, tcLowParticipation, 5, 8}}, []ctTestTrackerSync{{tcLowParticipation, -1, 0, false}, {nil, 0, 1, false}}},
+		{11.5, []ctTestTrackerStep{{tfNormal, 257, false, tcLowParticipation, 0, 13, 13, tcLowParticipation, 0, 12}, {tfNormal, 300, false, tcBase, 5, 6, 1000, tcLowParticipation, 5, 8}}, []ctTestTrackerSync{{tcLowParticipation, -1, 0, false}, {nil, 0, 1, false}}},
+		{11.6, []ctTestTrackerStep{{tfNormal, 257, false, tcLowParticipation, 0, 13, 13, tcLowParticipation, 0, 12}, {tfNormal, 257, true, tcBase, 5, 6, 1000, tcLowParticipation, 5, 12}}, []ctTestTrackerSync{{nil, 0, 1, false}}},
+		{13.5, []ctTestTrackerStep{{tfNormal, 257, false, tcLowParticipation, 0, 16, 16, tcLowParticipation, 0, 14}, {tfNormal, 257, false, tcBase, 5, 6, 1000, tcLowParticipation, 5, 14}}, []ctTestTrackerSync{{tcLowParticipation, -1, 0, true}, {nil, 0, 1, false}}},
+		{14.5, []ctTestTrackerStep{{tfNormal, 257, false, tcLowParticipation, 0, 16, 16, tcLowParticipation, 0, 15}, {tfNormal, 257, false, tcBase, 5, 6, 1000, tcLowParticipation, 5, 15}}, []ctTestTrackerSync{{tcLowParticipation, -1, 0, false}, {nil, 0, 1, false}}},
+		{19.5, []ctTestTrackerStep{{tfNormal, 257, false, tcLowParticipation, 0, 21, 21, tcLowParticipation, 0, 15}, {tfNormal, 257, false, tcBase, 5, 6, 1000, tcLowParticipation, 5, 15}}, []ctTestTrackerSync{{tcLowParticipation, -1, 0, false}, {nil, 0, 1, false}}},
+		{19.6, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfNormal, 257, false, tcBase, 5, 6, 1000, tcBase, 5, 10}}, []ctTestTrackerSync{{tcBase, -1, 0, false}, {nil, 0, 1, false}}},
+	})
+}
+
+func TestCommitteeTrackerFork(t *testing.T) {
+	runCtTest(t, ctTestCase{
+		{9.5, []ctTestTrackerStep{{tfNormal, 257, true, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfAlternative, 257, true, tcFork, 0, 11, 11, tcFork, 0, 10}, {tfNormal, 257, true, tcBase, 5, 6, 1000, tcBase, 5, 7}}, []ctTestTrackerSync{{tcBase, -1, 0, false}, {tcFork, -1, 1, false}, {nil, 1, 2, true}}},
+		{9.6, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfAlternative, 257, false, tcFork, 0, 11, 11, tcFork, 0, 10}, {tfNormal, 257, false, tcBase, 5, 6, 1000, tcBase, 5, 10}}, []ctTestTrackerSync{{nil, 0, 2, false}}},
+		{9.7, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfAlternative, 257, false, tcFork, 0, 11, 11, tcFork, 0, 10}, {tfAlternative, 257, true, tcFork, 5, 6, 1000, tcFork, 5, 7}}, []ctTestTrackerSync{}},
+		{9.8, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfAlternative, 257, false, tcFork, 0, 11, 11, tcFork, 0, 10}, {tfAlternative, 257, true, tcFork, 5, 6, 1000, tcFork, 5, 10}}, []ctTestTrackerSync{{nil, 1, 2, false}}},
+	})
+}
+
+func TestCommitteeTrackerAnotherGenesis(t *testing.T) {
+	runCtTest(t, ctTestCase{
+		{9.5, []ctTestTrackerStep{{tfNormal, 257, true, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfAnotherGenesis, 257, true, tcAnotherGenesis, 0, 11, 11, tcAnotherGenesis, 0, 10}, {tfNormal, 257, true, tcBase, 5, 6, 1000, tcBase, 1, 0}}, []ctTestTrackerSync{{tcBase, -1, 0, false}, {tcAnotherGenesis, -1, 1, false}, {nil, 1, 0, true}, {nil, 1, 2, true}}},
+		{9.6, []ctTestTrackerStep{{tfNormal, 257, true, tcBase, 0, 11, 11, tcBase, 0, 10}, {tfAnotherGenesis, 257, true, tcAnotherGenesis, 0, 11, 11, tcAnotherGenesis, 0, 10}, {tfNormal, 257, true, tcBase, 5, 6, 1000, tcBase, 5, 10}}, []ctTestTrackerSync{{nil, 0, 2, false}}},
+	})
+}
+
+func TestCommitteeTrackerBetterUpdates(t *testing.T) {
+	runCtTest(t, ctTestCase{
+		{9.5, []ctTestTrackerStep{{tfNormal, 257, true, tcBase, 2, 11, 11, tcBase, 2, 10}, {tfNormal, 257, true, tcBase, 0, 9, 9, tcBetterUpdates, 0, 8}, {tfNormal, 257, true, tcBase, 0, 9, 9, tcBetterUpdates2, 0, 8}}, []ctTestTrackerSync{{tcBase, -1, 0, false}, {tcBetterUpdates, -1, 1, false}, {tcBetterUpdates2, -1, 2, false}}},
+		{9.6, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 11, 11, tcBetterUpdates, 0, 10}, {tfNormal, 257, false, tcBase, 0, 11, 11, tcBetterUpdates, 0, 10}, {tfNormal, 257, false, tcBase, 0, 11, 11, tcBetterUpdates2, 0, 10}}, []ctTestTrackerSync{{tcBetterUpdates, -1, 1, false}, {nil, 1, 0, false}, {nil, 0, 2, false}}},
+		{9.7, []ctTestTrackerStep{{tfNormal, 257, false, tcBase, 0, 11, 11, tcBetterUpdates2, 0, 10}, {tfNormal, 257, false, tcBase, 0, 11, 11, tcBetterUpdates2, 0, 10}, {tfNormal, 257, false, tcBase, 0, 11, 11, tcBetterUpdates2, 0, 10}}, []ctTestTrackerSync{{nil, 2, 0, false}, {nil, 2, 1, false}}},
+	})
+}
+
+func runCtTest(t *testing.T, testCase ctTestCase) {
+	count := len(testCase[0].trackers)
+	dbs := make([]*memorydb.Database, count)
+	trackers := make([]*CommitteeTracker, count)
+	constraints := make([]*testConstraints, count)
+	for i := range dbs {
+		dbs[i] = memorydb.New()
+	}
+	clock := &mclock.Simulated{}
+	var lastTime time.Duration
+	for stepIndex, step := range testCase {
+		tm := time.Duration(float64(time.Second*12*params.SyncPeriodLength) * step.periodTime)
+		clock.Run(tm - lastTime)
+		lastTime = tm
+		for i, ts := range step.trackers {
+			if ts.newTracker {
+				if trackers[i] != nil {
+					trackers[i].Stop()
+				}
+				constraints[i] = &testConstraints{}
+				trackers[i] = NewCommitteeTracker(dbs[i], ts.forks, constraints[i], ts.signerThreshold, true, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
+			}
+			constraints[i].setRoots(ts.constraintsTc, ts.constraintsFirst, ts.constraintsAfterFixed, ts.constraintsAfterLast)
+		}
+		for syncIndex, ss := range step.sync {
+			var failed bool
+			if ss.sourceTc != nil {
+				s := &tcSyncer{tc: ss.sourceTc}
+				s.syncTracker(trackers[ss.target])
+				failed = s.failed
+			} else {
+				s := &ctSyncer{ct: trackers[ss.source]}
+				s.syncTracker(trackers[ss.target])
+				failed = s.failed
+			}
+			if failed != ss.expFail {
+				t.Errorf("Step %d sync %d result mismatch (got %v, expected %v)", stepIndex, syncIndex, failed, ss.expFail)
+			}
+		}
+		// check resulting tracker state
+		for i, ts := range step.trackers {
+			ct := trackers[i]
+			if ts.expFirst > 0 {
+				if ct.GetBestUpdate(ts.expFirst-1) != nil {
+					t.Errorf("Step %d tracker %d: update found in synced chain before the expected range (period %d)", stepIndex, i, ts.expFirst-1)
+				}
+			}
+			for period := ts.expFirst; period < ts.expAfterLast; period++ {
+				if update := ct.GetBestUpdate(period); update == nil {
+					t.Errorf("Step %d tracker %d: update missing from synced chain (period %d)", stepIndex, i, period)
+				} else if update.Score() != ts.expTc.periods[period].update.Score() {
+					t.Errorf("Step %d tracker %d: wrong update found in synced chain (period %d)", stepIndex, i, period)
+				}
+			}
+			for period := ts.expFirst; period <= ts.expAfterLast; period++ {
+				if ct.GetSyncCommitteeRoot(period) != ts.expTc.periods[period].committeeRoot {
+					t.Errorf("Step %d tracker %d: committee root mismatch in synced chain (period %d)", stepIndex, i, period)
+				}
+			}
+			if ct.GetBestUpdate(ts.expAfterLast) != nil {
+				t.Errorf("Step %d tracker %d: update found in synced chain after the expected range (period %d)", stepIndex, i, ts.expAfterLast)
+			}
+		}
+	}
+	for _, ct := range trackers {
+		if ct != nil {
+			ct.Stop()
+		}
+	}
+}
+
+func newTestGenesis() GenesisData {
+	var genesisData GenesisData
+	rand.Read(genesisData.GenesisValidatorsRoot[:])
+	return genesisData
+}
+
+func newTestForks(genesisData GenesisData, forks Forks) Forks {
+	forks.computeDomains(genesisData.GenesisValidatorsRoot)
+	return forks
+}
+
+func newTestChain(parent *testChain, genesisData GenesisData, forks Forks, newCommittees bool, begin, end int, subPeriodIndex uint64, signerCount int) *testChain {
+	tc := &testChain{
+		genesisData: genesisData,
+		forks:       forks,
+	}
+	if parent != nil {
+		tc.periods = make([]testPeriod, len(parent.periods))
+		copy(tc.periods, parent.periods)
+	}
+	if newCommittees {
+		if begin == 0 {
+			tc.fillCommittees(begin, end+1)
+		} else {
+			tc.fillCommittees(begin+1, end+1)
+		}
+	}
+	tc.fillUpdates(begin, end, subPeriodIndex, signerCount)
+	return tc
+}
+
+func makeTestHeaderWithSingleProof(slot, index uint64, value merkle.Value) (types.Header, merkle.Values) {
+	var branch merkle.Values
+	hasher := sha256.New()
+	for index > 1 {
+		var proofHash merkle.Value
+		rand.Read(proofHash[:])
+		hasher.Reset()
+		if index&1 == 0 {
+			hasher.Write(value[:])
+			hasher.Write(proofHash[:])
+		} else {
+			hasher.Write(proofHash[:])
+			hasher.Write(value[:])
+		}
+		hasher.Sum(value[:0])
+		index /= 2
+		branch = append(branch, proofHash)
+	}
+	return types.Header{Slot: slot, StateRoot: common.Hash(value)}, branch
+}
+
+func makeBitmask(signerCount int) []byte {
+	bitmask := make([]byte, params.SyncCommitteeSize/8)
+	for i := 0; i < params.SyncCommitteeSize; i++ {
+		if rand.Intn(params.SyncCommitteeSize-i) < signerCount {
+			bitmask[i/8] += byte(1) << (i & 7)
+			signerCount--
+		}
+	}
+	return bitmask
+}
+
+type testPeriod struct {
+	committee     dummySyncCommittee
+	committeeRoot common.Hash
+	update        types.LightClientUpdate
+}
+
+type testChain struct {
+	periods     []testPeriod
+	forks       Forks
+	genesisData GenesisData
+}
+
+func (tc *testChain) makeTestSignedHead(header types.Header, signerCount int) SignedHead {
+	bitmask := makeBitmask(signerCount)
+	return SignedHead{
+		Header:        header,
+		BitMask:       bitmask,
+		Signature:     makeDummySignature(tc.periods[types.PeriodOfSlot(header.Slot+1)].committee, tc.forks.signingRoot(header), bitmask),
+		SignatureSlot: header.Slot + 1,
+	}
+}
+
+const finalizedTestUpdate = 8191 // if subPeriodIndex == finalizedTestUpdate then a finalized update is generated
+
+func (tc *testChain) makeTestUpdate(period, subPeriodIndex uint64, signerCount int) types.LightClientUpdate {
+	var update types.LightClientUpdate
+	update.NextSyncCommitteeRoot = tc.periods[period+1].committeeRoot
+	if subPeriodIndex == finalizedTestUpdate {
+		update.FinalizedHeader, update.NextSyncCommitteeBranch = makeTestHeaderWithSingleProof(types.PeriodStart(period)+100, params.BsiNextSyncCommittee, merkle.Value(update.NextSyncCommitteeRoot))
+		update.Header, update.FinalityBranch = makeTestHeaderWithSingleProof(types.PeriodStart(period)+200, params.BsiFinalBlock, merkle.Value(update.FinalizedHeader.Hash()))
+	} else {
+		update.Header, update.NextSyncCommitteeBranch = makeTestHeaderWithSingleProof(types.PeriodStart(period)+subPeriodIndex, params.BsiNextSyncCommittee, merkle.Value(update.NextSyncCommitteeRoot))
+	}
+	signedHead := tc.makeTestSignedHead(update.Header, signerCount)
+	update.SyncCommitteeBits, update.SyncCommitteeSignature = signedHead.BitMask, signedHead.Signature
+	return update
+}
+
+func (tc *testChain) fillCommittees(begin, end int) {
+	if len(tc.periods) <= end {
+		tc.periods = append(tc.periods, make([]testPeriod, end+1-len(tc.periods))...)
+	}
+	for i := begin; i <= end; i++ {
+		tc.periods[i].committee = randomDummySyncCommittee()
+		tc.periods[i].committeeRoot = SerializedCommitteeRoot(serializeDummySyncCommittee(tc.periods[i].committee))
+	}
+}
+
+func (tc *testChain) fillUpdates(begin, end int, subPeriodIndex uint64, signerCount int) {
+	for i := begin; i <= end; i++ {
+		tc.periods[i].update = tc.makeTestUpdate(uint64(i), subPeriodIndex, signerCount)
+	}
+}
+
+type tcSyncer struct {
+	tc     *testChain
+	failed bool
+}
+
+func (s *tcSyncer) CanRequest(updateCount, committeeCount int) bool { return true }
+
+func (s *tcSyncer) GetBestCommitteeProofs(ctx context.Context, req types.CommitteeRequest) (types.CommitteeReply, error) {
+	reply := types.CommitteeReply{
+		Updates:    make([]types.LightClientUpdate, len(req.UpdatePeriods)),
+		Committees: make([][]byte, len(req.CommitteePeriods)),
+	}
+	for i, period := range req.UpdatePeriods {
+		reply.Updates[i] = s.tc.periods[period].update
+	}
+	for i, period := range req.CommitteePeriods {
+		reply.Committees[i] = serializeDummySyncCommittee(s.tc.periods[period].committee)
+	}
+	return reply, nil
+}
+
+func (s *tcSyncer) ProtocolError(description string) {
+	s.failed = true
+}
+
+func (tc *testChain) makeUpdateInfo(firstPeriod int) *types.UpdateInfo {
+	u := &types.UpdateInfo{
+		AfterLastPeriod: uint64(len(tc.periods) - 1),
+		Scores:          make(types.UpdateScores, len(tc.periods)-firstPeriod-1),
+	}
+	for i := range u.Scores {
+		u.Scores[i] = tc.periods[firstPeriod+i].update.Score()
+	}
+	return u
+}
+
+func (s *tcSyncer) syncTracker(ct *CommitteeTracker) {
+	<-ct.SyncWithPeer(s, s.tc.makeUpdateInfo(0))
+}
+
+type ctSyncer struct {
+	ct     *CommitteeTracker
+	failed bool
+}
+
+func (s *ctSyncer) CanRequest(updateCount, committeeCount int) bool { return true }
+
+func (s *ctSyncer) GetBestCommitteeProofs(ctx context.Context, req types.CommitteeRequest) (types.CommitteeReply, error) {
+	reply := types.CommitteeReply{
+		Updates:    make([]types.LightClientUpdate, len(req.UpdatePeriods)),
+		Committees: make([][]byte, len(req.CommitteePeriods)),
+	}
+	for i, period := range req.UpdatePeriods {
+		if u := s.ct.GetBestUpdate(period); u != nil {
+			reply.Updates[i] = *u
+		}
+	}
+	for i, period := range req.CommitteePeriods {
+		reply.Committees[i] = s.ct.GetSerializedSyncCommittee(period, s.ct.GetSyncCommitteeRoot(period))
+	}
+	return reply, nil
+}
+
+func (s *ctSyncer) ProtocolError(description string) {
+	s.failed = true
+}
+
+func (s *ctSyncer) syncTracker(ct *CommitteeTracker) {
+	<-ct.SyncWithPeer(s, s.ct.GetUpdateInfo())
+}
+
+type testConstraints struct {
+	committeeRoots   []common.Hash
+	first, afterLast uint64
+
+	genesisData    GenesisData
+	initCallback   func(GenesisData)
+	updateCallback func()
+}
+
+func (tcs *testConstraints) SyncRange() (syncRange types.UpdateRange, lastFixed uint64) {
+	afterLast := tcs.afterLast
+	if afterLast > tcs.first {
+		afterLast--
+	}
+	return types.UpdateRange{First: tcs.first, AfterLast: afterLast},
+		tcs.first + uint64(len(tcs.committeeRoots)-1)
+}
+
+func (tcs *testConstraints) CommitteeRoot(period uint64) (root common.Hash, matchAll bool) {
+	if period < tcs.first || period >= tcs.afterLast {
+		return common.Hash{}, false
+	}
+	if period >= tcs.first+uint64(len(tcs.committeeRoots)) {
+		return common.Hash{}, true
+	}
+	return tcs.committeeRoots[period-tcs.first], false
+}
+
+func (tcs *testConstraints) SetCallbacks(initCallback func(GenesisData), updateCallback func()) {
+	if tcs.genesisData == (GenesisData{}) {
+		tcs.initCallback = initCallback
+	} else {
+		initCallback(tcs.genesisData)
+	}
+	tcs.updateCallback = updateCallback
+}
+
+func (tcs *testConstraints) setRoots(tc *testChain, first, afterFixed, afterLast uint64) {
+	tcs.first, tcs.afterLast = first, afterLast
+	tcs.committeeRoots = make([]common.Hash, int(afterFixed-first))
+	for i := range tcs.committeeRoots {
+		tcs.committeeRoots[i] = tc.periods[first+uint64(i)].committeeRoot
+	}
+	if tcs.genesisData == (GenesisData{}) {
+		tcs.genesisData = tc.genesisData
+		if tcs.initCallback != nil {
+			tcs.initCallback(tcs.genesisData)
+		}
+	}
+	if tcs.updateCallback != nil {
+		tcs.updateCallback()
+	}
+}
diff --git a/beacon/light/sync/committee_update.go b/beacon/light/sync/committee_update.go
new file mode 100644
index 000000000000..eb582a4ddd3d
--- /dev/null
+++ b/beacon/light/sync/committee_update.go
@@ -0,0 +1,465 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+	"context"
+	"errors"
+	"math"
+	"time"
+
+	"github.com/ethereum/go-ethereum/beacon/light/types"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+const (
+	broadcastFrequencyLimit = time.Millisecond * 200
+	advertiseDelay          = time.Second * 10
+)
+
+// ctClient represents a peer that CommitteeTracker sends signed heads and
+// sync committee advertisements to
+type ctClient interface {
+	SendSignedHeads(heads []SignedHead)
+	SendUpdateInfo(updateInfo *types.UpdateInfo)
+}
+
+// ctServer represents a peer that CommitteeTracker can request sync committee update proofs from
+type ctServer interface {
+	GetBestCommitteeProofs(ctx context.Context, req types.CommitteeRequest) (types.CommitteeReply, error)
+	CanRequest(updateCount, committeeCount int) bool
+	ProtocolError(description string)
+}
+
+// SyncWithPeer starts or updates the syncing process with a given peer, based
+// on the advertised update scores.
+// Note that calling with remoteInfo == nil does not start syncing but allows
+// attempting the init process with the given peer if not initialized yet.
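+//
+// A typical call site in a server peer handler might look like this (sketch;
+// peer is any ctServer implementation and remoteInfo comes from the peer's
+// latest advertisement):
+//
+//	done := ct.SyncWithPeer(peer, remoteInfo)
+//	<-done // closed once there is nothing more to request from this peer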
+func (s *CommitteeTracker) SyncWithPeer(peer ctServer, remoteInfo *types.UpdateInfo) chan struct{} { + if remoteInfo != nil && !remoteInfo.IsValid() { + peer.ProtocolError("Invalid update info") + doneSyncing := make(chan struct{}) + close(doneSyncing) + return doneSyncing + } + s.lock.Lock() + sp := s.connected[peer] + if sp == nil { + sp = &ctPeerInfo{peer: peer} + s.connected[peer] = sp + } + if remoteInfo != nil { + sp.remoteInfo = *remoteInfo + sp.forkPeriod = math.MaxUint64 + if !sp.queued && !sp.requesting { + s.requestQueue = append(s.requestQueue, sp) + sp.queued = true + sp.doneSyncing = make(chan struct{}) + select { + case s.triggerCh <- struct{}{}: + default: + } + } + } + doneSyncing := sp.doneSyncing + s.lock.Unlock() + return doneSyncing +} + +// Disconnect notifies the tracker about a peer being disconnected +func (s *CommitteeTracker) Disconnect(peer ctServer) { + s.lock.Lock() + delete(s.connected, peer) + s.lock.Unlock() +} + +// retrySyncAllPeers re-triggers the syncing process (check if there is something +// new to request) with all connected peers. Should be called when constraints +// are updated and might allow syncing further. +func (s *CommitteeTracker) retrySyncAllPeers() { + for _, sp := range s.connected { + if !sp.queued && !sp.requesting { + s.requestQueue = append(s.requestQueue, sp) + sp.queued = true + sp.doneSyncing = make(chan struct{}) + } + } + select { + case s.triggerCh <- struct{}{}: + default: + } +} + +// Stop stops the syncing/propagation process and shuts down the tracker +func (s *CommitteeTracker) Stop() { + close(s.stopCh) +} + +// ctPeerInfo is the state of the syncing process from an individual server peer +type ctPeerInfo struct { + peer ctServer + remoteInfo types.UpdateInfo + forkPeriod uint64 // remote is known to be on a different and higher valued fork starting from this period + requesting, queued bool + deferredHeads []SignedHead + doneSyncing chan struct{} +} + +// syncLoop is the global syncing loop starting requests to all peers where there +// is something to sync according to the most recent advertisement. +func (s *CommitteeTracker) syncLoop() { + s.lock.Lock() + for { + if len(s.requestQueue) > 0 { + sp := s.requestQueue[0] + s.requestQueue = s.requestQueue[1:] + if len(s.requestQueue) == 0 { + s.requestQueue = nil + } + sp.queued = false + if s.startRequest(sp) { + s.lock.Unlock() + select { + case <-s.triggerCh: + case <-s.clock.After(time.Second): + case <-s.stopCh: + return + } + s.lock.Lock() + } + } else { + s.lock.Unlock() + select { + case <-s.triggerCh: + case <-s.stopCh: + return + } + s.lock.Lock() + } + } +} + +// startRequest sends a new request to the given peer if there is anything to +// request; finishes the syncing otherwise (processes deferred signed head +// advertisements and closes the doneSyncing channel). +// Returns true if a new request has been sent. 
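+//
+// The round trip performed for each request is, in essence (a sketch of the
+// logic below, not additional behavior):
+//
+//	req := s.nextRequest(sp)                               // local vs remote chain diff
+//	reply, err := sp.peer.GetBestCommitteeProofs(ctx, req) // remote call
+//	err = s.processReply(sp, req, reply)                   // verify and store results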
+func (s *CommitteeTracker) startRequest(sp *ctPeerInfo) bool {
+	req := s.nextRequest(sp)
+	if req.IsEmpty() {
+		if sp.deferredHeads != nil {
+			s.addSignedHeads(sp.peer, sp.deferredHeads)
+			sp.deferredHeads = nil
+		}
+		close(sp.doneSyncing)
+		sp.doneSyncing = nil
+		return false
+	}
+	sp.requesting = true
+	go func() {
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*20)
+		reply, err := sp.peer.GetBestCommitteeProofs(ctx, req) // expected to return with error in case of shutdown
+		cancel()
+		if err != nil {
+			s.lock.Lock()
+			sp.requesting = false
+			close(sp.doneSyncing)
+			sp.doneSyncing = nil
+			select {
+			case s.triggerCh <- struct{}{}: // trigger next request
+			default:
+			}
+			s.lock.Unlock()
+			return
+		}
+		s.lock.Lock()
+		sp.requesting = false
+		if err := s.processReply(sp, req, reply); err == nil {
+			s.requestQueue = append(s.requestQueue, sp)
+			sp.queued = true
+		} else {
+			sp.peer.ProtocolError(err.Error())
+			close(sp.doneSyncing)
+			sp.doneSyncing = nil
+		}
+		select {
+		case s.triggerCh <- struct{}{}: // trigger next request
+		default:
+		}
+		s.lock.Unlock()
+	}()
+	return true
+}
+
+// nextRequest creates the next request to be sent to the given peer, based on
+// the difference between the remote advertised and the local update chains.
+func (s *CommitteeTracker) nextRequest(sp *ctPeerInfo) types.CommitteeRequest {
+	if !sp.remoteInfo.IsValid() {
+		return types.CommitteeRequest{}
+	}
+	var (
+		request        types.CommitteeRequest
+		localRange     = types.UpdateRange{First: s.firstPeriod, AfterLast: s.nextPeriod}
+		localInfo      = s.getUpdateInfo()
+		localInfoRange = localInfo.Range()
+		remoteRange    = sp.remoteInfo.Range()
+	)
+	syncRange, lastFixed := s.constraints.SyncRange()
+	if lastFixed < syncRange.First || lastFixed > syncRange.AfterLast {
+		log.Error("Invalid sync constraints", "sync first",
+			syncRange.First, "sync afterLast", syncRange.AfterLast,
+			"last fixed", lastFixed)
+		return types.CommitteeRequest{}
+	}
+	if !s.chainInit {
+		request.CommitteePeriods = []uint64{lastFixed}
+		localRange = types.UpdateRange{First: lastFixed, AfterLast: lastFixed}
+		localInfoRange = localRange
+	}
+	if localRange.First > lastFixed || localRange.AfterLast < syncRange.First {
+		log.Error("Gap between local updates and fixed committee range, cannot sync", "local first",
+			localRange.First, "local afterLast", localRange.AfterLast,
+			"sync first", syncRange.First, "last fixed", lastFixed)
+		return types.CommitteeRequest{}
+	}
+	if remoteRange.First > localRange.AfterLast {
+		// if the missing range is longer than the remote advertised range then assume
+		// that the remote has that range and try anyway
+		remoteRange.First = localRange.AfterLast
+	}
+	syncRange = syncRange.Shared(remoteRange)
+	sharedRange := localInfoRange.Shared(syncRange).Shared(types.UpdateRange{AfterLast: sp.forkPeriod})
+	if !sharedRange.IsValid() {
+		return types.CommitteeRequest{}
+	}
+
+	// shared range: here we assume that local and remote updates have the same
+	// NextSyncCommitteeRoot and only fetch updates with higher remote score
+	for period := sharedRange.First; period < sharedRange.AfterLast; period++ {
+		if !sp.peer.CanRequest(len(request.UpdatePeriods)+1, len(request.CommitteePeriods)) {
+			break
+		}
+		if sp.remoteInfo.Score(period).BetterThan(localInfo.Score(period)) {
+			request.UpdatePeriods = append(request.UpdatePeriods, period)
+		}
+	}
+	// future range: fetch update and next committee as long as remote score reaches required minimum
+	for period := sharedRange.AfterLast; period < syncRange.AfterLast; period++ {
+		if !sp.peer.CanRequest(len(request.UpdatePeriods)+1, len(request.CommitteePeriods)+1) {
+			break // cannot fetch update + committee any more
+		}
+		// Note: we might try syncing before the remote advertised range here if the
+		// locally known chain head is older than that; in this case we skip the score
+		// check and hope for the best (it will be checked by processReply later; we
+		// drop the peer as useless if it cannot serve us)
+		if sp.remoteInfo.HasScore(period) && s.minimumUpdateScore.BetterThan(sp.remoteInfo.Score(period)) {
+			break // do not sync further if advertised score is less than our minimum requirement
+		}
+		request.UpdatePeriods = append(request.UpdatePeriods, period)
+		request.CommitteePeriods = append(request.CommitteePeriods, period+1)
+	}
+	// past range: fetch update and committee for periods before the locally stored
+	// range that are covered by the sync range constraints (known committee roots)
+	for nextPeriod := localRange.First; nextPeriod > syncRange.First; nextPeriod-- { // loop variable is nextPeriod == period+1 to avoid uint64 underflow
+		if !sp.peer.CanRequest(len(request.UpdatePeriods)+1, len(request.CommitteePeriods)+1) {
+			break // cannot fetch update + committee any more
+		}
+		period := nextPeriod - 1
+		if period > sp.remoteInfo.AfterLastPeriod {
+			break
+		}
+		if s.minimumUpdateScore.BetterThan(sp.remoteInfo.Score(period)) {
+			break // do not sync further if advertised score is less than our minimum requirement
+		}
+		// Note: updates are available from localFirst to localAfterLast-1 while
+		// committees are available from localFirst to localAfterLast so we extend
+		// backwards by requesting updates and committees for the same period
+		// (committee for localFirst should be available or requested here already
+		// so update for localFirst-1 can always be inserted if it matches our chain)
+		request.UpdatePeriods = append(request.UpdatePeriods, period)
+		request.CommitteePeriods = append(request.CommitteePeriods, period)
+	}
+	return request
+}
+
+// processReply processes the reply to a previous request, verifying received
+// updates and committees and extending/improving the local update chain if possible.
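+//
+// A reply is accepted only if it mirrors the request exactly; informally, the
+// checks below require that
+//
+//	len(reply.Updates) == len(sentRequest.UpdatePeriods) &&
+//		len(reply.Committees) == len(sentRequest.CommitteePeriods) &&
+//		reply.Updates[i].Header.SyncPeriod() == sentRequest.UpdatePeriods[i] // for each i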
+func (s *CommitteeTracker) processReply(sp *ctPeerInfo, sentRequest types.CommitteeRequest, reply types.CommitteeReply) error {
+	if len(reply.Updates) != len(sentRequest.UpdatePeriods) || len(reply.Committees) != len(sentRequest.CommitteePeriods) {
+		return errors.New("reply length mismatch")
+	}
+	var (
+		futureCommittees    = make(map[uint64][]byte)
+		storedCommittee     bool
+		lastStoredCommittee uint64
+	)
+	for i, c := range reply.Committees {
+		if len(c) != SerializedCommitteeSize {
+			return errors.New("wrong committee size")
+		}
+		period := sentRequest.CommitteePeriods[i]
+		if len(sentRequest.UpdatePeriods) == 0 || period <= sentRequest.UpdatePeriods[0] {
+			if root := SerializedCommitteeRoot(c); root != s.getSyncCommitteeRoot(period) {
+				return errors.New("wrong committee root")
+			} else {
+				s.storeSerializedSyncCommittee(period, root, c)
+				if !storedCommittee || period > lastStoredCommittee {
+					storedCommittee, lastStoredCommittee = true, period
+				}
+			}
+		} else {
+			futureCommittees[period] = c
+		}
+	}
+
+	if !s.chainInit {
+		// chain not initialized
+		if storedCommittee {
+			s.firstPeriod, s.nextPeriod, s.chainInit = lastStoredCommittee, lastStoredCommittee, true
+			s.updateInfoChanged()
+		} else {
+			return errors.New("cannot initialize without committees")
+		}
+	}
+
+	firstPeriod := sp.remoteInfo.AfterLastPeriod - uint64(len(sp.remoteInfo.Scores))
+	for i, update := range reply.Updates {
+		var (
+			update          = update // updates are cached by reference, do not overwrite
+			period          = update.Header.SyncPeriod()
+			remoteInfoScore types.UpdateScore
+		)
+		if period != sentRequest.UpdatePeriods[i] {
+			return errors.New("wrong update period")
+		}
+		if period > s.nextPeriod { // a previous insertUpdate could have reduced nextPeriod since the request was created
+			continue // skip but do not fail because it is not the remote side's fault; retry with new request
+		}
+		if period >= firstPeriod {
+			remoteInfoScore = sp.remoteInfo.Scores[period-firstPeriod]
+		} else {
+			remoteInfoScore = s.minimumUpdateScore
+		}
+		if remoteInfoScore.BetterThan(update.Score()) {
+			return errors.New("update score lower than promised") // remote did not deliver an update with the promised score
+		}
+
+		switch s.insertUpdate(&update, futureCommittees[period+1]) {
+		case sciSuccess:
+			if sp.forkPeriod == period {
+				// if local chain is successfully updated to the remote fork then remote is not on a different fork anymore
+				sp.forkPeriod = math.MaxUint64
+			}
+		case sciWrongUpdate:
+			return errors.New("insert update failed")
+		case sciNeedCommittee:
+			// remember that remote is on a different and more valuable fork;
+			// do not fail but construct the next request accordingly
+			sp.forkPeriod = period
+			return nil
+		case sciUnexpectedError:
+			// local error, insertUpdate has already printed an error log
+			return errors.New("unexpected local error") // though not the remote's fault, fail here to avoid infinite retries
+		}
+	}
+	return nil
+}
+
+// NextPeriod returns the next update period to be synced (the period after the
+// last update if there are updates or the first period fixed by the constraints
+// if there are no updates yet)
+func (s *CommitteeTracker) NextPeriod() uint64 {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if !s.chainInit {
+		syncRange, _ := s.constraints.SyncRange()
+		return syncRange.First
+	}
+	return s.nextPeriod
+}
+
+// GetUpdateInfo returns a types.UpdateInfo based on the current local update chain
+// (tracker mutex locked).
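+//
+// Example use when advertising the local chain to a freshly connected client
+// peer (sketch; client is any ctClient implementation):
+//
+//	info := ct.GetUpdateInfo()
+//	client.SendUpdateInfo(info)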
+func (s *CommitteeTracker) GetUpdateInfo() *types.UpdateInfo {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.getUpdateInfo()
+}
+
+// getUpdateInfo returns a types.UpdateInfo based on the current local update chain
+// (tracker mutex expected).
+func (s *CommitteeTracker) getUpdateInfo() *types.UpdateInfo {
+	if s.updateInfo != nil {
+		return s.updateInfo
+	}
+	l := s.nextPeriod - s.firstPeriod
+	if l > types.MaxUpdateInfoLength {
+		l = types.MaxUpdateInfoLength
+	}
+	firstPeriod := s.nextPeriod - l
+
+	u := &types.UpdateInfo{
+		AfterLastPeriod: s.nextPeriod,
+		Scores:          make(types.UpdateScores, int(l)),
+	}
+
+	for period := firstPeriod; period < s.nextPeriod; period++ {
+		if update := s.GetBestUpdate(period); update != nil {
+			u.Scores[period-firstPeriod] = update.Score()
+		} else {
+			log.Error("Update missing from database", "period", period)
+		}
+	}
+
+	s.updateInfo = u
+	return u
+}
+
+// updateInfoChanged should be called whenever the committee update chain is
+// changed. It schedules a call to advertiseCommitteesNow in the near future
+// (after advertiseDelay) unless it is already scheduled. This delay ensures that
+// advertisements are not sent too frequently.
+func (s *CommitteeTracker) updateInfoChanged() {
+	s.updateInfo = nil
+	if s.advertiseScheduled {
+		return
+	}
+	s.advertiseScheduled = true
+	s.advertisedTo = nil
+
+	s.clock.AfterFunc(advertiseDelay, func() {
+		s.lock.Lock()
+		s.advertiseCommitteesNow()
+		s.advertiseScheduled = false
+		s.lock.Unlock()
+	})
+}
+
+// advertiseCommitteesNow sends committee update chain advertisements to all active peers.
+func (s *CommitteeTracker) advertiseCommitteesNow() {
+	info := s.getUpdateInfo()
+	if s.advertisedTo == nil {
+		s.advertisedTo = make(map[ctClient]struct{})
+	}
+	for peer := range s.broadcastTo {
+		if _, ok := s.advertisedTo[peer]; !ok {
+			peer.SendUpdateInfo(info)
+			s.advertisedTo[peer] = struct{}{}
+		}
+	}
+}
diff --git a/beacon/light/sync/signed_head.go b/beacon/light/sync/signed_head.go
new file mode 100644
index 000000000000..ebd0b1e301d0
--- /dev/null
+++ b/beacon/light/sync/signed_head.go
@@ -0,0 +1,211 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+	"bytes"
+	"errors"
+	"math/bits"
+	"time"
+
+	"github.com/ethereum/go-ethereum/beacon/light/types"
+	"github.com/ethereum/go-ethereum/beacon/params"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// SignedHead represents a beacon header signed by a sync committee
+//
+// Note: this structure is created from either an optimistic update or an instant update:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
+// https://github.com/zsfelfoldi/beacon-APIs/blob/instant_update/apis/beacon/light_client/instant_update.yaml
+type SignedHead struct {
+	Header        types.Header // signed beacon header
+	BitMask       []byte       // bit vector (LSB first) encoding the subset of the relevant sync committee that signed the header
+	Signature     []byte       // BLS sync aggregate validating the signingRoot (see Forks.signingRoot(Header))
+	SignatureSlot uint64       // slot in which the signature has been created (newer than Header.Slot, determines the signing sync committee)
+}
+
+// SignerCount returns the number of individual signers in the signature aggregate
+func (s *SignedHead) SignerCount() int {
+	if len(s.BitMask) != params.SyncCommitteeBitmaskSize {
+		return 0 // signature check will filter it out later but we calculate this before sig check
+	}
+	var signerCount int
+	for _, v := range s.BitMask {
+		signerCount += bits.OnesCount8(v)
+	}
+	return signerCount
+}
+
+// Equal returns true if both the headers and the signer sets are the same
+func (s *SignedHead) Equal(s2 *SignedHead) bool {
+	return s.Header == s2.Header && bytes.Equal(s.BitMask, s2.BitMask) && bytes.Equal(s.Signature, s2.Signature)
+}
+
+// AddSignedHeads adds signed heads to the tracker if the syncing process has
+// been finished; adds them to a deferred list otherwise that is processed when
+// the syncing is finished.
+func (s *CommitteeTracker) AddSignedHeads(peer ctServer, heads []SignedHead) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if sp := s.connected[peer]; sp != nil && (sp.requesting || sp.queued) {
+		sp.deferredHeads = append(sp.deferredHeads, heads...)
+ return nil + } + return s.addSignedHeads(peer, heads) +} + +// addSignedHeads adds signed heads to the tracker after a successful verification +// (it is assumed that the local update chain has been synced with the given peer) +func (s *CommitteeTracker) addSignedHeads(peer ctServer, heads []SignedHead) error { + var ( + oldHeadHash common.Hash + err error + ) + if len(s.acceptedList.list) > 0 { + oldHeadHash = s.acceptedList.list[0].hash + } + for _, head := range heads { + signerCount := head.SignerCount() + if signerCount < s.signerThreshold { + continue + } + sigOk, age := s.verifySignature(head) + if age < 0 { + log.Warn("Future signed head received", "age", age) + } + if age > time.Minute*2 { + log.Warn("Old signed head received", "age", age) + } + if !sigOk { + err = errors.New("invalid header signature") + continue + } + hash := head.Header.Hash() + if h := s.acceptedList.getHead(hash); h != nil { + h.receivedFrom[peer] = struct{}{} + if signerCount > h.signerCount { + h.head = head + h.signerCount = signerCount + h.sentTo = nil + s.acceptedList.updateHead(h) + } + } else { + h := &headInfo{ + head: head, + hash: hash, + sentTo: make(map[ctClient]struct{}), + receivedFrom: map[ctServer]struct{}{peer: struct{}{}}, + } + s.acceptedList.updateHead(h) + } + } + if len(s.acceptedList.list) > 0 && oldHeadHash != s.acceptedList.list[0].hash { + head := s.acceptedList.list[0].head.Header + for _, subFn := range s.headSubs { + subFn(head) + } + } + return err +} + +// verifySignature returns true if the given signed head has a valid signature +// according to the local committee chain. The caller should ensure that the +// committees advertised by the same source where the signed head came from are +// synced before verifying the signature. +// The age of the header is also returned (the time elapsed since the beginning +// of the given slot, according to the local system clock). If enforceTime is +// true then negative age (future) headers are rejected. +func (s *CommitteeTracker) verifySignature(head SignedHead) (bool, time.Duration) { + var ( + slotTime = int64(time.Second) * int64(s.genesisTime+head.Header.Slot*12) + age = time.Duration(s.unixNano() - slotTime) + ) + if s.enforceTime && age < 0 { + return false, age + } + committee := s.getSyncCommittee(types.PeriodOfSlot(head.SignatureSlot)) // signed with the next slot's committee + if committee == nil { + return false, age + } + return s.sigVerifier.verifySignature(committee, s.forks.signingRoot(head.Header), head.BitMask, head.Signature), age +} + +// SubscribeToNewHeads subscribes the given callback function to head beacon headers with a verified valid sync committee signature. +func (s *CommitteeTracker) SubscribeToNewHeads(subFn func(types.Header)) { + s.lock.Lock() + defer s.lock.Unlock() + + s.headSubs = append(s.headSubs, subFn) +} + +// headInfo contains the best signed header and the state of propagation belonging +// to a given block root +type headInfo struct { + head SignedHead + hash common.Hash + signerCount int + receivedFrom map[ctServer]struct{} + sentTo map[ctClient]struct{} +} + +// headList is a list of best known heads for the few most recent slots +// Note: usually only the highest slot is interesting but in case of low signer +// participation or slow propagation/aggregation of signatures it might make +// sense to keep track of multiple heads as different clients might have +// different tradeoff preferences between delay and security. 
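+//
+// For example, with limit == 4 the list retains the best known heads for the
+// four highest slots seen so far (sketch; h is an already verified SignedHead):
+//
+//	hl := newHeadList(4)
+//	hl.updateHead(&headInfo{head: h, hash: h.Header.Hash(), signerCount: h.SignerCount()})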
+type headList struct {
+ list []*headInfo // highest slot first
+ limit int
+}
+
+// newHeadList creates a new headList
+func newHeadList(limit int) headList {
+ return headList{limit: limit}
+}
+
+// getHead returns the headInfo belonging to the given block root
+func (h *headList) getHead(hash common.Hash) *headInfo {
+ for _, headInfo := range h.list {
+ if headInfo.hash == hash {
+ return headInfo
+ }
+ }
+ return nil
+}
+
+// updateHead adds or updates the given headInfo in the list, keeping it sorted
+// by slot (highest first) and enforcing the length limit
+func (h *headList) updateHead(head *headInfo) {
+ for i, hh := range h.list {
+ if hh.head.Header.Slot <= head.head.Header.Slot {
+ if hh.head.Header.Slot < head.head.Header.Slot {
+ // insert at position i; shift the tail down by one entry, dropping
+ // the last one if the list has already reached its length limit
+ if len(h.list) < h.limit {
+ h.list = append(h.list, nil)
+ }
+ copy(h.list[i+1:len(h.list)], h.list[i:len(h.list)-1])
+ }
+ h.list[i] = head
+ return
+ }
+ }
+ if len(h.list) < h.limit {
+ h.list = append(h.list, head)
+ }
+}
diff --git a/beacon/light/types/header.go b/beacon/light/types/header.go
new file mode 100644
index 000000000000..92bc88f893d9
--- /dev/null
+++ b/beacon/light/types/header.go
@@ -0,0 +1,103 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+ +package types + +import ( + "encoding/binary" + + "github.com/ethereum/go-ethereum/beacon/merkle" + "github.com/ethereum/go-ethereum/beacon/params" + "github.com/ethereum/go-ethereum/common" +) + +// Header defines a beacon header +// +// See data structure definition here: +// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#beaconblockheader +type Header struct { + Slot uint64 + ProposerIndex uint64 + ParentRoot common.Hash + StateRoot common.Hash + BodyRoot common.Hash +} + +// Hash calculates the block root of the header +func (bh *Header) Hash() common.Hash { + var values [8]merkle.Value // values corresponding to indices 8 to 15 of the beacon header tree + binary.LittleEndian.PutUint64(values[params.BhiSlot-8][:8], bh.Slot) + binary.LittleEndian.PutUint64(values[params.BhiProposerIndex-8][:8], bh.ProposerIndex) + values[params.BhiParentRoot-8] = merkle.Value(bh.ParentRoot) + values[params.BhiStateRoot-8] = merkle.Value(bh.StateRoot) + values[params.BhiBodyRoot-8] = merkle.Value(bh.BodyRoot) + return merkle.MultiProof{Format: merkle.NewRangeFormat(8, 15, nil), Values: values[:]}.RootHash() +} + +// Epoch returns the epoch the header belongs to +func (bh *Header) Epoch() uint64 { + return bh.Slot >> params.Log2EpochLength +} + +// SyncPeriod returns the sync period the header belongs to +func (bh *Header) SyncPeriod() uint64 { + return bh.Slot >> params.Log2SyncPeriodLength +} + +// PeriodStart returns the first slot of the given period +func PeriodStart(period uint64) uint64 { + return period << params.Log2SyncPeriodLength +} + +// PeriodOfSlot returns the sync period that the given slot belongs to +func PeriodOfSlot(slot uint64) uint64 { + return slot >> params.Log2SyncPeriodLength +} + +// HeaderWithoutState stores beacon header fields except the state root which can +// be reconstructed from a partial beacon state proof stored alongside the header +type HeaderWithoutState struct { + Slot uint64 + ProposerIndex uint64 + ParentRoot, BodyRoot common.Hash +} + +// Hash calculates the block root of the header +func (bh *HeaderWithoutState) Hash(stateRoot common.Hash) common.Hash { + return bh.Proof(stateRoot).RootHash() +} + +// Proof returns a MultiProof of the header +func (bh *HeaderWithoutState) Proof(stateRoot common.Hash) merkle.MultiProof { + var values [8]merkle.Value // values corresponding to indices 8 to 15 of the beacon header tree + binary.LittleEndian.PutUint64(values[params.BhiSlot-8][:8], bh.Slot) + binary.LittleEndian.PutUint64(values[params.BhiProposerIndex-8][:8], bh.ProposerIndex) + values[params.BhiParentRoot-8] = merkle.Value(bh.ParentRoot) + values[params.BhiStateRoot-8] = merkle.Value(stateRoot) + values[params.BhiBodyRoot-8] = merkle.Value(bh.BodyRoot) + return merkle.MultiProof{Format: merkle.NewRangeFormat(8, 15, nil), Values: values[:]} +} + +// FullHeader reconstructs a full Header from a HeaderWithoutState and a state root +func (bh *HeaderWithoutState) FullHeader(stateRoot common.Hash) Header { + return Header{ + Slot: bh.Slot, + ProposerIndex: bh.ProposerIndex, + ParentRoot: bh.ParentRoot, + StateRoot: stateRoot, + BodyRoot: bh.BodyRoot, + } +} diff --git a/beacon/light/types/protocol.go b/beacon/light/types/protocol.go new file mode 100644 index 000000000000..4b9dd777117a --- /dev/null +++ b/beacon/light/types/protocol.go @@ -0,0 +1,194 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package types
+
+import (
+ "errors"
+ "math/bits"
+
+ "github.com/ethereum/go-ethereum/beacon/merkle"
+ "github.com/ethereum/go-ethereum/beacon/params"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+const MaxUpdateInfoLength = 128 // max number of advertised update scores of most recent periods
+
+// LightClientUpdate is a proof of the next sync committee root based on a header
+// signed by the sync committee of the given period. Optionally the update can
+// prove quasi-finality by the signed header referring to a previous, finalized
+// header from the same period, and the finalized header referring to the next
+// sync committee root.
+//
+// See data structure definition here:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate
+type LightClientUpdate struct {
+ Header Header
+ NextSyncCommitteeRoot common.Hash
+ NextSyncCommitteeBranch merkle.Values
+ FinalizedHeader Header
+ FinalityBranch merkle.Values
+ SyncCommitteeBits []byte
+ SyncCommitteeSignature []byte
+ score UpdateScore // not part of the encoding, calculated after decoding
+ scoreCalculated bool
+}
+
+// Validate verifies the validity of the update
+func (update *LightClientUpdate) Validate() error {
+ if update.hasFinalizedHeader() {
+ if update.FinalizedHeader.SyncPeriod() != update.Header.SyncPeriod() {
+ return errors.New("finalizedHeader is not from the signed header's period") // it has to prove the committee that the update is signed by
+ }
+ if root, ok := merkle.VerifySingleProof(update.FinalityBranch, params.BsiFinalBlock, merkle.Value(update.FinalizedHeader.Hash())); !ok || root != update.Header.StateRoot {
+ return errors.New("invalid FinalizedHeader merkle proof")
+ }
+ }
+ if root, ok := merkle.VerifySingleProof(update.NextSyncCommitteeBranch, params.BsiNextSyncCommittee, merkle.Value(update.NextSyncCommitteeRoot)); !ok || root != update.Header.StateRoot {
+ return errors.New("invalid NextSyncCommittee merkle proof")
+ }
+ return nil
+}
+
+// hasFinalizedHeader returns true if the update has a finalized header referred
+// to by the signed header and referring to the next sync committee.
+// Note that in addition to this, sufficient signer participation is also needed
+// in order to fulfill the quasi-finality condition (see UpdateScore.isFinalized).
+func (l *LightClientUpdate) hasFinalizedHeader() bool {
+ return l.FinalizedHeader.BodyRoot != (common.Hash{}) && l.FinalizedHeader.SyncPeriod() == l.Header.SyncPeriod()
+}
+
+// Score returns the UpdateScore describing the proof strength of the update
+// Note: thread safety can be ensured by always calling Score on a newly received
+// or decoded update before making it potentially available for other threads
+func (l *LightClientUpdate) Score() UpdateScore {
+ if l.scoreCalculated {
+ return l.score
+ }
+ l.score.SignerCount = 0
+ for _, v := range l.SyncCommitteeBits {
+ l.score.SignerCount += uint32(bits.OnesCount8(v))
+ }
+ l.score.SubPeriodIndex = uint32(l.Header.Slot & (params.SyncPeriodLength - 1))
+ l.score.FinalizedHeader = l.hasFinalizedHeader()
+ l.scoreCalculated = true
+ return l.score
+}
+
+// CommitteeRequest represents a request for fetching updates and committees at the given periods
+type CommitteeRequest struct {
+ UpdatePeriods []uint64 // list of periods where LightClientUpdates are requested (not including full sync committee)
+ CommitteePeriods []uint64 // list of periods where sync committees are requested
+}
+
+// IsEmpty returns true if the request does not request anything
+func (req CommitteeRequest) IsEmpty() bool {
+ return req.UpdatePeriods == nil && req.CommitteePeriods == nil
+}
+
+// CommitteeReply is an answer to a CommitteeRequest, contains the updates and
+// committees corresponding to the period numbers in the request in the same order
+type CommitteeReply struct {
+ Updates []LightClientUpdate // list of requested LightClientUpdates
+ Committees [][]byte // list of requested sync committees in serialized form
+}
+
+// UpdateInfo contains scores for an advertised update chain. Note that the most
+// recent updates are always advertised but the earliest ones might be left out
+// because of the length limit.
+type UpdateInfo struct {
+ AfterLastPeriod uint64 // first period not covered by Scores
+ Scores UpdateScores // Scores[i] is the UpdateScore of period AfterLastPeriod-len(Scores)+i
+}
+
+func (u UpdateInfo) IsValid() bool {
+ return uint64(len(u.Scores)) <= u.AfterLastPeriod
+}
+
+func (u UpdateInfo) Range() UpdateRange {
+ l := uint64(len(u.Scores))
+ if l > u.AfterLastPeriod {
+ panic("UpdateInfo: number of scores is greater than AfterLastPeriod")
+ }
+ return UpdateRange{First: u.AfterLastPeriod - l, AfterLast: u.AfterLastPeriod}
+}
+
+func (u UpdateInfo) HasScore(period uint64) bool {
+ return period < u.AfterLastPeriod && period >= u.AfterLastPeriod-uint64(len(u.Scores))
+}
+
+func (u UpdateInfo) Score(period uint64) UpdateScore {
+ return u.Scores[len(u.Scores)-int(u.AfterLastPeriod-period)]
+}
+
+type UpdateRange struct {
+ First, AfterLast uint64
+}
+
+func (a UpdateRange) Shared(b UpdateRange) UpdateRange {
+ if b.First > a.First {
+ a.First = b.First
+ }
+ if b.AfterLast < a.AfterLast {
+ a.AfterLast = b.AfterLast
+ }
+ return a
+}
+
+func (a UpdateRange) IsValid() bool {
+ return a.AfterLast >= a.First
+}
+
+// UpdateScore allows the comparison between updates at the same period in order
+// to find the best update chain that provides the strongest proof of being canonical.
+//
+// UpdateScores have a tightly packed binary encoding format for efficient p2p
+// protocol transmission. Each UpdateScore is encoded in 3 bytes.
+// When interpreted as a 24 bit little-endian unsigned integer:
+// - the lowest 10 bits contain the number of signers in the header signature aggregate
+// - the next 13 bits contain the "sub-period index" which is the signed header's
+// slot modulo params.SyncPeriodLength (which is correlated with the risk of the chain being
+// re-orged before the previous period boundary in case of non-finalized updates)
+// - the highest bit is set when the update is finalized (meaning that the finality
+// header referenced by the signed header is in the same period as the signed
+// header, making reorgs before the period boundary impossible)
+type UpdateScore struct {
+ SignerCount uint32 // number of signers in the header signature aggregate
+ SubPeriodIndex uint32 // signed header's slot modulo params.SyncPeriodLength
+ FinalizedHeader bool // update is considered finalized if it has a finalized header from the same period and at least 2/3 of the committee signed
+}
+
+type UpdateScores []UpdateScore
+
+// isFinalized returns true if the update has a header signed by at least 2/3 of
+// the committee, referring to a finalized header that refers to the next sync
+// committee. This condition is a close approximation of the actual finality
+// condition that can only be verified by full beacon nodes.
+func (u *UpdateScore) isFinalized() bool {
+ return u.FinalizedHeader && u.SignerCount >= params.SyncCommitteeSupermajority
+}
+
+// BetterThan returns true if update u is considered better than w.
+func (u UpdateScore) BetterThan(w UpdateScore) bool {
+ var (
+ uFinalized = u.isFinalized()
+ wFinalized = w.isFinalized()
+ )
+ if uFinalized != wFinalized {
+ return uFinalized
+ }
+ return u.SignerCount > w.SignerCount
+}
diff --git a/beacon/merkle/binary_merkle.go b/beacon/merkle/binary_merkle.go
new file mode 100644
index 000000000000..cf54a3a2e2a4
--- /dev/null
+++ b/beacon/merkle/binary_merkle.go
@@ -0,0 +1,494 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package merkle
+
+import (
+ "math/bits"
+ "reflect"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/minio/sha256-simd"
+)
+
+// Value represents either a 32 byte value or hash node in a binary merkle tree/partial proof
+type (
+ Value [32]byte
+ Values []Value
+)
+
+var ValueT = reflect.TypeOf(Value{})
+
+// UnmarshalJSON parses a merkle value in hex syntax.
+func (m *Value) UnmarshalJSON(input []byte) error {
+ return hexutil.UnmarshalFixedJSON(ValueT, input, m[:])
+}
+
+// VerifySingleProof verifies a Merkle proof branch for a single value in a
+// binary Merkle tree (index is a generalized tree index).
+func VerifySingleProof(proof Values, index uint64, value Value) (common.Hash, bool) {
+ hasher := sha256.New()
+ for _, proofHash := range proof {
+ hasher.Reset()
+ if index&1 == 0 {
+ hasher.Write(value[:])
+ hasher.Write(proofHash[:])
+ } else {
+ hasher.Write(proofHash[:])
+ hasher.Write(value[:])
+ }
+ hasher.Sum(value[:0])
+ index /= 2
+ if index == 0 {
+ return common.Hash{}, false // branch is longer than the depth of index
+ }
+ }
+ if index != 1 {
+ return common.Hash{}, false // branch is shorter than the depth of index
+ }
+ return common.Hash(value), true
+}
+
+// ProofFormat defines the shape of a partial proof and allows traversing a subset of a tree
+type ProofFormat interface {
+ Children() (left, right ProofFormat) // either both or neither should be nil
+}
+
+// ProofReader allows traversing and reading a tree structure or a subset of it.
+// Note: the hash of each traversed node is always requested. If the internal
+// hash is not available then subtrees are always traversed (first left, then right).
+// If internal hash is available then subtrees are only traversed if needed by the writer.
+type ProofReader interface {
+ Children() (left, right ProofReader) // subtrees accessible if not nil
+ ReadNode() (Value, bool) // hash should be available if children are nil (leaf node), optional otherwise (internal node)
+}
+
+// ProofWriter allows collecting data for a partial proof while a subset of a tree is traversed.
+type ProofWriter interface {
+ Children() (left, right ProofWriter) // all non-nil subtrees are traversed
+ WriteNode(Value) // called for every traversed tree node (both leaf and internal)
+}
+
+// TraverseProof traverses a reader and a writer defined on the same tree
+// simultaneously, copies data from the reader to the writer (if writer is not nil)
+// and returns the root hash. At least the shape defined by the writer is traversed;
+// subtrees not required by the writer are only traversed (with writer == nil)
+// if the hash of the internal tree node is not provided by the reader.
+func TraverseProof(reader ProofReader, writer ProofWriter) (common.Hash, bool) { + var ( + wl ProofWriter + wr ProofWriter + ) + if writer != nil { + wl, wr = writer.Children() + } + node, nodeAvailable := reader.ReadNode() + if nodeAvailable && wl == nil { + if writer != nil { + writer.WriteNode(node) + } + return common.Hash(node), true + } + rl, rr := reader.Children() + if rl == nil { + return common.Hash{}, false + } + lhash, ok := TraverseProof(rl, wl) + if !ok { + return common.Hash{}, false + } + rhash, ok := TraverseProof(rr, wr) + if !ok { + return common.Hash{}, false + } + if !nodeAvailable { + hasher := sha256.New() + hasher.Write(lhash[:]) + hasher.Write(rhash[:]) + hasher.Sum(node[:0]) + } + if writer != nil { + writer.WriteNode(node) + } + return common.Hash(node), true +} + +// MultiProof stores a partial Merkle tree proof +type MultiProof struct { + Format ProofFormat + Values Values +} + +// multiProofReader implements ProofReader based on a MultiProof and also allows +// attaching further subtree readers at certain indices +// Note: valuePtr is stored and copied as a reference because child readers read +// from the same value list as the tree is traversed +type multiProofReader struct { + format ProofFormat // corresponding proof format + values Values // proof values + valuePtr *int // next index to be read from values + index uint64 // generalized tree index + subtrees func(uint64) ProofReader // attached subtrees +} + +// children implements ProofReader +func (mpr multiProofReader) Children() (left, right ProofReader) { + lf, rf := mpr.format.Children() + if lf == nil { + if mpr.subtrees != nil { + if subtree := mpr.subtrees(mpr.index); subtree != nil { + return subtree.Children() + } + } + return nil, nil + } + return multiProofReader{format: lf, values: mpr.values, valuePtr: mpr.valuePtr, index: mpr.index * 2, subtrees: mpr.subtrees}, + multiProofReader{format: rf, values: mpr.values, valuePtr: mpr.valuePtr, index: mpr.index*2 + 1, subtrees: mpr.subtrees} +} + +// readNode implements ProofReader +func (mpr multiProofReader) ReadNode() (Value, bool) { + if l, _ := mpr.format.Children(); l == nil && len(mpr.values) > *mpr.valuePtr { + hash := mpr.values[*mpr.valuePtr] + (*mpr.valuePtr)++ + return hash, true + } + return Value{}, false +} + +// Reader creates a multiProofReader for the given proof; if subtrees != nil +// then also attaches subtree readers at indices where the function returns a +// non-nil reader. +// Note that the reader can only be traversed once as the values slice is +// sequentially consumed. +func (mp MultiProof) Reader(subtrees func(uint64) ProofReader) multiProofReader { + return multiProofReader{format: mp.Format, values: mp.Values, valuePtr: new(int), index: 1, subtrees: subtrees} +} + +// Finished returns true if all values have been consumed by the traversal. +// Should be checked after TraverseProof if received from an untrusted source in +// order to prevent DoS attacks by excess proof values. +func (mpr multiProofReader) Finished() bool { + return len(mpr.values) == *mpr.valuePtr +} + +// rootHash returns the root hash of the proven structure. +func (mp MultiProof) RootHash() common.Hash { + reader := mp.Reader(nil) + hash, ok := TraverseProof(reader, nil) + if !ok || !reader.Finished() { + log.Error("MultiProof.rootHash: invalid proof format") + } + return hash +} + +// multiProofWriter implements ProofWriter and creates a MultiProof with the +// previously specified format. 
Also allows attaching further subtree writers at +// certain indices. +// Note: values is stored and copied as a reference because child writers append +// to the same value list as the tree is traversed +type multiProofWriter struct { + format ProofFormat // target proof format + values *Values // target proof value list + index uint64 // generalized tree index + subtrees func(uint64) ProofWriter // attached subtrees +} + +// NewMultiProofWriter creates a new multiproof writer with the specified format. +// If subtrees != nil then further subtree writers are attached at indices where +// the function returns a non-nil writer. +// Note that the specified format should not include these attached subtrees; +// they should be attached at leaf indices of the given format. +// Also note that target can be nil in which case the nodes specified by the format +// are traversed but not stored; subtree writers might still store tree data. +func NewMultiProofWriter(format ProofFormat, target *Values, subtrees func(uint64) ProofWriter) multiProofWriter { + return multiProofWriter{format: format, values: target, index: 1, subtrees: subtrees} +} + +// children implements ProofWriter +func (mpw multiProofWriter) Children() (left, right ProofWriter) { + if mpw.subtrees != nil { + if subtree := mpw.subtrees(mpw.index); subtree != nil { + return subtree.Children() + } + } + lf, rf := mpw.format.Children() + if lf == nil { + return nil, nil + } + return multiProofWriter{format: lf, values: mpw.values, index: mpw.index * 2, subtrees: mpw.subtrees}, + multiProofWriter{format: rf, values: mpw.values, index: mpw.index*2 + 1, subtrees: mpw.subtrees} +} + +// writeNode implements ProofWriter +func (mpw multiProofWriter) WriteNode(node Value) { + if mpw.values != nil { + if lf, _ := mpw.format.Children(); lf == nil { + *mpw.values = append(*mpw.values, node) + } + } + if mpw.subtrees != nil { + if subtree := mpw.subtrees(mpw.index); subtree != nil { + subtree.WriteNode(node) + } + } +} + +// ProofFormatIndexMap creates a generalized tree index -> MultiProof value +// slice index association map based on the given proof format. +func ProofFormatIndexMap(f ProofFormat) map[uint64]int { + var ( + m = make(map[uint64]int) + pos int + ) + addToIndexMap(m, f, &pos, 1) + return m +} + +// addToIndexMap recursively creates index associations for a given proof format subtree. +func addToIndexMap(m map[uint64]int, f ProofFormat, pos *int, index uint64) { + l, r := f.Children() + if l == nil { + m[index] = *pos + (*pos)++ + } else { + addToIndexMap(m, l, pos, index*2) + addToIndexMap(m, r, pos, index*2+1) + } +} + +// ChildIndex returns the generalized tree index of a subtree node in terms of +// the main tree where a is the main tree index of the subtree root and b is the +// subtree index of the node in question. +func ChildIndex(a, b uint64) uint64 { + return (a-1)<<(63-bits.LeadingZeros64(b)) + b +} + +// IndexMapFormat implements ProofFormat based on an index map filled with +// AddLeaf calls. Subtree formats can also be attached at certain indices. +type IndexMapFormat struct { + leaves map[uint64]ProofFormat + index uint64 +} + +// NewIndexMapFormat returns an empty format. +func NewIndexMapFormat() IndexMapFormat { + return IndexMapFormat{leaves: make(map[uint64]ProofFormat), index: 1} +} + +// AddLeaf adds either a single leaf or attaches a subtree at the given tree index. 
+func (f IndexMapFormat) AddLeaf(index uint64, subtree ProofFormat) IndexMapFormat {
+ if subtree != nil {
+ f.leaves[index] = subtree
+ }
+ for index > 1 {
+ index /= 2
+ f.leaves[index] = nil
+ }
+ return f
+}
+
+// children implements ProofFormat
+func (f IndexMapFormat) Children() (left, right ProofFormat) {
+ if st, ok := f.leaves[f.index]; ok {
+ if st != nil {
+ return st.Children()
+ }
+ return IndexMapFormat{leaves: f.leaves, index: f.index * 2}, IndexMapFormat{leaves: f.leaves, index: f.index*2 + 1}
+ }
+ return nil, nil
+}
+
+// rangeFormat defines a proof format with a continuous range of leaf indices.
+// Attaching subtree formats is also possible.
+type rangeFormat struct {
+ begin, end, index uint64 // begin and end should be on the same level
+ subtree func(uint64) ProofFormat
+}
+
+// NewRangeFormat creates a new rangeFormat with leaves in the begin..end range.
+// If subtree != nil then further subtree formats are attached at indices where
+// the function returns a non-nil format.
+func NewRangeFormat(begin, end uint64, subtree func(uint64) ProofFormat) rangeFormat {
+ return rangeFormat{
+ begin: begin,
+ end: end,
+ index: 1,
+ subtree: subtree,
+ }
+}
+
+// children implements ProofFormat
+func (rf rangeFormat) Children() (left, right ProofFormat) {
+ var (
+ lzr = bits.LeadingZeros64(rf.begin)
+ lzi = bits.LeadingZeros64(rf.index)
+ )
+ if lzi < lzr {
+ return nil, nil
+ }
+ if lzi == lzr {
+ if rf.subtree != nil && rf.index >= rf.begin && rf.index <= rf.end {
+ if st := rf.subtree(rf.index); st != nil {
+ return st.Children()
+ }
+ }
+ return nil, nil
+ }
+ var (
+ // i1..i2 are the descendants of rf.index at the tree level where begin and end are located
+ i1 = rf.index << (lzi - lzr)
+ i2 = ((rf.index + 1) << (lzi - lzr)) - 1
+ )
+ if i1 <= rf.end && i2 >= rf.begin {
+ // Return child formats if there is an overlap (rf.index has any descendants
+ // in the begin..end range).
+ // Note that if begin..end only touches one of the returned child subtrees,
+ // we still return a rangeFormat for both branches and the other one will
+ // not have any further children (that child of rf.index will be stored
+ // in the proof as a single sibling node).
+ return rangeFormat{begin: rf.begin, end: rf.end, index: rf.index * 2, subtree: rf.subtree},
+ rangeFormat{begin: rf.begin, end: rf.end, index: rf.index*2 + 1, subtree: rf.subtree}
+ }
+ return nil, nil
+}
+
+// MergedFormat implements ProofFormat and realizes the union of the included
+// individual formats.
+type MergedFormat []ProofFormat
+
+// children implements ProofFormat
+func (m MergedFormat) Children() (left, right ProofFormat) {
+ var (
+ l = make(MergedFormat, 0, len(m))
+ r = make(MergedFormat, 0, len(m))
+ )
+ for _, f := range m {
+ if left, right := f.Children(); left != nil {
+ l = append(l, left)
+ r = append(r, right)
+ }
+ }
+ if len(l) > 0 {
+ return l, r
+ }
+ return nil, nil
+}
+
+// MergedReader implements ProofReader and realizes the union of the included
+// individual readers.
+// Note that MergedReader does not check whether the individual readers actually
+// belong to the same structure (have the same root).
+// Also note that fully consuming the underlying sequential readers is not guaranteed
+// (multiProofReader.Finished will not necessarily return true, so if necessary
+// the well-formedness of individual multiproofs should be checked separately).
+type MergedReader []ProofReader
+
+// children implements ProofReader
+func (m MergedReader) Children() (left, right ProofReader) {
+ var (
+ l = make(MergedReader, 0, len(m))
+ r = make(MergedReader, 0, len(m))
+ )
+ for _, reader := range m {
+ if left, right := reader.Children(); left != nil {
+ l = append(l, left)
+ r = append(r, right)
+ }
+ }
+ if len(l) > 0 {
+ return l, r
+ }
+ return nil, nil
+}
+
+// readNode implements ProofReader
+func (m MergedReader) ReadNode() (value Value, ok bool) {
+ var hasChildren bool
+ for _, reader := range m {
+ if left, _ := reader.Children(); left != nil {
+ // ensure that all readers are fully traversed
+ hasChildren = true
+ }
+ if v, o := reader.ReadNode(); o {
+ value, ok = v, o
+ }
+ }
+ if hasChildren {
+ return Value{}, false
+ }
+ return
+}
+
+// MergedWriter implements ProofWriter and realizes the union of the included
+// individual writers. The shape traversed by MergedWriter is the union of the
+// shapes traversed by individual writers.
+type MergedWriter []ProofWriter
+
+// children implements ProofWriter
+func (m MergedWriter) Children() (left, right ProofWriter) {
+ var (
+ l = make(MergedWriter, 0, len(m))
+ r = make(MergedWriter, 0, len(m))
+ )
+ for _, w := range m {
+ if left, right := w.Children(); left != nil {
+ l = append(l, left)
+ r = append(r, right)
+ }
+ }
+ if len(l) > 0 {
+ return l, r
+ }
+ return nil, nil
+}
+
+// writeNode implements ProofWriter
+func (m MergedWriter) WriteNode(value Value) {
+ for _, w := range m {
+ w.WriteNode(value)
+ }
+}
+
+// callbackWriter implements ProofWriter with a simple callback mechanism
+type callbackWriter struct {
+ format ProofFormat
+ index uint64
+ storeCallback func(uint64, Value)
+}
+
+// NewCallbackWriter creates a callbackWriter that traverses the tree subset
+// defined by the given proof format and calls storeCallback for each traversed node
+func NewCallbackWriter(format ProofFormat, storeCallback func(uint64, Value)) callbackWriter {
+ return callbackWriter{format: format, index: 1, storeCallback: storeCallback}
+}
+
+// children implements ProofWriter
+func (cw callbackWriter) Children() (left, right ProofWriter) {
+ lf, rf := cw.format.Children()
+ if lf == nil {
+ return nil, nil
+ }
+ return callbackWriter{format: lf, index: cw.index * 2, storeCallback: cw.storeCallback},
+ callbackWriter{format: rf, index: cw.index*2 + 1, storeCallback: cw.storeCallback}
+}
+
+// writeNode implements ProofWriter
+func (cw callbackWriter) WriteNode(node Value) {
+ cw.storeCallback(cw.index, node)
+}
diff --git a/beacon/merkle/binary_merkle_test.go b/beacon/merkle/binary_merkle_test.go
new file mode 100644
index 000000000000..9dab56ba325b
--- /dev/null
+++ b/beacon/merkle/binary_merkle_test.go
@@ -0,0 +1,259 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
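+// The tests below generate random proof formats and check that index map,
+// range and merged formats describe the same tree subsets, and that single
+// proofs and multiproofs round-trip through TraverseProof against the fixed
+// test tree (testMerkleTree) defined at the end of this file.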
+ +package merkle + +import ( + "math/bits" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/minio/sha256-simd" +) + +func TestMergedFormat(t *testing.T) { + for count := 0; count < 1000; count++ { + single := NewIndexMapFormat() + merged := MergedFormat{} + for { + if rand.Intn(5) == 0 { + break + } + f := NewIndexMapFormat() + for { + if rand.Intn(5) == 0 { + break + } + index := uint64(rand.Intn(255) + 1) + single.AddLeaf(index, nil) + f.AddLeaf(index, nil) + } + merged = append(merged, f) + } + if !formatsEqual(single, merged) { + t.Errorf("Single and merged formats do not match") + } + } +} + +func TestIndexMapSubtrees(t *testing.T) { + for count := 0; count < 1000; count++ { + single := NewIndexMapFormat() + withSubtrees := NewIndexMapFormat() + // put single leaves and subtrees randomly into a single row in order to avoid collisions + for index := uint64(256); index < 512; index++ { + switch rand.Intn(100) { + case 0: // put single leaf at index + single.AddLeaf(index, nil) + withSubtrees.AddLeaf(index, nil) + case 1: // put subtree at index + subtree := NewIndexMapFormat() + for { + subindex := uint64(rand.Intn(255) + 1) + single.AddLeaf(ChildIndex(index, subindex), nil) + subtree.AddLeaf(subindex, nil) + if rand.Intn(5) == 0 { // exit here in order to avoid empty subtrees + break + } + } + withSubtrees.AddLeaf(index, subtree) + } + } + if !formatsEqual(single, withSubtrees) { + t.Errorf("Single and subtree formats do not match") + } + } +} + +func TestRangeFormat(t *testing.T) { + for count := 0; count < 1000; count++ { + single := NewIndexMapFormat() + begin := uint64(rand.Intn(255) + 1) + nextLevel := uint64(1) + for nextLevel <= begin { + nextLevel += nextLevel + } + end := begin + uint64(rand.Intn(int(nextLevel-begin))) + for i := begin; i <= end; i++ { + single.AddLeaf(i, nil) + } + + var subFn func(index uint64) ProofFormat + if rand.Intn(2) == 0 { + subroot := begin + uint64(rand.Intn(int(end+1-begin))) + subBegin := uint64(rand.Intn(255) + 1) + nextLevel = uint64(1) + for nextLevel <= subBegin { + nextLevel += nextLevel + } + subEnd := subBegin + uint64(rand.Intn(int(nextLevel-subBegin))) + for i := subBegin; i <= subEnd; i++ { + single.AddLeaf(ChildIndex(subroot, i), nil) + } + subtree := NewRangeFormat(subBegin, subEnd, nil) + subFn = func(index uint64) ProofFormat { + if index == subroot { + return subtree + } + return nil + } + } + + rangeFormat := NewRangeFormat(begin, end, subFn) + if !formatsEqual(single, rangeFormat) { + t.Errorf("Single and range formats do not match") + } + } +} + +func TestSingleProof(t *testing.T) { + for index := uint64(1); index < 256; index++ { + proof := make(Values, 63-bits.LeadingZeros64(index)) + writer := NewCallbackWriter(NewIndexMapFormat().AddLeaf(index, nil), func(i uint64, v Value) { + shift := bits.LeadingZeros64(i) - bits.LeadingZeros64(index) + if i^(index>>shift) == 1 { + proof[shift] = v + } + }) + testTraverseProof(t, testProofReader, writer, true) + root, ok := VerifySingleProof(proof, index, testMerkleTree[index]) + if root != common.Hash(testMerkleTree[1]) { + t.Errorf("VerifySingleProof root hash mismatch (index = %d)", index) + } + if !ok { + t.Errorf("VerifySingleProof length invalid (index = %d)", index) + } + } +} + +func TestMultiProof(t *testing.T) { + for count := 0; count < 300; count++ { + failIndex := uint64(128 + rand.Intn(128)) + indexList := make([]uint64, 10) + for i := range indexList { + for { + indexList[i] = uint64(128 + 
rand.Intn(128)) + if indexList[i]^failIndex > 1 { + // failIndex should not be available in proofs so it should not be equal to or sibling of a stored index + break + } + } + } + + readers := make([]ProofReader, len(indexList)) + for i, index := range indexList { + var mp MultiProof + mp.Format = NewIndexMapFormat().AddLeaf(index, nil) + writer := NewMultiProofWriter(mp.Format, &mp.Values, nil) + testTraverseProof(t, testProofReader, writer, true) + readers[i] = mp.Reader(nil) + } + + // create a single multiproof from the merged reader using a subset of indices + var mp MultiProof + format := NewIndexMapFormat() + mpCount := rand.Intn(11) // add a subset of indices to the created multiproof format + for i := 0; i < mpCount; i++ { + format.AddLeaf(indexList[i], nil) + } + mp.Format = format + expSuccess := rand.Intn(2) == 0 + if !expSuccess { + // add an index that should not be available in the merged reader, expect the traversal to fail + format.AddLeaf(failIndex, nil) + } + testTraverseProof(t, MergedReader(readers), NewMultiProofWriter(format, &mp.Values, nil), expSuccess) + + if expSuccess { + mpwCount := rand.Intn(mpCount + 1) // create writers for a subset of the previously selected indices (available in mp) + mps := make([]MultiProof, mpwCount) + writers := make([]ProofWriter, mpwCount) + for i := range mps { + mps[i].Format = NewIndexMapFormat().AddLeaf(indexList[i], nil) + writers[i] = NewMultiProofWriter(mps[i].Format, &mps[i].Values, nil) + } + reader := mp.Reader(nil) + testTraverseProof(t, reader, MergedWriter(writers), true) + if !reader.Finished() { + t.Errorf("MultiProofReader not finished") + } + // test individual single-value multiproofs + for i, mp := range mps { + if valueIndex, ok := ProofFormatIndexMap(mp.Format)[indexList[i]]; !ok || mp.Values[valueIndex] != testMerkleTree[indexList[i]] { + t.Errorf("Could not find tree index %d in single-value multiproof", indexList[i]) + } + } + } + } +} + +func testTraverseProof(t *testing.T, reader ProofReader, writer ProofWriter, expSuccess bool) { + root, ok := TraverseProof(reader, writer) + if expSuccess { + if root != common.Hash(testMerkleTree[1]) { + t.Errorf("TraverseProof root hash mismatch") + } + if !ok { + t.Errorf("TraverseProof insufficient reader data") + } + } else if ok { + t.Errorf("TraverseProof succeeded (expected to fail)") + } +} + +func formatsEqual(f1, f2 ProofFormat) bool { + if f1 == nil && f2 == nil { + return true + } + if f1 == nil || f2 == nil { + return false + } + c1l, c1r := f1.Children() + c2l, c2r := f2.Children() + return formatsEqual(c1l, c2l) && formatsEqual(c1r, c2r) +} + +type testReader byte + +var testProofReader = testReader(1) + +func (r testReader) Children() (left, right ProofReader) { + if r >= 128 { + return nil, nil + } + return r * 2, r*2 + 1 +} + +func (r testReader) ReadNode() (Value, bool) { + return testMerkleTree[r], true +} + +var testMerkleTree [256]Value + +func init() { + hasher := sha256.New() + for i := byte(255); i >= 1; i-- { + if i >= 128 { + testMerkleTree[i][0] = i + } else { + hasher.Reset() + hasher.Write(testMerkleTree[i*2][:]) + hasher.Write(testMerkleTree[i*2+1][:]) + hasher.Sum(testMerkleTree[i][:0]) + } + } +} diff --git a/beacon/params/constants.go b/beacon/params/constants.go new file mode 100644 index 000000000000..3546b0fd90eb --- /dev/null +++ b/beacon/params/constants.go @@ -0,0 +1,29 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package params + +const ( + SyncCommitteeSize = 512 + SyncCommitteeBitmaskSize = SyncCommitteeSize / 8 + SyncCommitteeSupermajority = (SyncCommitteeSize*2 + 2) / 3 + BlsSignatureSize = 96 + BlsPubkeySize = 48 + SyncPeriodLength = 8192 + Log2SyncPeriodLength = 13 + EpochLength = 32 + Log2EpochLength = 5 +) diff --git a/beacon/params/tree_indices.go b/beacon/params/tree_indices.go new file mode 100644 index 000000000000..9ca906e525ab --- /dev/null +++ b/beacon/params/tree_indices.go @@ -0,0 +1,46 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package params + +import ( + "github.com/ethereum/go-ethereum/beacon/merkle" +) + +const ( + // beacon header fields + BhiSlot = 8 + BhiProposerIndex = 9 + BhiParentRoot = 10 + BhiStateRoot = 11 + BhiBodyRoot = 12 + + // beacon state fields + BsiGenesisTime = 32 + BsiGenesisValidators = 33 + BsiForkVersion = 141 + BsiLatestHeader = 36 + BsiBlockRoots = 37 + BsiStateRoots = 38 + BsiHistoricRoots = 39 + BsiFinalBlock = 105 + BsiSyncCommittee = 54 + BsiNextSyncCommittee = 55 + BsiExecPayload = 56 + BsiExecHead = 908 +) + +var BsiFinalExecHash = merkle.ChildIndex(merkle.ChildIndex(BsiFinalBlock, BhiStateRoot), BsiExecHead) diff --git a/cmd/blsync/config.go b/cmd/blsync/config.go new file mode 100644 index 000000000000..d754a2d19a65 --- /dev/null +++ b/cmd/blsync/config.go @@ -0,0 +1,177 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see .
+
+package main
+
+import (
+ "context"
+
+ "github.com/ethereum/go-ethereum/beacon/light/sync"
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/urfave/cli/v2"
+)
+
+var (
+ MainnetConfig = sync.ChainConfig{
+ GenesisData: sync.GenesisData{
+ GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
+ GenesisTime: 1606824023,
+ },
+ Forks: sync.Forks{
+ sync.Fork{
+ Epoch: 0,
+ Name: "GENESIS",
+ Version: []byte{0, 0, 0, 0},
+ },
+ sync.Fork{
+ Epoch: 74240,
+ Name: "ALTAIR",
+ Version: []byte{1, 0, 0, 0},
+ },
+ sync.Fork{
+ Epoch: 144896,
+ Name: "BELLATRIX",
+ Version: []byte{2, 0, 0, 0},
+ },
+ },
+ Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
+ }
+
+ SepoliaConfig = sync.ChainConfig{
+ GenesisData: sync.GenesisData{
+ GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
+ GenesisTime: 1655733600,
+ },
+ Forks: sync.Forks{
+ sync.Fork{
+ Epoch: 0,
+ Name: "GENESIS",
+ Version: []byte{144, 0, 0, 105},
+ },
+ sync.Fork{
+ Epoch: 50,
+ Name: "ALTAIR",
+ Version: []byte{144, 0, 0, 112},
+ },
+ sync.Fork{
+ Epoch: 100,
+ Name: "BELLATRIX",
+ Version: []byte{144, 0, 0, 113},
+ },
+ },
+ Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
+ }
+
+ GoerliConfig = sync.ChainConfig{
+ GenesisData: sync.GenesisData{
+ GenesisValidatorsRoot: common.HexToHash("0x043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb"),
+ GenesisTime: 1614588812,
+ },
+ Forks: sync.Forks{
+ sync.Fork{
+ Epoch: 0,
+ Name: "GENESIS",
+ Version: []byte{0, 0, 16, 32},
+ },
+ sync.Fork{
+ Epoch: 36660,
+ Name: "ALTAIR",
+ Version: []byte{1, 0, 16, 32},
+ },
+ sync.Fork{
+ Epoch: 112260,
+ Name: "BELLATRIX",
+ Version: []byte{2, 0, 16, 32},
+ },
+ },
+ Checkpoint: common.HexToHash("0x53a0f4f0a378e2c4ae0a9ee97407eb69d0d737d8d8cd0a5fb1093f42f7b81c49"),
+ }
+)
+
+func makeChainConfig(ctx *cli.Context) sync.ChainConfig {
+ utils.CheckExclusive(ctx, utils.MainnetFlag, utils.GoerliFlag, utils.SepoliaFlag)
+ customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name) || ctx.IsSet(utils.BeaconGenesisRootFlag.Name) || ctx.IsSet(utils.BeaconGenesisTimeFlag.Name)
+ var config sync.ChainConfig
+ switch {
+ case ctx.Bool(utils.MainnetFlag.Name):
+ config = MainnetConfig
+ case ctx.Bool(utils.SepoliaFlag.Name):
+ config = SepoliaConfig
+ case ctx.Bool(utils.GoerliFlag.Name):
+ config = GoerliConfig
+ default:
+ if !customConfig {
+ config = MainnetConfig
+ }
+ }
+ if customConfig && config.Forks != nil {
+ utils.Fatalf("Cannot use custom beacon chain config flags in combination with pre-defined network config")
+ }
+ if ctx.IsSet(utils.BeaconConfigFlag.Name) {
+ forks, err := sync.LoadForks(ctx.String(utils.BeaconConfigFlag.Name))
+ if err != nil {
+ utils.Fatalf("Could not load beacon chain config file %q: %v", ctx.String(utils.BeaconConfigFlag.Name), err)
+ }
+ config.Forks = forks
+ }
+ if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
+ if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
+ copy(config.GenesisValidatorsRoot[:len(c)], c)
+ } else {
+ utils.Fatalf("Invalid beacon.genesis.gvroot hex string %q: %v", ctx.String(utils.BeaconGenesisRootFlag.Name), err)
+ }
+ }
+ if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
+ config.GenesisTime = ctx.Uint64(utils.BeaconGenesisTimeFlag.Name)
+ }
+ if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
+ if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
+ copy(config.Checkpoint[:len(c)], c)
+ } else {
+ utils.Fatalf("Invalid beacon.checkpoint hex string %q: %v", ctx.String(utils.BeaconCheckpointFlag.Name), err)
+ }
+ }
+ return config
+}
+
+func makeRPCClient(ctx *cli.Context) *rpc.Client {
+ if !ctx.IsSet(utils.BlsyncApiFlag.Name) {
+ log.Warn("No engine API target specified, performing a dry run")
+ return nil
+ }
+ if !ctx.IsSet(utils.BlsyncJWTSecretFlag.Name) {
+ utils.Fatalf("JWT secret parameter missing") //TODO use default if datadir is specified
+ }
+
+ engineApiUrl, jwtFileName := ctx.String(utils.BlsyncApiFlag.Name), ctx.String(utils.BlsyncJWTSecretFlag.Name)
+ var jwtSecret [32]byte
+ if jwt, err := node.ObtainJWTSecret(jwtFileName); err == nil {
+ copy(jwtSecret[:], jwt)
+ } else {
+ utils.Fatalf("Error loading or generating JWT secret: %v", err)
+ }
+ auth := node.NewJWTAuth(jwtSecret)
+ cl, err := rpc.DialOptions(context.Background(), engineApiUrl, rpc.WithHTTPAuth(auth))
+ if err != nil {
+ utils.Fatalf("Could not create RPC client: %v", err)
+ }
+ return cl
+}
diff --git a/cmd/blsync/main.go b/cmd/blsync/main.go
new file mode 100644
index 000000000000..1d8b6a390edb
--- /dev/null
+++ b/cmd/blsync/main.go
@@ -0,0 +1,246 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
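+// blsync is a standalone beacon chain light client tool: it syncs sync
+// committee updates from a beacon node REST API, follows the latest
+// committee-signed heads and either drives an execution layer client through
+// the engine API, performs a dry run, or exercises the state proof API in
+// test mode.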
+ +package main + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/ethereum/go-ethereum/beacon/engine" + "github.com/ethereum/go-ethereum/beacon/light/api" + "github.com/ethereum/go-ethereum/beacon/light/sync" + "github.com/ethereum/go-ethereum/beacon/light/types" + "github.com/ethereum/go-ethereum/beacon/merkle" + "github.com/ethereum/go-ethereum/beacon/params" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/common/mclock" + ctypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/ethereum/go-ethereum/internal/flags" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/urfave/cli/v2" +) + +func main() { + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + app := flags.NewApp("beacon light syncer tool") + app.Flags = []cli.Flag{ + utils.BeaconApiFlag, + utils.BeaconApiHeaderFlag, + utils.BeaconThresholdFlag, + utils.BeaconNoFilterFlag, + utils.BeaconConfigFlag, + utils.BeaconGenesisRootFlag, + utils.BeaconGenesisTimeFlag, + utils.BeaconCheckpointFlag, + //TODO datadir for optional permanent database + utils.MainnetFlag, + utils.SepoliaFlag, + utils.GoerliFlag, + utils.BlsyncApiFlag, + utils.BlsyncJWTSecretFlag, + utils.BlsyncTestFlag, + utils.BeaconApiStateProofFlag, + } + app.Action = blsync + + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +var ( + stateProofFormat merkle.ProofFormat // requested multiproof format + execBlockIndex int // index of execution block root in proof.Values where proof.Format == stateProofFormat + finalizedBlockIndex int // index of finalized block root in proof.Values where proof.Format == stateProofFormat +) + +func blsync(ctx *cli.Context) error { + if !ctx.IsSet(utils.BeaconApiFlag.Name) { + utils.Fatalf("Beacon node light client API URL not specified") + } + stateProofFormat = merkle.NewIndexMapFormat().AddLeaf(params.BsiExecHead, nil).AddLeaf(params.BsiFinalBlock, nil) + var ( + stateIndexMap = merkle.ProofFormatIndexMap(stateProofFormat) + chainConfig = makeChainConfig(ctx) + customHeader = make(map[string]string) + ) + execBlockIndex = stateIndexMap[params.BsiExecHead] + finalizedBlockIndex = stateIndexMap[params.BsiFinalBlock] + + for _, s := range utils.SplitAndTrim(ctx.String(utils.BeaconApiHeaderFlag.Name)) { + kv := strings.Split(s, ":") + if len(kv) != 2 { + utils.Fatalf("Invalid custom API header entry: %s", s) + } + customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1]) + } + + var ( + stateProofVersion = ctx.Int(utils.BeaconApiStateProofFlag.Name) + beaconApi = api.NewBeaconLightApi(ctx.String(utils.BeaconApiFlag.Name), customHeader, stateProofVersion) + committeeSyncer = api.NewCommitteeSyncer(beaconApi, chainConfig.GenesisData) + db = memorydb.New() + syncCommitteeCheckpoint = sync.NewWeakSubjectivityCheckpoint(db, committeeSyncer, chainConfig.Checkpoint, nil) + ) + if syncCommitteeCheckpoint == nil { + utils.Fatalf("No beacon chain checkpoint") + } + 
syncCommitteeTracker := sync.NewCommitteeTracker(db, chainConfig.Forks, syncCommitteeCheckpoint, ctx.Int(utils.BeaconThresholdFlag.Name), !ctx.Bool(utils.BeaconNoFilterFlag.Name), sync.BLSVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() }) + if ctx.IsSet(utils.BlsyncTestFlag.Name) { + if ctx.IsSet(utils.BlsyncApiFlag.Name) || ctx.IsSet(utils.BlsyncJWTSecretFlag.Name) { + utils.Fatalf("Target engine API/JWT secret specified in test mode") + } + if !ctx.IsSet(utils.BeaconApiStateProofFlag.Name) { + stateProofVersion = 1 + } + if stateProofVersion == 0 { + utils.Fatalf("Cannot run test mode without state proof API enabled") + } + testSyncer := newTestSyncer(beaconApi, stateProofVersion) + syncCommitteeTracker.SubscribeToNewHeads(testSyncer.newSignedHead) + } else { + syncer := &execSyncer{ + api: beaconApi, + client: makeRPCClient(ctx), + execRootCache: lru.NewCache[common.Hash, common.Hash](1000), + useStateProofs: stateProofVersion != 0, + } + syncCommitteeTracker.SubscribeToNewHeads(syncer.newHead) + } + committeeSyncer.Start(syncCommitteeTracker) + syncCommitteeCheckpoint.TriggerFetch() + <-ctx.Done() + return nil +} + +func callNewPayloadV1(client *rpc.Client, block *ctypes.Block) (string, error) { + var resp engine.PayloadStatusV1 + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + err := client.CallContext(ctx, &resp, "engine_newPayloadV1", *engine.BlockToExecutableData(block, nil).ExecutionPayload) + cancel() + return resp.Status, err +} + +func callForkchoiceUpdatedV1(client *rpc.Client, headHash, finalizedHash common.Hash) (string, error) { + var resp engine.ForkChoiceResponse + update := engine.ForkchoiceStateV1{ + HeadBlockHash: headHash, + SafeBlockHash: finalizedHash, + FinalizedBlockHash: finalizedHash, + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + err := client.CallContext(ctx, &resp, "engine_forkchoiceUpdatedV1", update, nil) + cancel() + return resp.PayloadStatus.Status, err +} + +type execSyncer struct { + api *api.BeaconLightApi + sub *api.StateProofSub + useStateProofs bool + client *rpc.Client + execRootCache *lru.Cache[common.Hash, common.Hash] // beacon block root -> execution block root +} + +var statePaths = []string{ + "[\"finalizedCheckpoint\",\"root\"]", + "[\"latestExecutionPayloadHeader\",\"blockHash\"]", +} + +// newHead fetches state proofs to determine the execution block root and calls +// the engine API if specified +func (e *execSyncer) newHead(head types.Header) { + log.Info("Received new beacon head", "slot", head.Slot, "blockRoot", head.Hash()) + block, err := e.api.GetExecutionPayload(head) + if err != nil { + log.Error("Error fetching execution payload from beacon API", "error", err) + return + } + blockRoot := block.Hash() + var finalizedExecRoot common.Hash + if e.useStateProofs { + if e.sub == nil { + if sub, err := e.api.SubscribeStateProof(stateProofFormat, statePaths, 0, 1); err == nil { + log.Info("Successfully created beacon state subscription") + e.sub = sub + } else { + log.Error("Failed to create beacon state subscription", "error", err) + return + } + } + proof, err := e.sub.Get(head.StateRoot) + if err != nil { + log.Error("Error fetching state proof from beacon API", "error", err) + return + } + var ( + execBlockRoot = common.Hash(proof.Values[execBlockIndex]) + finalizedBeaconRoot = common.Hash(proof.Values[finalizedBlockIndex]) + beaconRoot = head.Hash() + ) + e.execRootCache.Add(beaconRoot, execBlockRoot) + if blockRoot != execBlockRoot { + 
log.Error("Execution payload block hash does not match value in beacon state", "expected", execBlockRoot, "got", block.Hash()) + return + } + if _, ok := e.execRootCache.Get(head.ParentRoot); !ok { + e.fetchExecRoots(head.ParentRoot) + } + finalizedExecRoot, _ = e.execRootCache.Get(finalizedBeaconRoot) + } + if e.client == nil { // dry run, no engine API specified + log.Info("New execution block retrieved", "block number", block.NumberU64(), "block hash", blockRoot, "finalized block hash", finalizedExecRoot) + return + } + if status, err := callNewPayloadV1(e.client, block); err == nil { + log.Info("Successful NewPayload", "block number", block.NumberU64(), "block hash", blockRoot, "status", status) + } else { + log.Error("Failed NewPayload", "block number", block.NumberU64(), "block hash", blockRoot, "error", err) + } + if status, err := callForkchoiceUpdatedV1(e.client, blockRoot, finalizedExecRoot); err == nil { + log.Info("Successful ForkchoiceUpdated", "head", blockRoot, "finalized", finalizedExecRoot, "status", status) + } else { + log.Error("Failed ForkchoiceUpdated", "head", blockRoot, "finalized", finalizedExecRoot, "error", err) + } +} + +func (e *execSyncer) fetchExecRoots(blockRoot common.Hash) { + for maxFetch := 256; maxFetch > 0; maxFetch-- { + header, err := e.api.GetHeader(blockRoot) + if err != nil { + break + } + proof, err := e.sub.Get(header.StateRoot) + if err != nil { + // exit silently because we expect running into an error when parent is unknown + break + } + e.execRootCache.Add(header.Hash(), common.Hash(proof.Values[execBlockIndex])) + if _, ok := e.execRootCache.Get(header.ParentRoot); ok { + break + } + blockRoot = header.ParentRoot + } +} diff --git a/cmd/blsync/test.go b/cmd/blsync/test.go new file mode 100644 index 000000000000..623d33bd5da0 --- /dev/null +++ b/cmd/blsync/test.go @@ -0,0 +1,351 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
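+// This file implements the test mode of blsync: it follows the canonical
+// beacon header chain, creates random state proof subscriptions, requests
+// random head and recent state proofs, and checks the returned state roots
+// against the known canonical headers.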
+ +package main + +import ( + "encoding/hex" + "math/rand" + "strconv" + "sync" + "time" + + "github.com/ethereum/go-ethereum/beacon/light/api" + "github.com/ethereum/go-ethereum/beacon/light/types" + "github.com/ethereum/go-ethereum/beacon/merkle" + "github.com/ethereum/go-ethereum/beacon/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/log" +) + +const ( + maxTestRequestAge = 64 + testStateSubCount = 8 +) + +type testSyncer struct { + lock sync.Mutex + api *api.BeaconLightApi + stateProofVersion int + subs []*api.StateProofSub // only for new api + // maps are nil until initialized by first head + headers map[uint64]types.Header // slot -> canonical header + headSlot, tailSlot uint64 // headers available between tailSlot..headSlot + // waitForSig contains the arrival time of canonical headers (also in headers map) + // which are waiting for a signature (no signed header seen, not timed out yet). + waitForSig map[common.Hash]mclock.AbsTime // blockRoot -> abs time + headLastSeen mclock.AbsTime // last time when the current head was confirmed + headStateCount, recentStateCount int +} + +func newTestSyncer(api *api.BeaconLightApi, stateProofVersion int) *testSyncer { + rand.Seed(time.Now().UnixNano()) + t := &testSyncer{ + api: api, + stateProofVersion: stateProofVersion, + } + go t.updateLoop() + return t +} + +func (t *testSyncer) createSubs() bool { + t.subs = make([]*api.StateProofSub, testStateSubCount) + for i := range t.subs { + subFormat, subPaths := t.makeTestFormat(3, 0.25/float32(i)) + encFormat, _ := api.EncodeCompactProofFormat(subFormat) + hexFormat := "0x" + hex.EncodeToString(encFormat) + if sub, err := t.api.SubscribeStateProof(subFormat, subPaths, 0, 1); err == nil { + log.Info("Successfully created state subscription", "subscription index", i, "format", hexFormat) + t.subs[i] = sub + } else { + log.Error("Could not create state subscription", "format", hexFormat, "error", err) + t.subs = nil + return false + } + } + if err := t.updateHead(true); err != nil { + log.Error("Error retrieving updated head", "error", err) + return false + } + t.pruneTail(t.headSlot) + return true +} + +func (t *testSyncer) pruneTail(newTail uint64) { + if newTail > t.tailSlot { + for slot := t.tailSlot; slot < newTail; slot++ { + if header, ok := t.headers[slot]; ok { + delete(t.waitForSig, header.Hash()) + delete(t.headers, slot) + } + } + t.tailSlot = newTail + } +} + +func (t *testSyncer) updateLoop() { + for { + time.Sleep(time.Millisecond * 100) + t.lock.Lock() + t.checkSignatureTimeouts() + if err := t.updateHead(false); err == nil { + t.testHeadProof() + t.testRecentProof() + t.lock.Unlock() + } else { + t.lock.Unlock() + log.Warn("Could not retrieve head", "error", err) + time.Sleep(time.Second) + } + } +} + +func (t *testSyncer) makeTestFormat(avgIndexCount, stopRatio float32) (merkle.ProofFormat, []string) { + format := merkle.NewIndexMapFormat() + var paths []string + if t.stateProofVersion >= 2 { + for i := uint64(params.BsiGenesisTime); i <= params.BsiExecPayload; i++ { + format.AddLeaf(i, nil) + } + format.AddLeaf(params.BsiForkVersion, nil) + format.AddLeaf(params.BsiFinalBlock, nil) + format.AddLeaf(params.BsiExecHead, nil) + } else { + format.AddLeaf(params.BsiFinalBlock, nil) + format.AddLeaf(params.BsiExecHead, nil) + paths = []string{ + 
"[\"finalizedCheckpoint\",\"root\"]", + "[\"latestExecutionPayloadHeader\",\"blockHash\"]", + } + } + for rand.Float32()*avgIndexCount > 1 { + srIndex := rand.Intn(0x2000) + format.AddLeaf(merkle.ChildIndex(params.BsiStateRoots, uint64(0x2000+srIndex)), nil) + if t.stateProofVersion == 1 { + paths = append(paths, "[\"stateRoots\","+strconv.Itoa(srIndex)+"]") + } + } + for rand.Float32()*avgIndexCount > 1 { + brIndex := rand.Intn(0x2000) + format.AddLeaf(merkle.ChildIndex(params.BsiBlockRoots, uint64(0x2000+brIndex)), nil) + if t.stateProofVersion == 1 { + paths = append(paths, "[\"blockRoots\","+strconv.Itoa(brIndex)+"]") + } + } + for rand.Float32()*avgIndexCount > 1 { + hrIndex := rand.Intn(0x1000000) + format.AddLeaf(merkle.ChildIndex(params.BsiHistoricRoots, merkle.ChildIndex(2, uint64(0x1000000+hrIndex))), nil) + if t.stateProofVersion == 1 { + paths = append(paths, "[\"historicalRoots\","+strconv.Itoa(hrIndex)+"]") + } + } + //TODO sample all lists/vectors? + return randomSubset(format, stopRatio), paths +} + +func randomSubset(format merkle.ProofFormat, stopRatio float32) merkle.ProofFormat { + subset := merkle.NewIndexMapFormat() + addRandomSubset(format, subset, 1, stopRatio) + return subset +} + +func addRandomSubset(format merkle.ProofFormat, subset merkle.IndexMapFormat, index uint64, stopRatio float32) { + left, right := format.Children() + if left == nil || rand.Float32() < stopRatio { + subset.AddLeaf(index, nil) + return + } + addRandomSubset(left, subset, index*2, stopRatio) + addRandomSubset(right, subset, index*2+1, stopRatio) +} + +func (t *testSyncer) testHeadProof() { + format, paths := t.makeTestFormat(3, 0.1) + proof, err := t.api.GetHeadStateProof(format, paths) + if err != nil { + encFormat, _ := api.EncodeCompactProofFormat(format) + log.Error("Error retrieving head state proof", "format", "0x"+hex.EncodeToString(encFormat), "error", err) + return + } + stateRoot := proof.RootHash() + if stateRoot == t.headers[t.headSlot].StateRoot { + t.headStateCount++ + t.headLastSeen = mclock.Now() + return + } + oldHeadSlot := t.headSlot + if err := t.updateHead(true); err != nil { + log.Error("Error retrieving updated head", "error", err) + return + } + if header, ok := t.findHeaderWithStateRoot(stateRoot); !ok || header.Slot < oldHeadSlot { + if ok { + log.Error("Head state proof request returned proof with old state root", "slot", header.Slot, "head slot before request", oldHeadSlot) + } else { + log.Error("Head state proof request returned proof with unknown state root") + } + return + } + t.headStateCount++ +} + +func (t *testSyncer) findHeaderWithBlockRoot(blockRoot common.Hash) (types.Header, bool) { + for _, header := range t.headers { + if header.Hash() == blockRoot { + return header, true + } + } + return types.Header{}, false +} + +func (t *testSyncer) findHeaderWithStateRoot(stateRoot common.Hash) (types.Header, bool) { + for _, header := range t.headers { + if header.StateRoot == stateRoot { + return header, true + } + } + return types.Header{}, false +} + +func (t *testSyncer) testRecentProof() { + if t.subs == nil && !t.createSubs() { + return + } + maxAge := t.headSlot - t.tailSlot + if maxAge > maxTestRequestAge { + maxAge = maxTestRequestAge + t.pruneTail(t.headSlot - maxTestRequestAge) + } + var ( + slot uint64 + stateRoot common.Hash + ) + for slot = t.headSlot - uint64(rand.Intn(int(maxAge)+1)); slot <= t.headSlot; slot++ { + if header, ok := t.headers[slot]; ok { + stateRoot = header.StateRoot + break + } + } + subIndex := rand.Intn(testStateSubCount) + 
if _, err := t.subs[subIndex].Get(stateRoot); err != nil { // sub.Get checks state root
+		log.Error("Error retrieving subscribed state proof", "error", err, "subscription index", subIndex, "requested slot", slot, "head slot", t.headSlot)
+		return
+	}
+	t.recentStateCount++
+}
+
+func (t *testSyncer) resetChain(head types.Header) {
+	t.headers = make(map[uint64]types.Header)
+	t.waitForSig = make(map[common.Hash]mclock.AbsTime)
+	t.headSlot, t.tailSlot = head.Slot, head.Slot
+	t.headers[head.Slot] = head
+	t.headLastSeen = mclock.Now()
+	// do not add to waitForSig because we don't know when this head has first appeared
+}
+
+func (t *testSyncer) updateHead(force bool) error {
+	if !force && t.headers != nil && time.Duration(mclock.Now()-t.headLastSeen) < time.Second {
+		return nil
+	}
+	head, err := t.api.GetHeader(common.Hash{})
+	if err != nil {
+		return err
+	}
+	if t.headers == nil {
+		t.resetChain(head)
+		log.Info("Initialized header chain", "slot", head.Slot)
+	}
+	oldHeadSeen := t.headLastSeen
+	t.headLastSeen = mclock.Now()
+	if head == t.headers[t.headSlot] {
+		return nil
+	}
+	if time.Duration(t.headLastSeen-oldHeadSeen) < time.Millisecond*500 {
+		t.waitForSig[head.Hash()] = t.headLastSeen
+	}
+	removeSlot := t.headSlot
+	t.headSlot = head.Slot
+	for {
+		var (
+			parent      types.Header
+			parentFound bool
+		)
+		slot := head.Slot
+		for slot > t.tailSlot {
+			slot--
+			if header, ok := t.headers[slot]; ok {
+				if header.Hash() == head.ParentRoot {
+					parent, parentFound = header, true
+				}
+				break
+			}
+		}
+		if !parentFound {
+			parent, err = t.api.GetHeader(head.ParentRoot)
+			if err != nil {
+				// cannot trace back to the known chain
+				t.resetChain(head)
+				return err
+			}
+		}
+		// now parent is always valid
+		for slot := parent.Slot + 1; slot <= removeSlot; slot++ {
+			if header, ok := t.headers[slot]; ok {
+				delete(t.headers, slot)
+				delete(t.waitForSig, header.Hash())
+			}
+		}
+		removeSlot = parent.Slot
+		t.headers[head.Slot] = head
+		head = parent
+		if parentFound {
+			return nil
+		}
+		if head.Slot < t.tailSlot {
+			t.resetChain(head)
+			return nil
+		}
+	}
+}
+
+func (t *testSyncer) newSignedHead(head types.Header) {
+	now := mclock.Now()
+	go func() {
+		var delay interface{}
+		hash := head.Hash()
+		t.lock.Lock()
+		if arrivedAt, ok := t.waitForSig[hash]; ok {
+			delay = time.Duration(now - arrivedAt)
+			delete(t.waitForSig, hash)
+		} else {
+			delay = "unknown"
+		}
+		// read the counters while still holding the lock to avoid a data race
+		// with updateLoop
+		headStateCount, recentStateCount := t.headStateCount, t.recentStateCount
+		t.lock.Unlock()
+		log.Info("Received new signed head", "slot", head.Slot, "blockRoot", hash, "delay", delay, "head states retrieved", headStateCount, "subscribed states retrieved", recentStateCount)
+	}()
+}
+
+func (t *testSyncer) checkSignatureTimeouts() {
+	now := mclock.Now()
+	for hash, arrivedAt := range t.waitForSig {
+		if dt := time.Duration(now - arrivedAt); dt > time.Second*65 {
+			header, _ := t.findHeaderWithBlockRoot(hash)
+			log.Warn("Wait for header signature timed out", "slot", header.Slot, "blockRoot", hash, "delay", dt)
+			delete(t.waitForSig, hash)
+		}
+	}
+}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 78b3cbee60b8..cfd93cf51548 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -36,6 +36,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
+	bparams "github.com/ethereum/go-ethereum/beacon/params"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/common/hexutil" @@ -272,6 +273,68 @@ var ( Usage: "Manually specify the Shanghai fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + // Beacon client light sync settings + BeaconApiFlag = &cli.StringFlag{ + Name: "beacon.api", + Usage: "Beacon node (CL) light client API URL (currently only supports LodeStar)", + Category: flags.BeaconCategory, + } + BeaconApiHeaderFlag = &cli.StringFlag{ + Name: "beacon.api.header", + Usage: "Remote beacon node API custom HTTP header fields (\"key:value,key:value\")", + Category: flags.BeaconCategory, + } + BeaconApiStateProofFlag = &cli.IntFlag{ + Name: "beacon.api.stateproof", + Usage: "State proof API version (0: no state API 1: old Lodestar API 2: https://github.com/ethereum/beacon-APIs/pull/267 )", + Category: flags.BeaconCategory, + } + BeaconThresholdFlag = &cli.IntFlag{ + Name: "beacon.threshold", + Usage: "Beacon sync committee participation threshold", + Value: bparams.SyncCommitteeSupermajority, + Category: flags.BeaconCategory, + } + BeaconNoFilterFlag = &cli.BoolFlag{ + Name: "beacon.nofilter", + Usage: "Disable future slot signature filter", + Category: flags.BeaconCategory, + } + BeaconConfigFlag = &cli.StringFlag{ + Name: "beacon.config", + Usage: "Beacon chain config YAML file", + Category: flags.BeaconCategory, + } + BeaconGenesisRootFlag = &cli.StringFlag{ + Name: "beacon.genesis.gvroot", + Usage: "Beacon chain genesis validators root", + Category: flags.BeaconCategory, + } + BeaconGenesisTimeFlag = &cli.Uint64Flag{ + Name: "beacon.genesis.time", + Usage: "Beacon chain genesis time", + Category: flags.BeaconCategory, + } + BeaconCheckpointFlag = &cli.StringFlag{ + Name: "beacon.checkpoint", + Usage: "Beacon chain weak subjectivity checkpoint block hash", + Category: flags.BeaconCategory, + } + BlsyncApiFlag = &cli.StringFlag{ + Name: "blsync.engine.api", + Usage: "Target EL engine API URL", + Category: flags.BeaconCategory, + } + BlsyncJWTSecretFlag = &cli.StringFlag{ + Name: "blsync.jwtsecret", + Usage: "Path to a JWT secret to use for target engine API endpoint", + Category: flags.BeaconCategory, + } + BlsyncTestFlag = &cli.BoolFlag{ + Name: "blsync.test", + Usage: "Test beacon API", + Category: flags.BeaconCategory, + } // Light server and client settings LightServeFlag = &cli.IntFlag{ Name: "light.serve", diff --git a/common/types.go b/common/types.go index 218ca0be4c44..068ced4241ef 100644 --- a/common/types.go +++ b/common/types.go @@ -26,6 +26,7 @@ import ( "math/big" "math/rand" "reflect" + "strconv" "strings" "github.com/ethereum/go-ethereum/common/hexutil" @@ -429,3 +430,22 @@ func (ma *MixedcaseAddress) ValidChecksum() bool { func (ma *MixedcaseAddress) Original() string { return ma.original } + +type Decimal uint64 + +func isString(input []byte) bool { + return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' +} + +// UnmarshalJSON parses a hash in hex syntax. 
+func (d *Decimal) UnmarshalJSON(input []byte) error {
+	if !isString(input) {
+		return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf(uint64(0))}
+	}
+	i, err := strconv.ParseUint(string(input[1:len(input)-1]), 10, 64)
+	if err != nil {
+		return err
+	}
+	*d = Decimal(i)
+	return nil
+}
diff --git a/go.mod b/go.mod
index 8860447f089a..766c702dab9f 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,7 @@ require (
 	github.com/davecgh/go-spew v1.1.1
 	github.com/deckarep/golang-set/v2 v2.1.0
 	github.com/docker/docker v1.6.2
+	github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0
 	github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7
 	github.com/edsrzf/mmap-go v1.0.0
 	github.com/fatih/color v1.7.0
@@ -45,10 +46,14 @@ require (
 	github.com/karalabe/usb v0.0.2
 	github.com/mattn/go-colorable v0.1.8
 	github.com/mattn/go-isatty v0.0.12
+	github.com/minio/sha256-simd v1.0.0
 	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
 	github.com/prometheus/tsdb v0.7.1
+	github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7
+	github.com/protolambda/zrnt v0.30.0
+	github.com/protolambda/ztyp v0.2.2
 	github.com/rs/cors v1.7.0
 	github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible
 	github.com/status-im/keycard-go v0.2.0
@@ -86,6 +91,8 @@ require (
 	github.com/go-ole/go-ole v1.2.1 // indirect
 	github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
 	github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
+	github.com/kilic/bls12-381 v0.1.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.4 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mattn/go-runewidth v0.0.9 // indirect
 	github.com/mitchellh/mapstructure v1.4.1 // indirect
diff --git a/go.sum b/go.sum
index 400bcce8c9d1..2b58a5e16535 100644
--- a/go.sum
+++ b/go.sum
@@ -112,6 +112,8 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
 github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI=
 github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao=
+github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw=
 github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
 github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY=
github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs= @@ -268,10 +270,14 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4= github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= +github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -311,6 +317,9 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= @@ -368,6 +377,14 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= 
+github.com/protolambda/bls12-381-util v0.0.0-20210720105258-a772f2aac13e/go.mod h1:MPZvj2Pr0N8/dXyTPS5REeg2sdLG7t8DRzC1rLv925w=
+github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c=
+github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
+github.com/protolambda/messagediff v1.4.0/go.mod h1:LboJp0EwIbJsePYpzh5Op/9G1/4mIztMRYzzwR0dR2M=
+github.com/protolambda/zrnt v0.30.0 h1:pHEn69ZgaDFGpLGGYG1oD7DvYI7RDirbMBPfbC+8p4g=
+github.com/protolambda/zrnt v0.30.0/go.mod h1:qcdX9CXFeVNCQK/q0nswpzhd+31RHMk2Ax/2lMsJ4Jw=
+github.com/protolambda/ztyp v0.2.2 h1:rVcL3vBu9W/aV646zF6caLS/dyn9BN8NYiuJzicLNyY=
+github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU=
 github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
@@ -538,6 +555,7 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/internal/flags/categories.go b/internal/flags/categories.go
index c2db6c6c1d25..7b976e19a243 100644
--- a/internal/flags/categories.go
+++ b/internal/flags/categories.go
@@ -20,6 +20,7 @@ import "github.com/urfave/cli/v2"
 const (
 	EthCategory    = "ETHEREUM"
+	BeaconCategory = "BEACON CHAIN"
 	LightCategory  = "LIGHT CLIENT"
 	DevCategory    = "DEVELOPER CHAIN"
 	EthashCategory = "ETHASH"
diff --git a/node/node.go b/node/node.go
index 760e34d33597..56b6490bf203 100644
--- a/node/node.go
+++ b/node/node.go
@@ -338,15 +338,9 @@ func (n *Node) closeDataDir() {
 	}
 }
 
-// obtainJWTSecret loads the jwt-secret, either from the provided config,
-// or from the default location. If neither of those are present, it generates
-// a new secret and stores to the default location.
-func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
-	fileName := cliParam
-	if len(fileName) == 0 {
-		// no path provided, use default
-		fileName = n.ResolvePath(datadirJWTKey)
-	}
+// ObtainJWTSecret loads the jwt-secret from the given file name. If the file is
+// not present, it generates a new secret and stores it to the same location.
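+// It is exported so that commands other than geth (such as blsync) can reuse
+// the same loading logic.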
+func ObtainJWTSecret(fileName string) ([]byte, error) { // try reading from file if data, err := os.ReadFile(fileName); err == nil { jwtSecret := common.FromHex(strings.TrimSpace(string(data))) @@ -372,6 +366,18 @@ func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) { return jwtSecret, nil } +// obtainJWTSecret loads the jwt-secret, either from the provided config, +// or from the default location. If neither of those are present, it generates +// a new secret and stores to the default location. +func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) { + fileName := cliParam + if len(fileName) == 0 { + // no path provided, use default + fileName = n.ResolvePath(datadirJWTKey) + } + return ObtainJWTSecret(fileName) +} + // startRPC is a helper method to configure all the various RPC endpoints during node // startup. It's not meant to be called at any time afterwards as it makes certain // assumptions about the state of the node. From d8d83bc9b55d2baccbd0711d8c6ddfe5c3dea512 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 16 Feb 2023 23:54:27 +0100 Subject: [PATCH 2/2] beacon/light/api: fixed new state proof format encoding --- beacon/light/api/light_api.go | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/beacon/light/api/light_api.go b/beacon/light/api/light_api.go index 2c3e6de9f9df..6dc34aedc584 100644 --- a/beacon/light/api/light_api.go +++ b/beacon/light/api/light_api.go @@ -65,23 +65,24 @@ func NewBeaconLightApi(url string, customHeaders map[string]string, stateProofVe } } -func (api *BeaconLightApi) httpGet(path string) ([]byte, error) { +func (api *BeaconLightApi) httpGet(path string) ([]byte, int, error) { req, err := http.NewRequest("GET", api.url+path, nil) if err != nil { - return nil, err + return nil, 0, err } for k, v := range api.customHeaders { req.Header.Set(k, v) } resp, err := api.client.Do(req) if err != nil { - return nil, err + return nil, 0, err } defer resp.Body.Close() if resp.StatusCode != 200 { - return nil, fmt.Errorf("Error from API endpoint \"%s\": status code %d", path, resp.StatusCode) + return nil, resp.StatusCode, fmt.Errorf("Error from API endpoint \"%s\": status code %d", path, resp.StatusCode) } - return io.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) + return body, resp.StatusCode, err } // Header defines a beacon header and supports JSON encoding according to the @@ -121,7 +122,7 @@ func (h *jsonBeaconHeader) header() types.Header { // Note that the results are validated but the update signature should be verified // by the caller as its validity depends on the update chain. 
func (api *BeaconLightApi) GetBestUpdateAndCommittee(period uint64) (types.LightClientUpdate, []byte, error) { - resp, err := api.httpGet("/eth/v1/beacon/light_client/updates?start_period=" + strconv.Itoa(int(period)) + "&count=1") + resp, _, err := api.httpGet("/eth/v1/beacon/light_client/updates?start_period=" + strconv.Itoa(int(period)) + "&count=1") if err != nil { return types.LightClientUpdate{}, nil, err } @@ -195,7 +196,7 @@ type syncAggregate struct { // See data structure definition here: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate func (api *BeaconLightApi) GetOptimisticHeadUpdate() (sync.SignedHead, error) { - resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update") + resp, _, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update") if err != nil { return sync.SignedHead{}, err } @@ -271,7 +272,7 @@ func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, error } else { path += blockRoot.Hex() } - resp, err := api.httpGet(path) + resp, _, err := api.httpGet(path) if err != nil { return types.Header{}, err } @@ -324,8 +325,11 @@ func (api *BeaconLightApi) SubscribeStateProof(format merkle.ProofFormat, paths } encFormat, bitLength := EncodeCompactProofFormat(format) if api.stateProofVersion >= 2 { - _, err := api.httpGet("/eth/v0/beacon/proof/subscribe/states?format=0x" + hex.EncodeToString(encFormat) + "&first=" + strconv.Itoa(first) + "&period=" + strconv.Itoa(period)) - if err != nil { + _, status, err := api.httpGet("/eth/v0/beacon/proof/subscribe/states?format=0x" + hex.EncodeToString(encFormat) + "&first=" + strconv.Itoa(first) + "&period=" + strconv.Itoa(period)) + // do not return error if the remote node works but endpoint does not exist; + // in this case we can assume that the required proof will be available to + // request from recent states without explicit subscription + if err != nil && status != 404 { return nil, err } } @@ -363,7 +367,7 @@ func (sub *StateProofSub) Get(stateRoot common.Hash) (merkle.MultiProof, error) } func (api *BeaconLightApi) getStateProof(stateId string, format merkle.ProofFormat, encFormat []byte, bitLength int) (merkle.MultiProof, error) { - resp, err := api.httpGet("/eth/v0/beacon/proof/state/" + stateId + "?format=0x" + hex.EncodeToString(encFormat)) + resp, _, err := api.httpGet("/eth/v0/beacon/proof/state/" + stateId + "?format=0x" + hex.EncodeToString(encFormat)) if err != nil { return merkle.MultiProof{}, err } @@ -387,7 +391,7 @@ func (api *BeaconLightApi) getOldStateProof(stateId string, expFormat merkle.Pro for i := 1; i < len(paths); i++ { path += "&paths=" + paths[i] } - resp, err := api.httpGet(path) + resp, _, err := api.httpGet(path) if err != nil { return merkle.MultiProof{}, common.Hash{}, err } @@ -479,8 +483,9 @@ func encodeProofFormatSubtree(format merkle.ProofFormat, target *[]byte, bitLeng if bytePtr == len(*target) { *target = append(*target, byte(0)) } - if left, right := format.Children(); left != nil { + if left, right := format.Children(); left == nil { (*target)[bytePtr] += bitMask + } else { encodeProofFormatSubtree(left, target, bitLength) encodeProofFormatSubtree(right, target, bitLength) } @@ -488,7 +493,7 @@ func encodeProofFormatSubtree(format merkle.ProofFormat, target *[]byte, bitLeng // GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint. 
func (api *BeaconLightApi) GetCheckpointData(ctx context.Context, checkpoint common.Hash) (types.Header, sync.CheckpointData, []byte, error) {
-	resp, err := api.httpGet("/eth/v1/beacon/light_client/bootstrap/" + checkpoint.String())
+	resp, _, err := api.httpGet("/eth/v1/beacon/light_client/bootstrap/" + checkpoint.String())
 	if err != nil {
 		return types.Header{}, sync.CheckpointData{}, nil, err
 	}
@@ -532,7 +537,7 @@ func (api *BeaconLightApi) GetCheckpointData(ctx context.Context, checkpoint com
 // GetExecutionPayload fetches the execution block belonging to the beacon block
 // specified by the given header and validates it against the stated block hash.
 func (api *BeaconLightApi) GetExecutionPayload(header types.Header) (*ctypes.Block, error) {
-	resp, err := api.httpGet("/eth/v2/beacon/blocks/" + header.Hash().Hex())
+	resp, _, err := api.httpGet("/eth/v2/beacon/blocks/" + header.Hash().Hex())
 	if err != nil {
 		return nil, err
 	}
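
---

Usage sketch (not part of the patch): a minimal standalone program using the
new beacon light API client. The endpoint URL is an assumption (9596 is
Lodestar's default REST port); state proof version 0 means only the standard
light client endpoints are used.

	package main

	import (
		"fmt"
		"log"

		"github.com/ethereum/go-ethereum/beacon/light/api"
		"github.com/ethereum/go-ethereum/common"
	)

	func main() {
		// no custom headers, no state proof API
		c := api.NewBeaconLightApi("http://localhost:9596", nil, 0)
		// the zero hash requests the current head header
		head, err := c.GetHeader(common.Hash{})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("head slot:", head.Slot, "state root:", head.StateRoot)
	}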