From 59f59f14aa49defdd4df90130386129b74f943fe Mon Sep 17 00:00:00 2001 From: danielntmd <162406516+danielntmd@users.noreply.github.com> Date: Thu, 9 Apr 2026 04:17:00 -0400 Subject: [PATCH 01/11] fix(stdlib): use bigint arithmetic in GasFees.mul() for non-integer scalars (#22383) GasFees.mul() converted bigint fee values to Number for non-integer scalar multiplication, silently losing precision for values above 2^53. Since gas fees are UInt128 and can exceed 2^53 under realistic conditions (e.g. when the fee asset depreciates relative to ETH), this produced incorrect fee calculations in wallet code that calls .mul(1.5). Replaced the Number conversion with scaled bigint arithmetic that preserves full precision for all UInt128 values while maintaining the ceiling behavior needed for fee padding. Co-authored-by: danielntmd Co-authored-by: Claude Opus 4.6 (1M context) --- yarn-project/stdlib/src/gas/gas_fees.test.ts | 135 +++++++++++++++++++ yarn-project/stdlib/src/gas/gas_fees.ts | 16 ++- 2 files changed, 147 insertions(+), 4 deletions(-) create mode 100644 yarn-project/stdlib/src/gas/gas_fees.test.ts diff --git a/yarn-project/stdlib/src/gas/gas_fees.test.ts b/yarn-project/stdlib/src/gas/gas_fees.test.ts new file mode 100644 index 000000000000..9a0f07f28c99 --- /dev/null +++ b/yarn-project/stdlib/src/gas/gas_fees.test.ts @@ -0,0 +1,135 @@ +import { GasFees } from './gas_fees.js'; + +/** The old Number()-based multiplication that was replaced. Used to demonstrate precision loss. 
*/ +function oldNumberMulCeil(value: bigint, scalar: number): bigint { + return BigInt(Math.ceil(Number(value) * scalar)); +} + +describe('GasFees', () => { + describe('mul with non-integer scalar', () => { + it('multiplies with ceiling', () => { + const fees = new GasFees(10, 7); + const result = fees.mul(1.5); + expect(result.feePerDaGas).toBe(15n); + expect(result.feePerL2Gas).toBe(11n); // ceil(7 * 1.5) = ceil(10.5) = 11 + }); + + it('returns exact result when non-integer multiplication has no remainder', () => { + const fees = new GasFees(10, 20); + const result = fees.mul(1.5); + expect(result.feePerDaGas).toBe(15n); + expect(result.feePerL2Gas).toBe(30n); + }); + + it('handles zero fees', () => { + const fees = new GasFees(0, 0); + const result = fees.mul(1.5); + expect(result.feePerDaGas).toBe(0n); + expect(result.feePerL2Gas).toBe(0n); + }); + + it('preserves precision for values above 2^53 with non-integer scalar', () => { + // 2^53 + 1 is the first integer not safely representable as Number. + // Number(2^53 + 1) === 2^53, silently losing the +1. + const aboveSafeInt = (1n << 53n) + 1n; // 9007199254740993n + const fees = new GasFees(0, aboveSafeInt); + const result = fees.mul(1.5); + + // Exact: ceil(9007199254740993 * 1.5) = ceil(13510798882111489.5) = 13510798882111490 + const expected = 13510798882111490n; + expect(result.feePerL2Gas).toBe(expected); + + // The old Number()-based approach produces a wrong result: + // Number(9007199254740993n) === 9007199254740992 (lost the +1) + // ceil(9007199254740992 * 1.5) = 13510798882111488 (off by 2) + const oldResult = oldNumberMulCeil(aboveSafeInt, 1.5); + expect(oldResult).not.toBe(expected); + expect(expected - oldResult).toBe(2n); + }); + + it('preserves precision for values near 2^64 with non-integer scalar', () => { + // At 2^64, Number() loses ~2^11 = 2048 units of precision. 
+ const largeValue = (1n << 64n) + 12345n; + const fees = new GasFees(0, largeValue); + const result = fees.mul(1.5); + + const expected = (largeValue * 3n + 1n) / 2n; + expect(result.feePerL2Gas).toBe(expected); + + // The old approach diverges significantly at this magnitude. + const oldResult = oldNumberMulCeil(largeValue, 1.5); + const drift = oldResult > expected ? oldResult - expected : expected - oldResult; + expect(drift > 0n).toBe(true); + }); + + it('preserves precision at uint128 max with non-integer scalar', () => { + // The protocol caps feePerL2Gas at uint128 max via summedMinFee. + const uint128Max = (1n << 128n) - 1n; + const fees = new GasFees(0, uint128Max); + const result = fees.mul(1.5); + + const trueResult = (uint128Max * 3n + 1n) / 2n; + const diff = result.feePerL2Gas - trueResult; + expect(diff >= 0n).toBe(true); + expect(diff <= 1n).toBe(true); + }); + + it('old Number approach loses significant precision near 2^100', () => { + // A fee near 2^100. + // Number() can only represent 53 bits of mantissa, so ~47 bits are lost. + const largeValue = (1n << 100n) + 123456789012345n; + const fees = new GasFees(0, largeValue); + const result = fees.mul(1.5); + + const trueResult = (largeValue * 3n + 1n) / 2n; + expect(result.feePerL2Gas).toBe(trueResult); + + // The old approach drifts by ~185 trillion units at this magnitude. 
+ const oldResult = oldNumberMulCeil(largeValue, 1.5); + const oldDrift = trueResult - oldResult; + expect(oldDrift > 100_000_000_000_000n).toBe(true); + }); + + it('matches expected results for small values', () => { + const testCases: [bigint, number, bigint][] = [ + // [value, scalar, expected ceiling] + [100n, 1.5, 150n], + [100n, 0.5, 50n], + [100n, 2.5, 250n], + [7n, 1.5, 11n], // ceil(10.5) = 11 + [7n, 0.3, 3n], // ceil(2.1) = 3 + [999n, 1.99, 1989n], // ceil(999 * 1.99) = ceil(1988.01) = 1989 + [1_000_000n, 1.5, 1_500_000n], + [1_000_000_000n, 0.5, 500_000_000n], + [1_000_000_000_000n, 1.5, 1_500_000_000_000n], + ]; + + for (const [value, scalar, expected] of testCases) { + const fees = new GasFees(value, value); + const result = fees.mul(scalar); + expect(result.feePerDaGas).toBe(expected); + expect(result.feePerL2Gas).toBe(expected); + } + }); + + it('avoids false rounding from floating-point imprecision', () => { + // 100 * 1.1 in IEEE-754 is 110.00000000000001, which Math.ceil rounds to 111. + // The bigint implementation correctly computes ceil(110.0) = 110. + const fees = new GasFees(100, 100); + const result = fees.mul(1.1); + expect(result.feePerDaGas).toBe(110n); + + // The old approach rounds up due to floating-point artifact. 
+ expect(oldNumberMulCeil(100n, 1.1)).toBe(111n); + }); + + it('always rounds up (ceiling) for non-integer results', () => { + // 1 * 1.5 = 1.5 -> ceil = 2 + expect(new GasFees(1, 1).mul(1.5).feePerDaGas).toBe(2n); + // 3 * 1.5 = 4.5 -> ceil = 5 + expect(new GasFees(3, 3).mul(1.5).feePerDaGas).toBe(5n); + // 1 * 0.3 = 0.3 -> ceil = 1 + expect(new GasFees(1, 1).mul(0.3).feePerDaGas).toBe(1n); + }); + }); +}); diff --git a/yarn-project/stdlib/src/gas/gas_fees.ts b/yarn-project/stdlib/src/gas/gas_fees.ts index 7387b2df0496..94cf4b9c469f 100644 --- a/yarn-project/stdlib/src/gas/gas_fees.ts +++ b/yarn-project/stdlib/src/gas/gas_fees.ts @@ -15,6 +15,17 @@ import { z } from 'zod'; import type { UInt128 } from '../types/shared.js'; import type { GasDimensions } from './gas.js'; +/** + * Multiplies a bigint by a non-integer scalar and returns the ceiling of the result. + * Avoids converting the bigint to Number (which loses precision above 2^53) by instead + * scaling the scalar into a bigint rational and performing ceiling division. + */ +function bigintMulCeil(value: bigint, scalar: number): bigint { + const SCALE = 1_000_000_000_000n; // 1e12 + const scaledScalar = BigInt(Math.round(scalar * 1e12)); + return (value * scaledScalar + SCALE - 1n) / SCALE; +} + /** Gas prices for each dimension. 
*/ export class GasFees { public readonly feePerDaGas: UInt128; @@ -60,10 +71,7 @@ export class GasFees { const s = BigInt(scalar); return new GasFees(this.feePerDaGas * s, this.feePerL2Gas * s); } else { - return new GasFees( - BigInt(Math.ceil(Number(this.feePerDaGas) * scalar)), - BigInt(Math.ceil(Number(this.feePerL2Gas) * scalar)), - ); + return new GasFees(bigintMulCeil(this.feePerDaGas, scalar), bigintMulCeil(this.feePerL2Gas, scalar)); } } From 3c7b55a8b3c580ef2b293fb072e04d56d01018fc Mon Sep 17 00:00:00 2001 From: danielntmd <162406516+danielntmd@users.noreply.github.com> Date: Thu, 9 Apr 2026 04:17:08 -0400 Subject: [PATCH 02/11] fix(node-lib): reuse existing fileStore in snapshot sync instead of recreating (#22375) ## Summary `snapshotSync` accepted a URL string and created a new `ReadOnlyFileStore` internally, discarding the instance already created by the caller. This resulted in redundant object instantiation during snapshot downloads. ## Changes - Changed `snapshotSync` to accept a `fileStore: ReadOnlyFileStore` instead of `snapshotsUrl: string`, removing the redundant `createReadOnlyFileStore` call inside the function. - Updated `trySnapshotSync` to pass the pre-created `fileStore` from each snapshot candidate. - Updated `downloadEpochProvingJob` to pass its already-created `fileStore` through to `snapshotSync`. 
Co-authored-by: danielntmd Co-authored-by: Claude Opus 4.6 (1M context) --- yarn-project/node-lib/src/actions/snapshot-sync.ts | 10 ++++------ .../src/actions/download-epoch-proving-job.ts | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/yarn-project/node-lib/src/actions/snapshot-sync.ts b/yarn-project/node-lib/src/actions/snapshot-sync.ts index b3dd1f954f54..03775102f909 100644 --- a/yarn-project/node-lib/src/actions/snapshot-sync.ts +++ b/yarn-project/node-lib/src/actions/snapshot-sync.ts @@ -178,7 +178,7 @@ export async function trySnapshotSync(config: SnapshotSyncConfig, log: Logger) { snapshotCandidates.sort((a, b) => b.snapshot.l1BlockNumber - a.snapshot.l1BlockNumber); // Try each candidate in order until one succeeds - for (const { snapshot, url } of snapshotCandidates) { + for (const { snapshot, url, fileStore } of snapshotCandidates) { const { l1BlockNumber, l2BlockNumber } = snapshot; log.info(`Attempting to sync from snapshot at L1 block ${l1BlockNumber} L2 block ${l2BlockNumber}`, { snapshot, @@ -189,7 +189,7 @@ export async function trySnapshotSync(config: SnapshotSyncConfig, log: Logger) { await snapshotSync(snapshot, log, { dataDirectory: config.dataDirectory!, rollupAddress: config.l1Contracts.rollupAddress, - snapshotsUrl: url, + fileStore, }); log.info(`Snapshot synced to L1 block ${l1BlockNumber} L2 block ${l2BlockNumber}`, { snapshot, @@ -215,15 +215,13 @@ export async function trySnapshotSync(config: SnapshotSyncConfig, log: Logger) { export async function snapshotSync( snapshot: Pick, log: Logger, - config: { dataDirectory: string; rollupAddress: EthAddress; snapshotsUrl: string }, + config: { dataDirectory: string; rollupAddress: EthAddress; fileStore: ReadOnlyFileStore }, ) { - const { dataDirectory, rollupAddress } = config; + const { dataDirectory, rollupAddress, fileStore } = config; if (!dataDirectory) { throw new Error(`No local data directory defined. 
Cannot sync snapshot.`); } - const fileStore = await createReadOnlyFileStore(config.snapshotsUrl, log); - let downloadDir: string | undefined; try { diff --git a/yarn-project/prover-node/src/actions/download-epoch-proving-job.ts b/yarn-project/prover-node/src/actions/download-epoch-proving-job.ts index 8f6cb05e9e46..c428b20239da 100644 --- a/yarn-project/prover-node/src/actions/download-epoch-proving-job.ts +++ b/yarn-project/prover-node/src/actions/download-epoch-proving-job.ts @@ -30,7 +30,7 @@ export async function downloadEpochProvingJob( const dataUrls = makeSnapshotPaths(location); log.info(`Downloading state snapshot from ${location} to local data directory`, { metadata, dataUrls }); - await snapshotSync({ dataUrls }, log, { ...config, ...metadata, snapshotsUrl: location }); + await snapshotSync({ dataUrls }, log, { ...config, ...metadata, fileStore }); const dataPath = urlJoin(location, 'data.bin'); const localPath = config.jobDataDownloadPath; From 8150494567041cf5cb96c95e0e605b52fc7bcefb Mon Sep 17 00:00:00 2001 From: Nikita Meshcheriakov Date: Thu, 9 Apr 2026 06:11:58 -0300 Subject: [PATCH 03/11] fix: gate req/resp data protocols for unauthenticated peers (#22406) When `p2pAllowOnlyValidators` is enabled, unauthenticated peers were excluded from gossipsub, but could still freely query block data and transactions through req/resp protocols (BLOCK, BLOCK_TXS, TX) with no authentication check. 
This adds an authentication gate in `ReqResp.streamHandler()` that rejects unauthenticated peers on all protocols except those needed for the handshake flow (PING, STATUS, AUTH, GOODBYE) Ref: A-698 --- .../p2p/src/services/dummy_service.ts | 1 + .../p2p/src/services/libp2p/libp2p_service.ts | 3 + .../p2p/src/services/reqresp/interface.ts | 21 ++++ .../p2p/src/services/reqresp/reqresp.test.ts | 100 ++++++++++++++++++ .../p2p/src/services/reqresp/reqresp.ts | 17 +++ .../p2p/src/test-helpers/mock-pubsub.ts | 1 + .../p2p/src/test-helpers/reqresp-nodes.ts | 2 + 7 files changed, 145 insertions(+) diff --git a/yarn-project/p2p/src/services/dummy_service.ts b/yarn-project/p2p/src/services/dummy_service.ts index 5158ade97362..0e7beea22441 100644 --- a/yarn-project/p2p/src/services/dummy_service.ts +++ b/yarn-project/p2p/src/services/dummy_service.ts @@ -287,6 +287,7 @@ export class DummyPeerManager implements PeerManagerInterface { export class DummyReqResp implements ReqRespInterface { updateConfig(_config: Partial): void {} + setShouldRejectPeer(): void {} start( _subProtocolHandlers: ReqRespSubProtocolHandlers, _subProtocolValidators: ReqRespSubProtocolValidators, diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index d79065b2e0df..b7d9afe74f9b 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -473,6 +473,9 @@ export class LibP2PService extends WithTracer implements P2PService { epochCache, ); + // Gate req/resp data protocols for unauthenticated peers when p2pAllowOnlyValidators is enabled + reqresp.setShouldRejectPeer(peerId => peerManager.shouldDisableP2PGossip(peerId)); + // Configure application-specific scoring for gossipsub. 
// The weight scales app score to align with gossipsub thresholds: // - Disconnect (-50) × 10 = -500 = gossipThreshold (stops receiving gossip) diff --git a/yarn-project/p2p/src/services/reqresp/interface.ts b/yarn-project/p2p/src/services/reqresp/interface.ts index 7e76e77a0f78..016525a98919 100644 --- a/yarn-project/p2p/src/services/reqresp/interface.ts +++ b/yarn-project/p2p/src/services/reqresp/interface.ts @@ -95,6 +95,24 @@ export type ReqRespSubProtocolValidators = { [S in ReqRespSubProtocol]: ResponseValidator; }; +/** + * Protocols that are always allowed without authentication, even when p2pAllowOnlyValidators is enabled. + * These are needed for the handshake and connection management flow. + * All other protocols require the remote peer to be authenticated. + */ +export const UNAUTHENTICATED_ALLOWED_PROTOCOLS: ReadonlySet = new Set([ + ReqRespSubProtocol.PING, + ReqRespSubProtocol.STATUS, + ReqRespSubProtocol.AUTH, + ReqRespSubProtocol.GOODBYE, +]); + +/** + * Callback that checks whether a peer should be rejected from req/resp data protocols. + * Returns true if the peer should be rejected (i.e. p2pAllowOnlyValidators is on and peer is unauthenticated). + */ +export type ShouldRejectPeer = (peerId: string) => boolean; + export const DEFAULT_SUB_PROTOCOL_VALIDATORS: ReqRespSubProtocolValidators = { [ReqRespSubProtocol.PING]: noopValidator, [ReqRespSubProtocol.STATUS]: noopValidator, @@ -253,5 +271,8 @@ export interface ReqRespInterface { updateConfig(config: Partial): void; + /** Sets the callback used to reject unauthenticated peers on gated req/resp protocols. 
*/ + setShouldRejectPeer(checker: ShouldRejectPeer): void; + getConnectionSampler(): Pick; } diff --git a/yarn-project/p2p/src/services/reqresp/reqresp.test.ts b/yarn-project/p2p/src/services/reqresp/reqresp.test.ts index 5c7f26fe1035..2ddf4d9a2cbe 100644 --- a/yarn-project/p2p/src/services/reqresp/reqresp.test.ts +++ b/yarn-project/p2p/src/services/reqresp/reqresp.test.ts @@ -366,6 +366,106 @@ describe('ReqResp', () => { }); }); + describe('Authentication gating', () => { + it('should reject unauthenticated peers on all data protocols', async () => { + nodes = await createNodes(peerScoring, 2); + + await startNodes(nodes); + await sleep(500); + await connectToPeers(nodes); + await sleep(500); + + // Set up auth checker that rejects all peers (simulates p2pAllowOnlyValidators=true with no authenticated peers) + nodes[1].req.setShouldRejectPeer(() => true); + + // All data protocols should be rejected + for (const protocol of [ReqRespSubProtocol.TX, ReqRespSubProtocol.BLOCK_TXS]) { + const resp = await nodes[0].req.sendRequestToPeer(nodes[1].p2p.peerId, protocol, Buffer.from('request')); + expect(resp.status).toEqual(ReqRespStatus.FAILURE); + } + + // PING is an allowed protocol — should succeed + const pingResp = await nodes[0].req.sendRequestToPeer(nodes[1].p2p.peerId, ReqRespSubProtocol.PING, PING_REQUEST); + expectSuccess(pingResp); + expect(pingResp.data.toString('utf-8')).toEqual('pong'); + }); + + it('should allow handshake protocols for unauthenticated peers', async () => { + nodes = await createNodes(peerScoring, 2); + + await startNodes(nodes); + await sleep(500); + await connectToPeers(nodes); + await sleep(500); + + // Reject all peers on gated protocols + nodes[1].req.setShouldRejectPeer(() => true); + + // PING, STATUS, AUTH, GOODBYE should still work + const pingResp = await nodes[0].req.sendRequestToPeer(nodes[1].p2p.peerId, ReqRespSubProtocol.PING, PING_REQUEST); + expectSuccess(pingResp); + + const statusResp = await nodes[0].req.sendRequestToPeer( 
+ nodes[1].p2p.peerId, + ReqRespSubProtocol.STATUS, + Buffer.from('status'), + ); + expectSuccess(statusResp); + + const authResp = await nodes[0].req.sendRequestToPeer( + nodes[1].p2p.peerId, + ReqRespSubProtocol.AUTH, + Buffer.from('auth'), + ); + expectSuccess(authResp); + }); + + it('should allow authenticated peers on all protocols', async () => { + nodes = await createNodes(peerScoring, 2); + + await startNodes(nodes); + await sleep(500); + await connectToPeers(nodes); + await sleep(500); + + // Set up auth checker that allows all peers (simulates authenticated validator) + nodes[1].req.setShouldRejectPeer(() => false); + + // Data protocols should succeed for authenticated peers + const pingResp = await nodes[0].req.sendRequestToPeer(nodes[1].p2p.peerId, ReqRespSubProtocol.PING, PING_REQUEST); + expectSuccess(pingResp); + expect(pingResp.data.toString('utf-8')).toEqual('pong'); + + const txResp = await nodes[0].req.sendRequestToPeer( + nodes[1].p2p.peerId, + ReqRespSubProtocol.TX, + Buffer.from('request'), + ); + expectSuccess(txResp); + }); + + it('should allow all protocols when no auth checker is set', async () => { + nodes = await createNodes(peerScoring, 2); + + await startNodes(nodes); + await sleep(500); + await connectToPeers(nodes); + await sleep(500); + + // No setShouldRejectPeer called — all protocols should work (backwards compatible) + const pingResp = await nodes[0].req.sendRequestToPeer(nodes[1].p2p.peerId, ReqRespSubProtocol.PING, PING_REQUEST); + expectSuccess(pingResp); + expect(pingResp.data.toString('utf-8')).toEqual('pong'); + + const txResp = await nodes[0].req.sendRequestToPeer( + nodes[1].p2p.peerId, + ReqRespSubProtocol.TX, + Buffer.from('request'), + ); + expectSuccess(txResp); + }); + }); + describe('Batch requests', () => { it('should send a batch request between many peers', async () => { const batchSize = 9; diff --git a/yarn-project/p2p/src/services/reqresp/reqresp.ts b/yarn-project/p2p/src/services/reqresp/reqresp.ts index 
65d61c8f7ea2..ba3fe8e518f5 100644 --- a/yarn-project/p2p/src/services/reqresp/reqresp.ts +++ b/yarn-project/p2p/src/services/reqresp/reqresp.ts @@ -34,7 +34,9 @@ import { type ReqRespSubProtocolHandlers, type ReqRespSubProtocolRateLimits, type ReqRespSubProtocolValidators, + type ShouldRejectPeer, type SubProtocolMap, + UNAUTHENTICATED_ALLOWED_PROTOCOLS, responseFromBuffer, subProtocolSizeCalculators, } from './interface.js'; @@ -72,6 +74,8 @@ export class ReqResp implements ReqRespInterface { private snappyTransform: SnappyTransform; + private shouldRejectPeer: ShouldRejectPeer | undefined; + private metrics: ReqRespMetrics; constructor( @@ -108,6 +112,10 @@ export class ReqResp implements ReqRespInterface { } } + public setShouldRejectPeer(checker: ShouldRejectPeer): void { + this.shouldRejectPeer = checker; + } + get tracer() { return this.metrics.tracer; } @@ -596,6 +604,15 @@ export class ReqResp implements ReqRespInterface { throw new ReqRespStatusError(ReqRespStatus.RATE_LIMIT_EXCEEDED); } + // When p2pAllowOnlyValidators is enabled, reject unauthenticated peers on data protocols + if ( + !UNAUTHENTICATED_ALLOWED_PROTOCOLS.has(protocol) && + (this.shouldRejectPeer?.(connection.remotePeer.toString()) ?? 
false) + ) { + this.logger.debug(`Rejecting unauthenticated peer ${connection.remotePeer} on gated protocol ${protocol}`); + throw new ReqRespStatusError(ReqRespStatus.FAILURE); + } + await this.processStream(protocol, incomingStream); } catch (err: any) { this.metrics.recordResponseError(protocol); diff --git a/yarn-project/p2p/src/test-helpers/mock-pubsub.ts b/yarn-project/p2p/src/test-helpers/mock-pubsub.ts index cf48654e0aff..fa11a03415a2 100644 --- a/yarn-project/p2p/src/test-helpers/mock-pubsub.ts +++ b/yarn-project/p2p/src/test-helpers/mock-pubsub.ts @@ -100,6 +100,7 @@ class MockReqResp implements ReqRespInterface { } updateConfig(_config: Partial): void {} + setShouldRejectPeer(): void {} start( subProtocolHandlers: Partial, diff --git a/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts b/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts index bd55ba2d0643..624123df0bc6 100644 --- a/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts +++ b/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts @@ -153,6 +153,8 @@ export async function createTestLibP2PService( epochCache, ); + reqresp.setShouldRejectPeer(peerId => peerManager.shouldDisableP2PGossip(peerId)); + p2pNode.services.pubsub.score.params.appSpecificWeight = APP_SPECIFIC_WEIGHT; p2pNode.services.pubsub.score.params.appSpecificScore = (peerId: string) => peerManager.shouldDisableP2PGossip(peerId) ? -Infinity : peerManager.getPeerScore(peerId); From d2d33ddb4c7fb9ec5a91209dfc7d351e81600474 Mon Sep 17 00:00:00 2001 From: spypsy Date: Thu, 9 Apr 2026 11:59:45 +0100 Subject: [PATCH 04/11] fix(p2p): use per-batch ops array in AztecDatastore.batch() (#22357) ## Summary - `AztecDatastore.batch()` stored batch operations in a shared class-level `#batchOps` array, meaning concurrent batches would corrupt each other's operations. Replaced with a local `ops` array per batch call, matching the reference `BaseDatastore` implementation from `datastore-core`. 
- Added a test that verifies concurrent batches don't interfere with each other. Fixes [A-761](https://linear.app/aztec-labs/issue/A-761/audit-92-aztecdatastorebatch-shares-batchops-across-concurrent-batches) Co-authored-by: Claude Opus 4.6 (1M context) --- .../p2p/src/services/data_store.test.ts | 22 +++++++++++++++++++ yarn-project/p2p/src/services/data_store.ts | 18 +++++---------- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/yarn-project/p2p/src/services/data_store.test.ts b/yarn-project/p2p/src/services/data_store.test.ts index a818130b8a13..3c27bae2e643 100644 --- a/yarn-project/p2p/src/services/data_store.test.ts +++ b/yarn-project/p2p/src/services/data_store.test.ts @@ -50,6 +50,28 @@ describe('AztecDatastore with AztecLmdbStore', () => { await expect(datastore.get(key)).rejects.toHaveProperty('code', 'ERR_NOT_FOUND'); }); + it('concurrent batches do not interfere with each other', async () => { + const batch1 = datastore.batch(); + const batch2 = datastore.batch(); + + const key1 = new Key('batch1key'); + const key2 = new Key('batch2key'); + const value1 = new Uint8Array([1, 2, 3]); + const value2 = new Uint8Array([4, 5, 6]); + + batch1.put(key1, value1); + batch2.put(key2, value2); + + // Committing batch1 should only apply batch1's operations + await batch1.commit(); + expect(await datastore.has(key1)).toBe(true); + expect(await datastore.has(key2)).toBe(false); + + // Committing batch2 should still apply batch2's operations + await batch2.commit(); + expect(await datastore.has(key2)).toBe(true); + }); + it('batch operations commit correctly', async () => { const batch = datastore.batch(); const key1 = new Key('key1'); diff --git a/yarn-project/p2p/src/services/data_store.ts b/yarn-project/p2p/src/services/data_store.ts index ba7775ac4281..edd75553b763 100644 --- a/yarn-project/p2p/src/services/data_store.ts +++ b/yarn-project/p2p/src/services/data_store.ts @@ -28,8 +28,6 @@ export class AztecDatastore implements Datastore { 
#memoryDatastore: Map; #dbDatastore: AztecAsyncMap; - #batchOps: BatchOp[] = []; - private maxMemoryItems: number; constructor(db: AztecAsyncKVStore, { maxMemoryItems } = { maxMemoryItems: 50 }) { @@ -92,23 +90,17 @@ export class AztecDatastore implements Datastore { } batch(): Batch { + const ops: BatchOp[] = []; return { put: (key, value) => { - this.#batchOps.push({ - type: 'put', - key, - value, - }); + ops.push({ type: 'put', key, value }); }, delete: key => { - this.#batchOps.push({ - type: 'del', - key, - }); + ops.push({ type: 'del', key }); }, commit: async () => { await this.#db.transactionAsync(async () => { - for (const op of this.#batchOps) { + for (const op of ops) { if (op.type === 'put' && op.value) { await this.put(op.key, op.value); } else if (op.type === 'del') { @@ -116,7 +108,7 @@ export class AztecDatastore implements Datastore { } } }); - this.#batchOps = []; // Clear operations after commit + ops.length = 0; }, }; } From 625046df1ef6700b59a533da5bd6826c90cf0b7a Mon Sep 17 00:00:00 2001 From: Maddiaa <47148561+Maddiaa0@users.noreply.github.com> Date: Thu, 9 Apr 2026 12:15:34 +0100 Subject: [PATCH 05/11] chore(pipeline): spartan config (#21285) pipelined mbps spartan scenario --- spartan/.gitignore | 1 + spartan/environments/mbps-pipeline.env | 67 +++++++++++++++++++ spartan/scripts/deploy_network.sh | 6 ++ spartan/terraform/deploy-aztec-infra/main.tf | 1 + .../terraform/deploy-aztec-infra/variables.tf | 20 ++++++ yarn-project/archiver/src/archiver.ts | 8 ++- .../archiver/src/modules/instrumentation.ts | 26 +++++-- .../archiver/src/modules/l1_synchronizer.ts | 11 +-- .../sequencer/checkpoint_proposal_job.test.ts | 7 ++ .../src/sequencer/checkpoint_proposal_job.ts | 3 +- .../sequencer-client/src/sequencer/metrics.ts | 16 ++++- yarn-project/telemetry-client/src/metrics.ts | 12 ++++ 12 files changed, 163 insertions(+), 15 deletions(-) create mode 100644 spartan/environments/mbps-pipeline.env diff --git a/spartan/.gitignore b/spartan/.gitignore 
index eb33fd7e67da..6594d1aed966 100644 --- a/spartan/.gitignore +++ b/spartan/.gitignore @@ -31,5 +31,6 @@ environments/* !environments/kind-minimal.env !environments/kind-provers.env !environments/alpha-net.env +!environments/mbps-pipeline.env *.tfvars !terraform/deploy-external-secrets/*.tfvars diff --git a/spartan/environments/mbps-pipeline.env b/spartan/environments/mbps-pipeline.env new file mode 100644 index 000000000000..f4c7df1c0c4c --- /dev/null +++ b/spartan/environments/mbps-pipeline.env @@ -0,0 +1,67 @@ +CREATE_ETH_DEVNET=true +GCP_REGION=us-west1-a +CLUSTER=aztec-gke-private +NETWORK=next-net +NAMESPACE=mbps-pipe +DESTROY_NAMESPACE=true +ETHEREUM_CHAIN_ID=1337 +FUNDING_PRIVATE_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" +LABS_INFRA_MNEMONIC="test test test test test test test test test test test junk" +OTEL_COLLECTOR_ENDPOINT=REPLACE_WITH_GCP_SECRET + +DEPLOY_INTERNAL_BOOTNODE=true +TEST_ACCOUNTS=true +SPONSORED_FPC=true +SEQ_MIN_TX_PER_BLOCK=0 +SEQ_MAX_TX_PER_BLOCK=8 +AZTEC_EPOCH_DURATION=8 +REAL_VERIFIER=false +PROVER_REAL_PROOFS=false + +CREATE_ROLLUP_CONTRACTS=true +VERIFY_CONTRACTS=false +DESTROY_AZTEC_INFRA=true + +SEQ_BUILD_CHECKPOINT_IF_EMPTY=true +SEQ_BLOCK_DURATION_MS=6000 +SEQ_ENABLE_PROPOSER_PIPELINING=true +SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER=1.1 +LOG_LEVEL=verbose + +AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET=2 +AZTEC_LAG_IN_EPOCHS_FOR_RANDAO=2 + +AZTEC_TARGET_COMMITTEE_SIZE=24 + +VALIDATOR_REPLICAS=4 +VALIDATORS_PER_NODE=12 +VALIDATOR_PUBLISHERS_PER_REPLICA=4 +VALIDATOR_PUBLISHER_MNEMONIC_START_INDEX=5000 + +PUBLISHERS_PER_PROVER=2 +PROVER_PUBLISHER_MNEMONIC_START_INDEX=8000 + +BOT_TRANSFERS_REPLICAS=1 +BOT_TRANSFERS_TX_INTERVAL_SECONDS=4 +BOT_TRANSFERS_FOLLOW_CHAIN=PROPOSED +BOT_TRANSFERS_PXE_SYNC_CHAIN_TIP=proposed + +BOT_SWAPS_REPLICAS=1 +BOT_SWAPS_TX_INTERVAL_SECONDS=4 +BOT_SWAPS_FOLLOW_CHAIN=PROPOSED +BOT_SWAPS_PXE_SYNC_CHAIN_TIP=proposed + +BOT_CROSS_CHAIN_REPLICAS=1 +BOT_CROSS_CHAIN_TX_INTERVAL_SECONDS=8 
+BOT_CROSS_CHAIN_FOLLOW_CHAIN=PROPOSED +BOT_CROSS_CHAIN_PXE_SYNC_CHAIN_TIP=proposed + +REDEPLOY_ROLLUP_CONTRACTS=true + +DEBUG_P2P_INSTRUMENT_MESSAGES=true +OTEL_COLLECT_INTERVAL_MS=10000 +OTEL_EXPORT_TIMEOUT_MS=5000 + +VALIDATOR_HA_REPLICAS=1 +VALIDATOR_RESOURCE_PROFILE="prod-spot" + diff --git a/spartan/scripts/deploy_network.sh b/spartan/scripts/deploy_network.sh index f92fde896a10..488ac74afd38 100755 --- a/spartan/scripts/deploy_network.sh +++ b/spartan/scripts/deploy_network.sh @@ -125,6 +125,7 @@ SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER=${SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER:-null} SEQ_BLOCK_DURATION_MS=${SEQ_BLOCK_DURATION_MS:-} SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT=${SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT:-} SEQ_BUILD_CHECKPOINT_IF_EMPTY=${SEQ_BUILD_CHECKPOINT_IF_EMPTY:-} +SEQ_ENABLE_PROPOSER_PIPELINING=${SEQ_ENABLE_PROPOSER_PIPELINING:-false} SEQ_ENFORCE_TIME_TABLE=${SEQ_ENFORCE_TIME_TABLE:-} SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT=${SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT:-0} PROVER_REPLICAS=${PROVER_REPLICAS:-4} @@ -133,6 +134,8 @@ R2_ACCESS_KEY_ID=${R2_ACCESS_KEY_ID:-} R2_SECRET_ACCESS_KEY=${R2_SECRET_ACCESS_KEY:-} OTEL_COLLECTOR_ENDPOINT=${OTEL_COLLECTOR_ENDPOINT:-} +OTEL_COLLECT_INTERVAL_MS=${OTEL_COLLECT_INTERVAL_MS:-} +OTEL_EXPORT_TIMEOUT_MS=${OTEL_EXPORT_TIMEOUT_MS:-} DEPLOY_INTERNAL_BOOTNODE=${DEPLOY_INTERNAL_BOOTNODE:-} DEPLOY_ARCHIVAL_NODE=${DEPLOY_ARCHIVAL_NODE:-false} @@ -554,6 +557,7 @@ SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER = ${SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER} SEQ_BLOCK_DURATION_MS = ${SEQ_BLOCK_DURATION_MS:-null} SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT = ${SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT:-null} SEQ_BUILD_CHECKPOINT_IF_EMPTY = ${SEQ_BUILD_CHECKPOINT_IF_EMPTY:-null} +SEQ_ENABLE_PROPOSER_PIPELINING = ${SEQ_ENABLE_PROPOSER_PIPELINING} SEQ_ENFORCE_TIME_TABLE = ${SEQ_ENFORCE_TIME_TABLE:-null} SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT = ${SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT} PROVER_MNEMONIC = "${LABS_INFRA_MNEMONIC}" @@ -573,6 +577,8 @@ 
SLASH_INVALID_BLOCK_PENALTY = ${SLASH_INVALID_BLOCK_PENALTY:-null} SLASH_OFFENSE_EXPIRATION_ROUNDS = ${SLASH_OFFENSE_EXPIRATION_ROUNDS:-null} SLASH_MAX_PAYLOAD_SIZE = ${SLASH_MAX_PAYLOAD_SIZE:-null} OTEL_COLLECTOR_ENDPOINT = "${OTEL_COLLECTOR_ENDPOINT}" +OTEL_COLLECT_INTERVAL_MS = ${OTEL_COLLECT_INTERVAL_MS:-null} +OTEL_EXPORT_TIMEOUT_MS = ${OTEL_EXPORT_TIMEOUT_MS:-null} DEPLOY_INTERNAL_BOOTNODE = ${DEPLOY_INTERNAL_BOOTNODE:-true} PROVER_REAL_PROOFS = ${PROVER_REAL_PROOFS} TRANSACTIONS_DISABLED = ${TRANSACTIONS_DISABLED:-null} diff --git a/spartan/terraform/deploy-aztec-infra/main.tf b/spartan/terraform/deploy-aztec-infra/main.tf index a06e7873343b..50835989dbd8 100644 --- a/spartan/terraform/deploy-aztec-infra/main.tf +++ b/spartan/terraform/deploy-aztec-infra/main.tf @@ -224,6 +224,7 @@ locals { "validator.node.env.SEQ_BLOCK_DURATION_MS" = var.SEQ_BLOCK_DURATION_MS "validator.node.env.SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT" = var.SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT "validator.node.env.SEQ_BUILD_CHECKPOINT_IF_EMPTY" = var.SEQ_BUILD_CHECKPOINT_IF_EMPTY + "validator.node.env.SEQ_ENABLE_PROPOSER_PIPELINING" = var.SEQ_ENABLE_PROPOSER_PIPELINING "validator.node.env.SEQ_ENFORCE_TIME_TABLE" = var.SEQ_ENFORCE_TIME_TABLE "validator.node.env.P2P_TX_POOL_DELETE_TXS_AFTER_REORG" = var.P2P_TX_POOL_DELETE_TXS_AFTER_REORG "validator.node.env.L1_PRIORITY_FEE_BUMP_PERCENTAGE" = var.VALIDATOR_L1_PRIORITY_FEE_BUMP_PERCENTAGE diff --git a/spartan/terraform/deploy-aztec-infra/variables.tf b/spartan/terraform/deploy-aztec-infra/variables.tf index 2d69743c7bc4..06d495ddad37 100644 --- a/spartan/terraform/deploy-aztec-infra/variables.tf +++ b/spartan/terraform/deploy-aztec-infra/variables.tf @@ -335,6 +335,20 @@ variable "OTEL_COLLECTOR_ENDPOINT" { nullable = true } +variable "OTEL_COLLECT_INTERVAL_MS" { + description = "Interval in ms at which OTEL metrics are exported from nodes" + type = string + nullable = true + default = null +} + +variable "OTEL_EXPORT_TIMEOUT_MS" { + 
description = "Timeout in ms for OTEL metric exports (must be <= OTEL_COLLECT_INTERVAL_MS)" + type = string + nullable = true + default = null +} + variable "LOG_LEVEL" { description = "Log level for all nodes" type = string @@ -415,6 +429,12 @@ variable "SEQ_PER_BLOCK_ALLOCATION_MULTIPLIER" { default = null } +variable "SEQ_ENABLE_PROPOSER_PIPELINING" { + description = "Whether to enable build-ahead proposer pipelining" + type = string + default = "false" +} + variable "SENTINEL_ENABLED" { description = "Whether to enable sentinel" type = string diff --git a/yarn-project/archiver/src/archiver.ts b/yarn-project/archiver/src/archiver.ts index 559e0597f13c..931395155957 100644 --- a/yarn-project/archiver/src/archiver.ts +++ b/yarn-project/archiver/src/archiver.ts @@ -11,7 +11,7 @@ import { EthAddress } from '@aztec/foundation/eth-address'; import { type Logger, createLogger } from '@aztec/foundation/log'; import { type PromiseWithResolvers, promiseWithResolvers } from '@aztec/foundation/promise'; import { RunningPromise, makeLoggingErrorHandler } from '@aztec/foundation/running-promise'; -import { DateProvider } from '@aztec/foundation/timer'; +import { DateProvider, elapsed } from '@aztec/foundation/timer'; import { type ArchiverEmitter, L2Block, @@ -85,6 +85,8 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra public readonly tracer: Tracer; + private readonly instrumentation: ArchiverInstrumentation; + /** * Creates a new instance of the Archiver. * @param publicClient - A client for interacting with the Ethereum node. 
@@ -129,6 +131,7 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra super(dataStore, l1Constants); this.tracer = instrumentation.tracer; + this.instrumentation = instrumentation; this.initialSyncPromise = promiseWithResolvers(); this.synchronizer = synchronizer; this.events = events; @@ -244,7 +247,8 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra } try { - await this.updater.addProposedBlock(block); + const [durationMs] = await elapsed(() => this.updater.addProposedBlock(block)); + this.instrumentation.processNewProposedBlock(durationMs, block); this.log.debug(`Added block ${block.number} to store`); resolve(); } catch (err: any) { diff --git a/yarn-project/archiver/src/modules/instrumentation.ts b/yarn-project/archiver/src/modules/instrumentation.ts index 8f08ddeb3541..6b70bbfeeb7b 100644 --- a/yarn-project/archiver/src/modules/instrumentation.ts +++ b/yarn-project/archiver/src/modules/instrumentation.ts @@ -32,6 +32,7 @@ export class ArchiverInstrumentation { private pruneCount: UpDownCounter; private syncDurationPerBlock: Histogram; + private syncDurationPerCheckpoint: Histogram; private syncBlockCount: UpDownCounter; private manaPerBlock: Histogram; private txsPerBlock: Histogram; @@ -68,6 +69,8 @@ export class ArchiverInstrumentation { this.syncDurationPerBlock = meter.createHistogram(Metrics.ARCHIVER_SYNC_PER_BLOCK); + this.syncDurationPerCheckpoint = meter.createHistogram(Metrics.ARCHIVER_SYNC_PER_CHECKPOINT); + this.syncBlockCount = createUpDownCounterWithDefault(meter, Metrics.ARCHIVER_SYNC_BLOCK_COUNT); this.manaPerBlock = meter.createHistogram(Metrics.ARCHIVER_MANA_PER_BLOCK); @@ -113,17 +116,26 @@ export class ArchiverInstrumentation { return this.telemetry.isEnabled(); } - public processNewBlocks(syncTimePerBlock: number, blocks: L2Block[]) { + public processNewProposedBlock(syncTimePerBlock: number, block: L2Block) { + const attrs = { [Attributes.STATUS]: 'proposed' }; + 
this.blockHeight.record(block.number, attrs); this.syncDurationPerBlock.record(Math.ceil(syncTimePerBlock)); + + // Per block metrics + this.txCount.add(block.body.txEffects.length); + this.txsPerBlock.record(block.body.txEffects.length); + this.manaPerBlock.record(block.header.totalManaUsed.toNumber() / 1e6); + } + + public processNewCheckpointedBlocks(syncTimePerCheckpoint: number, blocks: L2Block[]) { + if (blocks.length === 0) { + return; + } + + this.syncDurationPerCheckpoint.record(Math.ceil(syncTimePerCheckpoint)); this.blockHeight.record(Math.max(...blocks.map(b => b.number))); this.checkpointHeight.record(Math.max(...blocks.map(b => b.checkpointNumber))); this.syncBlockCount.add(blocks.length); - - for (const block of blocks) { - this.txCount.add(block.body.txEffects.length); - this.txsPerBlock.record(block.body.txEffects.length); - this.manaPerBlock.record(block.header.totalManaUsed.toNumber() / 1e6); - } } public processNewMessages(count: number, syncPerMessageMs: number) { diff --git a/yarn-project/archiver/src/modules/l1_synchronizer.ts b/yarn-project/archiver/src/modules/l1_synchronizer.ts index 4eaef03cbf5c..e05b34e3c3b1 100644 --- a/yarn-project/archiver/src/modules/l1_synchronizer.ts +++ b/yarn-project/archiver/src/modules/l1_synchronizer.ts @@ -837,10 +837,13 @@ export class ArchiverL1Synchronizer implements Traceable { this.updater.addCheckpoints(validCheckpoints, updatedValidationResult), ), ); - this.instrumentation.processNewBlocks( - processDuration / validCheckpoints.length, - validCheckpoints.flatMap(c => c.checkpoint.blocks), - ); + + if (validCheckpoints.length > 0) { + this.instrumentation.processNewCheckpointedBlocks( + processDuration / validCheckpoints.length, + validCheckpoints.flatMap(c => c.checkpoint.blocks), + ); + } // If blocks were pruned due to conflict with L1 checkpoints, emit event if (result.prunedBlocks && result.prunedBlocks.length > 0) { diff --git 
a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts index 373c9a8a7491..b9cd6c06880e 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts @@ -315,6 +315,13 @@ describe('CheckpointProposalJob', () => { expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); expect(validatorClient.collectAttestations).toHaveBeenCalledTimes(1); expect(publisher.enqueueProposeCheckpoint).toHaveBeenCalledTimes(1); + // recordBuiltBlock must receive the target slot so metrics can gate inter-block time + // to blocks within the same slot and avoid pollution across the proposer's turn gaps. + expect(metrics.recordBuiltBlock).toHaveBeenCalledWith( + expect.any(Number), + expect.any(Number), + SlotNumber(newSlotNumber), + ); }); it('skips building if not enough txs and not forced', async () => { diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts index 4373b7ed267a..211b3e0204dd 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts @@ -575,6 +575,7 @@ export class CheckpointProposalJob implements Traceable { blockNumber, blocksBuilt, }); + blockPendingBroadcast = proposal; break; } @@ -751,7 +752,7 @@ export class CheckpointProposalJob implements Traceable { slot: this.targetSlot, buildSlot: this.slotNow, }); - this.metrics.recordBuiltBlock(blockBuildDuration, block.header.totalManaUsed.toNumberUnsafe()); + this.metrics.recordBuiltBlock(blockBuildDuration, block.header.totalManaUsed.toNumberUnsafe(), this.targetSlot); return { block, usedTxs }; } catch (err: any) { diff --git a/yarn-project/sequencer-client/src/sequencer/metrics.ts 
b/yarn-project/sequencer-client/src/sequencer/metrics.ts index dcc1d8a2b9e6..f077f002e4b4 100644 --- a/yarn-project/sequencer-client/src/sequencer/metrics.ts +++ b/yarn-project/sequencer-client/src/sequencer/metrics.ts @@ -68,6 +68,10 @@ export class SequencerMetrics { private fishermanMinedBlobTxPriorityFee: Histogram; private fishermanMinedBlobTxTotalCost: Histogram; + private blockInterBlockTime: Histogram; + private lastBlockBuiltTimestamp?: number; + private lastBlockBuiltSlot?: SlotNumber; + private lastSeenSlot?: SlotNumber; constructor( @@ -86,6 +90,8 @@ export class SequencerMetrics { this.blockBuildManaPerSecond = this.meter.createGauge(Metrics.SEQUENCER_BLOCK_BUILD_MANA_PER_SECOND); + this.blockInterBlockTime = this.meter.createHistogram(Metrics.SEQUENCER_BLOCK_INTER_BLOCK_TIME); + this.stateTransitionBufferDuration = this.meter.createHistogram(Metrics.SEQUENCER_STATE_TRANSITION_BUFFER_DURATION); this.checkpointAttestationDelay = this.meter.createHistogram(Metrics.SEQUENCER_CHECKPOINT_ATTESTATION_DELAY); @@ -220,12 +226,20 @@ export class SequencerMetrics { this.timeToCollectAttestations.record(Math.ceil(durationMs)); } - recordBuiltBlock(buildDurationMs: number, totalMana: number) { + recordBuiltBlock(buildDurationMs: number, totalMana: number, slot: SlotNumber) { this.blockCounter.add(1, { [Attributes.STATUS]: 'built', }); this.blockBuildDuration.record(Math.ceil(buildDurationMs)); this.blockBuildManaPerSecond.record(Math.ceil((totalMana * 1000) / buildDurationMs)); + + // Only record inter-block time between blocks built within the same slot. 
+ const now = Date.now(); + if (this.lastBlockBuiltTimestamp !== undefined && this.lastBlockBuiltSlot === slot) { + this.blockInterBlockTime.record(now - this.lastBlockBuiltTimestamp); + } + this.lastBlockBuiltTimestamp = now; + this.lastBlockBuiltSlot = slot; } recordFailedBlock() { diff --git a/yarn-project/telemetry-client/src/metrics.ts b/yarn-project/telemetry-client/src/metrics.ts index 6bd63208404b..945547ec4d6f 100644 --- a/yarn-project/telemetry-client/src/metrics.ts +++ b/yarn-project/telemetry-client/src/metrics.ts @@ -308,6 +308,12 @@ export const ARCHIVER_SYNC_PER_BLOCK: MetricDefinition = { unit: 'ms', valueType: ValueType.INT, }; +export const ARCHIVER_SYNC_PER_CHECKPOINT: MetricDefinition = { + name: 'aztec.archiver.checkpoint.sync_per_item_duration', + description: 'Duration to sync a checkpoint', + unit: 'ms', + valueType: ValueType.INT, +}; export const ARCHIVER_SYNC_BLOCK_COUNT: MetricDefinition = { name: 'aztec.archiver.block.sync_count', description: 'Number of blocks synced from L1', @@ -405,6 +411,12 @@ export const SEQUENCER_BLOCK_COUNT: MetricDefinition = { description: 'Number of blocks built by this sequencer', valueType: ValueType.INT, }; +export const SEQUENCER_BLOCK_INTER_BLOCK_TIME: MetricDefinition = { + name: 'aztec.sequencer.block.inter_block_time', + description: 'Wall-clock time elapsed between consecutive blocks being built by this sequencer', + unit: 'ms', + valueType: ValueType.INT, +}; export const SEQUENCER_CURRENT_SLOT_REWARDS: MetricDefinition = { name: 'aztec.sequencer.current_slot_rewards', description: 'The rewards earned per filled slot', From fd7df4987b131dd9957911113674a1ade129cb8e Mon Sep 17 00:00:00 2001 From: Alex Gherghisan Date: Thu, 9 Apr 2026 14:17:38 +0100 Subject: [PATCH 06/11] chore: add claude skill to send txs (#22439) This PR adds a claude skill that uses the published cli-wallet package to interact with a live network. It can deploy contracts, send txs and query state. 
--- .claude/agents/aztec-wallet.md | 244 +++++++++++++++++++++++++++ .claude/skills/aztec-wallet/SKILL.md | 86 ++++++++++ 2 files changed, 330 insertions(+) create mode 100644 .claude/agents/aztec-wallet.md create mode 100644 .claude/skills/aztec-wallet/SKILL.md diff --git a/.claude/agents/aztec-wallet.md b/.claude/agents/aztec-wallet.md new file mode 100644 index 000000000000..316449270476 --- /dev/null +++ b/.claude/agents/aztec-wallet.md @@ -0,0 +1,244 @@ +--- +name: aztec-wallet +description: | + Execute cli-wallet commands on live Aztec networks. Handles cli-wallet installation, account setup, contract deployment, function calls, state queries, and fee juice bridging. Receives pre-computed configuration from the aztec-wallet skill. +--- + +# Aztec Wallet Agent + +You execute `@aztec/cli-wallet` commands on live Aztec networks. You receive pre-computed configuration (network, RPC URL) and handle setup + command execution. + +## Input Format + +You receive: +- `NETWORK`: Network name or "custom" +- `RPC_URL`: HTTP RPC endpoint +- `WORKING_DIR`: working directory +- `PRIVATE_KEY`: Account secret key (default `0xc140de`) +- `SALT`: Account salt (default `0`) +- `COMMAND`: What to execute (natural language or cli-wallet command) + +## Execution Pattern + +**Always use script files** — never run inline bash commands. This keeps permission prompts clean and allows "always allow". + +There are two scripts, both written to `WORKING_DIR` using the Write tool: + +1. `install.sh` — one-time setup (version query + npm install). Run with `bash /install.sh` +2. `run.sh` — env setup + account registration + command. Run with `bash /run.sh` + +Both commands are stable per network, so the user can "always allow" them. + +## Phase 1: Install cli-wallet + +**Always run this phase** — it queries the live node version and only reinstalls if the version changed. 
+ +Create the directory if needed, **Write** `/install.sh`: + +```bash +set -e +cd "" +RPC_URL="" + +# Check jq is available +if ! command -v jq &>/dev/null; then + echo "ERROR: jq is required but not installed. Install it with: sudo apt-get install jq" >&2 + exit 1 +fi + +# Query node version +RESPONSE=$(curl -sf -X POST -H 'Content-type: application/json' \ + --data '{"jsonrpc":"2.0","id":1,"method":"node_getNodeInfo"}' \ + "$RPC_URL" 2>&1) || { + echo "ERROR: Could not reach node at $RPC_URL" >&2 + echo "Response: $RESPONSE" >&2 + exit 1 +} + +VERSION=$(echo "$RESPONSE" | jq -r '.result.nodeVersion') +if [ -z "$VERSION" ] || [ "$VERSION" = "null" ]; then + echo "ERROR: Node returned unexpected response (no nodeVersion found)" >&2 + echo "Response: $RESPONSE" >&2 + exit 1 +fi +echo "Node version: $VERSION" + +# Skip install if already on the correct version +INSTALLED_VERSION="" +if [ -f "node_modules/.bin/aztec-wallet" ]; then + INSTALLED_VERSION=$(node -e "console.log(require('@aztec/cli-wallet/package.json').version)" 2>/dev/null || true) +fi + +if [ "$INSTALLED_VERSION" = "$VERSION" ]; then + echo "cli-wallet@$VERSION already installed, skipping" +else + [ -n "$INSTALLED_VERSION" ] && echo "Upgrading cli-wallet from $INSTALLED_VERSION to $VERSION" + npm init -y >/dev/null 2>&1 + npm install --no-fund --no-audit --save @aztec/cli-wallet@$VERSION 2>&1 | tail -5 +fi +echo "DONE" +``` + +Then run: `bash /install.sh` + +## Phase 2: Execute Command + +**Write** `/run.sh` with env setup, account registration, and the command. + +Overwrite `run.sh` each time with the new command — the `bash /run.sh` prompt stays the same. 
+ +```bash +set -e +RPC_URL="" +PRIVATE_KEY="" +SALT="" +WORKING_DIR="" +INSTANCE_HASH=$(echo "$PRIVATE_KEY $SALT" | sha256sum | head -c 6) +DATA_DIR="$WORKING_DIR/data_$INSTANCE_HASH" +WAIT_STATUS="proposed" # or checkpointed, proven +LOG_LEVEL=warn + +mkdir -p "$DATA_DIR" +CLI="$WORKING_DIR/node_modules/.bin/aztec-wallet -n $RPC_URL -d $DATA_DIR -p native" + +# --- Register account --- +$CLI create-account -sk $PRIVATE_KEY -s $SALT -t schnorr -a default --register-only 2>&1 || true + +# --- Command --- + +``` + +Then run: `bash /run.sh` + +## Command-Specific Script Tails + +### status +```bash +echo "--- Account address ---" +$CLI get-alias accounts 2>&1 +echo "--- Fee Juice Balance ---" +$CLI get-fee-juice-balance accounts:default 2>&1 || true +echo "--- Known Contracts ---" +$CLI get-alias contracts 2>&1 || echo " (none)" +``` + +**IMPORTANT**: When reporting status to the user, always print the **full, untruncated L2 address** (all 66 hex characters). Never abbreviate it as `0xdead...beef` — the user needs the complete address to bridge funds to it. + +**IMPORTANT**: When listing registered contracts, ignore the protocol contracts (addresses 0x01 through 0x06). + +### deploy +```bash +echo "=== Deploying ===" +OUTPUT=$($CLI deploy --args -f accounts:default -a --wait-for-status $WAIT_STATUS 2>&1) +echo "$OUTPUT" +echo "" +echo "=== Registration Info (for use with a different wallet) ===" +echo "Contract address: " +echo "Artifact: " +echo "To register in another wallet:" +echo " aztec-wallet register-contract -ca
-a " +``` +- Artifact: name from `@aztec/noir-contracts.js` (e.g. `TokenContract`) or file path +- Auto-alias: lowercase, strip "Contract" suffix (`TokenContract` → `token`) +- If no constructor args, omit `--args` +- **Always** print registration info after deploy so the user can register the contract in a different wallet + +### send +```bash +$CLI send -ca contracts: --args -f accounts:default --wait-for-status $WAIT_STATUS 2>&1 +``` +- If contract not registered, add a `$CLI register-contract -ca ` line before the send +- If no args, omit `--args` + +### Private token operations + +Private minting and private transfers do **not** require the recipient's account to be deployed on-chain. The sender just needs to register the recipient's address locally. + +**Setup**: Register the recipient in the sender's wallet before sending private tokens: +```bash +$CLI register-sender -a 2>&1 +``` + +**Mint to private balance**: +```bash +$CLI send mint_to_private -ca contracts: --args accounts:default -f accounts:default --wait-for-status $WAIT_STATUS 2>&1 +``` + +**Private transfer**: +```bash +$CLI send transfer -ca contracts: --args -f accounts:default --wait-for-status $WAIT_STATUS 2>&1 +``` + +- Amounts must include decimals (e.g. 
1000 tokens with 18 decimals → `1000000000000000000000`) +- The recipient can receive private tokens without having their account deployed or having fee juice +- The recipient will need a deployed account later to *spend* those tokens + +### simulate (read-only call) +```bash +$CLI simulate -ca contracts: --args -f accounts:default 2>&1 +``` + +### bridge-fee-juice +```bash +$CLI bridge-fee-juice --amount --recipient accounts:default --wait 2>&1 +``` +- **Requires Sepolia ETH** on the L1 address derived from the private key — the command mints Fee Juice on L1 and bridges to L2, which costs L1 gas +- If the L1 account has no Sepolia ETH, this will fail with `insufficient funds for transfer` +- The `--recipient` flag can target any L2 address, not just the sender's own account + +### get-fee-juice-balance +```bash +$CLI get-fee-juice-balance accounts:default 2>&1 +``` + +### Any other cli-wallet command +```bash +$CLI 2>&1 +``` + +Available commands: `create-account`, `deploy-account`, `deploy`, `send`, `simulate`, `register-contract`, `register-sender`, `create-authwit`, `authorize-action`, `bridge-fee-juice`, `get-fee-juice-balance`, `get-alias`, `alias`, `create-secret`, `get-tx`, `profile`. + +## Alias Conventions + +- Account alias: `default` for the auto-created account +- Contract alias: lowercase artifact name without "Contract" suffix +- Use `accounts:` and `contracts:` prefix syntax in commands +- If a value starts with `0x`, it's a raw address — use directly + +## Output + +For transactions, report: +``` +Transaction: +Status: +Local processing: s +Node inclusion: s +Fee: +Block: +``` + +For deploy, also report registration info: +``` +To register in another wallet: + aztec-wallet register-contract -ca -a +``` + +For queries/calls, report the returned value directly. 
+ +## Error Handling + +- **RPC unreachable**: Report error, stop +- **Account already exists**: Non-fatal (the `|| true` handles it), proceed +- **Transaction fails**: Report full error, do not retry +- **npm install fails**: Report error, suggest checking version on npm +- **Artifact not found**: Report error, suggest checking `@aztec/noir-contracts.js` + +## Important Notes + +- **Shared WORKING_DIR**: Multiple agents with different private keys share the same `WORKING_DIR` (one per network). Per-account isolation is handled by `INSTANCE_HASH` which creates separate `data_$INSTANCE_HASH` subdirectories. Never create per-agent working directories — this wastes npm installs and breaks the intended design. +- Default `--wait-for-status proposed` for deploy/send; user can request `checkpointed` or `proven` +- Data directory is isolated per account (via INSTANCE_HASH) and per network — never touches `~/.aztec/wallet` +- For private calls, the cli-wallet proves locally using the native prover +- Slot duration is typically 72s — inclusion under 40s is normal, over 50s may indicate issues +- Each phase runs as a separate bash command from `WORKING_DIR` — env vars must be re-exported each time +- Always print full, untruncated addresses (all 66 hex chars) — never abbreviate diff --git a/.claude/skills/aztec-wallet/SKILL.md b/.claude/skills/aztec-wallet/SKILL.md new file mode 100644 index 000000000000..37172abd410c --- /dev/null +++ b/.claude/skills/aztec-wallet/SKILL.md @@ -0,0 +1,86 @@ +--- +name: aztec-wallet +description: Run cli-wallet commands against a live Aztec network. Deploy contracts, send transactions, query state, bridge funds, and manage accounts. +argument-hint: +--- + +# Aztec Wallet + +Run `@aztec/cli-wallet` commands against a live Aztec network. Handles network resolution, cli-wallet installation, account setup, and alias management automatically. 
+ +## Known Networks + +| Network | RPC Endpoint | Notes | +|---------|-------------|-------| +| `next-net` | `https://nextnet.aztec-labs.com` | Runs nightly from `next` branch | +| `testnet` | `https://rpc.testnet.aztec-labs.com` | Runs nightly from `v4-next` branch | + +Custom RPC URLs (starting with `http://` or `https://` or IP addresses) are also accepted. If an IP address is provided without a port assume a default port of 8080. + +## Step 1: Parse User Request + +Determine from the user's message: +- **Network**: A known name from the table above, or a custom RPC URL +- **Command**: What the user wants to do — deploy, send, call, bridge, account setup, etc. +- **Private key** (optional): defaults to `0xc140de` (schnorr account, salt `0`) + +If the request is ambiguous, ask the user to clarify. + +## Step 2: Resolve RPC URL + +- Known network name → look up from the table +- Unknown network → ask the user for their RPC URL +- Custom URL → use directly + +Do NOT run any bash commands (no curl, no version query). The agent handles that. + +## Step 3: Delegate to Agent + +Spawn the `aztec-wallet` agent with a structured prompt: + +``` +FIRST: Read .claude/agents/aztec-wallet.md for full operational instructions. + +Then execute the following: + +NETWORK: +RPC_URL: +WORKING_DIR: /tmp/aztec-wallet-/ +PRIVATE_KEY: +SALT: 0 + +COMMAND: +``` + +The agent will query the node version, install the matching cli-wallet, set up the account, and execute the command. + +**IMPORTANT — shared WORKING_DIR**: When spawning multiple agents (e.g. different accounts on the same network), they must ALL use the same `WORKING_DIR` (`/tmp/aztec-wallet-/`). Do NOT create per-agent directories. Per-account isolation is already handled by `INSTANCE_HASH` inside the agent, which creates separate `data_$INSTANCE_HASH` subdirectories based on the private key and salt. Sharing the working dir also avoids redundant npm installs. 
+ +## Alias Conventions + +The cli-wallet has a persistent alias system. Use it consistently: + +- **Account alias**: `default` for the auto-created account, or user-provided names +- **Contract alias**: lowercase artifact name without "Contract" suffix (e.g. `TokenContract` → `token`) +- Aliases are prefixed by type when used: `accounts:default`, `contracts:token` +- When the user says "the token contract" or "on token", interpret as alias `contracts:token` +- When the user says "transfer to Grego", figure out from context which contract and function they mean + +## Examples + +- `/aztec-wallet testnet status` → show L2 address, balance, known contracts (no tx sent) +- `/aztec-wallet testnet deploy TokenContract` → deploy Token, alias as `token` +- `/aztec-wallet testnet send mint_to_public on token --args 1000` → mint tokens +- `/aztec-wallet testnet call balance_of_public on token --args ` → read balance +- `/aztec-wallet testnet send mint_to_private on token --args 1000` → mint tokens to private balance +- `/aztec-wallet testnet send transfer on token --args 150` → private transfer (register recipient first) +- `/aztec-wallet testnet bridge 1000 to
` → bridge Fee Juice (requires Sepolia ETH on L1) +- `/aztec-wallet next-net setup-account` → create account, show L2 address for bridging +- `/aztec-wallet https://my-rpc.com deploy ./path/to/artifact.json` → custom RPC + custom contract + +## Notes on Private Transfers + +- The recipient does **not** need their account deployed on-chain to receive private tokens +- The sender must `register-sender ` before sending private tokens +- The recipient will need a deployed account to later *spend* those tokens +- `bridge-fee-juice` requires Sepolia ETH on the L1 address — it won't work with an unfunded key From 3548b451f75fbb91e6fb5f94da90750b94c8865e Mon Sep 17 00:00:00 2001 From: Maddiaa <47148561+Maddiaa0@users.noreply.github.com> Date: Thu, 9 Apr 2026 16:19:21 +0100 Subject: [PATCH 07/11] feat(pipeline): minimize deadzone w cross slot attesting (#21435) ## Overview Allow validators to attest to old proposals slightly into the next slot, this allows validators from the slot before to send their checkpoint proposals later in their own slot. 
## Key points - Allows timetable to extend past the current slot - Decouples attestation gathering from the hot path for sequencers - it can move async - Refactor of the timetable model into stdlib so it can be consumed elsewhere --- .../epochs_mbps.pipeline.parallel.test.ts | 13 +- .../e2e_l1_publisher/e2e_l1_publisher.test.ts | 13 +- .../duplicate_attestation_slash.test.ts | 1 + yarn-project/end-to-end/src/fixtures/setup.ts | 4 +- .../src/contracts/chain_state_override.ts | 147 +++++++++ yarn-project/ethereum/src/contracts/index.ts | 1 + .../ethereum/src/contracts/rollup.test.ts | 21 ++ yarn-project/ethereum/src/contracts/rollup.ts | 10 +- yarn-project/p2p/src/config.ts | 14 +- .../attestation_validator.test.ts | 74 ++++- .../attestation_validator.ts | 25 +- .../fisherman_attestation_validator.test.ts | 8 +- .../fisherman_attestation_validator.ts | 5 +- .../msg_validators/clock_tolerance.test.ts | 177 +++++++++- .../p2p/src/msg_validators/clock_tolerance.ts | 68 ++++ .../block_proposal_validator.ts | 5 +- .../checkpoint_proposal_validator.ts | 5 +- .../proposal_validator.test.ts | 63 +++- .../proposal_validator/proposal_validator.ts | 20 +- .../gossipsub/topic_score_params.test.ts | 19 +- .../services/gossipsub/topic_score_params.ts | 40 ++- .../services/libp2p/libp2p_service.test.ts | 4 + .../p2p/src/services/libp2p/libp2p_service.ts | 11 +- .../p2p/src/test-helpers/mock-pubsub.ts | 35 +- yarn-project/sequencer-client/src/config.ts | 5 - .../global_variable_builder/global_builder.ts | 22 +- .../src/publisher/sequencer-publisher.ts | 132 +++----- .../sequencer-client/src/sequencer/README.md | 17 +- .../src/sequencer/chain_state_overrides.ts | 87 +++++ .../sequencer/checkpoint_proposal_job.test.ts | 162 +++++---- .../checkpoint_proposal_job.timing.test.ts | 133 ++++++++ .../src/sequencer/checkpoint_proposal_job.ts | 278 +++++++--------- .../src/sequencer/sequencer.test.ts | 43 ++- .../src/sequencer/sequencer.ts | 24 +- .../src/sequencer/timetable.test.ts | 70 +++- 
.../src/sequencer/timetable.ts | 99 +++--- .../stdlib/src/config/sequencer-config.ts | 12 +- .../stdlib/src/timetable/index.test.ts | 79 +++++ yarn-project/stdlib/src/timetable/index.ts | 307 +++++++++++++++++- .../stdlib/src/tx/global_variable_builder.ts | 17 +- .../state_machine/global_variable_builder.ts | 10 +- .../txe/src/state_machine/mock_epoch_cache.ts | 8 +- .../src/validator.ha.integration.test.ts | 4 + 43 files changed, 1753 insertions(+), 539 deletions(-) create mode 100644 yarn-project/ethereum/src/contracts/chain_state_override.ts create mode 100644 yarn-project/sequencer-client/src/sequencer/chain_state_overrides.ts create mode 100644 yarn-project/stdlib/src/timetable/index.test.ts diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts index b926a5f9f201..b3aeb792f54b 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_mbps.pipeline.parallel.test.ts @@ -30,10 +30,10 @@ import { EpochsTestContext } from './epochs_test.js'; jest.setTimeout(1000 * 60 * 20); const NODE_COUNT = 4; -const EXPECTED_BLOCKS_PER_CHECKPOINT = 3; +const EXPECTED_BLOCKS_PER_CHECKPOINT = 8; // Send enough transactions to trigger multiple blocks within a checkpoint assuming 2 txs per block. -const TX_COUNT = 10; +const TX_COUNT = 24; /** * E2E tests for proposer pipelining with Multiple Blocks Per Slot (MBPS). 
@@ -72,16 +72,15 @@ describe('e2e_epochs/epochs_mbps_pipeline', () => { initialValidators: validators, enableProposerPipelining: true, // <- yehaw mockGossipSubNetwork: true, + mockGossipSubNetworkLatency: 500, // adverse network conditions disableAnvilTestWatcher: true, startProverNode: true, - perBlockAllocationMultiplier: 1, + perBlockAllocationMultiplier: 8, aztecEpochDuration: 4, enforceTimeTable: true, - ethereumSlotDuration: 4, - aztecSlotDuration: 36, + ethereumSlotDuration: 12, + aztecSlotDuration: 72, blockDurationMs: 8000, - l1PublishingTime: 2, - attestationPropagationTime: 0.5, aztecTargetCommitteeSize: 3, inboxLag: 2, ...setupOpts, diff --git a/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts b/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts index 818742798fcb..865fef02282f 100644 --- a/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts +++ b/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts @@ -22,7 +22,7 @@ import { EpochCache } from '@aztec/epoch-cache'; import { createEthereumChain } from '@aztec/ethereum/chain'; import { createExtendedL1Client } from '@aztec/ethereum/client'; import { getL1ContractsConfigEnvVars } from '@aztec/ethereum/config'; -import { GovernanceProposerContract, RollupContract } from '@aztec/ethereum/contracts'; +import { GovernanceProposerContract, RollupContract, SimulationOverridesBuilder } from '@aztec/ethereum/contracts'; import { type DeployAztecL1ContractsArgs, deployAztecL1Contracts } from '@aztec/ethereum/deploy-aztec-l1-contracts'; import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses'; import { TxUtilsState, createL1TxUtils } from '@aztec/ethereum/l1-tx-utils'; @@ -743,20 +743,21 @@ describe('L1Publisher integration', () => { expect(invalidateRequest).toBeDefined(); const forcePendingCheckpointNumber = invalidateRequest?.forcePendingCheckpointNumber; expect(forcePendingCheckpointNumber).toEqual(0); + const 
invalidationSimulationOverridesPlan = new SimulationOverridesBuilder() + .forPendingCheckpoint(forcePendingCheckpointNumber ?? CheckpointNumber.ZERO) + .build(); // We cannot propose directly, we need to assume the previous checkpoint is invalidated const genesis = new Fr(GENESIS_ARCHIVE_ROOT); logger.warn(`Checking can propose at next eth block on top of genesis ${genesis}`); expect(await publisher.canProposeAt(genesis, proposer!)).toBeUndefined(); - const canPropose = await publisher.canProposeAt(genesis, proposer!, { forcePendingCheckpointNumber }); + const canPropose = await publisher.canProposeAt(genesis, proposer!, invalidationSimulationOverridesPlan); expect(canPropose?.slot).toEqual(block.header.getSlot()); // Same for validation logger.warn('Checking validate block header'); await expect(publisher.validateBlockHeader(checkpoint.header)).rejects.toThrow(/Rollup__InvalidArchive/); - await publisher.validateBlockHeader(checkpoint.header, { - forcePendingCheckpointNumber: forcePendingCheckpointNumber ?? CheckpointNumber.ZERO, - }); + await publisher.validateBlockHeader(checkpoint.header, invalidationSimulationOverridesPlan); // At this point I'm gonna need to propose the correct signature ye? So confused actually here. const attestationsAndSigners = new CommitteeAttestationsAndSigners(attestations); @@ -769,7 +770,7 @@ describe('L1Publisher integration', () => { logger.warn('Enqueuing requests to invalidate and propose the checkpoint'); publisher.enqueueInvalidateCheckpoint(invalidateRequest); await publisher.enqueueProposeCheckpoint(checkpoint, attestationsAndSigners, attestationsAndSignersSignature, { - forcePendingCheckpointNumber: forcePendingCheckpointNumber ?? 
CheckpointNumber.ZERO, + simulationOverridesPlan: invalidationSimulationOverridesPlan, }); const result = await publisher.sendRequests(); expect(result!.successfulActions).toEqual(['invalidate-by-insufficient-attestations', 'propose']); diff --git a/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts index fef299ea45e2..0f6cd6c0f9c3 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts @@ -85,6 +85,7 @@ describe('e2e_p2p_duplicate_attestation_slash', () => { slashAmountLarge: slashingUnit * 3n, enforceTimeTable: true, blockDurationMs: BLOCK_DURATION * 1000, + l1PublishingTime: 1, slashDuplicateProposalPenalty: slashingUnit, slashDuplicateAttestationPenalty: slashingUnit, slashingOffsetInRounds: 1, diff --git a/yarn-project/end-to-end/src/fixtures/setup.ts b/yarn-project/end-to-end/src/fixtures/setup.ts index c0dafd21a7a7..1b580af1e0de 100644 --- a/yarn-project/end-to-end/src/fixtures/setup.ts +++ b/yarn-project/end-to-end/src/fixtures/setup.ts @@ -179,6 +179,8 @@ export type SetupOptions = { proverNodeConfig?: Partial; /** Whether to use a mock gossip sub network for p2p clients. */ mockGossipSubNetwork?: boolean; + /** Whether to add simulated latency to the mock gossipsub network (in ms) */ + mockGossipSubNetworkLatency?: number; /** Whether to disable the anvil test watcher (can still be manually started) */ disableAnvilTestWatcher?: boolean; /** Whether to enable anvil automine during deployment of L1 contracts (consider defaulting this to true). 
*/ @@ -470,7 +472,7 @@ export async function setup( let p2pClientDeps: P2PClientDeps | undefined = undefined; if (opts.mockGossipSubNetwork) { - mockGossipSubNetwork = new MockGossipSubNetwork(); + mockGossipSubNetwork = new MockGossipSubNetwork(opts.mockGossipSubNetworkLatency); p2pClientDeps = { p2pServiceFactory: getMockPubSubP2PServiceFactory(mockGossipSubNetwork) }; } diff --git a/yarn-project/ethereum/src/contracts/chain_state_override.ts b/yarn-project/ethereum/src/contracts/chain_state_override.ts new file mode 100644 index 000000000000..a695b3c4ba5a --- /dev/null +++ b/yarn-project/ethereum/src/contracts/chain_state_override.ts @@ -0,0 +1,147 @@ +import { toHex as toPaddedHex } from '@aztec/foundation/bigint-buffer'; +import type { CheckpointNumber } from '@aztec/foundation/branded-types'; +import type { Fr } from '@aztec/foundation/curves/bn254'; + +import type { StateOverride } from 'viem'; + +import { type FeeHeader, RollupContract } from './rollup.js'; + +export type PendingCheckpointOverrideState = { + archive?: Fr; + feeHeader?: FeeHeader; +}; + +/** Describes the simulated L1 rollup state that downstream calls should observe. */ +export type SimulationOverridesPlan = { + pendingCheckpointNumber?: CheckpointNumber; + pendingCheckpointState?: PendingCheckpointOverrideState; + disableBlobCheck?: boolean; +}; + +/** Builds a single-checkpoint simulation plan before it is translated into a viem state override. */ +export class SimulationOverridesBuilder { + private pendingCheckpointNumber?: CheckpointNumber; + private pendingCheckpointState?: PendingCheckpointOverrideState; + private disableBlobCheck = false; + + /** Starts from an existing plan so callers can extend or specialize it. */ + public static from(plan: SimulationOverridesPlan | undefined): SimulationOverridesBuilder { + return new SimulationOverridesBuilder().merge(plan); + } + + /** Merges another plan into this builder. Later values win. 
*/ + public merge(plan: SimulationOverridesPlan | undefined): this { + if (!plan) { + return this; + } + + this.pendingCheckpointNumber = plan.pendingCheckpointNumber; + this.pendingCheckpointState = plan.pendingCheckpointState + ? { ...(this.pendingCheckpointState ?? {}), ...plan.pendingCheckpointState } + : this.pendingCheckpointState; + this.disableBlobCheck = this.disableBlobCheck || (plan.disableBlobCheck ?? false); + + return this; + } + + /** Sets the checkpoint number that archive and fee header overrides should attach to. */ + public forPendingCheckpoint(pendingCheckpointNumber: CheckpointNumber | undefined): this { + this.pendingCheckpointNumber = pendingCheckpointNumber; + return this; + } + + /** Overrides the archive root for the configured pending checkpoint. */ + public withPendingArchive(archive: Fr): this { + this.assertPendingCheckpointNumber(); + this.pendingCheckpointState = { ...(this.pendingCheckpointState ?? {}), archive }; + return this; + } + + /** Overrides the fee header for the configured pending checkpoint. */ + public withPendingFeeHeader(feeHeader: FeeHeader): this { + this.assertPendingCheckpointNumber(); + this.pendingCheckpointState = { ...(this.pendingCheckpointState ?? {}), feeHeader }; + return this; + } + + /** Disables blob checking for simulations that cannot provide DA inputs. */ + public withoutBlobCheck(): this { + this.disableBlobCheck = true; + return this; + } + + /** Builds the final plan, or `undefined` when no overrides were configured. 
*/ + public build(): SimulationOverridesPlan | undefined { + if (!this.pendingCheckpointState && this.pendingCheckpointNumber === undefined && !this.disableBlobCheck) { + return undefined; + } + + return { + pendingCheckpointNumber: this.pendingCheckpointNumber, + pendingCheckpointState: this.pendingCheckpointState, + disableBlobCheck: this.disableBlobCheck || undefined, + }; + } + + private assertPendingCheckpointNumber(): void { + if (this.pendingCheckpointNumber === undefined) { + throw new Error('pendingCheckpointNumber must be set before attaching archive or fee header overrides'); + } + } +} + +/** Translates a simulation plan into the viem state override shape expected by rollup calls. */ +export async function buildSimulationOverridesStateOverride( + rollup: RollupContract, + plan: SimulationOverridesPlan | undefined, +): Promise { + if (!plan) { + return []; + } + + const rollupStateDiff: NonNullable = []; + + if (plan.pendingCheckpointNumber !== undefined) { + rollupStateDiff.push( + ...extractRollupStateDiff(await rollup.makePendingCheckpointNumberOverride(plan.pendingCheckpointNumber)), + ); + } + + if (plan.pendingCheckpointState && plan.pendingCheckpointNumber === undefined) { + throw new Error('pendingCheckpointState requires pendingCheckpointNumber to be set'); + } + + if (plan.pendingCheckpointState?.archive) { + rollupStateDiff.push( + ...extractRollupStateDiff( + rollup.makeArchiveOverride(plan.pendingCheckpointNumber!, plan.pendingCheckpointState.archive), + ), + ); + } + + if (plan.pendingCheckpointState?.feeHeader) { + rollupStateDiff.push( + ...extractRollupStateDiff( + await rollup.makeFeeHeaderOverride(plan.pendingCheckpointNumber!, plan.pendingCheckpointState.feeHeader), + ), + ); + } + + if (plan.disableBlobCheck) { + rollupStateDiff.push({ + slot: toPaddedHex(RollupContract.checkBlobStorageSlot, true), + value: toPaddedHex(0n, true), + }); + } + + if (rollupStateDiff.length === 0) { + return []; + } + + return [{ address: rollup.address, 
stateDiff: rollupStateDiff }]; +} + +function extractRollupStateDiff(override: StateOverride | StateOverride[number] | undefined) { + const entries = Array.isArray(override) ? override : override ? [override] : []; + return entries.flatMap(entry => entry.stateDiff ?? []); +} diff --git a/yarn-project/ethereum/src/contracts/index.ts b/yarn-project/ethereum/src/contracts/index.ts index 732cef47a5e5..b83465876044 100644 --- a/yarn-project/ethereum/src/contracts/index.ts +++ b/yarn-project/ethereum/src/contracts/index.ts @@ -1,3 +1,4 @@ +export * from './chain_state_override.js'; export * from './empire_base.js'; export * from './errors.js'; export * from './fee_asset_handler.js'; diff --git a/yarn-project/ethereum/src/contracts/rollup.test.ts b/yarn-project/ethereum/src/contracts/rollup.test.ts index eab6c239c21b..d250f5a7eee5 100644 --- a/yarn-project/ethereum/src/contracts/rollup.test.ts +++ b/yarn-project/ethereum/src/contracts/rollup.test.ts @@ -328,6 +328,27 @@ describe('Rollup', () => { }); }); + describe('makeArchiveOverride', () => { + it('creates state override that correctly sets archive for a checkpoint number', async () => { + const checkpointNumber = CheckpointNumber(5); + const expectedArchive = Fr.random(); + + // Create the override + const stateOverride = rollup.makeArchiveOverride(checkpointNumber, expectedArchive); + + // Test the override using simulateContract to read archiveAt(checkpointNumber) + const { result: overriddenArchive } = await publicClient.simulateContract({ + address: rollupAddress, + abi: RollupAbi as Abi, + functionName: 'archiveAt', + args: [BigInt(checkpointNumber)], + stateOverride, + }); + + expect(Fr.fromString(overriddenArchive as string).equals(expectedArchive)).toBe(true); + }); + }); + describe('getSlashingProposer', () => { it('returns a slashing proposer', async () => { const slashingProposer = await rollup.getSlashingProposer(); diff --git a/yarn-project/ethereum/src/contracts/rollup.ts 
b/yarn-project/ethereum/src/contracts/rollup.ts index 8468e8a2b112..105b0a888db5 100644 --- a/yarn-project/ethereum/src/contracts/rollup.ts +++ b/yarn-project/ethereum/src/contracts/rollup.ts @@ -765,19 +765,11 @@ export class RollupContract { archive: Buffer, account: `0x${string}` | Account, timestamp: bigint, - opts: { - forcePendingCheckpointNumber?: CheckpointNumber; - forceArchive?: { checkpointNumber: CheckpointNumber; archive: Fr }; - } = {}, + stateOverride: StateOverride = [], ): Promise<{ slot: SlotNumber; checkpointNumber: CheckpointNumber; timeOfNextL1Slot: bigint }> { const timeOfNextL1Slot = timestamp; const who = typeof account === 'string' ? account : account.address; - const stateOverride = RollupContract.mergeStateOverrides( - await this.makePendingCheckpointNumberOverride(opts.forcePendingCheckpointNumber), - opts.forceArchive ? this.makeArchiveOverride(opts.forceArchive.checkpointNumber, opts.forceArchive.archive) : [], - ); - try { const { result: [slot, checkpointNumber], diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index a753e50feb29..315b9075cfa1 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -39,7 +39,14 @@ export interface P2PConfig ChainConfig, TxCollectionConfig, TxFileStoreConfig, - Pick { + Pick< + SequencerConfig, + | 'blockDurationMs' + | 'expectedBlockProposalsPerSlot' + | 'l1PublishingTime' + | 'maxTxsPerBlock' + | 'attestationPropagationTime' + > { /** Maximum transactions per block for validation. Overrides maxTxsPerBlock for gossip validation when set. 
*/ validateMaxTxsPerBlock?: number; @@ -495,6 +502,11 @@ export const p2pConfigMappings: ConfigMappingsType = { description: 'Alters the format of p2p messages to include things like broadcast timestamp FOR TESTING ONLY', ...booleanConfigHelper(false), }, + l1PublishingTime: { + env: 'SEQ_L1_PUBLISHING_TIME_ALLOWANCE_IN_SLOT', + description: 'How much time (in seconds) we allow in the slot for publishing the L1 tx (defaults to 1 L1 slot).', + parseEnv: (val: string) => (val ? parseInt(val, 10) : undefined), + }, fishermanMode: { env: 'FISHERMAN_MODE', description: diff --git a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts index f69b53930c38..1c1acd16e7f2 100644 --- a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts +++ b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.test.ts @@ -18,7 +18,11 @@ describe('CheckpointAttestationValidator', () => { beforeEach(() => { epochCache = mock(); - validator = new CheckpointAttestationValidator(epochCache); + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 72, + ethereumSlotDuration: 12, + } as any); + validator = new CheckpointAttestationValidator(epochCache, { l1PublishingTime: 12 }); proposer = Secp256k1Signer.random(); attester = Secp256k1Signer.random(); }); @@ -74,6 +78,74 @@ describe('CheckpointAttestationValidator', () => { expect(result).toEqual({ result: 'ignore' }); }); + it('accepts attestation for current slot until the target-slot publish cutoff', async () => { + // Attestation is for slot 98 (current wallclock slot), but targetSlot is 99 (pipelining). 
+ const header = CheckpointHeader.random({ slotNumber: SlotNumber(98) }); + const mockAttestation = makeCheckpointAttestation({ + header, + attesterSigner: attester, + proposerSigner: proposer, + }); + + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(99), + nextSlot: SlotNumber(100), + }); + epochCache.getSlotNow.mockReturnValue(SlotNumber(98)); + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 72, + ethereumSlotDuration: 12, + } as any); + + // Within attestation window: 59000ms elapsed < (slotDuration - l1PublishingTime) * 1000 = 60000ms + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), + slot: SlotNumber(98), + ts: 1000n, + nowMs: 1059000n, // 59000ms elapsed + }); + epochCache.isInCommittee.mockResolvedValue(true); + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(proposer.address); + + const result = await validator.validate(mockAttestation); + expect(result).toEqual({ result: 'accept' }); + }); + + it('rejects attestation for current slot after the target-slot publish cutoff', async () => { + // Attestation is for slot 98 (one behind target slot 99), after the publish cutoff. 
+ const header = CheckpointHeader.random({ slotNumber: SlotNumber(98) }); + const mockAttestation = makeCheckpointAttestation({ + header, + attesterSigner: attester, + proposerSigner: proposer, + }); + + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(99), + nextSlot: SlotNumber(100), + }); + epochCache.getTargetSlot.mockReturnValue(SlotNumber(99)); + epochCache.getSlotNow.mockReturnValue(SlotNumber(98)); + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 72, + ethereumSlotDuration: 12, + } as any); + + // Outside attestation window AND outside clock tolerance: 61000ms elapsed > 60000ms cutoff + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), + slot: SlotNumber(99), + ts: 1000n, + nowMs: 1061000n, // 61000ms elapsed + }); + epochCache.isInCommittee.mockResolvedValue(true); + + const result = await validator.validate(mockAttestation); + expect(result).toEqual({ result: 'reject', severity: PeerErrorSeverity.HighToleranceError }); + }); + it('returns high tolerance error if attester is not in committee', async () => { const header = CheckpointHeader.random({ slotNumber: SlotNumber(100) }); const mockAttestation = makeCheckpointAttestation({ diff --git a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts index 0f6fbcab8b94..5d84604b592a 100644 --- a/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts +++ b/yarn-project/p2p/src/msg_validators/attestation_validator/attestation_validator.ts @@ -8,14 +8,21 @@ import { type ValidationResult, } from '@aztec/stdlib/p2p'; -import { isWithinClockTolerance } from '../clock_tolerance.js'; +import { PipeliningWindow, isWithinClockTolerance } from '../clock_tolerance.js'; export class CheckpointAttestationValidator implements P2PValidator { protected epochCache: 
EpochCacheInterface; protected logger: Logger; + private readonly pipeliningWindow: PipeliningWindow; - constructor(epochCache: EpochCacheInterface) { + constructor( + epochCache: EpochCacheInterface, + opts: { + l1PublishingTime?: number; + }, + ) { this.epochCache = epochCache; + this.pipeliningWindow = new PipeliningWindow(epochCache, { l1PublishingTime: opts.l1PublishingTime }); this.logger = createLogger('p2p:checkpoint-attestation-validator'); } @@ -23,19 +30,23 @@ export class CheckpointAttestationValidator implements P2PValidator { beforeEach(() => { epochCache = mock(); + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 72, + ethereumSlotDuration: 12, + } as any); attestationPool = mock(); - validator = new FishermanAttestationValidator(epochCache, attestationPool, getTelemetryClient()); + validator = new FishermanAttestationValidator(epochCache, attestationPool, getTelemetryClient(), { + l1PublishingTime: 12, + }); proposer = Secp256k1Signer.random(); attester = Secp256k1Signer.random(); }); diff --git a/yarn-project/p2p/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts b/yarn-project/p2p/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts index 2588db397668..5a349757a3b0 100644 --- a/yarn-project/p2p/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts +++ b/yarn-project/p2p/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts @@ -20,8 +20,11 @@ export class FishermanAttestationValidator extends CheckpointAttestationValidato epochCache: EpochCacheInterface, private attestationPool: AttestationPoolApi, telemetryClient: TelemetryClient, + opts: { + l1PublishingTime?: number; + } = {}, ) { - super(epochCache); + super(epochCache, opts); this.logger = this.logger.createChild('[FISHERMAN]'); const meter = telemetryClient.getMeter('FishermanAttestationValidator'); diff --git a/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts 
b/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts index cfdbf4bcd2bb..f788ef041da5 100644 --- a/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts +++ b/yarn-project/p2p/src/msg_validators/clock_tolerance.test.ts @@ -3,7 +3,7 @@ import { SlotNumber } from '@aztec/foundation/branded-types'; import { mock } from 'jest-mock-extended'; -import { MAXIMUM_GOSSIP_CLOCK_DISPARITY_MS, isWithinClockTolerance } from './clock_tolerance.js'; +import { MAXIMUM_GOSSIP_CLOCK_DISPARITY_MS, PipeliningWindow, isWithinClockTolerance } from './clock_tolerance.js'; describe('clock_tolerance', () => { describe('MAXIMUM_GOSSIP_CLOCK_DISPARITY_MS', () => { @@ -204,4 +204,179 @@ describe('clock_tolerance', () => { expect(isWithinClockTolerance(messageSlot, currentSlot, epochCache)).toBe(false); }); }); + + describe('PipeliningWindow.acceptsProposal', () => { + let epochCache: ReturnType>; + let pipeliningWindow: PipeliningWindow; + + beforeEach(() => { + epochCache = mock(); + epochCache.getSlotNow.mockReturnValue(SlotNumber(100)); + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 72, + ethereumSlotDuration: 12, + } as any); + pipeliningWindow = new PipeliningWindow(epochCache); + }); + + it('returns true when pipelining enabled, message is for current slot, and within grace period', () => { + // Grace period = DEFAULT_P2P_PROPAGATION_TIME * 1000 = 2000ms + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1001000n, // 1000ms elapsed, within 2000ms grace period + }); + + expect(pipeliningWindow.acceptsProposal(SlotNumber(100))).toBe(true); + }); + + it('returns true at exactly 0ms elapsed', () => { + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1000000n, // 0ms elapsed + }); + + expect(pipeliningWindow.acceptsProposal(SlotNumber(100))).toBe(true); + }); + + 
it('returns false when elapsed time exceeds grace period', () => { + // 3000ms elapsed > 2000ms grace period + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1003000n, // 3000ms elapsed + }); + + expect(pipeliningWindow.acceptsProposal(SlotNumber(100))).toBe(false); + }); + + it('returns true at the propagation boundary when within clock disparity allowance', () => { + // 2000ms elapsed = DEFAULT_P2P_PROPAGATION_TIME * 1000, still within the extra 500ms allowance + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1002000n, // 2000ms elapsed + }); + + expect(pipeliningWindow.acceptsProposal(SlotNumber(100))).toBe(true); + }); + + it('returns false at exactly the propagation boundary plus clock disparity allowance', () => { + // 2500ms elapsed = 2000ms propagation window + 500ms disparity allowance (not strictly less than) + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1002500n, // 2500ms elapsed + }); + + expect(pipeliningWindow.acceptsProposal(SlotNumber(100))).toBe(false); + }); + + it('returns false when pipelining is disabled', () => { + epochCache.isProposerPipeliningEnabled.mockReturnValue(false); + + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1001000n, // 1000ms elapsed, within grace period + }); + + expect(pipeliningWindow.acceptsProposal(SlotNumber(100))).toBe(false); + }); + + it('returns false when message is not for current slot', () => { + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1001000n, + }); + + // Message for slot 99, current slot is 100 + expect(pipeliningWindow.acceptsProposal(SlotNumber(99))).toBe(false); + }); + + it('returns false when message is for a future slot', () => { + 
epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1001000n, + }); + + // Message for slot 101, current slot is 100 + expect(pipeliningWindow.acceptsProposal(SlotNumber(101))).toBe(false); + }); + + it('uses the provided propagation time instead of the default', () => { + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1003000n, // 3000ms elapsed + }); + + const longerWindow = new PipeliningWindow(epochCache, { p2pPropagationTime: 4 }); + + expect(longerWindow.acceptsProposal(SlotNumber(100))).toBe(true); + expect(pipeliningWindow.acceptsProposal(SlotNumber(100))).toBe(false); + }); + }); + + describe('PipeliningWindow.acceptsAttestation', () => { + let epochCache: ReturnType>; + let pipeliningWindow: PipeliningWindow; + + beforeEach(() => { + epochCache = mock(); + epochCache.getSlotNow.mockReturnValue(SlotNumber(100)); + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 72, + ethereumSlotDuration: 12, + } as any); + pipeliningWindow = new PipeliningWindow(epochCache, { l1PublishingTime: 12 }); + }); + + it('returns true while still before the target-slot publish cutoff', () => { + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1059000n, // 59000ms elapsed + }); + + expect(pipeliningWindow.acceptsAttestation(SlotNumber(100))).toBe(true); + }); + + it('returns true at the target-slot publish cutoff when within clock disparity allowance', () => { + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1060000n, // 60000ms elapsed + }); + + expect(pipeliningWindow.acceptsAttestation(SlotNumber(100))).toBe(true); + }); + + it('returns false at the target-slot publish cutoff plus clock disparity allowance', () => { + 
epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: 1 as any, + slot: SlotNumber(100), + ts: 1000n, + nowMs: 1060500n, // 60500ms elapsed + }); + + expect(pipeliningWindow.acceptsAttestation(SlotNumber(100))).toBe(false); + }); + }); }); diff --git a/yarn-project/p2p/src/msg_validators/clock_tolerance.ts b/yarn-project/p2p/src/msg_validators/clock_tolerance.ts index dc00e9e6ce2b..6eee1f56e72e 100644 --- a/yarn-project/p2p/src/msg_validators/clock_tolerance.ts +++ b/yarn-project/p2p/src/msg_validators/clock_tolerance.ts @@ -1,5 +1,6 @@ import type { EpochCacheInterface } from '@aztec/epoch-cache'; import { SlotNumber } from '@aztec/foundation/branded-types'; +import { DEFAULT_P2P_PROPAGATION_TIME, createPipelinedCheckpointTimingModel } from '@aztec/stdlib/timetable'; /** * Maximum clock disparity tolerance for P2P message validation (in milliseconds). @@ -50,3 +51,70 @@ export function isWithinClockTolerance( return elapsedMs < MAXIMUM_GOSSIP_CLOCK_DISPARITY_MS; } + +/** + * Checks if a message should be accepted under the pipelining grace period. + * + * When pipelining is enabled, `targetSlot = slotNow + 1`. A proposal built in slot N-1 + * for slot N arrives when validators are in slot N, so their `targetSlot = N+1`. + * This function accepts proposals for the current wallclock slot if we're within the + * first `windowSeconds` seconds of the slot (the pipelining grace period). 
- see stdlib/timetable/index.ts + * + * @param messageSlot - The slot number from the received message + * @param epochCache - EpochCache to get timing and pipelining state + * @param windowSeconds - The window grace period allowed for attestations into the next slot + * @returns true if pipelining is enabled, the message is for the current slot, and we're within the grace period + */ +function isWithinPipeliningWindow( + messageSlot: SlotNumber, + epochCache: EpochCacheInterface, + windowSeconds: number, +): boolean { + if (!epochCache.isProposerPipeliningEnabled()) { + return false; + } + + const currentSlot = epochCache.getSlotNow(); + if (messageSlot !== currentSlot) { + return false; + } + + const { ts: slotStartTs, nowMs } = epochCache.getEpochAndSlotNow(); + const slotStartMs = slotStartTs * 1000n; + const elapsedMs = Number(nowMs - slotStartMs); + const windowMs = windowSeconds * 1000 + MAXIMUM_GOSSIP_CLOCK_DISPARITY_MS; + + return elapsedMs < windowMs; +} + +export class PipeliningWindow { + private readonly proposalWindowIntoTargetSlot: number; + private readonly attestationWindowIntoTargetSlot: number; + + constructor( + private readonly epochCache: EpochCacheInterface, + opts: { + p2pPropagationTime?: number; + l1PublishingTime?: number; + } = {}, + ) { + const l1Constants = epochCache.getL1Constants(); + const checkpointTiming = createPipelinedCheckpointTimingModel({ + aztecSlotDuration: l1Constants.slotDuration, + ethereumSlotDuration: l1Constants.ethereumSlotDuration, + l1PublishingTime: opts.l1PublishingTime ?? l1Constants.ethereumSlotDuration, + p2pPropagationTime: opts.p2pPropagationTime ?? 
DEFAULT_P2P_PROPAGATION_TIME, + }); + + this.proposalWindowIntoTargetSlot = checkpointTiming.proposalWindowIntoTargetSlot; + this.attestationWindowIntoTargetSlot = checkpointTiming.attestationWindowIntoTargetSlot; + } + + public acceptsProposal(messageSlot: SlotNumber): boolean { + return isWithinPipeliningWindow(messageSlot, this.epochCache, this.proposalWindowIntoTargetSlot); + } + + public acceptsAttestation(messageSlot: SlotNumber): boolean { + return isWithinPipeliningWindow(messageSlot, this.epochCache, this.attestationWindowIntoTargetSlot); + } +} diff --git a/yarn-project/p2p/src/msg_validators/proposal_validator/block_proposal_validator.ts b/yarn-project/p2p/src/msg_validators/proposal_validator/block_proposal_validator.ts index bac274f46475..acd0e5bb079c 100644 --- a/yarn-project/p2p/src/msg_validators/proposal_validator/block_proposal_validator.ts +++ b/yarn-project/p2p/src/msg_validators/proposal_validator/block_proposal_validator.ts @@ -6,7 +6,10 @@ import { ProposalValidator } from '../proposal_validator/proposal_validator.js'; export class BlockProposalValidator implements P2PValidator { private proposalValidator: ProposalValidator; - constructor(epochCache: EpochCacheInterface, opts: { txsPermitted: boolean; maxTxsPerBlock?: number }) { + constructor( + epochCache: EpochCacheInterface, + opts: { txsPermitted: boolean; maxTxsPerBlock?: number; p2pPropagationTime?: number }, + ) { this.proposalValidator = new ProposalValidator(epochCache, opts, 'p2p:block_proposal_validator'); } diff --git a/yarn-project/p2p/src/msg_validators/proposal_validator/checkpoint_proposal_validator.ts b/yarn-project/p2p/src/msg_validators/proposal_validator/checkpoint_proposal_validator.ts index 11d94fe6a9d5..21261084defa 100644 --- a/yarn-project/p2p/src/msg_validators/proposal_validator/checkpoint_proposal_validator.ts +++ b/yarn-project/p2p/src/msg_validators/proposal_validator/checkpoint_proposal_validator.ts @@ -6,7 +6,10 @@ import { ProposalValidator } from 
'../proposal_validator/proposal_validator.js'; export class CheckpointProposalValidator implements P2PValidator { private proposalValidator: ProposalValidator; - constructor(epochCache: EpochCacheInterface, opts: { txsPermitted: boolean; maxTxsPerBlock?: number }) { + constructor( + epochCache: EpochCacheInterface, + opts: { txsPermitted: boolean; maxTxsPerBlock?: number; p2pPropagationTime?: number }, + ) { this.proposalValidator = new ProposalValidator(epochCache, opts, 'p2p:checkpoint_proposal_validator'); } diff --git a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts index 4210645babbe..0414efbd5eb3 100644 --- a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts +++ b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.test.ts @@ -41,7 +41,15 @@ describe('ProposalValidator', () => { beforeEach(() => { epochCache = mock(); - validator = new ProposalValidator(epochCache, { txsPermitted: true, maxTxsPerBlock: undefined }, 'test'); + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 72, + ethereumSlotDuration: 12, + } as any); + validator = new ProposalValidator( + epochCache, + { txsPermitted: true, maxTxsPerBlock: undefined, p2pPropagationTime: 2 }, + 'test', + ); epochCache.getEpochAndSlotNow.mockReturnValue({ epoch: EpochNumber(1), slot: currentSlot, @@ -53,6 +61,8 @@ describe('ProposalValidator', () => { nextSlot, }); epochCache.getTargetSlot.mockReturnValue(currentSlot); + epochCache.getSlotNow.mockReturnValue(currentSlot); + epochCache.isProposerPipeliningEnabled.mockReturnValue(false); }); describe.each([ @@ -169,6 +179,57 @@ describe('ProposalValidator', () => { const result = await validator.validate(proposal); expect(result).toEqual({ result: 'accept' }); }); + + it('accepts proposal for current slot within pipelining grace period', async () => { + // Simulate pipelining: 
targetSlot = 101, but proposal is for slot 100 (current wallclock slot) + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(101), + nextSlot: SlotNumber(102), + }); + epochCache.getSlotNow.mockReturnValue(currentSlot); // slot 100 + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + + // Within grace period: 1000ms elapsed < configured propagation window 2000ms + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), + slot: currentSlot, + ts: 1000n, + nowMs: 1001000n, // 1000ms elapsed + }); + + const signer = Secp256k1Signer.random(); + const proposal = await factory(currentSlot, signer); + + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(signer.address); + const result = await validator.validate(proposal); + expect(result).toEqual({ result: 'accept' }); + }); + + it('rejects proposal for current slot outside pipelining grace period', async () => { + // Simulate pipelining: targetSlot = 101, but proposal is for slot 100 (current wallclock slot) + epochCache.getTargetAndNextSlot.mockReturnValue({ + targetSlot: SlotNumber(101), + nextSlot: SlotNumber(102), + }); + epochCache.getTargetSlot.mockReturnValue(SlotNumber(101)); + epochCache.getSlotNow.mockReturnValue(currentSlot); // slot 100 + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + + // Outside grace period: 7000ms elapsed > configured propagation window 2000ms + epochCache.getEpochAndSlotNow.mockReturnValue({ + epoch: EpochNumber(1), + slot: currentSlot, + ts: 1000n, + nowMs: 1007000n, // 7000ms elapsed + }); + + const signer = Secp256k1Signer.random(); + const proposal = await factory(currentSlot, signer); + + epochCache.getProposerAttesterAddressInSlot.mockResolvedValue(signer.address); + const result = await validator.validate(proposal); + expect(result).toEqual({ result: 'reject', severity: PeerErrorSeverity.HighToleranceError }); + }); }); describe('validateTxs', () => { diff --git 
a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts index 0f2c5d47c5bf..2997a71a0a12 100644 --- a/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts +++ b/yarn-project/p2p/src/msg_validators/proposal_validator/proposal_validator.ts @@ -8,7 +8,7 @@ import { type ValidationResult, } from '@aztec/stdlib/p2p'; -import { isWithinClockTolerance } from '../clock_tolerance.js'; +import { PipeliningWindow, isWithinClockTolerance } from '../clock_tolerance.js'; /** Validates header-level and tx-level fields of block and checkpoint proposals. */ export class ProposalValidator { @@ -16,33 +16,39 @@ export class ProposalValidator { private logger: Logger; private txsPermitted: boolean; private maxTxsPerBlock?: number; + private pipeliningWindow: PipeliningWindow; constructor( epochCache: EpochCacheInterface, - opts: { txsPermitted: boolean; maxTxsPerBlock?: number }, + opts: { txsPermitted: boolean; maxTxsPerBlock?: number; p2pPropagationTime?: number }, loggerName: string, ) { this.epochCache = epochCache; this.txsPermitted = opts.txsPermitted; this.maxTxsPerBlock = opts.maxTxsPerBlock; + this.pipeliningWindow = new PipeliningWindow(epochCache, { p2pPropagationTime: opts.p2pPropagationTime }); this.logger = createLogger(loggerName); } /** Validates header-level fields: slot, signature, and proposer. */ public async validate(proposal: BlockProposal | CheckpointProposalCore): Promise { try { - // Slot check: use target slots since proposals target pipeline slots (slot + 1 when pipelining) + // Slot check: use target slots since proposals target pipeline slots (slot + 1 when pipelining). 
const { targetSlot, nextSlot } = this.epochCache.getTargetAndNextSlot(); const slotNumber = proposal.slotNumber; if (slotNumber !== targetSlot && slotNumber !== nextSlot) { - // Check if message is for previous slot and within clock tolerance - if (!isWithinClockTolerance(slotNumber, targetSlot, this.epochCache)) { + // When pipelining, accept proposals for the current slot (built in the previous slot) + // if they're still within the shared proposal acceptance window. + if (this.pipeliningWindow.acceptsProposal(slotNumber)) { + // Fall through to remaining validation (signature, proposer, etc.) + } else if (!isWithinClockTolerance(slotNumber, targetSlot, this.epochCache)) { this.logger.warn(`Penalizing peer for invalid slot number ${slotNumber}`, { targetSlot, nextSlot }); return { result: 'reject', severity: PeerErrorSeverity.HighToleranceError }; + } else { + this.logger.verbose(`Ignoring proposal for previous slot ${slotNumber} within clock tolerance`); + return { result: 'ignore' }; } - this.logger.verbose(`Ignoring proposal for previous slot ${slotNumber} within clock tolerance`); - return { result: 'ignore' }; } // Signature validity diff --git a/yarn-project/p2p/src/services/gossipsub/topic_score_params.test.ts b/yarn-project/p2p/src/services/gossipsub/topic_score_params.test.ts index d644397a7ba2..f1241c141520 100644 --- a/yarn-project/p2p/src/services/gossipsub/topic_score_params.test.ts +++ b/yarn-project/p2p/src/services/gossipsub/topic_score_params.test.ts @@ -20,8 +20,10 @@ describe('Topic Score Params', () => { // Standard network parameters for testing (matching production values) const standardParams = { slotDurationMs: 72000, // 72 seconds + ethereumSlotDuration: 12, heartbeatIntervalMs: 700, // 700ms gossipsub heartbeat targetCommitteeSize: 48, + l1PublishingTime: 12, }; describe('calculateBlocksPerSlot', () => { @@ -42,10 +44,19 @@ describe('Topic Score Params', () => { expect(result).toBeGreaterThanOrEqual(1); }); - it('returns at least 1 block 
per slot', () => { - // Even with very long block duration, should return at least 1 - const result = calculateBlocksPerSlot(72000, 60000); - expect(result).toBeGreaterThanOrEqual(1); + it('uses the same compressed timing allowances as the sequencer for test configs', () => { + expect(() => + calculateBlocksPerSlot(24000, 4000, { + ethereumSlotDuration: 8, + l1PublishingTime: 1, + }), + ).not.toThrow(); + }); + + it('throws for an impossible timing configuration', () => { + expect(() => calculateBlocksPerSlot(72000, 60000)).toThrow( + 'Invalid timing configuration: only -6s available for block building, which is less than one blockDuration (60s).', + ); }); }); diff --git a/yarn-project/p2p/src/services/gossipsub/topic_score_params.ts b/yarn-project/p2p/src/services/gossipsub/topic_score_params.ts index b7fce0e87b46..8f324406b7bc 100644 --- a/yarn-project/p2p/src/services/gossipsub/topic_score_params.ts +++ b/yarn-project/p2p/src/services/gossipsub/topic_score_params.ts @@ -1,5 +1,5 @@ import { TopicType, createTopicString } from '@aztec/stdlib/p2p'; -import { calculateMaxBlocksPerSlot } from '@aztec/stdlib/timetable'; +import { createCheckpointTimingModel } from '@aztec/stdlib/timetable'; import { createTopicScoreParams } from '@chainsafe/libp2p-gossipsub/score'; @@ -9,12 +9,18 @@ import { createTopicScoreParams } from '@chainsafe/libp2p-gossipsub/score'; export type TopicScoringNetworkParams = { /** L2 slot duration in milliseconds */ slotDurationMs: number; + /** L1 slot duration in seconds */ + ethereumSlotDuration: number; /** Gossipsub heartbeat interval in milliseconds */ heartbeatIntervalMs: number; /** Target committee size (number of validators expected to attest per slot) */ targetCommitteeSize: number; /** Duration per block in milliseconds when building multiple blocks per slot. If undefined, single block mode. */ blockDurationMs?: number; + /** Time budget in seconds reserved for L1 publishing. Defaults to ethereumSlotDuration. 
*/ + l1PublishingTime?: number; + /** One-way proposal/attestation propagation budget in seconds. */ + p2pPropagationTime?: number; /** Expected number of block proposals per slot for scoring override. 0 disables scoring, undefined falls back to blocksPerSlot - 1. */ expectedBlockProposalsPerSlot?: number; }; @@ -25,10 +31,32 @@ export type TopicScoringNetworkParams = { * * @param slotDurationMs - L2 slot duration in milliseconds * @param blockDurationMs - Duration per block in milliseconds (undefined = single block mode) + * @param opts - Shared checkpoint timing inputs used by the sequencer and validators * @returns Number of blocks per slot */ -export function calculateBlocksPerSlot(slotDurationMs: number, blockDurationMs: number | undefined): number { - return calculateMaxBlocksPerSlot(slotDurationMs / 1000, blockDurationMs ? blockDurationMs / 1000 : undefined); +export function calculateBlocksPerSlot( + slotDurationMs: number, + blockDurationMs: number | undefined, + opts?: { + ethereumSlotDuration: number; + l1PublishingTime?: number; + p2pPropagationTime?: number; + }, +): number { + if (!opts) { + return createCheckpointTimingModel({ + aztecSlotDuration: slotDurationMs / 1000, + blockDuration: blockDurationMs ? blockDurationMs / 1000 : undefined, + }).calculateMaxBlocksPerSlot(); + } + + return createCheckpointTimingModel({ + aztecSlotDuration: slotDurationMs / 1000, + ethereumSlotDuration: opts.ethereumSlotDuration, + blockDuration: blockDurationMs ? blockDurationMs / 1000 : undefined, + l1PublishingTime: opts.l1PublishingTime ?? 
opts.ethereumSlotDuration, + p2pPropagationTime: opts.p2pPropagationTime, + }).calculateMaxBlocksPerSlot(); } /** @@ -279,7 +307,11 @@ export class TopicScoreParamsFactory { const { slotDurationMs, heartbeatIntervalMs, blockDurationMs } = params; // Compute values that are the same for all topics - this.blocksPerSlot = calculateBlocksPerSlot(slotDurationMs, blockDurationMs); + this.blocksPerSlot = calculateBlocksPerSlot(slotDurationMs, blockDurationMs, { + ethereumSlotDuration: params.ethereumSlotDuration, + l1PublishingTime: params.l1PublishingTime, + p2pPropagationTime: params.p2pPropagationTime, + }); this.heartbeatsPerSlot = slotDurationMs / heartbeatIntervalMs; this.invalidDecay = computeDecay(heartbeatIntervalMs, slotDurationMs, INVALID_DECAY_WINDOW_SLOTS); diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts index ac3e23d7fd23..4d5ee8f0957a 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.test.ts @@ -1180,6 +1180,10 @@ function createTestLibP2PService(options: CreateTestLibP2PServiceOptions): TestL epochCache = mock(), } = options; + epochCache.getL1Constants.mockReturnValue({ + slotDuration: 36, + } as any); + const mempools = mock(); mempools.attestationPool = attestationPool; mempools.txPool = txPool; diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index b7d9afe74f9b..e95ccbd05b4f 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -228,15 +228,19 @@ export class LibP2PService extends WithTracer implements P2PService { this.protocolVersion, ); + const p2pPropagationTime = config.attestationPropagationTime; const proposalValidatorOpts = { txsPermitted: !config.disableTransactions, maxTxsPerBlock: config.validateMaxTxsPerBlock ?? 
config.validateMaxTxsPerCheckpoint, + p2pPropagationTime, }; this.blockProposalValidator = new BlockProposalValidator(epochCache, proposalValidatorOpts); this.checkpointProposalValidator = new CheckpointProposalValidator(epochCache, proposalValidatorOpts); this.checkpointAttestationValidator = config.fishermanMode - ? new FishermanAttestationValidator(epochCache, mempools.attestationPool, telemetry) - : new CheckpointAttestationValidator(epochCache); + ? new FishermanAttestationValidator(epochCache, mempools.attestationPool, telemetry, { + l1PublishingTime: config.l1PublishingTime, + }) + : new CheckpointAttestationValidator(epochCache, { l1PublishingTime: config.l1PublishingTime }); this.gossipSubEventHandler = this.handleGossipSubEvent.bind(this); @@ -346,9 +350,12 @@ export class LibP2PService extends WithTracer implements P2PService { const l1Constants = epochCache.getL1Constants(); const topicScoreParams = createAllTopicScoreParams(protocolVersion, { slotDurationMs: l1Constants.slotDuration * 1000, + ethereumSlotDuration: l1Constants.ethereumSlotDuration, heartbeatIntervalMs: config.gossipsubInterval, targetCommitteeSize: l1Constants.targetCommitteeSize, blockDurationMs: config.blockDurationMs, + l1PublishingTime: config.l1PublishingTime, + p2pPropagationTime: config.attestationPropagationTime, expectedBlockProposalsPerSlot: config.expectedBlockProposalsPerSlot, }); diff --git a/yarn-project/p2p/src/test-helpers/mock-pubsub.ts b/yarn-project/p2p/src/test-helpers/mock-pubsub.ts index fa11a03415a2..c6ec55b7b8b6 100644 --- a/yarn-project/p2p/src/test-helpers/mock-pubsub.ts +++ b/yarn-project/p2p/src/test-helpers/mock-pubsub.ts @@ -1,5 +1,6 @@ import type { EpochCacheInterface } from '@aztec/epoch-cache'; import { type Logger, createLogger } from '@aztec/foundation/log'; +import { sleep } from '@aztec/foundation/sleep'; import type { AztecAsyncKVStore } from '@aztec/kv-store'; import type { L2BlockSource } from '@aztec/stdlib/block'; import type { 
ContractDataSource } from '@aztec/stdlib/contract'; @@ -139,6 +140,11 @@ class MockReqResp implements ReqRespInterface { const responses: InstanceType[] = []; const peers = this.network.getReqRespPeers().filter(p => !p.peerId.equals(this.peerId)); const targetPeers = pinnedPeer ? peers.filter(p => p.peerId.equals(pinnedPeer)) : peers; + const delayMs = this.network.getPropagationDelayMs(); + + if (delayMs > 0) { + await sleep(delayMs); + } for (const request of requests) { const requestBuffer = request.toBuffer(); @@ -175,7 +181,12 @@ class MockReqResp implements ReqRespInterface { return { status: ReqRespStatus.SUCCESS, data: Buffer.from([]) }; } try { + const delayMs = this.network.getPropagationDelayMs(); + if (delayMs > 0) { + await sleep(delayMs); + } const data = await handler(this.peerId, payload); + return { status: ReqRespStatus.SUCCESS, data }; } catch { return { status: ReqRespStatus.FAILURE }; @@ -243,10 +254,10 @@ class MockGossipSubService extends TypedEventEmitter implements score: (_peerId: PeerIdStr) => 0, }; - publish(topic: TopicStr, data: Uint8Array, _opts?: PublishOpts): Promise { + async publish(topic: TopicStr, data: Uint8Array, _opts?: PublishOpts): Promise { this.logger.debug(`Publishing message on topic ${topic}`, { topic, sender: this.peerId.toString() }); - this.network.publishToPeers(topic, data, this.peerId); - return Promise.resolve({ recipients: this.network.getPeers().filter(peer => !this.peerId.equals(peer)) }); + await this.network.publishToPeers(topic, data, this.peerId); + return { recipients: this.network.getPeers().filter(peer => !this.peerId.equals(peer)) }; } receive(msg: GossipsubMessage) { @@ -282,7 +293,8 @@ class MockGossipSubService extends TypedEventEmitter implements /** * Mock gossip sub network used for testing. - * All instances of MockGossipSubService connected to the same network will instantly receive the same messages. 
+ * All instances of MockGossipSubService connected to the same network receive the same messages, + * optionally delayed by a configurable propagation time. */ export class MockGossipSubNetwork { private peers: MockGossipSubService[] = []; @@ -291,6 +303,15 @@ export class MockGossipSubNetwork { private logger = createLogger('p2p:test:mock-gossipsub-network'); + constructor( + /** Artificial propagation delay in milliseconds applied to each message delivery. */ + private propagationDelayMs: number = 0, + ) {} + + public getPropagationDelayMs(): number { + return this.propagationDelayMs; + } + public getPeers(): PeerId[] { return this.peers.map(peer => peer.peerId); } @@ -307,7 +328,7 @@ export class MockGossipSubNetwork { return this.reqRespPeers; } - public publishToPeers(topic: TopicStr, data: Uint8Array, sender: PeerId): void { + public async publishToPeers(topic: TopicStr, data: Uint8Array, sender: PeerId): Promise { const msgId = (this.nextMsgId++).toString(); this.logger.debug(`Network is distributing message on topic ${topic}`, { topic, @@ -316,6 +337,10 @@ export class MockGossipSubNetwork { msgId, }); + if (this.propagationDelayMs > 0) { + await sleep(this.propagationDelayMs); + } + const gossipSubMsg: GossipsubMessage = { msgId, msg: { type: 'unsigned', topic, data }, propagationSource: sender }; for (const peer of this.peers) { if (peer.subscribedTopics.has(topic)) { diff --git a/yarn-project/sequencer-client/src/config.ts b/yarn-project/sequencer-client/src/config.ts index 68865991147d..7d30fe194942 100644 --- a/yarn-project/sequencer-client/src/config.ts +++ b/yarn-project/sequencer-client/src/config.ts @@ -155,11 +155,6 @@ export const sequencerConfigMappings: ConfigMappingsType = { description: 'How much time (in seconds) we allow in the slot for publishing the L1 tx (defaults to 1 L1 slot).', parseEnv: (val: string) => (val ? 
parseInt(val, 10) : undefined), }, - attestationPropagationTime: { - env: 'SEQ_ATTESTATION_PROPAGATION_TIME', - description: 'How many seconds it takes for proposals and attestations to travel across the p2p layer (one-way)', - ...numberConfigHelper(DefaultSequencerConfig.attestationPropagationTime), - }, fakeProcessingDelayPerTxMs: { description: 'Used for testing to introduce a fake delay after processing each tx', }, diff --git a/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts b/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts index ee6e37f02da7..929a1a3ebdff 100644 --- a/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts +++ b/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts @@ -1,4 +1,8 @@ -import { RollupContract } from '@aztec/ethereum/contracts'; +import { + RollupContract, + type SimulationOverridesPlan, + buildSimulationOverridesStateOverride, +} from '@aztec/ethereum/contracts'; import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses'; import type { ViemPublicClient } from '@aztec/ethereum/types'; import { BlockNumber, SlotNumber } from '@aztec/foundation/branded-types'; @@ -10,7 +14,6 @@ import type { AztecAddress } from '@aztec/stdlib/aztec-address'; import { type L1RollupConstants, getNextL1SlotTimestamp, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; import { GasFees } from '@aztec/stdlib/gas'; import type { - BuildCheckpointGlobalVariablesOpts, CheckpointGlobalVariables, GlobalVariableBuilder as GlobalVariableBuilderInterface, } from '@aztec/stdlib/tx'; @@ -120,7 +123,7 @@ export class GlobalVariableBuilder implements GlobalVariableBuilderInterface { coinbase: EthAddress, feeRecipient: AztecAddress, slotNumber: SlotNumber, - opts?: BuildCheckpointGlobalVariablesOpts, + simulationOverridesPlan?: SimulationOverridesPlan, ): Promise { const { chainId, version } = this; @@ -129,18 +132,7 @@ export class 
GlobalVariableBuilder implements GlobalVariableBuilderInterface { l1GenesisTime: this.l1GenesisTime, }); - // When pipelining, force the proposed checkpoint number and fee header to the parent so that - // the fee computation matches what L1 will see when the previous pipelined checkpoint has landed. - const pendingNumberOverride = await this.rollupContract.makePendingCheckpointNumberOverride( - opts?.forcePendingCheckpointNumber, - ); - const feeHeaderOverride = opts?.forceProposedFeeHeader - ? await this.rollupContract.makeFeeHeaderOverride( - opts.forceProposedFeeHeader.checkpointNumber, - opts.forceProposedFeeHeader.feeHeader, - ) - : []; - const stateOverride = RollupContract.mergeStateOverrides(pendingNumberOverride, feeHeaderOverride); + const stateOverride = await buildSimulationOverridesStateOverride(this.rollupContract, simulationOverridesPlan); const gasFees = new GasFees(0, await this.rollupContract.getManaMinFeeAt(timestamp, true, stateOverride)); return { chainId, version, slotNumber, timestamp, coinbase, feeRecipient, gasFees }; diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts index 9a8367e8b1f3..29ce86960587 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts @@ -4,15 +4,17 @@ import type { EpochCache } from '@aztec/epoch-cache'; import type { L1ContractsConfig } from '@aztec/ethereum/config'; import { FeeAssetPriceOracle, - type FeeHeader, type GovernanceProposerContract, type IEmpireBase, MULTI_CALL_3_ADDRESS, Multicall3, RollupContract, + SimulationOverridesBuilder, + type SimulationOverridesPlan, type SlashingProposerContract, type ViemCommitteeAttestations, type ViemHeader, + buildSimulationOverridesStateOverride, } from '@aztec/ethereum/contracts'; import { type L1FeeAnalysisResult, L1FeeAnalyzer } from '@aztec/ethereum/l1-fee-analysis'; import { 
@@ -26,7 +28,6 @@ import { } from '@aztec/ethereum/l1-tx-utils'; import { FormattedViemError, formatViemError, mergeAbis, tryExtractEvent } from '@aztec/ethereum/utils'; import { sumBigint } from '@aztec/foundation/bigint'; -import { toHex as toPaddedHex } from '@aztec/foundation/bigint-buffer'; import { CheckpointNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { trimmedBytesLength } from '@aztec/foundation/buffer'; import { pick } from '@aztec/foundation/collection'; @@ -50,7 +51,6 @@ import { type TelemetryClient, type Tracer, getTelemetryClient, trackSpan } from import { type Hex, - type StateOverride, type TransactionReceipt, type TypedDataDefinition, encodeFunctionData, @@ -119,6 +119,11 @@ export type InvalidateCheckpointRequest = { lastArchive: Fr; }; +type EnqueueProposeCheckpointOpts = { + txTimeoutAt?: Date; + simulationOverridesPlan?: SimulationOverridesPlan; +}; + interface RequestWithExpiry { action: Action; request: L1TxRequest; @@ -662,27 +667,21 @@ export class SequencerPublisher { * @param tipArchive - The archive to check * @returns The slot and block number if it is possible to propose, undefined otherwise */ - public canProposeAt( - tipArchive: Fr, - msgSender: EthAddress, - opts: { - forcePendingCheckpointNumber?: CheckpointNumber; - forceArchive?: { checkpointNumber: CheckpointNumber; archive: Fr }; - pipelined?: boolean; - } = {}, - ) { + public async canProposeAt(tipArchive: Fr, msgSender: EthAddress, simulationOverridesPlan?: SimulationOverridesPlan) { // TODO: #14291 - should loop through multiple keys to check if any of them can propose const ignoredErrors = ['SlotAlreadyInChain', 'InvalidProposer', 'InvalidArchive']; - const pipelined = opts.pipelined ?? this.epochCache.isProposerPipeliningEnabled(); + const pipelined = this.epochCache.isProposerPipeliningEnabled(); const slotOffset = pipelined ? 
this.aztecSlotDuration : 0n; const nextL1SlotTs = this.getNextL1SlotTimestamp() + slotOffset; return this.rollupContract - .canProposeAt(tipArchive.toBuffer(), msgSender.toString(), nextL1SlotTs, { - forcePendingCheckpointNumber: opts.forcePendingCheckpointNumber, - forceArchive: opts.forceArchive, - }) + .canProposeAt( + tipArchive.toBuffer(), + msgSender.toString(), + nextL1SlotTs, + await buildSimulationOverridesStateOverride(this.rollupContract, simulationOverridesPlan), + ) .catch(err => { if (err instanceof FormattedViemError && ignoredErrors.find(e => err.message.includes(e))) { this.log.warn(`Failed canProposeAtTime check with ${ignoredErrors.find(e => err.message.includes(e))}`, { @@ -704,7 +703,7 @@ export class SequencerPublisher { @trackSpan('SequencerPublisher.validateBlockHeader') public async validateBlockHeader( header: CheckpointHeader, - opts?: { forcePendingCheckpointNumber: CheckpointNumber | undefined }, + simulationOverridesPlan?: SimulationOverridesPlan, ): Promise { const flags = { ignoreDA: true, ignoreSignatures: true }; @@ -719,9 +718,7 @@ export class SequencerPublisher { ] as const; const ts = this.getSimulationTimestamp(header.slotNumber); - const stateOverrides = await this.rollupContract.makePendingCheckpointNumberOverride( - opts?.forcePendingCheckpointNumber, - ); + const stateOverrides = await buildSimulationOverridesStateOverride(this.rollupContract, simulationOverridesPlan); let balance = 0n; if (this.config.fishermanMode) { // In fisherman mode, we can't know where the proposer is publishing from @@ -882,10 +879,7 @@ export class SequencerPublisher { checkpoint: Checkpoint, attestationsAndSigners: CommitteeAttestationsAndSigners, attestationsAndSignersSignature: Signature, - options: { - forcePendingCheckpointNumber?: CheckpointNumber; - forceProposedFeeHeader?: { checkpointNumber: CheckpointNumber; feeHeader: FeeHeader }; - }, + simulationOverridesPlan?: SimulationOverridesPlan, ): Promise { const blobFields = 
checkpoint.toBlobFields(); const blobs = await getBlobsPerL1Block(blobFields); @@ -905,7 +899,7 @@ export class SequencerPublisher { blobInput, ] as const; - await this.simulateProposeTx(args, options); + await this.simulateProposeTx(args, simulationOverridesPlan); } private async enqueueCastSignalHelper( @@ -1154,11 +1148,7 @@ export class SequencerPublisher { checkpoint: Checkpoint, attestationsAndSigners: CommitteeAttestationsAndSigners, attestationsAndSignersSignature: Signature, - opts: { - txTimeoutAt?: Date; - forcePendingCheckpointNumber?: CheckpointNumber; - forceProposedFeeHeader?: { checkpointNumber: CheckpointNumber; feeHeader: FeeHeader }; - } = {}, + opts: EnqueueProposeCheckpointOpts = {}, ): Promise { const checkpointHeader = checkpoint.header; @@ -1174,6 +1164,10 @@ export class SequencerPublisher { feeAssetPriceModifier: checkpoint.feeAssetPriceModifier, }; + const simulationOverridesPlan = SimulationOverridesBuilder.from(opts.simulationOverridesPlan) + .withoutBlobCheck() + .build(); + try { // @note This will make sure that we are passing the checks for our header ASSUMING that the data is also made available // This means that we can avoid the simulation issues in later checks. @@ -1184,13 +1178,13 @@ export class SequencerPublisher { checkpoint, attestationsAndSigners, attestationsAndSignersSignature, - opts, + simulationOverridesPlan, ); } catch (err: any) { this.log.error(`Checkpoint validation failed. ${err instanceof Error ? 
err.message : 'No error message'}`, err, { ...checkpoint.getStats(), slotNumber: checkpoint.header.slotNumber, - forcePendingCheckpointNumber: opts.forcePendingCheckpointNumber, + simulationOverridesPlan, }); throw err; } @@ -1205,16 +1199,22 @@ export class SequencerPublisher { checkpoint, attestationsAndSigners, attestationsAndSignersSignature, - { - // Forcing pending checkpoint number is included its required if an invalidation request is included - forcePendingCheckpointNumber: opts.forcePendingCheckpointNumber, - }, + simulationOverridesPlan, ); }; } - this.log.verbose(`Enqueuing checkpoint propose transaction`, { ...checkpoint.toCheckpointInfo(), ...opts }); - await this.addProposeTx(checkpoint, proposeTxArgs, opts, preCheck); + this.log.verbose(`Enqueuing checkpoint propose transaction`, { + ...checkpoint.toCheckpointInfo(), + txTimeoutAt: opts.txTimeoutAt, + simulationOverridesPlan, + }); + await this.addProposeTx( + checkpoint, + proposeTxArgs, + { txTimeoutAt: opts.txTimeoutAt, simulationOverridesPlan }, + preCheck, + ); } public enqueueInvalidateCheckpoint( @@ -1344,10 +1344,7 @@ export class SequencerPublisher { this.l1TxUtils.restart(); } - private async prepareProposeTx( - encodedData: L1ProcessArgs, - options: { forcePendingCheckpointNumber?: CheckpointNumber }, - ) { + private async prepareProposeTx(encodedData: L1ProcessArgs, simulationOverridesPlan?: SimulationOverridesPlan) { const kzg = Blob.getViemKzgInstance(); const blobInput = getPrefixedEthBlobCommitments(encodedData.blobs); this.log.debug('Validating blob input', { blobInput }); @@ -1418,7 +1415,7 @@ export class SequencerPublisher { blobInput, ] as const; - const { rollupData, simulationResult } = await this.simulateProposeTx(args, options); + const { rollupData, simulationResult } = await this.simulateProposeTx(args, simulationOverridesPlan); return { args, blobEvaluationGas, rollupData, simulationResult }; } @@ -1442,10 +1439,7 @@ export class SequencerPublisher { ViemSignature, 
`0x${string}`, ], - options: { - forcePendingCheckpointNumber?: CheckpointNumber; - forceProposedFeeHeader?: { checkpointNumber: CheckpointNumber; feeHeader: FeeHeader }; - }, + simulationOverridesPlan?: SimulationOverridesPlan, ) { const rollupData = encodeFunctionData({ abi: RollupAbi, @@ -1453,34 +1447,7 @@ export class SequencerPublisher { args, }); - // override the proposed checkpoint number if requested - const forcePendingCheckpointNumberStateDiff = ( - options.forcePendingCheckpointNumber !== undefined - ? await this.rollupContract.makePendingCheckpointNumberOverride(options.forcePendingCheckpointNumber) - : [] - ).flatMap(override => override.stateDiff ?? []); - - // override the fee header for a specific checkpoint number if requested (used when pipelining) - const forceProposedFeeHeaderStateDiff = ( - options.forceProposedFeeHeader !== undefined - ? await this.rollupContract.makeFeeHeaderOverride( - options.forceProposedFeeHeader.checkpointNumber, - options.forceProposedFeeHeader.feeHeader, - ) - : [] - ).flatMap(override => override.stateDiff ?? 
[]); - - const stateOverrides: StateOverride = [ - { - address: this.rollupContract.address, - // @note we override checkBlob to false since blobs are not part simulate() - stateDiff: [ - { slot: toPaddedHex(RollupContract.checkBlobStorageSlot, true), value: toPaddedHex(0n, true) }, - ...forcePendingCheckpointNumberStateDiff, - ...forceProposedFeeHeaderStateDiff, - ], - }, - ]; + const stateOverrides = await buildSimulationOverridesStateOverride(this.rollupContract, simulationOverridesPlan); // In fisherman mode, simulate as the proposer but with sufficient balance if (this.proposerAddressForSimulation) { stateOverrides.push({ @@ -1545,17 +1512,16 @@ export class SequencerPublisher { private async addProposeTx( checkpoint: Checkpoint, encodedData: L1ProcessArgs, - opts: { - txTimeoutAt?: Date; - forcePendingCheckpointNumber?: CheckpointNumber; - forceProposedFeeHeader?: { checkpointNumber: CheckpointNumber; feeHeader: FeeHeader }; - } = {}, + opts: EnqueueProposeCheckpointOpts = {}, preCheck?: () => Promise, ): Promise { const slot = checkpoint.header.slotNumber; const timer = new Timer(); const kzg = Blob.getViemKzgInstance(); - const { rollupData, simulationResult, blobEvaluationGas } = await this.prepareProposeTx(encodedData, opts); + const { rollupData, simulationResult, blobEvaluationGas } = await this.prepareProposeTx( + encodedData, + opts.simulationOverridesPlan, + ); const startBlock = await this.l1TxUtils.getBlockNumber(); const gasLimit = this.l1TxUtils.bumpGasLimit( BigInt(Math.ceil((Number(simulationResult.gasUsed) * 64) / 63)) + @@ -1578,7 +1544,7 @@ export class SequencerPublisher { data: rollupData, }, lastValidL2Slot: checkpoint.header.slotNumber, - gasConfig: { ...opts, gasLimit }, + gasConfig: { txTimeoutAt: opts.txTimeoutAt, gasLimit }, preCheck, blobConfig: { blobs: encodedData.blobs.map(b => b.data), diff --git a/yarn-project/sequencer-client/src/sequencer/README.md b/yarn-project/sequencer-client/src/sequencer/README.md index 
5c62434b4e5a..cd45b846babd 100644 --- a/yarn-project/sequencer-client/src/sequencer/README.md +++ b/yarn-project/sequencer-client/src/sequencer/README.md @@ -88,7 +88,8 @@ checkpointFinalizationTime = propagationTime timeReservedAtEnd (normal mode) = blockDuration (last sub-slot for reexecution) + checkpointFinalizationTime -timeReservedAtEnd (pipelining) = blockDuration (last sub-slot for reexecution only) +timeReservedAtEnd (pipelining) = assembleTime + + propagationTime (proposal must reach validators before the slot flips) timeAvailableForBlocks = slotDuration - initializationOffset - timeReservedAtEnd @@ -109,9 +110,9 @@ This means: **The same slot with proposer pipelining enabled:** ``` -timeReservedAtEnd = 8s -timeAvailableForBlocks = 72s - 2s - 8s = 62s -numberOfBlocks = floor(62s / 8s) = 7 blocks +timeReservedAtEnd = 1s + 2s = 3s +timeAvailableForBlocks = 72s - 2s - 3s = 67s +numberOfBlocks = floor(67s / 8s) = 8 blocks ``` The extra two block opportunities come from not charging the current slot for checkpoint finalization and L1 publishing. @@ -128,8 +129,8 @@ It helps to think in terms of two different slots: So the work is split like this: - **During slot N-1**: Initialization, block building, and last-block re-execution -- **Near the end of slot N-1**: The checkpoint proposal is broadcast and validators attest to checkpoint N. -- **During slot N**: The proposer collects signatures, and the checkpoint is submitted to L1 +- **Near the end of slot N-1**: The checkpoint proposal is broadcast so validators can start the last re-execution as slot `N` begins. +- **During slot N**: Validators finish re-executing, send attestations, the proposer collects them, and the checkpoint is submitted to L1 before slot `N` reaches its publish cutoff In other words, pipelining does not mean "do everything for slot N earlier". 
It specifically moves **block production and block re-execution** earlier, while **checkpoint proposal, attestation gathering, and L1 submission** remain aligned with slot `N`. @@ -142,7 +143,7 @@ Slot 11 (wall clock): - Collect checkpoint 12 attestations Slot 12 (target/submission slot): -- Collect remaining checkpoint 12 attestations +- Collect attestations for checkpoint 12 until slot 12 reaches its L1 publish cutoff - Submit checkpoint 12 to L1 ``` @@ -534,7 +535,7 @@ The sequencer transitions through these states during a slot: | **WAITING_UNTIL_NEXT_BLOCK** | Until next sub-slot start | Sleep between blocks to maintain intervals | | **ASSEMBLING_CHECKPOINT** | assembleTime (1s) | Assemble final checkpoint | | **COLLECTING_ATTESTATIONS** | Until L1 publish deadline | Wait for validator signatures | -| **PUBLISHING_CHECKPOINT** | Until slot end | Submit to L1 | +| **PUBLISHING_CHECKPOINT** | Until L1 publish deadline | Submit to L1 | ## Complete Example: 72-Second Slot with 8-Second Sub-Slots diff --git a/yarn-project/sequencer-client/src/sequencer/chain_state_overrides.ts b/yarn-project/sequencer-client/src/sequencer/chain_state_overrides.ts new file mode 100644 index 000000000000..e2cdc58522e2 --- /dev/null +++ b/yarn-project/sequencer-client/src/sequencer/chain_state_overrides.ts @@ -0,0 +1,87 @@ +import { RollupContract, SimulationOverridesBuilder, type SimulationOverridesPlan } from '@aztec/ethereum/contracts'; +import { CheckpointNumber } from '@aztec/foundation/branded-types'; +import type { Fr } from '@aztec/foundation/curves/bn254'; +import type { Logger } from '@aztec/foundation/log'; +import type { ProposedCheckpointData } from '@aztec/stdlib/checkpoint'; + +type PipelinedParentSimulationOverridesPlanInput = { + checkpointNumber: CheckpointNumber; + proposedCheckpointData?: ProposedCheckpointData; + rollup: RollupContract; + log: Logger; +}; + +type SubmissionSimulationOverridesPlanInput = { + pipelinedParentPlan?: SimulationOverridesPlan; + 
invalidateToPendingCheckpointNumber?: CheckpointNumber; + lastArchiveRoot: Fr; + pipeliningEnabled: boolean; +}; + +/** Builds the simulated parent checkpoint view used while constructing a pipelined proposal. */ +export async function buildPipelinedParentSimulationOverridesPlan( + input: PipelinedParentSimulationOverridesPlanInput, +): Promise { + const parentCheckpointNumber = CheckpointNumber(input.checkpointNumber - 1); + const builder = new SimulationOverridesBuilder().forPendingCheckpoint(parentCheckpointNumber); + + const pendingFeeHeader = await computePipelinedParentFeeHeader(input); + if (pendingFeeHeader) { + builder.withPendingFeeHeader(pendingFeeHeader); + } + + return builder.build(); +} + +/** Builds the simulated chain view used when validating and enqueueing checkpoint submission. */ +export function buildSubmissionSimulationOverridesPlan( + input: SubmissionSimulationOverridesPlanInput, +): SimulationOverridesPlan | undefined { + const pendingCheckpointNumber = + input.invalidateToPendingCheckpointNumber ?? input.pipelinedParentPlan?.pendingCheckpointNumber; + + const builder = SimulationOverridesBuilder.from(input.pipelinedParentPlan).forPendingCheckpoint( + pendingCheckpointNumber, + ); + + if (input.pipeliningEnabled && pendingCheckpointNumber !== undefined) { + builder.withPendingArchive(input.lastArchiveRoot); + } + + return builder.build(); +} + +/** Derives the pending parent fee header used during pipelined proposal simulation. 
*/ +export async function computePipelinedParentFeeHeader(input: PipelinedParentSimulationOverridesPlanInput) { + if (!input.proposedCheckpointData || input.checkpointNumber < 2) { + return undefined; + } + + const grandparentCheckpointNumber = CheckpointNumber(input.checkpointNumber - 2); + + try { + const [grandparentCheckpoint, manaTarget] = await Promise.all([ + input.rollup.getCheckpoint(grandparentCheckpointNumber), + input.rollup.getManaTarget(), + ]); + + if (!grandparentCheckpoint?.feeHeader) { + input.log.error( + `Grandparent checkpoint or feeHeader missing for checkpoint ${grandparentCheckpointNumber.toString()}`, + ); + return undefined; + } + + return RollupContract.computeChildFeeHeader( + grandparentCheckpoint.feeHeader, + input.proposedCheckpointData.totalManaUsed, + input.proposedCheckpointData.feeAssetPriceModifier, + manaTarget, + ); + } catch (err) { + input.log.error( + `Failed to derive pipelined parent fee header for checkpoint ${grandparentCheckpointNumber.toString()}: ${err}`, + ); + return undefined; + } +} diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts index b9cd6c06880e..0b3466989209 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts @@ -13,6 +13,7 @@ import { Fr } from '@aztec/foundation/curves/bn254'; import { TimeoutError } from '@aztec/foundation/error'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Signature } from '@aztec/foundation/eth-signature'; +import { createLogger } from '@aztec/foundation/log'; import { TestDateProvider } from '@aztec/foundation/timer'; import type { TypedEventEmitter } from '@aztec/foundation/types'; import { type P2P, P2PClientState } from '@aztec/p2p'; @@ -62,6 +63,7 @@ import { mockTxIterator, setupTxsAndBlock, } from '../test/utils.js'; 
+import { computePipelinedParentFeeHeader } from './chain_state_overrides.js'; import { CheckpointProposalJob } from './checkpoint_proposal_job.js'; import type { SequencerEvents } from './events.js'; import type { SequencerMetrics } from './metrics.js'; @@ -309,7 +311,7 @@ describe('CheckpointProposalJob', () => { validatorClient.collectAttestations.mockResolvedValue(getAttestations(block)); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); @@ -330,7 +332,7 @@ describe('CheckpointProposalJob', () => { job.updateConfig({ minTxsPerBlock: 2 }); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeUndefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(0); @@ -347,7 +349,7 @@ describe('CheckpointProposalJob', () => { job.updateConfig({ buildCheckpointIfEmpty: true, minTxsPerBlock: 1 }); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); @@ -368,7 +370,7 @@ describe('CheckpointProposalJob', () => { validatorClient.collectAttestations.mockResolvedValue(getAttestations(block)); - await job.execute(); + await job.executeAndAwait(); expect(validatorClient.collectAttestations).toHaveBeenCalledTimes(1); expect(validatorClient.collectAttestations).toHaveBeenCalledWith( @@ -403,7 +405,7 @@ describe('CheckpointProposalJob', () => { checkpointBuilder.seedBlocks([block], [txs]); validatorClient.collectAttestations.mockResolvedValue(getAttestations(block)); - await job.execute(); + await job.executeAndAwait(); // Verify startCheckpoint was called with the out hashes from previous checkpoints expect(checkpointsBuilder.startCheckpointCalls).toHaveLength(1); @@ -444,7 +446,7 @@ describe('CheckpointProposalJob', () => { 
checkpointBuilder.seedBlocks([block], [txs]); validatorClient.collectAttestations.mockResolvedValue(getAttestations(block)); - await job.execute(); + await job.executeAndAwait(); // Verify only the checkpoint before the current one is included expect(checkpointsBuilder.startCheckpointCalls).toHaveLength(1); @@ -601,47 +603,10 @@ describe('CheckpointProposalJob', () => { ); } - describe('computeForceProposedFeeHeader', () => { + describe('computePipelinedParentFeeHeader', () => { // Use checkpoint 3 so the grandparent (checkpoint 1) is valid const pipelinedCheckpointNumber = CheckpointNumber(3); - function createJobWithProposedCheckpoint(pendingData: ProposedCheckpointData): TestCheckpointProposalJob { - const setStateFn = jest.fn(); - const eventEmitter = new EventEmitter() as TypedEventEmitter; - - return new TestCheckpointProposalJob( - SlotNumber(newSlotNumber), - SlotNumber(newSlotNumber), - epoch, - pipelinedCheckpointNumber, - lastBlockNumber, - proposer, - publisher, - attestorAddress, - undefined, - validatorClient, - globalVariableBuilder, - p2p, - worldState, - l1ToL2MessageSource, - l2BlockSource, - checkpointsBuilder as unknown as FullNodeCheckpointsBuilder, - blockSink, - l1Constants, - config, - timetable, - slasherClient, - epochCache, - dateProvider, - metrics, - eventEmitter, - setStateFn, - getTelemetryClient().getTracer('test'), - { actor: 'test' }, - pendingData, - ); - } - const pendingData: ProposedCheckpointData = { checkpointNumber: CheckpointNumber(1), header: CheckpointHeader.empty(), @@ -662,7 +627,12 @@ describe('CheckpointProposalJob', () => { }; it('returns undefined when proposedCheckpointData is not set', async () => { - const result = await job.computeForceProposedFeeHeader(CheckpointNumber(1)); + const result = await computePipelinedParentFeeHeader({ + checkpointNumber: pipelinedCheckpointNumber, + proposedCheckpointData: undefined, + rollup: publisher.rollupContract, + log: createLogger('test'), + }); 
expect(result).toBeUndefined(); }); @@ -673,16 +643,18 @@ describe('CheckpointProposalJob', () => { } it('computes fee header from grandparent checkpoint', async () => { - const jobWithPending = createJobWithProposedCheckpoint(pendingData); const manaTarget = 10_000n; mockRollup({ grandparentCheckpoint: { feeHeader: grandparentFeeHeader }, manaTarget }); - const parentCheckpointNumber = CheckpointNumber(1); - const result = await jobWithPending.computeForceProposedFeeHeader(parentCheckpointNumber); + const result = await computePipelinedParentFeeHeader({ + checkpointNumber: pipelinedCheckpointNumber, + proposedCheckpointData: pendingData, + rollup: publisher.rollupContract, + log: createLogger('test'), + }); expect(result).toBeDefined(); - expect(result!.checkpointNumber).toBe(parentCheckpointNumber); const expected = RollupContract.computeChildFeeHeader( grandparentFeeHeader, @@ -690,43 +662,56 @@ describe('CheckpointProposalJob', () => { pendingData.feeAssetPriceModifier, manaTarget, ); - expect(result!.feeHeader).toEqual(expected); + expect(result).toEqual(expected); }); it('returns undefined when grandparent checkpoint is not found', async () => { - const jobWithPending = createJobWithProposedCheckpoint(pendingData); mockRollup({ grandparentCheckpoint: undefined }); - const result = await jobWithPending.computeForceProposedFeeHeader(CheckpointNumber(1)); + const result = await computePipelinedParentFeeHeader({ + checkpointNumber: pipelinedCheckpointNumber, + proposedCheckpointData: pendingData, + rollup: publisher.rollupContract, + log: createLogger('test'), + }); expect(result).toBeUndefined(); }); it('returns undefined when grandparent checkpoint has no feeHeader', async () => { - const jobWithPending = createJobWithProposedCheckpoint(pendingData); mockRollup({ grandparentCheckpoint: { feeHeader: undefined } }); - const result = await jobWithPending.computeForceProposedFeeHeader(CheckpointNumber(1)); + const result = await computePipelinedParentFeeHeader({ + 
checkpointNumber: pipelinedCheckpointNumber, + proposedCheckpointData: pendingData, + rollup: publisher.rollupContract, + log: createLogger('test'), + }); expect(result).toBeUndefined(); }); it('returns undefined when rollup calls throw', async () => { - const jobWithPending = createJobWithProposedCheckpoint(pendingData); jest.spyOn(publisher.rollupContract, 'getCheckpoint').mockRejectedValue(new Error('rpc error')); - const result = await jobWithPending.computeForceProposedFeeHeader(CheckpointNumber(1)); + const result = await computePipelinedParentFeeHeader({ + checkpointNumber: pipelinedCheckpointNumber, + proposedCheckpointData: pendingData, + rollup: publisher.rollupContract, + log: createLogger('test'), + }); expect(result).toBeUndefined(); }); }); describe('multiple block mode', () => { beforeEach(() => { - // Multiple block mode: set blockDurationMs to 8 seconds + // Keep the real L1 publish budget and use the largest valid non-pipelined + // block duration for a 24s slot under the stricter timing guards. 
job.setTimetable( new SequencerTimetable({ ethereumSlotDuration, aztecSlotDuration: slotDuration, l1PublishingTime: ethereumSlotDuration, - blockDurationMs: 8000, + blockDurationMs: 3000, enforce: true, }), ); @@ -747,7 +732,7 @@ describe('CheckpointProposalJob', () => { // Install spy on waitUntilTimeInSlot to verify it's called with expected deadlines const waitSpy = jest.spyOn(job, 'waitUntilTimeInSlot'); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(2); @@ -777,7 +762,7 @@ describe('CheckpointProposalJob', () => { const waitSpy = jest.spyOn(job, 'waitUntilTimeInSlot'); job.updateConfig({ minTxsPerBlock: 0 }); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); @@ -806,7 +791,7 @@ describe('CheckpointProposalJob', () => { const waitSpy = jest.spyOn(job, 'waitUntilTimeInSlot'); job.updateConfig({ minTxsPerBlock: 5, buildCheckpointIfEmpty: true }); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); @@ -834,7 +819,7 @@ describe('CheckpointProposalJob', () => { const waitSpy = jest.spyOn(job, 'waitUntilTimeInSlot'); job.updateConfig({ minTxsPerBlock: 5, buildCheckpointIfEmpty: false }); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeUndefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(0); @@ -867,7 +852,7 @@ describe('CheckpointProposalJob', () => { // Install spy on waitUntilTimeInSlot const waitSpy = jest.spyOn(job, 'waitUntilTimeInSlot'); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); // Only one block built 
due to time constraints @@ -880,7 +865,7 @@ describe('CheckpointProposalJob', () => { }); it('calls waitUntilTimeInSlot with expected deadline based on block duration', async () => { - const blockDurationSeconds = 8; // 8000ms / 1000 + const blockDurationSeconds = 3; // 3000ms / 1000 // Mock timetable to allow 3 blocks jest @@ -896,13 +881,13 @@ describe('CheckpointProposalJob', () => { const waitSpy = jest.spyOn(job, 'waitUntilTimeInSlot'); - await job.execute(); + await job.executeAndAwait(); // With 3 blocks where the 3rd is the last, waitUntilTimeInSlot should be called twice // (after block 1 and block 2, but not after block 3 since it's the last) expect(waitSpy).toHaveBeenCalledTimes(2); - expect(waitSpy.mock.calls[0][0]).toEqual(10); - expect(waitSpy.mock.calls[1][0]).toEqual(18); + expect(waitSpy.mock.calls[0][0]).toEqual(5); + expect(waitSpy.mock.calls[1][0]).toEqual(8); }); it('does not call waitUntilTimeInSlot when building the last block', async () => { @@ -925,7 +910,7 @@ describe('CheckpointProposalJob', () => { const waitSpy = jest.spyOn(job, 'waitUntilTimeInSlot'); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); @@ -993,7 +978,7 @@ describe('CheckpointProposalJob', () => { p2p.getPendingTxCount.mockResolvedValue(txs.length); p2p.iterateEligiblePendingTxs.mockImplementation(() => mockTxIterator(Promise.resolve(txs))); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); // Should return undefined when no time available expect(checkpoint).toBeUndefined(); @@ -1011,7 +996,7 @@ describe('CheckpointProposalJob', () => { validatorClient.collectAttestations.mockResolvedValue(getAttestations(block)); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); @@ 
-1034,7 +1019,7 @@ describe('CheckpointProposalJob', () => { validatorClient.collectAttestations.mockResolvedValue(getAttestations(block)); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); // Should still complete if first block succeeds expect(checkpoint).toBeDefined(); @@ -1052,7 +1037,7 @@ describe('CheckpointProposalJob', () => { checkpointBuilder.errorOnBuild = new Error('Block build failed'); // The job catches the error internally and returns undefined - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeUndefined(); }); @@ -1063,9 +1048,10 @@ describe('CheckpointProposalJob', () => { // Mock collectAttestations to fail with timeout validatorClient.collectAttestations.mockRejectedValue(new AttestationTimeoutError(0, 3, SlotNumber.ZERO)); - const checkpoint = await job.execute(); + // Checkpoint is returned after broadcast — attestation failure happens in the background + const checkpoint = await job.executeAndAwait(); - expect(checkpoint).toBeUndefined(); + expect(checkpoint).toBeDefined(); expect(validatorClient.collectAttestations).toHaveBeenCalled(); }); @@ -1112,7 +1098,7 @@ describe('CheckpointProposalJob', () => { const { txs, block } = await setupTxsAndBlock(p2p, globalVariables, 1, chainId); checkpointBuilder.seedBlocks([block], [txs]); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); // Should complete even with empty committee expect(checkpoint).toBeDefined(); @@ -1127,7 +1113,7 @@ describe('CheckpointProposalJob', () => { const attestations = getAttestations(block); validatorClient.collectAttestations.mockResolvedValue(attestations); - const checkpoint = await job.execute(); + const checkpoint = await job.executeAndAwait(); expect(checkpoint).toBeDefined(); expect(validatorClient.collectAttestations).toHaveBeenCalled(); @@ -1139,9 +1125,9 @@ describe('CheckpointProposalJob', () => { 
validatorClient.collectAttestations.mockRejectedValue(new TimeoutError('Attestation collection timed out')); - await job.execute(); + await job.executeAndAwait(); - // Should handle timeout gracefully + // Should handle timeout gracefully (in background pipeline) expect(validatorClient.collectAttestations).toHaveBeenCalled(); }); }); @@ -1158,7 +1144,7 @@ describe('CheckpointProposalJob', () => { ethereumSlotDuration, aztecSlotDuration: slotDuration, l1PublishingTime: ethereumSlotDuration, - blockDurationMs: 8000, + blockDurationMs: 3000, enforce: true, }), ); @@ -1176,7 +1162,7 @@ describe('CheckpointProposalJob', () => { throw new DutyAlreadySignedError(SlotNumber(1), DutyType.BLOCK_PROPOSAL, 0, 'node-2'); }); - const result = await job.execute(); + const result = await job.executeAndAwait(); // Should return undefined and stop building expect(result).toBeUndefined(); @@ -1199,7 +1185,7 @@ describe('CheckpointProposalJob', () => { ethereumSlotDuration, aztecSlotDuration: slotDuration, l1PublishingTime: ethereumSlotDuration, - blockDurationMs: 8000, + blockDurationMs: 3000, enforce: true, }), ); @@ -1217,7 +1203,7 @@ describe('CheckpointProposalJob', () => { throw new SlashingProtectionError(SlotNumber(1), DutyType.BLOCK_PROPOSAL, 0, 'hash1', 'hash2', 'node-1'); }); - const result = await job.execute(); + const result = await job.executeAndAwait(); // Should return undefined and stop building expect(result).toBeUndefined(); @@ -1238,6 +1224,13 @@ class TestCheckpointProposalJob extends CheckpointProposalJob { return Promise.resolve(); } + /** Wraps execute + awaitPendingSubmission so tests see the full pipeline complete. 
*/ + public async executeAndAwait(): Promise { + const result = await this.execute(); + await this.awaitPendingSubmission(); + return result; + } + /** Update config for testing - allows tests to modify config after job creation */ public updateConfig(partialConfig: Partial): void { this.config = { ...this.config, ...partialConfig }; @@ -1253,11 +1246,6 @@ class TestCheckpointProposalJob extends CheckpointProposalJob { return this.timetable; } - /** Expose computeForceProposedFeeHeader for testing */ - public override computeForceProposedFeeHeader(parentCheckpointNumber: CheckpointNumber) { - return super.computeForceProposedFeeHeader(parentCheckpointNumber); - } - /** Expose internal buildSingleBlock method */ public override buildSingleBlock( checkpointBuilder: CheckpointBuilder, diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts index 0b660e9bcba3..81ceda5c9b40 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts @@ -889,6 +889,7 @@ describe('CheckpointProposalJob Timing Tests', () => { job.setTimetable(timetable); await job.execute(); + await job.awaitPendingSubmission(); // Verify collectAttestations was called expect(validatorClient.collectAttestations).toHaveBeenCalled(); @@ -927,6 +928,7 @@ describe('CheckpointProposalJob Timing Tests', () => { job.setTimetable(timetable); await job.execute(); + await job.awaitPendingSubmission(); // Attestation collection should start after the last block is built and checkpoint is assembled // Last block deadline at 17s (sub-slot 2), plus assembly time @@ -956,6 +958,7 @@ describe('CheckpointProposalJob Timing Tests', () => { job.setTimetable(timetable); await job.execute(); + await job.awaitPendingSubmission(); // Deadline should still be absolute (slotStart + 
60s), not relative to start time // Uses PUBLISHING_CHECKPOINT state: slotDuration - l1PublishingTime = 72 - 12 = 60 @@ -966,4 +969,134 @@ describe('CheckpointProposalJob Timing Tests', () => { expect(actualDeadlineSeconds).toBeCloseTo(expectedDeadlineSeconds, 0); }); }); + + describe('Pipelining Attestation Timing', () => { + const targetSlot = SlotNumber(2); // Target slot is one ahead of build slot + + /** Create a pipelining-aware job where targetSlot = slotNumber + 1 */ + function createPipeliningJob(): TimingTestCheckpointProposalJob { + const pipeliningTimetable = new SequencerTimetable( + { + ethereumSlotDuration: ETHEREUM_SLOT_DURATION, + aztecSlotDuration: AZTEC_SLOT_DURATION, + l1PublishingTime: L1_PUBLISHING_TIME, + p2pPropagationTime: P2P_PROPAGATION_TIME, + blockDurationMs: BLOCK_DURATION * 1000, + enforce: true, + pipelining: true, + }, + undefined, + createLogger('test:timetable:pipelining'), + ); + + const setStateFn = jest.fn(); + const eventEmitter = new EventEmitter() as TypedEventEmitter; + + const job = new TimingTestCheckpointProposalJob( + dateProvider, + getSecondsIntoSlot, + slotNumber, + targetSlot, + epoch, + checkpointNumber, + BlockNumber.ZERO, + proposer, + publisher, + attestorAddress, + undefined, // invalidateCheckpoint + validatorClient, + globalVariableBuilder, + p2p, + worldState, + l1ToL2MessageSource, + l2BlockSource, + checkpointsBuilder as unknown as FullNodeCheckpointsBuilder, + blockSink, + l1Constants, + config, + pipeliningTimetable, + slasherClient, + epochCache, + dateProvider, + metrics, + eventEmitter, + setStateFn, + getTelemetryClient().getTracer('timing-test-pipelining'), + { actor: 'timing-test-pipelining' }, + ); + + return job; + } + + beforeEach(() => { + epochCache.isProposerPipeliningEnabled.mockReturnValue(true); + }); + + it('sets attestation deadline to the target-slot publish cutoff when pipelining', async () => { + const { blocks, txs } = await createTestBlocksAndTxs(2); + mockP2pWithTxs(txs); + 
checkpointBuilder.seedBlocks( + blocks, + blocks.map((_, i) => [txs[i]]), + ); + checkpointBuilder.setExecutionDurations([5, 5]); + + let collectAttestationsDeadline: Date | undefined; + validatorClient.collectAttestations.mockImplementation((_proposal, _required, deadline) => { + collectAttestationsDeadline = deadline; + return Promise.resolve(getAttestations(blocks[1])); + }); + + setTimeInSlot(1); + + const job = createPipeliningJob(); + await job.execute(); + await job.awaitPendingSubmission(); + + expect(validatorClient.collectAttestations).toHaveBeenCalled(); + expect(collectAttestationsDeadline).toBeDefined(); + + // Attestation deadline = buildSlotStart + (2 * aztecSlotDuration - l1PublishingTime) + // so collection can continue until the target slot's publish cutoff. + const buildSlotStart = getSlotStartTime(slotNumber); + const expectedDeadlineSeconds = buildSlotStart + 2 * AZTEC_SLOT_DURATION - L1_PUBLISHING_TIME; + const actualDeadlineSeconds = collectAttestationsDeadline!.getTime() / 1000; + + expect(actualDeadlineSeconds).toBeCloseTo(expectedDeadlineSeconds, 0); + }); + + it('non-pipelining attestation deadline is unchanged', async () => { + epochCache.isProposerPipeliningEnabled.mockReturnValue(false); + + const { blocks, txs } = await createTestBlocksAndTxs(2); + mockP2pWithTxs(txs); + checkpointBuilder.seedBlocks( + blocks, + blocks.map((_, i) => [txs[i]]), + ); + checkpointBuilder.setExecutionDurations([5, 5]); + + let collectAttestationsDeadline: Date | undefined; + validatorClient.collectAttestations.mockImplementation((_proposal, _required, deadline) => { + collectAttestationsDeadline = deadline; + return Promise.resolve(getAttestations(blocks[1])); + }); + + setTimeInSlot(1); + + const job = createJob(); + job.setTimetable(timetable); + await job.execute(); + await job.awaitPendingSubmission(); + + expect(collectAttestationsDeadline).toBeDefined(); + + // Non-pipelining: deadline = buildSlotStart + slotDuration - l1PublishingTime + const 
slotStart = getSlotStartTime(slotNumber); + const expectedDeadlineSeconds = slotStart + AZTEC_SLOT_DURATION - L1_PUBLISHING_TIME; + const actualDeadlineSeconds = collectAttestationsDeadline!.getTime() / 1000; + + expect(actualDeadlineSeconds).toBeCloseTo(expectedDeadlineSeconds, 0); + }); + }); }); diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts index 211b3e0204dd..d8a9102d672b 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.ts @@ -1,5 +1,5 @@ import type { EpochCache } from '@aztec/epoch-cache'; -import { type FeeHeader, RollupContract } from '@aztec/ethereum/contracts'; +import type { SimulationOverridesPlan } from '@aztec/ethereum/contracts'; import { BlockNumber, CheckpointNumber, @@ -57,6 +57,10 @@ import { DutyAlreadySignedError, SlashingProtectionError } from '@aztec/validato import type { GlobalVariableBuilder } from '../global_variable_builder/global_builder.js'; import type { InvalidateCheckpointRequest, SequencerPublisher } from '../publisher/sequencer-publisher.js'; +import { + buildPipelinedParentSimulationOverridesPlan, + buildSubmissionSimulationOverridesPlan, +} from './chain_state_overrides.js'; import { CheckpointVoter } from './checkpoint_voter.js'; import { SequencerInterruptedError } from './errors.js'; import type { SequencerEvents } from './events.js'; @@ -68,7 +72,14 @@ import { SequencerState } from './utils.js'; /** How much time to sleep while waiting for min transactions to accumulate for a block */ const TXS_POLLING_MS = 500; -/** Result from proposeCheckpoint when a checkpoint was successfully built and attested. */ +/** Result from proposeCheckpoint when a checkpoint was successfully built and broadcast. 
*/ +type CheckpointProposalBroadcast = { + checkpoint: Checkpoint; + proposal: CheckpointProposal; + blockProposedAt: number; +}; + +/** Result after attestation collection and signing, ready for L1 submission. */ type CheckpointProposalResult = { checkpoint: Checkpoint; attestations: CommitteeAttestationsAndSigners; @@ -87,8 +98,8 @@ export class CheckpointProposalJob implements Traceable { /** Tracks the fire-and-forget L1 submission promise so it can be awaited during shutdown. */ private pendingL1Submission: Promise | undefined; - /** Fee header override computed during proposeCheckpoint, reused in enqueueCheckpointForSubmission. */ - private computedForceProposedFeeHeader?: { checkpointNumber: CheckpointNumber; feeHeader: FeeHeader }; + /** Pipelined parent chain state used while building and later submitting this checkpoint. */ + private pipelinedParentSimulationOverridesPlan?: SimulationOverridesPlan; constructor( private readonly slotNow: SlotNumber, @@ -136,8 +147,9 @@ export class CheckpointProposalJob implements Traceable { /** * Executes the checkpoint proposal job. - * Builds blocks, collects attestations, enqueues requests, and schedules L1 submission as a - * background task so the work loop can return to IDLE immediately. + * Builds blocks, assembles checkpoint, and broadcasts the proposal (blocking). + * Attestation collection, signing, and L1 submission are backgrounded so the + * work loop can return to IDLE immediately for consecutive slot proposals. * Returns the built checkpoint if successful, undefined otherwise. */ @trackSpan('CheckpointProposalJob.execute') @@ -157,71 +169,99 @@ export class CheckpointProposalJob implements Traceable { this.log, ).enqueueVotes(); - // Build and propose the checkpoint. Builds blocks, broadcasts, collects attestations, and signs. - // Does NOT enqueue to L1 yet — that happens after the pipeline sleep. 
- const proposalResult = await this.proposeCheckpoint(); - const checkpoint = proposalResult?.checkpoint; + // Build blocks, assemble checkpoint, and broadcast proposal (BLOCKING). + // Returns after broadcast — attestation collection is deferred. + const broadcast = await this.proposeCheckpoint(); - // Wait until the voting promises have resolved, so all requests are enqueued (not sent) - await Promise.all(votesPromises); - - if (checkpoint) { - this.metrics.recordCheckpointProposalSuccess(); + if (!broadcast) { + await Promise.all(votesPromises); + // Still submit votes even without a checkpoint + if (!this.config.fishermanMode) { + this.pendingL1Submission = this.publisher.sendRequestsAt(this.dateProvider.nowAsDate()).then(() => {}); + } + return undefined; } + const { checkpoint } = broadcast; + this.metrics.recordCheckpointProposalSuccess(); + // Do not post anything to L1 if we are fishermen, but do perform L1 fee analysis if (this.config.fishermanMode) { await this.handleCheckpointEndAsFisherman(checkpoint); - return; + return checkpoint; } - // Enqueue the checkpoint for L1 submission - if (proposalResult) { + // Background the attestation → signing → L1 pipeline so the work loop is unblocked + this.pendingL1Submission = this.waitForAttestationsAndEnqueueSubmissionAsync(broadcast, votesPromises); + + // Return the built checkpoint immediately — the work loop is now unblocked + return checkpoint; + } + + /** + * Background pipeline: collects attestations, signs them, enqueues the checkpoint, and submits to L1. + * Runs as a fire-and-forget task stored in `pendingL1Submission` so the work loop is unblocked. 
+ */ + private async waitForAttestationsAndEnqueueSubmissionAsync( + broadcast: CheckpointProposalBroadcast, + votesPromises: Promise[], + ): Promise { + const { checkpoint, proposal, blockProposedAt } = broadcast; + try { + await Promise.all(votesPromises); + + this.setStateFn(SequencerState.COLLECTING_ATTESTATIONS, this.targetSlot); + const attestations = await this.waitForAttestations(proposal); + + this.metrics.recordCheckpointAttestationDelay(this.dateProvider.now() - blockProposedAt); + + // Proposer must sign over the attestations before pushing them to L1 + const signer = this.proposer ?? this.publisher.getSenderAddress(); + let attestationsSignature: Signature; try { - await this.enqueueCheckpointForSubmission(proposalResult); + attestationsSignature = await this.validatorClient.signAttestationsAndSigners( + attestations, + signer, + this.targetSlot, + this.checkpointNumber, + ); } catch (err) { - this.log.error(`Failed to enqueue checkpoint for L1 submission at slot ${this.targetSlot}`, err); - // Continue to sendRequestsAt so votes are still sent + if (this.handleHASigningError(err, 'Attestations signature')) { + return; + } + throw err; } - } - // Compute the earliest time to submit: pipeline slot start when pipelining, now otherwise. - const submitAfter = this.epochCache.isProposerPipeliningEnabled() - ? new Date(Number(getTimestampForSlot(this.targetSlot, this.l1Constants)) * 1000) - : new Date(this.dateProvider.now()); - - // Schedule L1 submission in the background so the work loop returns immediately. - // The publisher will sleep until submitAfter, then send the bundled requests. - // The promise is stored so it can be awaited during shutdown. 
- this.pendingL1Submission = this.publisher - .sendRequestsAt(submitAfter) - .then(async l1Response => { - const proposedAction = l1Response?.successfulActions.find(a => a === 'propose'); - if (proposedAction) { - this.eventEmitter.emit('checkpoint-published', { checkpoint: this.checkpointNumber, slot: this.targetSlot }); - const coinbase = checkpoint?.header.coinbase; - await this.metrics.incFilledSlot(this.publisher.getSenderAddress().toString(), coinbase); - } else if (checkpoint) { - this.eventEmitter.emit('checkpoint-publish-failed', { ...l1Response, slot: this.targetSlot }); - - if (this.epochCache.isProposerPipeliningEnabled()) { - this.metrics.recordPipelineDiscard(); - } - } - }) - .catch(err => { - this.log.error(`Background L1 submission failed for slot ${this.targetSlot}`, err); - if (checkpoint) { - this.eventEmitter.emit('checkpoint-publish-failed', { slot: this.targetSlot }); - - if (this.epochCache.isProposerPipeliningEnabled()) { - this.metrics.recordPipelineDiscard(); - } - } - }); + // Enqueue the checkpoint for L1 submission + await this.enqueueCheckpointForSubmission({ checkpoint, attestations, attestationsSignature }); - // Return the built checkpoint immediately — the work loop is now unblocked - return checkpoint; + // Compute the earliest time to submit: pipeline slot start when pipelining, now otherwise. + const submitAfter = this.epochCache.isProposerPipeliningEnabled() + ? 
new Date(Number(getTimestampForSlot(this.targetSlot, this.l1Constants)) * 1000) + : new Date(this.dateProvider.now()); + + const l1Response = await this.publisher.sendRequestsAt(submitAfter); + const proposedAction = l1Response?.successfulActions.find(a => a === 'propose'); + if (proposedAction) { + this.eventEmitter.emit('checkpoint-published', { checkpoint: this.checkpointNumber, slot: this.targetSlot }); + const coinbase = checkpoint.header.coinbase; + await this.metrics.incFilledSlot(this.publisher.getSenderAddress().toString(), coinbase); + } else { + this.eventEmitter.emit('checkpoint-publish-failed', { ...l1Response, slot: this.targetSlot }); + if (this.epochCache.isProposerPipeliningEnabled()) { + this.metrics.recordPipelineDiscard(); + } + } + } catch (err) { + if (err instanceof SequencerInterruptedError) { + return; + } + this.log.error(`Background attestation/L1 pipeline failed for slot ${this.targetSlot}`, err); + this.eventEmitter.emit('checkpoint-publish-failed', { slot: this.targetSlot }); + if (this.epochCache.isProposerPipeliningEnabled()) { + this.metrics.recordPipelineDiscard(); + } + } } /** Enqueues the checkpoint for L1 submission. Called after pipeline sleep in execute(). 
*/ @@ -247,10 +287,17 @@ export class CheckpointProposalJob implements Traceable { } } + const isPipelining = this.epochCache.isProposerPipeliningEnabled(); + const submissionSimulationOverridesPlan = buildSubmissionSimulationOverridesPlan({ + pipelinedParentPlan: this.pipelinedParentSimulationOverridesPlan, + invalidateToPendingCheckpointNumber: this.invalidateCheckpoint?.forcePendingCheckpointNumber, + lastArchiveRoot: checkpoint.header.lastArchiveRoot, + pipeliningEnabled: isPipelining, + }); + await this.publisher.enqueueProposeCheckpoint(checkpoint, attestations, attestationsSignature, { txTimeoutAt, - forcePendingCheckpointNumber: this.invalidateCheckpoint?.forcePendingCheckpointNumber, - forceProposedFeeHeader: this.computedForceProposedFeeHeader, + ...(submissionSimulationOverridesPlan ? { simulationOverridesPlan: submissionSimulationOverridesPlan } : {}), }); } @@ -261,7 +308,7 @@ export class CheckpointProposalJob implements Traceable { [Attributes.SLOT_NUMBER]: this.targetSlot, }; }) - private async proposeCheckpoint(): Promise { + private async proposeCheckpoint(): Promise { try { // Get operator configured coinbase and fee recipient for this attestor const coinbase = this.validatorClient.getCoinbaseForAttestor(this.attestorAddress); @@ -287,21 +334,20 @@ export class CheckpointProposalJob implements Traceable { // When pipelining, force the proposed checkpoint number and fee header to our parent so the // fee computation sees the same chain tip that L1 will see once the previous pipelined checkpoint lands. const isPipelining = this.epochCache.isProposerPipeliningEnabled(); - const parentCheckpointNumber = isPipelining ? 
CheckpointNumber(this.checkpointNumber - 1) : undefined; - - // Compute the parent's fee header override when pipelining - if (isPipelining && this.proposedCheckpointData) { - this.computedForceProposedFeeHeader = await this.computeForceProposedFeeHeader(parentCheckpointNumber!); - } + this.pipelinedParentSimulationOverridesPlan = isPipelining + ? await buildPipelinedParentSimulationOverridesPlan({ + checkpointNumber: this.checkpointNumber, + proposedCheckpointData: this.proposedCheckpointData, + rollup: this.publisher.rollupContract, + log: this.log, + }) + : undefined; const checkpointGlobalVariables = await this.globalsBuilder.buildCheckpointGlobalVariables( coinbase, feeRecipient, this.targetSlot, - { - forcePendingCheckpointNumber: parentCheckpointNumber, - forceProposedFeeHeader: this.computedForceProposedFeeHeader, - }, + this.pipelinedParentSimulationOverridesPlan, ); // Collect L1 to L2 messages for the checkpoint and compute their hash @@ -410,7 +456,7 @@ export class CheckpointProposalJob implements Traceable { Number(checkpoint.header.totalManaUsed.toBigInt()), ); - // Do not collect attestations nor publish to L1 in fisherman mode + // In fisherman mode, return the checkpoint without broadcasting or collecting attestations if (this.config.fishermanMode) { this.log.info( `Built checkpoint for slot ${this.targetSlot} with ${blocksInCheckpoint.length} blocks. 
` + @@ -422,11 +468,8 @@ export class CheckpointProposalJob implements Traceable { }, ); this.metrics.recordCheckpointSuccess(); - return { - checkpoint, - attestations: CommitteeAttestationsAndSigners.empty(), - attestationsSignature: Signature.empty(), - }; + // Return a broadcast result with a dummy proposal — fisherman mode skips attestation collection + return { checkpoint, proposal: undefined!, blockProposedAt: this.dateProvider.now() }; } // Create the checkpoint proposal and broadcast it @@ -442,33 +485,8 @@ export class CheckpointProposalJob implements Traceable { const blockProposedAt = this.dateProvider.now(); await this.p2pClient.broadcastCheckpointProposal(proposal); - this.setStateFn(SequencerState.COLLECTING_ATTESTATIONS, this.targetSlot); - const attestations = await this.waitForAttestations(proposal); - const blockAttestedAt = this.dateProvider.now(); - - this.metrics.recordCheckpointAttestationDelay(blockAttestedAt - blockProposedAt); - - // Proposer must sign over the attestations before pushing them to L1 - const signer = this.proposer ?? this.publisher.getSenderAddress(); - let attestationsSignature: Signature; - try { - attestationsSignature = await this.validatorClient.signAttestationsAndSigners( - attestations, - signer, - this.targetSlot, - this.checkpointNumber, - ); - } catch (err) { - // We shouldn't really get here since we yield to another HA node - // as soon as we see these errors when creating block or checkpoint proposals. 
- if (this.handleHASigningError(err, 'Attestations signature')) { - return undefined; - } - throw err; - } - - // Return the result for the caller to enqueue after the pipeline sleep - return { checkpoint, attestations, attestationsSignature }; + // Return immediately after broadcast — attestation collection happens in the background + return { checkpoint, proposal, blockProposedAt }; } catch (err) { if (err && (err instanceof DutyAlreadySignedError || err instanceof SlashingProtectionError)) { // swallow this error. It's already been logged by a function deeper in the stack @@ -864,7 +882,7 @@ export class CheckpointProposalJob implements Traceable { } const attestationTimeAllowed = this.config.enforceTimeTable - ? this.timetable.getMaxAllowedTime(SequencerState.PUBLISHING_CHECKPOINT)! + ? this.timetable.getCheckpointAttestationDeadline() : this.l1Constants.slotDuration; const attestationDeadline = new Date((this.getSlotStartBuildTimestamp() + attestationTimeAllowed) * 1000); @@ -1070,56 +1088,6 @@ export class CheckpointProposalJob implements Traceable { return false; } - /** - * In times of congestion we need to simulate using the correct fee header override for the previous block - * We calculate the correct fee header values. - * - * If we are in block 1, or the checkpoint we are querying does not exist, we return undefined. 
However - * If we are pipelining - where this function is called, the grandparentCheckpointNumber should always exist - * @param parentCheckpointNumber - * @returns - */ - protected async computeForceProposedFeeHeader(parentCheckpointNumber: CheckpointNumber): Promise< - | { - checkpointNumber: CheckpointNumber; - feeHeader: FeeHeader; - } - | undefined - > { - if (!this.proposedCheckpointData) { - return undefined; - } - - const rollup = this.publisher.rollupContract; - const grandparentCheckpointNumber = CheckpointNumber(this.checkpointNumber - 2); - try { - const [grandparentCheckpoint, manaTarget] = await Promise.all([ - rollup.getCheckpoint(grandparentCheckpointNumber), - rollup.getManaTarget(), - ]); - - if (!grandparentCheckpoint || !grandparentCheckpoint.feeHeader) { - this.log.error( - `Grandparent checkpoint or its feeHeader is undefined for checkpointNumber=${grandparentCheckpointNumber.toString()}`, - ); - return undefined; - } else { - const parentFeeHeader = RollupContract.computeChildFeeHeader( - grandparentCheckpoint.feeHeader, - this.proposedCheckpointData.totalManaUsed, - this.proposedCheckpointData.feeAssetPriceModifier, - manaTarget, - ); - return { checkpointNumber: parentCheckpointNumber, feeHeader: parentFeeHeader }; - } - } catch (err) { - this.log.error( - `Failed to fetch grandparent checkpoint or mana target for checkpointNumber=${grandparentCheckpointNumber.toString()}: ${err}`, - ); - return undefined; - } - } - /** Waits until a specific time within the current slot */ @trackSpan('CheckpointProposalJob.waitUntilTimeInSlot') protected async waitUntilTimeInSlot(targetSecondsIntoSlot: number): Promise { diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts index dedba58e031d..e849ffc8f6e2 100644 --- a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts @@ -158,9 +158,9 @@ 
describe('sequencer', () => { expect.any(Checkpoint), attestationsAndSigners, getSignatures()[0].signature, - { + expect.objectContaining({ txTimeoutAt: expect.any(Date), - }, + }), ); }; @@ -373,6 +373,7 @@ describe('sequencer', () => { it('builds a block out of a single tx', async () => { await setupSingleTxBlock(); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); expectPublisherProposeL2Block(); }); @@ -417,6 +418,7 @@ describe('sequencer', () => { }); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); // Now we should build and publish the checkpoint expect(checkpointBuilder.buildBlockCalls.length).toBeGreaterThan(0); expectPublisherProposeL2Block(); @@ -439,6 +441,7 @@ describe('sequencer', () => { block = await makeBlock(txs); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); expect(checkpointBuilder.buildBlockCalls.length).toBeGreaterThan(0); expectPublisherProposeL2Block(); @@ -455,6 +458,7 @@ describe('sequencer', () => { // Archiver reports synced to slot 0, which satisfies syncedL2Slot + 1 >= slot (slot=1) l2BlockSource.getSyncedL2SlotNumber.mockResolvedValue(SlotNumber(0)); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); expect(publisher.enqueueProposeCheckpoint).toHaveBeenCalled(); }); @@ -475,10 +479,9 @@ describe('sequencer', () => { publisher.enqueueProposeCheckpoint.mockRejectedValueOnce(new Error('Failed to enqueue propose checkpoint')); + // The error is caught in the background attestation/L1 pipeline and does not surface as an unhandled rejection await sequencer.work(); - - // We still call sendRequestsAt in case there are votes enqueued - expect(publisher.sendRequestsAt).toHaveBeenCalled(); + await sequencer.awaitLastProposalSubmission(); }); it('should proceed with block proposal when there is no proposer yet', async () => { @@ -497,6 +500,7 @@ describe('sequencer', () => { block = await makeBlock(txs); await sequencer.work(); + await 
sequencer.awaitLastProposalSubmission(); // Verify that the sequencer attempted to create and broadcast a block proposal expect(publisher.enqueueProposeCheckpoint).toHaveBeenCalled(); @@ -579,6 +583,7 @@ describe('sequencer', () => { block = await makeBlock([tx]); TestUtils.mockPendingTxs(p2p, [tx]); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); const attestationsAndSigners = new CommitteeAttestationsAndSigners(getSignatures()); expect(publishers[i].enqueueProposeCheckpoint).toHaveBeenCalledTimes(1); @@ -586,7 +591,9 @@ describe('sequencer', () => { expect.any(Checkpoint), attestationsAndSigners, getSignatures()[0].signature, - { txTimeoutAt: expect.any(Date) }, + expect.objectContaining({ + txTimeoutAt: expect.any(Date), + }), ); } }); @@ -928,6 +935,7 @@ describe('sequencer', () => { await setupSingleTxBlock(); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); // Verify checkpoint was built and proposed expect(checkpointBuilder.buildBlockCalls.length).toBeGreaterThan(0); @@ -941,6 +949,7 @@ describe('sequencer', () => { await setupSingleTxBlock(); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); // Verify checkpoint was built and proposed expect(checkpointBuilder.buildBlockCalls).toHaveLength(1); @@ -957,6 +966,7 @@ describe('sequencer', () => { TestUtils.mockPendingTxs(p2p, txs); await sequencer.work(); + await sequencer.awaitLastProposalSubmission(); expect(checkpointBuilder.buildBlockCalls.length).toBeGreaterThan(1); expect(validatorClient.createCheckpointProposal).toHaveBeenCalled(); @@ -1055,14 +1065,9 @@ describe('sequencer', () => { await sequencer.work(); - // L1 check should be called with archive override for the parent checkpoint - expect(publisher.canProposeAt).toHaveBeenCalledWith( - expect.anything(), // archive - expect.anything(), // proposer - expect.objectContaining({ - forceArchive: expect.objectContaining({ checkpointNumber: CheckpointNumber(1) }), - }), - ); + const 
simulationOverridesPlan = publisher.canProposeAt.mock.calls.at(-1)?.[2]; + expect(simulationOverridesPlan?.pendingCheckpointNumber).toEqual(CheckpointNumber(1)); + expect(simulationOverridesPlan?.pendingCheckpointState?.archive).toEqual(expect.anything()); }); it('skips proposal when checkpoint exceeds pipeline depth', async () => { @@ -1130,8 +1135,7 @@ describe('sequencer', () => { await sequencer.work(); - // L1 check should be called without archive override (empty overrides object) - expect(publisher.canProposeAt).toHaveBeenCalledWith(expect.anything(), expect.anything(), {}); + expect(publisher.canProposeAt.mock.calls.at(-1)?.[2]).toBeUndefined(); }); it('calls L1 check without overrides when not pipelining', async () => { @@ -1153,8 +1157,7 @@ describe('sequencer', () => { await sequencer.work(); - // L1 check should be called without any overrides (empty object) - expect(publisher.canProposeAt).toHaveBeenCalledWith(expect.anything(), expect.anything(), {}); + expect(publisher.canProposeAt.mock.calls.at(-1)?.[2]).toBeUndefined(); }); }); @@ -1212,6 +1215,10 @@ class TestSequencer extends Sequencer { return super.work(); } + public async awaitLastProposalSubmission() { + await this.lastCheckpointProposalJob?.awaitPendingSubmission(); + } + public checkCanProposeForTest(slot: SlotNumber) { return this.checkCanPropose(slot); } diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.ts index f028072d2d3f..7ac67ed3d05d 100644 --- a/yarn-project/sequencer-client/src/sequencer/sequencer.ts +++ b/yarn-project/sequencer-client/src/sequencer/sequencer.ts @@ -1,7 +1,7 @@ import { getKzg } from '@aztec/blob-lib'; import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants'; import type { EpochCache } from '@aztec/epoch-cache'; -import { NoCommitteeError, type RollupContract } from '@aztec/ethereum/contracts'; +import { NoCommitteeError, type RollupContract, SimulationOverridesBuilder } from 
'@aztec/ethereum/contracts'; import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { merge, omit, pick } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/curves/bn254'; @@ -73,7 +73,7 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter TypedEventEmitter TypedEventEmitter TypedEventEmitter { expect(withPipelining.maxNumberOfBlocks).toBeGreaterThan(withoutPipelining.maxNumberOfBlocks); }); - it('uses entire slot minus init and re-execution for block building', () => { + it('reserves time for assembly and one-way broadcast at end of slot', () => { const tt = new SequencerTimetable({ ethereumSlotDuration: ETHEREUM_SLOT_DURATION, aztecSlotDuration: AZTEC_SLOT_DURATION, @@ -468,8 +468,9 @@ describe('sequencer-timetable', () => { }); const blockDuration = BLOCK_DURATION_MS / 1000; - // Reserves one blockDuration for validator re-execution, but no finalization time - const availableTime = AZTEC_SLOT_DURATION - tt.initializationOffset - blockDuration; + // Reserves assembleTime + p2pPropagation (one-way broadcast) at end + const timeReservedAtEnd = tt.checkpointAssembleTime + tt.p2pPropagationTime; + const availableTime = AZTEC_SLOT_DURATION - tt.initializationOffset - timeReservedAtEnd; expect(tt.maxNumberOfBlocks).toBe(Math.floor(availableTime / blockDuration)); }); @@ -501,8 +502,67 @@ describe('sequencer-timetable', () => { }); // With pipelining and test config (ethereumSlotDuration < 8): - // init=0.5, reExec=8, available = 36 - 0.5 - 8 = 27.5, floor(27.5/8) = 3 - expect(tt.maxNumberOfBlocks).toBe(3); + // init=0.5, reservedAtEnd = 0.5 + 0 = 0.5, available = 36 - 0.5 - 0.5 = 35, floor(35/8) = 4 + expect(tt.maxNumberOfBlocks).toBe(4); + }); + + it('sets pipeliningAttestationGracePeriod to blockDuration + p2pPropagationTime', () => { + const tt = new SequencerTimetable({ + ethereumSlotDuration: ETHEREUM_SLOT_DURATION, + aztecSlotDuration: AZTEC_SLOT_DURATION, 
+ l1PublishingTime: L1_PUBLISHING_TIME, + blockDurationMs: BLOCK_DURATION_MS, + enforce: ENFORCE_TIMETABLE, + pipelining: true, + }); + + expect(tt.pipeliningAttestationGracePeriod).toBe(tt.blockDuration! + tt.p2pPropagationTime); + }); + + it('uses separate pipelined deadlines for attestation start vs publish cutoff', () => { + const tt = new SequencerTimetable({ + ethereumSlotDuration: ETHEREUM_SLOT_DURATION, + aztecSlotDuration: AZTEC_SLOT_DURATION, + l1PublishingTime: L1_PUBLISHING_TIME, + blockDurationMs: BLOCK_DURATION_MS, + enforce: ENFORCE_TIMETABLE, + pipelining: true, + }); + + expect(tt.getMaxAllowedTime(SequencerState.ASSEMBLING_CHECKPOINT)).toBe( + AZTEC_SLOT_DURATION + tt.pipeliningAttestationGracePeriod, + ); + expect(tt.getMaxAllowedTime(SequencerState.COLLECTING_ATTESTATIONS)).toBe( + AZTEC_SLOT_DURATION + tt.pipeliningAttestationGracePeriod, + ); + expect(tt.getCheckpointAttestationDeadline()).toBe(2 * AZTEC_SLOT_DURATION - L1_PUBLISHING_TIME); + expect(tt.getMaxAllowedTime(SequencerState.PUBLISHING_CHECKPOINT)).toBe( + 2 * AZTEC_SLOT_DURATION - L1_PUBLISHING_TIME, + ); + }); + + it('ensures enough time from last block deadline to grace period end for assembly + round-trip + re-execution', () => { + const P2P_PROPAGATION_TIME = 2; + const BLOCK_DURATION = BLOCK_DURATION_MS / 1000; + + const tt = new SequencerTimetable({ + ethereumSlotDuration: ETHEREUM_SLOT_DURATION, + aztecSlotDuration: AZTEC_SLOT_DURATION, + l1PublishingTime: L1_PUBLISHING_TIME, + p2pPropagationTime: P2P_PROPAGATION_TIME, + blockDurationMs: BLOCK_DURATION_MS, + enforce: ENFORCE_TIMETABLE, + pipelining: true, + }); + + // Time from last block deadline to end of grace period into next slot + const lastBlockDeadline = tt.initializationOffset + tt.maxNumberOfBlocks * BLOCK_DURATION; + const remainingInBuildSlot = AZTEC_SLOT_DURATION - lastBlockDeadline; + const totalTimeAvailable = remainingInBuildSlot + tt.pipeliningAttestationGracePeriod; + + // Must be enough for: assembly + 
round-trip p2p + re-execution + const requiredTime = tt.checkpointAssembleTime + 2 * P2P_PROPAGATION_TIME + BLOCK_DURATION; + expect(totalTimeAvailable).toBeGreaterThanOrEqual(requiredTime); }); it('produces more blocks with production config where finalization time is large', () => { diff --git a/yarn-project/sequencer-client/src/sequencer/timetable.ts b/yarn-project/sequencer-client/src/sequencer/timetable.ts index 98373bf284da..d70ec6a4bccf 100644 --- a/yarn-project/sequencer-client/src/sequencer/timetable.ts +++ b/yarn-project/sequencer-client/src/sequencer/timetable.ts @@ -2,8 +2,10 @@ import type { Logger } from '@aztec/foundation/log'; import { CHECKPOINT_ASSEMBLE_TIME, CHECKPOINT_INITIALIZATION_TIME, + type CheckpointTiming, DEFAULT_P2P_PROPAGATION_TIME, MIN_EXECUTION_TIME, + createCheckpointTimingModel, } from '@aztec/stdlib/timetable'; import { SequencerTooSlowError } from './errors.js'; @@ -11,6 +13,8 @@ import type { SequencerMetrics } from './metrics.js'; import { SequencerState } from './utils.js'; export class SequencerTimetable { + private readonly checkpointTiming: CheckpointTiming; + /** * How late into the slot can we be to start working. Computed as the total time needed for assembling and publishing a block, * assuming an execution time equal to `minExecutionTime`, subtracted from the slot duration. This means that, if the proposer @@ -73,6 +77,12 @@ export class SequencerTimetable { /** Whether pipelining is enabled (checkpoint finalization deferred to next slot). */ public readonly pipelining: boolean; + /** + * How far into the target slot attestation collection can extend when pipelining. + * Covers validator re-execution (one block duration) plus one-way attestation return. 
+ */ + public readonly pipeliningAttestationGracePeriod: number; + constructor( opts: { ethereumSlotDuration: number; @@ -89,58 +99,31 @@ export class SequencerTimetable { this.ethereumSlotDuration = opts.ethereumSlotDuration; this.aztecSlotDuration = opts.aztecSlotDuration; this.l1PublishingTime = opts.l1PublishingTime; - this.p2pPropagationTime = opts.p2pPropagationTime ?? DEFAULT_P2P_PROPAGATION_TIME; this.blockDuration = opts.blockDurationMs ? opts.blockDurationMs / 1000 : undefined; this.enforce = opts.enforce; this.pipelining = opts.pipelining ?? false; - // Assume zero-cost propagation time and faster runs in test environments where L1 slot duration is shortened - if (this.ethereumSlotDuration < 8) { - this.p2pPropagationTime = 0; - this.checkpointAssembleTime = 0.5; - this.checkpointInitializationTime = 0.5; - this.minExecutionTime = 1; - } + this.checkpointTiming = createCheckpointTimingModel({ + aztecSlotDuration: this.aztecSlotDuration, + ethereumSlotDuration: this.ethereumSlotDuration, + blockDuration: this.blockDuration, + l1PublishingTime: this.l1PublishingTime, + p2pPropagationTime: opts.p2pPropagationTime ?? 
DEFAULT_P2P_PROPAGATION_TIME, + pipelining: this.pipelining, + }); - // Min execution time cannot be less than the block duration if set - if (this.blockDuration !== undefined && this.minExecutionTime > this.blockDuration) { - this.minExecutionTime = this.blockDuration; - } + this.p2pPropagationTime = this.checkpointTiming.p2pPropagationTime; + this.checkpointAssembleTime = this.checkpointTiming.checkpointAssembleTime; + this.checkpointInitializationTime = this.checkpointTiming.checkpointInitializationTime; + this.minExecutionTime = this.checkpointTiming.minExecutionTime; // Calculate initialization offset - estimate of time needed for sync + proposer check // This is the baseline for all sub-slot deadlines - this.initializationOffset = this.checkpointInitializationTime; - - // Calculate total checkpoint finalization time (assembly + attestations + L1 publishing) - this.checkpointFinalizationTime = - this.checkpointAssembleTime + - this.p2pPropagationTime * 2 + // Round-trip propagation - this.l1PublishingTime; // L1 publishing - - // Calculate maximum number of blocks that fit in this slot - if (!this.blockDuration) { - this.maxNumberOfBlocks = 1; // Single block per slot - } else { - // When pipelining, finalization is deferred to the next slot, but we still need - // a sub-slot for validator re-execution so they can produce attestations. - let timeReservedAtEnd = this.blockDuration; // Validatior re-execution only - if (!this.pipelining) { - timeReservedAtEnd += this.checkpointFinalizationTime; - } - - const timeAvailableForBlocks = this.aztecSlotDuration - this.initializationOffset - timeReservedAtEnd; - this.maxNumberOfBlocks = Math.floor(timeAvailableForBlocks / this.blockDuration); - } - - // Minimum work to do within a slot for building a block with the minimum time for execution and publishing its checkpoint. - // When pipelining, finalization is deferred, but we still need time for execution and validator re-execution. 
- let minWorkToDo = this.initializationOffset + this.minExecutionTime * 2; - if (!this.pipelining) { - minWorkToDo += this.checkpointFinalizationTime; - } - - const initializeDeadline = this.aztecSlotDuration - minWorkToDo; - this.initializeDeadline = initializeDeadline; + this.initializationOffset = this.checkpointTiming.checkpointInitializationTime; + this.checkpointFinalizationTime = this.checkpointTiming.checkpointFinalizationTime; + this.pipeliningAttestationGracePeriod = this.checkpointTiming.pipeliningAttestationGracePeriod; + this.maxNumberOfBlocks = this.checkpointTiming.calculateMaxBlocksPerSlot(); + this.initializeDeadline = this.checkpointTiming.initializeDeadline; this.log?.info( `Sequencer timetable initialized with ${this.maxNumberOfBlocks} blocks per slot (${this.enforce ? 'enforced' : 'not enforced'})`, @@ -155,19 +138,36 @@ export class SequencerTimetable { initializeDeadline: this.initializeDeadline, enforce: this.enforce, pipelining: this.pipelining, - minWorkToDo, + pipeliningAttestationGracePeriod: this.pipeliningAttestationGracePeriod, + minWorkToDo: this.checkpointTiming.minimumBuildSlotWork, blockDuration: this.blockDuration, maxNumberOfBlocks: this.maxNumberOfBlocks, }, ); - if (initializeDeadline <= 0) { + if (this.initializeDeadline <= 0) { throw new Error( - `Block proposal initialize deadline cannot be negative (got ${initializeDeadline} from total time needed ${minWorkToDo} and a slot duration of ${this.aztecSlotDuration}).`, + `Block proposal initialize deadline cannot be negative (got ${this.initializeDeadline} from total time needed ${this.checkpointTiming.minimumBuildSlotWork} and a slot duration of ${this.aztecSlotDuration}).`, ); } } + public getCheckpointAssemblyDeadline(): number { + return this.checkpointTiming.checkpointAssemblyDeadline; + } + + public getCheckpointAttestationDeadline(): number { + return this.checkpointTiming.checkpointAttestationDeadline; + } + + public getCheckpointAttestationStartDeadline(): number { + 
return this.checkpointTiming.checkpointAttestationStartDeadline; + } + + public getCheckpointPublishingDeadline(): number { + return this.checkpointTiming.checkpointPublishingDeadline; + } + public getMaxAllowedTime( state: Extract, ): undefined; @@ -190,10 +190,11 @@ export class SequencerTimetable { case SequencerState.WAITING_UNTIL_NEXT_BLOCK: return this.initializeDeadline + this.checkpointInitializationTime; case SequencerState.ASSEMBLING_CHECKPOINT: + return this.getCheckpointAssemblyDeadline(); case SequencerState.COLLECTING_ATTESTATIONS: - return this.aztecSlotDuration - this.l1PublishingTime - 2 * this.p2pPropagationTime; + return this.getCheckpointAttestationStartDeadline(); case SequencerState.PUBLISHING_CHECKPOINT: - return this.aztecSlotDuration - this.l1PublishingTime; + return this.getCheckpointPublishingDeadline(); default: { const _exhaustiveCheck: never = state; throw new Error(`Unexpected state: ${state}`); diff --git a/yarn-project/stdlib/src/config/sequencer-config.ts b/yarn-project/stdlib/src/config/sequencer-config.ts index 77bdfd94ed82..b7687059b326 100644 --- a/yarn-project/stdlib/src/config/sequencer-config.ts +++ b/yarn-project/stdlib/src/config/sequencer-config.ts @@ -1,6 +1,7 @@ import type { ConfigMappingsType } from '@aztec/foundation/config'; import type { SequencerConfig } from '../interfaces/configs.js'; +import { DEFAULT_P2P_PROPAGATION_TIME } from '../timetable/index.js'; /** Default maximum number of transactions per block. */ export const DEFAULT_MAX_TXS_PER_BLOCK = 32; @@ -12,7 +13,10 @@ export const DEFAULT_MAX_TXS_PER_BLOCK = 32; * to avoid duplication. 
*/ export const sharedSequencerConfigMappings: ConfigMappingsType< - Pick + Pick< + SequencerConfig, + 'blockDurationMs' | 'expectedBlockProposalsPerSlot' | 'maxTxsPerBlock' | 'attestationPropagationTime' + > > = { blockDurationMs: { env: 'SEQ_BLOCK_DURATION_MS', @@ -34,4 +38,10 @@ export const sharedSequencerConfigMappings: ConfigMappingsType< description: 'The maximum number of txs to include in a block.', parseEnv: (val: string) => (val ? parseInt(val, 10) : undefined), }, + attestationPropagationTime: { + env: 'SEQ_ATTESTATION_PROPAGATION_TIME', + description: 'How many seconds it takes for proposals and attestations to travel across the p2p layer (one-way).', + parseEnv: (val: string) => (val ? parseFloat(val) : undefined), + defaultValue: DEFAULT_P2P_PROPAGATION_TIME, + }, }; diff --git a/yarn-project/stdlib/src/timetable/index.test.ts b/yarn-project/stdlib/src/timetable/index.test.ts new file mode 100644 index 000000000000..a3e08687e9af --- /dev/null +++ b/yarn-project/stdlib/src/timetable/index.test.ts @@ -0,0 +1,79 @@ +import { createCheckpointTimingModel, createPipelinedCheckpointTimingModel } from './index.js'; + +describe('timetable validation', () => { + it('accepts a non-pipelined multi-block config that fits exactly one block', () => { + const timing = createCheckpointTimingModel({ + aztecSlotDuration: 34, + blockDuration: 8, + checkpointInitializationTime: 1, + checkpointAssembleTime: 1, + p2pPropagationTime: 2, + l1PublishingTime: 12, + pipelining: false, + }); + + expect(timing.calculateMaxBlocksPerSlot()).toBe(1); + }); + + it('rejects a non-pipelined multi-block config that cannot fit one block', () => { + expect(() => + createCheckpointTimingModel({ + aztecSlotDuration: 33, + blockDuration: 8, + checkpointInitializationTime: 1, + checkpointAssembleTime: 1, + p2pPropagationTime: 2, + l1PublishingTime: 12, + pipelining: false, + }), + ).toThrow(/less than one blockDuration/i); + }); + + it('accepts a pipelined multi-block config that fits exactly 
one block', () => { + const timing = createPipelinedCheckpointTimingModel({ + aztecSlotDuration: 12, + blockDuration: 8, + checkpointInitializationTime: 1, + checkpointAssembleTime: 1, + p2pPropagationTime: 2, + l1PublishingTime: 12, + }); + + expect(timing.calculateMaxBlocksPerSlot()).toBe(1); + }); + + it('rejects a pipelined multi-block config that cannot fit one block', () => { + expect(() => + createPipelinedCheckpointTimingModel({ + aztecSlotDuration: 11, + blockDuration: 8, + checkpointInitializationTime: 1, + checkpointAssembleTime: 1, + p2pPropagationTime: 2, + l1PublishingTime: 12, + }), + ).toThrow(/less than one blockDuration/i); + }); + + it('allows single-block mode without blockDuration', () => { + const timing = createCheckpointTimingModel({ + aztecSlotDuration: 10, + checkpointInitializationTime: 1, + pipelining: false, + }); + + expect(timing.calculateMaxBlocksPerSlot()).toBe(1); + }); + + it('uses compressed timing allowances for short ethereum test slots', () => { + const timing = createCheckpointTimingModel({ + aztecSlotDuration: 24, + ethereumSlotDuration: 4, + blockDuration: 8, + l1PublishingTime: 2, + p2pPropagationTime: 0.5, + }); + + expect(timing.calculateMaxBlocksPerSlot()).toBe(1); + }); +}); diff --git a/yarn-project/stdlib/src/timetable/index.ts b/yarn-project/stdlib/src/timetable/index.ts index eb76fb63f72a..c94dcbbecb3f 100644 --- a/yarn-project/stdlib/src/timetable/index.ts +++ b/yarn-project/stdlib/src/timetable/index.ts @@ -25,6 +25,219 @@ export const DEFAULT_L1_PUBLISHING_TIME = 12; /** Minimum execution time for building a block in seconds */ export const MIN_EXECUTION_TIME = 2; +export type CheckpointTimingConfig = { + aztecSlotDuration: number; + ethereumSlotDuration?: number; + blockDuration?: number; + checkpointAssembleTime?: number; + checkpointInitializationTime?: number; + l1PublishingTime?: number; + minExecutionTime?: number; + p2pPropagationTime?: number; + pipelining?: boolean; +}; + +export interface 
CheckpointTiming { + readonly aztecSlotDuration: number; + readonly blockDuration: number | undefined; + readonly checkpointAssembleTime: number; + readonly checkpointInitializationTime: number; + readonly l1PublishingTime: number; + readonly minExecutionTime: number; + readonly p2pPropagationTime: number; + readonly checkpointFinalizationTime: number; + readonly pipeliningAttestationGracePeriod: number; + readonly timeReservedAtEnd: number; + readonly minimumBuildSlotWork: number; + readonly initializeDeadline: number; + readonly checkpointAssemblyDeadline: number; + readonly checkpointAttestationStartDeadline: number; + readonly checkpointAttestationDeadline: number; + readonly checkpointPublishingDeadline: number; + + calculateMaxBlocksPerSlot(): number; +} + +export interface PipelinedCheckpointTiming extends CheckpointTiming { + readonly proposalWindowIntoTargetSlot: number; + readonly attestationWindowIntoTargetSlot: number; +} + +/** + * Shared base for checkpoint timing implementations. + * + * This class owns the common inputs and formulas used by both pipelined and + * non-pipelined scheduling. Variant-specific deadline math is delegated to the + * concrete subclasses below. + */ +abstract class BaseCheckpointTiming implements CheckpointTiming { + public readonly aztecSlotDuration: number; + public readonly blockDuration: number | undefined; + public readonly checkpointAssembleTime: number; + public readonly checkpointInitializationTime: number; + public readonly l1PublishingTime: number; + public readonly minExecutionTime: number; + public readonly p2pPropagationTime: number; + + constructor(opts: CheckpointTimingConfig) { + this.aztecSlotDuration = opts.aztecSlotDuration; + this.blockDuration = opts.blockDuration; + + this.checkpointAssembleTime = opts.checkpointAssembleTime ?? CHECKPOINT_ASSEMBLE_TIME; + this.checkpointInitializationTime = opts.checkpointInitializationTime ?? 
CHECKPOINT_INITIALIZATION_TIME; + this.l1PublishingTime = opts.l1PublishingTime ?? DEFAULT_L1_PUBLISHING_TIME; + this.minExecutionTime = opts.minExecutionTime ?? MIN_EXECUTION_TIME; + this.p2pPropagationTime = opts.p2pPropagationTime ?? DEFAULT_P2P_PROPAGATION_TIME; + } + + public get checkpointFinalizationTime(): number { + // Allow enough time to + // - build the checkpoint + // - Round-trip over p2p + // - Publish to L1 + return this.checkpointAssembleTime + this.p2pPropagationTime * 2 + this.l1PublishingTime; + } + + public get pipeliningAttestationGracePeriod(): number { + // Allow enough time to + // - build the block + // - pass it back over p2p + return (this.blockDuration ?? 0) + this.p2pPropagationTime; + } + + public abstract get timeReservedAtEnd(): number; + public abstract get minimumBuildSlotWork(): number; + + public get initializeDeadline(): number { + return this.aztecSlotDuration - this.minimumBuildSlotWork; + } + + public abstract get checkpointAssemblyDeadline(): number; + + public get checkpointAttestationStartDeadline(): number { + return this.checkpointAssemblyDeadline; + } + + public abstract get checkpointAttestationDeadline(): number; + public abstract get checkpointPublishingDeadline(): number; + + public calculateMaxBlocksPerSlot(): number { + if (!this.blockDuration) { + return 1; + } + + const timeAvailableForBlocks = this.aztecSlotDuration - this.checkpointInitializationTime - this.timeReservedAtEnd; + return Math.floor(timeAvailableForBlocks / this.blockDuration); + } +} + +/** + * Checkpoint timing model for the non-pipelined sequencer flow. + * + * In this mode, checkpoint assembly, attestation collection, and L1 publishing + * must all complete within the current Aztec slot. + */ +class StandardCheckpointTimingModel extends BaseCheckpointTiming { + public get timeReservedAtEnd(): number { + return (this.blockDuration ?? 
0) + this.checkpointFinalizationTime; + } + + public get minimumBuildSlotWork(): number { + return this.checkpointInitializationTime + this.minExecutionTime * 2 + this.checkpointFinalizationTime; + } + + public get checkpointAssemblyDeadline(): number { + return this.aztecSlotDuration - this.l1PublishingTime - 2 * this.p2pPropagationTime; + } + + public get checkpointAttestationDeadline(): number { + return this.aztecSlotDuration - this.l1PublishingTime; + } + + public get checkpointPublishingDeadline(): number { + return this.aztecSlotDuration - this.l1PublishingTime; + } +} + +/** + * Checkpoint timing model for proposer pipelining. + * + * In this mode, the build work still starts in the current slot, but checkpoint + * assembly and attestation collection can extend into the target slot. The extra + * target-slot window getters are intended for consumers such as P2P validators + * that need to validate pipelined messages against wallclock time. + */ +class PipelinedCheckpointTimingModel extends BaseCheckpointTiming implements PipelinedCheckpointTiming { + public get proposalWindowIntoTargetSlot(): number { + // Allow the p2p propagation time to receive a checkpoint proposal from leader + return this.p2pPropagationTime; + } + + public get attestationWindowIntoTargetSlot(): number { + return this.aztecSlotDuration - this.l1PublishingTime; + } + + public get timeReservedAtEnd(): number { + return this.checkpointAssembleTime + this.p2pPropagationTime; + } + + public get minimumBuildSlotWork(): number { + return this.checkpointInitializationTime + this.minExecutionTime * 2; + } + + public get checkpointAssemblyDeadline(): number { + // Allow enough time to + // - build all blocks + // - receive attestations + return this.aztecSlotDuration + this.pipeliningAttestationGracePeriod; + } + + public get checkpointAttestationDeadline(): number { + // Allowed to be into the next wallclock slot minus the allocated l1 publishing time + return this.aztecSlotDuration * 2 - 
this.l1PublishingTime; + } + + public get checkpointPublishingDeadline(): number { + // Allowed to be into the next wallclock slot minus the allocated l1 publishing time + return this.aztecSlotDuration * 2 - this.l1PublishingTime; + } +} + +/** + * Creates a checkpoint timing model for the requested scheduling mode. + * + * Most callers should use this factory and depend only on the shared + * `CheckpointTiming` interface. The returned implementation is selected from + * `opts.pipelining`. + */ +export function createCheckpointTimingModel(opts: CheckpointTimingConfig): CheckpointTiming { + validateCheckpointTimingConfig(opts); + const normalizedOpts = normalizeCheckpointTimingConfig(opts); + + const timing = normalizedOpts.pipelining + ? new PipelinedCheckpointTimingModel(normalizedOpts) + : new StandardCheckpointTimingModel(normalizedOpts); + validateCheckpointTimingModel(timing); + return timing; +} + +/** + * Creates a pipelined checkpoint timing model with target-slot window accessors. + * + * Use this when the caller specifically needs the pipelined-only timing surface, + * such as proposal or attestation acceptance windows into the target slot. + */ +export function createPipelinedCheckpointTimingModel( + opts: Omit<CheckpointTimingConfig, 'pipelining'>, +): PipelinedCheckpointTiming { + validateCheckpointTimingConfig(opts); + const normalizedOpts = normalizeCheckpointTimingConfig(opts); + + const timing = new PipelinedCheckpointTimingModel(normalizedOpts); + validateCheckpointTimingModel(timing); + return timing; +} + /** * Calculates the maximum number of blocks that can be built in a slot. * Used by both the sequencer timetable and p2p gossipsub scoring. 
@@ -45,27 +258,89 @@ export function calculateMaxBlocksPerSlot( pipelining?: boolean; } = {}, ): number { - if (!blockDurationSec) { - return 1; // Single block per slot + return createCheckpointTimingModel({ + aztecSlotDuration: aztecSlotDurationSec, + blockDuration: blockDurationSec, + checkpointAssembleTime: opts.checkpointAssembleTime, + checkpointInitializationTime: opts.checkpointInitializationTime, + l1PublishingTime: opts.l1PublishingTime, + p2pPropagationTime: opts.p2pPropagationTime, + pipelining: opts.pipelining, + }).calculateMaxBlocksPerSlot(); +} + +function assertNonNegative(name: string, value: number): void { + if (value < 0) { + throw new Error(`${name} must be non-negative (got ${value})`); } +} - const initOffset = opts.checkpointInitializationTime ?? CHECKPOINT_INITIALIZATION_TIME; - const assembleTime = opts.checkpointAssembleTime ?? CHECKPOINT_ASSEMBLE_TIME; - const p2pTime = opts.p2pPropagationTime ?? DEFAULT_P2P_PROPAGATION_TIME; - const l1Time = opts.l1PublishingTime ?? DEFAULT_L1_PUBLISHING_TIME; +function validateCheckpointTimingConfig(opts: CheckpointTimingConfig): void { + if (opts.aztecSlotDuration <= 0) { + throw new Error(`aztecSlotDuration must be positive (got ${opts.aztecSlotDuration})`); + } - // Calculate checkpoint finalization time (assembly + round-trip propagation + L1 publishing) - const checkpointFinalizationTime = assembleTime + p2pTime * 2 + l1Time; + if (opts.ethereumSlotDuration !== undefined && opts.ethereumSlotDuration <= 0) { + throw new Error(`ethereumSlotDuration must be positive when provided (got ${opts.ethereumSlotDuration})`); + } - // When pipelining, finalization is deferred to the next slot, but we still reserve - // a sub-slot for validator re-execution so they can produce attestations. 
- let timeReservedAtEnd = blockDurationSec; - if (!opts.pipelining) { - timeReservedAtEnd += checkpointFinalizationTime; + if (opts.blockDuration !== undefined && opts.blockDuration <= 0) { + throw new Error(`blockDuration must be positive when provided (got ${opts.blockDuration})`); } - // Time available for building blocks - const timeAvailableForBlocks = aztecSlotDurationSec - initOffset - timeReservedAtEnd; + if (opts.minExecutionTime !== undefined && opts.minExecutionTime <= 0) { + throw new Error(`minExecutionTime must be positive when provided (got ${opts.minExecutionTime})`); + } - return Math.max(1, Math.floor(timeAvailableForBlocks / blockDurationSec)); + if (opts.checkpointAssembleTime !== undefined) { + assertNonNegative('checkpointAssembleTime', opts.checkpointAssembleTime); + } + if (opts.checkpointInitializationTime !== undefined) { + assertNonNegative('checkpointInitializationTime', opts.checkpointInitializationTime); + } + if (opts.l1PublishingTime !== undefined) { + assertNonNegative('l1PublishingTime', opts.l1PublishingTime); + } + if (opts.p2pPropagationTime !== undefined) { + assertNonNegative('p2pPropagationTime', opts.p2pPropagationTime); + } +} + +function normalizeCheckpointTimingConfig(opts: CheckpointTimingConfig): CheckpointTimingConfig { + let checkpointAssembleTime = opts.checkpointAssembleTime ?? CHECKPOINT_ASSEMBLE_TIME; + let checkpointInitializationTime = opts.checkpointInitializationTime ?? CHECKPOINT_INITIALIZATION_TIME; + let minExecutionTime = opts.minExecutionTime ?? MIN_EXECUTION_TIME; + let p2pPropagationTime = opts.p2pPropagationTime ?? 
DEFAULT_P2P_PROPAGATION_TIME; + + if (opts.ethereumSlotDuration !== undefined && opts.ethereumSlotDuration < 8) { + p2pPropagationTime = 0; + checkpointAssembleTime = 0.5; + checkpointInitializationTime = 0.5; + minExecutionTime = 1; + } + + if (opts.blockDuration !== undefined && minExecutionTime > opts.blockDuration) { + minExecutionTime = opts.blockDuration; + } + + return { + ...opts, + checkpointAssembleTime, + checkpointInitializationTime, + minExecutionTime, + p2pPropagationTime, + }; +} + +function validateCheckpointTimingModel(model: CheckpointTiming): void { + if (model.blockDuration === undefined) { + return; + } + + const timeAvailableForBlocks = model.aztecSlotDuration - model.checkpointInitializationTime - model.timeReservedAtEnd; + if (timeAvailableForBlocks < model.blockDuration) { + throw new Error( + `Invalid timing configuration: only ${timeAvailableForBlocks}s available for block building, which is less than one blockDuration (${model.blockDuration}s).`, + ); + } } diff --git a/yarn-project/stdlib/src/tx/global_variable_builder.ts b/yarn-project/stdlib/src/tx/global_variable_builder.ts index 9fb9a7b0c68c..f35e95e96534 100644 --- a/yarn-project/stdlib/src/tx/global_variable_builder.ts +++ b/yarn-project/stdlib/src/tx/global_variable_builder.ts @@ -1,5 +1,4 @@ -import type { FeeHeader } from '@aztec/ethereum/contracts'; -import type { CheckpointNumber } from '@aztec/foundation/branded-types'; +import type { SimulationOverridesPlan } from '@aztec/ethereum/contracts'; import type { EthAddress } from '@aztec/foundation/eth-address'; import type { SlotNumber } from '@aztec/foundation/schemas'; @@ -8,18 +7,6 @@ import type { GasFees } from '../gas/gas_fees.js'; import type { UInt32 } from '../types/index.js'; import type { CheckpointGlobalVariables, GlobalVariables } from './global_variables.js'; -/** Fee header fields needed for pipelining overrides. 
*/ -export type ForceProposedFeeHeader = { - checkpointNumber: CheckpointNumber; - feeHeader: FeeHeader; -}; - -/** Options for building checkpoint global variables during pipelining. */ -export type BuildCheckpointGlobalVariablesOpts = { - forcePendingCheckpointNumber?: CheckpointNumber; - forceProposedFeeHeader?: ForceProposedFeeHeader; -}; - /** * Interface for building global variables for Aztec blocks. */ @@ -46,6 +33,6 @@ export interface GlobalVariableBuilder { coinbase: EthAddress, feeRecipient: AztecAddress, slotNumber: SlotNumber, - opts?: BuildCheckpointGlobalVariablesOpts, + simulationOverridesPlan?: SimulationOverridesPlan, ): Promise<CheckpointGlobalVariables>; } diff --git a/yarn-project/txe/src/state_machine/global_variable_builder.ts b/yarn-project/txe/src/state_machine/global_variable_builder.ts index 68143e67c383..67d64df6cfea 100644 --- a/yarn-project/txe/src/state_machine/global_variable_builder.ts +++ b/yarn-project/txe/src/state_machine/global_variable_builder.ts @@ -1,14 +1,10 @@ +import type { SimulationOverridesPlan } from '@aztec/ethereum/contracts'; import { BlockNumber, type SlotNumber } from '@aztec/foundation/branded-types'; import type { EthAddress } from '@aztec/foundation/eth-address'; import type { AztecAddress } from '@aztec/stdlib/aztec-address'; import { GasFees } from '@aztec/stdlib/gas'; import { makeGlobalVariables } from '@aztec/stdlib/testing'; -import { - type BuildCheckpointGlobalVariablesOpts, - type CheckpointGlobalVariables, - type GlobalVariableBuilder, - GlobalVariables, -} from '@aztec/stdlib/tx'; +import { type CheckpointGlobalVariables, type GlobalVariableBuilder, GlobalVariables } from '@aztec/stdlib/tx'; export class TXEGlobalVariablesBuilder implements GlobalVariableBuilder { public getCurrentMinFees(): Promise<GasFees> { @@ -28,7 +24,7 @@ export class TXEGlobalVariablesBuilder implements GlobalVariableBuilder { _coinbase: EthAddress, _feeRecipient: AztecAddress, _slotNumber: SlotNumber, - _opts?: BuildCheckpointGlobalVariablesOpts, + 
_simulationOverridesPlan?: SimulationOverridesPlan, ): Promise<CheckpointGlobalVariables> { const vars = makeGlobalVariables(); return Promise.resolve({ diff --git a/yarn-project/txe/src/state_machine/mock_epoch_cache.ts b/yarn-project/txe/src/state_machine/mock_epoch_cache.ts index cfca8a29605b..616f9744b218 100644 --- a/yarn-project/txe/src/state_machine/mock_epoch_cache.ts +++ b/yarn-project/txe/src/state_machine/mock_epoch_cache.ts @@ -33,10 +33,6 @@ export class MockEpochCache implements EpochCacheInterface { return EpochNumber.ZERO; } - pipeliningOffset(): number { - return 0; - } - getEpochAndSlotNow(): EpochAndSlot & { nowMs: bigint } { return { epoch: EpochNumber.ZERO, @@ -63,6 +59,10 @@ export class MockEpochCache implements EpochCacheInterface { return false; } + pipeliningOffset(): number { + return 0; + } + getProposerIndexEncoding(_epoch: EpochNumber, _slot: SlotNumber, _seed: bigint): `0x${string}` { return '0x00'; } diff --git a/yarn-project/validator-client/src/validator.ha.integration.test.ts b/yarn-project/validator-client/src/validator.ha.integration.test.ts index fd5eff6479e7..9f398b619050 100644 --- a/yarn-project/validator-client/src/validator.ha.integration.test.ts +++ b/yarn-project/validator-client/src/validator.ha.integration.test.ts @@ -86,6 +86,7 @@ describe('ValidatorClient HA Integration', () => { p2pClient.getCheckpointAttestationsForSlot.mockImplementation(() => Promise.resolve([])); p2pClient.addOwnCheckpointAttestations.mockResolvedValue(); p2pClient.broadcastCheckpointAttestations.mockResolvedValue(); + const slotDuration = 24; checkpointsBuilder = mock(); checkpointsBuilder.getConfig.mockReturnValue({ l1GenesisTime: 1n, @@ -96,6 +97,9 @@ describe('ValidatorClient HA Integration', () => { }); worldState = mock(); epochCache = mock(); + epochCache.getL1Constants.mockReturnValue({ + slotDuration, + } as any); // Default mock: return all addresses passed (all are in committee) epochCache.filterInCommittee.mockImplementation((_slot, addresses) => 
Promise.resolve(addresses)); blockSource = mock(); From 6b698eeacdd58525bb19d520dceba1c35549a908 Mon Sep 17 00:00:00 2001 From: spypsy Date: Fri, 10 Apr 2026 00:12:25 +0100 Subject: [PATCH 08/11] fix(p2p): avoid 32-bit overflow in attestation pool block position key (#22412) ## Summary - Replace bit-shift (`<<`) with multiplication in `AttestationPool.getBlockPositionKey` to prevent 32-bit signed integer overflow on large slot numbers. JavaScript's `<<` converts operands to signed 32-bit ints, which overflows after slot ~2^21. Using multiplication keeps values in float64 range (safe up to 2^53). Fixes [A-760](https://linear.app/aztec-labs/issue/A-760/audit-91-attestationpoolgetblockpositionkey-bit-shift-overflow-after) --- .../src/mem_pools/attestation_pool/attestation_pool.ts | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts index 4e4596bc791b..69c2e02f8e55 100644 --- a/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts +++ b/yarn-project/p2p/src/mem_pools/attestation_pool/attestation_pool.ts @@ -154,14 +154,16 @@ export class AttestationPool { /** Maximum indexWithinCheckpoint value (2^10 - 1 = 1023). */ private static readonly MAX_INDEX = (1 << AttestationPool.INDEX_BITS) - 1; - /** Creates a position key for block proposals: (slot << 10) | indexWithinCheckpoint. */ + /** Creates a position key for block proposals: slot * 1024 + indexWithinCheckpoint. + * Uses multiplication instead of bit-shift to avoid 32-bit signed integer overflow + * (bit-shift overflows after slot ~2^21, roughly 278 days of uptime). 
*/ private getBlockPositionKey(slot: number, indexWithinCheckpoint: number): number { if (indexWithinCheckpoint > AttestationPool.MAX_INDEX) { throw new Error( `Value for indexWithinCheckpoint ${indexWithinCheckpoint} exceeds maximum ${AttestationPool.MAX_INDEX}`, ); } - return (slot << AttestationPool.INDEX_BITS) | indexWithinCheckpoint; + return slot * (1 << AttestationPool.INDEX_BITS) + indexWithinCheckpoint; } /** @@ -454,7 +456,7 @@ export class AttestationPool { // Delete block proposals for slots < oldestSlot, using blockProposalsForSlotAndIndex as index // Key format: (slot << INDEX_BITS) | indexWithinCheckpoint - const blockPositionEndKey = oldestSlot << AttestationPool.INDEX_BITS; + const blockPositionEndKey = oldestSlot * (1 << AttestationPool.INDEX_BITS); for await (const positionKey of this.blockProposalsForSlotAndIndex.keysAsync({ end: blockPositionEndKey })) { const proposalIds = await toArray(this.blockProposalsForSlotAndIndex.getValuesAsync(positionKey)); for (const proposalId of proposalIds) { From 56b32d5babed5e5371d6afafb90bc3bd84e3cd01 Mon Sep 17 00:00:00 2001 From: spypsy Date: Fri, 10 Apr 2026 00:13:55 +0100 Subject: [PATCH 09/11] fix(prover-client): increment retry count on timeout re-enqueue to prevent infinite loop (#22355) ## Summary - Timed-out proving jobs in `ProvingBroker.reEnqueueExpiredJobs()` were being re-enqueued without incrementing the retry count or checking `maxRetries`, creating an infinite retry loop for jobs that consistently time out. - Now the timeout path increments the retry counter and rejects jobs that exceed `maxRetries`, matching the behavior of the error retry path in `#reportProvingJobError`. 
Fixes [A-715](https://linear.app/aztec-labs/issue/A-715/audit-35-timed-out-proving-jobs-re-enqueue-without-incrementing-retry) --- .../src/proving_broker/proving_broker.test.ts | 24 +++++++++++++++++++ .../src/proving_broker/proving_broker.ts | 20 ++++++++++++++-- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/yarn-project/prover-client/src/proving_broker/proving_broker.test.ts b/yarn-project/prover-client/src/proving_broker/proving_broker.test.ts index 8d87a5d424fc..ea75037f0150 100644 --- a/yarn-project/prover-client/src/proving_broker/proving_broker.test.ts +++ b/yarn-project/prover-client/src/proving_broker/proving_broker.test.ts @@ -894,6 +894,30 @@ describe.each([ await assertJobStatus(id, 'not-found'); }); + it('rejects jobs that time out more than maxRetries times', async () => { + const id = makeRandomProvingJobId(); + await broker.enqueueProvingJob({ + id, + type: ProvingRequestType.PARITY_BASE, + epochNumber: EpochNumber(1), + inputsUri: makeInputsUri(), + }); + + for (let i = 0; i < maxRetries; i++) { + await assertJobStatus(id, 'in-queue'); + await getAndAssertNextJobId(id); + await assertJobStatus(id, 'in-progress'); + + await sleep(jobTimeoutMs); + await assertJobTransition(id, 'in-progress', i + 1 < maxRetries ? 
'in-queue' : 'rejected'); + } + + await expect(broker.getProvingJobStatus(id)).resolves.toEqual({ + status: 'rejected', + reason: 'Timed out', + }); + }); + it('keeps the jobs in progress while it is alive', async () => { const id = makeRandomProvingJobId(); await broker.enqueueProvingJob({ diff --git a/yarn-project/prover-client/src/proving_broker/proving_broker.ts b/yarn-project/prover-client/src/proving_broker/proving_broker.ts index 8d9db0e1bf6e..6e9cf3f0651d 100644 --- a/yarn-project/prover-client/src/proving_broker/proving_broker.ts +++ b/yarn-project/prover-client/src/proving_broker/proving_broker.ts @@ -632,10 +632,26 @@ export class ProvingBroker implements ProvingJobProducer, ProvingJobConsumer, Pr const now = this.msTimeSource(); const msSinceLastUpdate = now - metadata.lastUpdatedAt; if (msSinceLastUpdate >= this.jobTimeoutMs) { - this.logger.warn(`Proving job id=${id} timed out. Adding it back to the queue.`, { provingJobId: id }); this.inProgress.delete(id); - this.enqueueJobInternal(item); this.instrumentation.incTimedOutJobs(item.type); + + const retries = this.retries.get(id) ?? 0; + if (retries + 1 < this.maxRetries && !this.isJobStale(item)) { + this.logger.warn(`Proving job id=${id} timed out. Re-enqueueing (retry ${retries + 1}/${this.maxRetries}).`, { + provingJobId: id, + }); + this.retries.set(id, retries + 1); + this.enqueueJobInternal(item); + } else { + this.logger.error(`Proving job id=${id} timed out after ${retries + 1} attempts. 
Marking as failed.`, { + provingJobId: id, + }); + const result: ProvingJobSettledResult = { status: 'rejected', reason: 'Timed out' }; + this.resultsCache.set(id, result); + this.promises.get(id)?.resolve(result); + this.completedJobNotifications.push(id); + this.instrumentation.incRejectedJobs(item.type); + } } } } From 064a1b2e1e56b00a690ccd9563b52cfdfc17e4e4 Mon Sep 17 00:00:00 2001 From: spypsy Date: Fri, 10 Apr 2026 00:14:11 +0100 Subject: [PATCH 10/11] fix: remove redundant p2pClient.start() call (#22438) Removes the duplicate `p2pClient.start()`. The P2P client is already fully started (and synced) by the unconditional `await p2pClient.start()` earlier in `createAztecNode`. The second call just hits the guard clause (`if state !== IDLE return syncPromise`) and is a no-op. This is just for cleanup Fixes [A-736](https://linear.app/aztec-labs/issue/A-736/audit-60-p2p-client-start-called-twice-in-serverts-duplicate-listeners) --- yarn-project/aztec-node/src/aztec-node/server.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 4d8d84d58057..37be748394b7 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -466,7 +466,6 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { void archiver .waitForInitialSync() .then(async () => { - await p2pClient.start(); await validatorsSentinel?.start(); await epochPruneWatcher?.start(); await attestationsBlockWatcher?.start(); From 60b68ad73a781df361bc433ed549c2449ef91bd2 Mon Sep 17 00:00:00 2001 From: Aztec Bot <49558828+AztecBot@users.noreply.github.com> Date: Thu, 9 Apr 2026 19:30:26 -0400 Subject: [PATCH 11/11] chore: add kubectl binary to spartan .gitignore (#22454) ## Summary - Add `scripts/kubectl` to `spartan/.gitignore` to prevent the downloaded kubectl binary from being accidentally committed - The binary is downloaded 
by `spartan/scripts/deploy_network.sh` at runtime ClaudeBox log: https://claudebox.work/s/98e0d3700d589d71?run=3 --- spartan/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/spartan/.gitignore b/spartan/.gitignore index 6594d1aed966..a2b3a200dd90 100644 --- a/spartan/.gitignore +++ b/spartan/.gitignore @@ -1,6 +1,7 @@ *.tgz !terraform/modules/web3signer/web3signer-1.0.6.tgz !terraform/deploy-external-secrets/external-secrets-*.tgz +scripts/kubectl scripts/logs scripts/LICENSE tfplan