Providing eth68 stubs while setting eth67 as default (#2129)
* Suspend `snap` sync tests why: Snap needs to be updated. While this is not done, tests are obsolete and eat up memory resources only * Suspend rocks DB timing tests why: Currently of no use for general test suite. * Mothballed unused evm code for opportunistic DB handling why: Needs to be refactored anyway. Uses hard coded protocol dependencies. * Update `eth` protocol configuration why: + new upcoming version `eth68` + prepare for eth-multi protocol stack support * Strip the `legacy_` prefix from eth66 compiler flag variable why: Being it is an oddity having `eth67_enabled`, `eth68_enabled`, and `legacy_eth_66_enabled`. * Providing eth68 stubs * Fix copyright year * Fix eth68 stub method name
This commit is contained in:
parent
b6f8a000b9
commit
dac7a2cbe1
12
Makefile
12
Makefile
|
@ -56,6 +56,7 @@ EXCLUDED_NIM_PACKAGES := \
|
|||
|
||||
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
|
||||
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
|
||||
-include ./nimbus/sync/protocol/eth/eth-variables.mk
|
||||
|
||||
# debugging tools + testing tools
|
||||
TOOLS := \
|
||||
|
@ -195,15 +196,8 @@ ifneq ($(if $(ENABLE_VMLOWMEM),$(ENABLE_VMLOWMEM),0),0)
|
|||
NIM_PARAMS += -d:lowmem:1
|
||||
endif
|
||||
|
||||
# chunked messages enabled by default, use ENABLE_CHUNKED_RLPX=0 to disable
|
||||
ifneq ($(if $(ENABLE_CHUNKED_RLPX),$(ENABLE_CHUNKED_RLPX),1),0)
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:chunked_rlpx_enabled
|
||||
endif
|
||||
|
||||
# legacy wire protocol enabled by default, use ENABLE_LEGACY_ETH66=0 to disable
|
||||
ifneq ($(if $(ENABLE_LEGACY_ETH66),$(ENABLE_LEGACY_ETH66),1),0)
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:legacy_eth66_enabled
|
||||
endif
|
||||
# eth protocol settings, rules from "nimbus/sync/protocol/eth/variables.mk"
|
||||
NIM_PARAMS := $(NIM_PARAMS) $(NIM_ETH_PARAMS)
|
||||
|
||||
#- deletes and recreates "nimbus.nims" which on Windows is a copy instead of a proper symlink
|
||||
update: | update-common
|
||||
|
|
|
@ -230,6 +230,9 @@ available.)
|
|||
Disable legacy chunked RLPx messages which are enabled by default for
|
||||
synchronising against `Nethermind` nodes
|
||||
|
||||
* ENABLE_ETH_VERSION=66<br>
|
||||
Enable legacy protocol `eth66` (or whatever other protocol version.)
|
||||
|
||||
* ENABLE_EVMC=1<br>
|
||||
Enable mostly EVMC compliant wrapper around the native Nim VM
|
||||
|
||||
|
|
|
@ -29,11 +29,6 @@ import
|
|||
web3/conversions,
|
||||
web3
|
||||
|
||||
when defined(legacy_eth66_enabled):
|
||||
#import
|
||||
#../../../sync/protocol/eth66 as proto_eth66
|
||||
from ../../../sync/protocol/eth66 import getNodeData
|
||||
|
||||
export AsyncOperationFactory, AsyncDataSource
|
||||
|
||||
type
|
||||
|
@ -85,41 +80,7 @@ proc fetchBlockHeaderWithNumber*(rpcClient: RpcClient, n: common.BlockNumber): F
|
|||
fetchCounter += 1
|
||||
return blockHeaderFromBlockObject(blockObject)
|
||||
|
||||
#[
|
||||
proc parseBlockBodyAndFetchUncles(rpcClient: RpcClient, r: JsonNode): Future[BlockBody] {.async.} =
|
||||
var body: BlockBody
|
||||
for tn in r["transactions"].getElems:
|
||||
body.transactions.add(parseTransaction(tn))
|
||||
for un in r["uncles"].getElems:
|
||||
let uncleHash: Hash256 = un.getStr.ethHash
|
||||
let uncleHeader = await fetchBlockHeaderWithHash(rpcClient, uncleHash)
|
||||
body.uncles.add(uncleHeader)
|
||||
return body
|
||||
|
||||
proc fetchBlockHeaderAndBodyWithHash*(rpcClient: RpcClient, h: Hash256): Future[(BlockHeader, BlockBody)] {.async.} =
|
||||
let t0 = now()
|
||||
let r = request("eth_getBlockByHash", %[%h.prefixHex, %true], some(rpcClient))
|
||||
durationSpentDoingFetches += now() - t0
|
||||
fetchCounter += 1
|
||||
if r.kind == JNull:
|
||||
error "requested block not available", blockHash=h
|
||||
raise newException(ValueError, "Error when retrieving block header and body")
|
||||
let header = parseBlockHeader(r)
|
||||
let body = await parseBlockBodyAndFetchUncles(rpcClient, r)
|
||||
return (header, body)
|
||||
|
||||
proc fetchBlockHeaderAndBodyWithNumber*(rpcClient: RpcClient, n: BlockNumber): Future[(BlockHeader, BlockBody)] {.async.} =
|
||||
let t0 = now()
|
||||
let r = request("eth_getBlockByNumber", %[%n.prefixHex, %true], some(rpcClient))
|
||||
durationSpentDoingFetches += now() - t0
|
||||
fetchCounter += 1
|
||||
if r.kind == JNull:
|
||||
error "requested block not available", blockNumber=n
|
||||
raise newException(ValueError, "Error when retrieving block header and body")
|
||||
let header = parseBlockHeader(r)
|
||||
let body = await parseBlockBodyAndFetchUncles(rpcClient, r)
|
||||
return (header, body)
|
||||
]#
|
||||
# Cruft moved to `json_rpc_data_source/currently_unused.nim`
|
||||
|
||||
proc fetchBlockHeaderAndBodyWithHash*(rpcClient: RpcClient, h: common.Hash256): Future[(common.BlockHeader, BlockBody)] {.async.} =
|
||||
doAssert(false, "AARDVARK not implemented")
|
||||
|
@ -167,64 +128,7 @@ proc fetchCode*(client: RpcClient, blockNumber: common.BlockNumber, address: Eth
|
|||
fetchCounter += 1
|
||||
return fetchedCode
|
||||
|
||||
#[
|
||||
const bytesLimit = 2 * 1024 * 1024
|
||||
const maxNumberOfPeersToAttempt = 3
|
||||
|
||||
proc fetchUsingGetTrieNodes(peer: Peer, stateRoot: common.Hash256, paths: seq[SnapTriePaths]): Future[seq[seq[byte]]] {.async.} =
|
||||
let r = await peer.getTrieNodes(stateRoot, paths, bytesLimit)
|
||||
if r.isNone:
|
||||
raise newException(CatchableError, "AARDVARK: received None in GetTrieNodes response")
|
||||
else:
|
||||
return r.get.nodes
|
||||
|
||||
proc fetchUsingGetNodeData(peer: Peer, nodeHashes: seq[common.Hash256]): Future[seq[seq[byte]]] {.async.} =
|
||||
|
||||
let r: Option[seq[seq[byte]]] = none[seq[seq[byte]]]() # AARDVARK await peer.getNodeData(nodeHashes)
|
||||
if r.isNone:
|
||||
raise newException(CatchableError, "AARDVARK: received None in GetNodeData response")
|
||||
else:
|
||||
echo "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA fetchUsingGetNodeData received nodes: " & $(r.get.data)
|
||||
return r.get.data
|
||||
|
||||
# AARDVARK whatever
|
||||
return @[]
|
||||
|
||||
proc findPeersAndMakeSomeCalls[R](peerPool: PeerPool, protocolName: string, protocolType: typedesc, initiateAttempt: (proc(p: Peer): Future[R] {.gcsafe, raises: [].})): Future[seq[Future[R]]] {.async.} =
|
||||
var attempts: seq[Future[R]]
|
||||
while true:
|
||||
#info("AARDVARK: findPeersAndMakeSomeCalls about to loop through the peer pool", count=peerPool.connectedNodes.len)
|
||||
for nodeOfSomeSort, peer in peerPool.connectedNodes:
|
||||
if peer.supports(protocolType):
|
||||
info("AARDVARK: findPeersAndMakeSomeCalls calling peer", protocolName, peer)
|
||||
attempts.add(initiateAttempt(peer))
|
||||
if attempts.len >= maxNumberOfPeersToAttempt:
|
||||
break
|
||||
#else:
|
||||
# info("AARDVARK: peer does not support protocol", protocolName, peer)
|
||||
if attempts.len == 0:
|
||||
warn("AARDVARK: findPeersAndMakeSomeCalls did not find any peers; waiting and trying again", protocolName, totalPeerPoolSize=peerPool.connectedNodes.len)
|
||||
await sleepAsync(chronos.seconds(5))
|
||||
else:
|
||||
if attempts.len < maxNumberOfPeersToAttempt:
|
||||
warn("AARDVARK: findPeersAndMakeSomeCalls did not find enough peers, but found some", protocolName, totalPeerPoolSize=peerPool.connectedNodes.len, found=attempts.len)
|
||||
break
|
||||
return attempts
|
||||
|
||||
proc findPeersAndMakeSomeAttemptsToCallGetTrieNodes(peerPool: PeerPool, stateRoot: common.Hash256, paths: seq[SnapTriePaths]): Future[seq[Future[seq[seq[byte]]]]] =
|
||||
findPeersAndMakeSomeCalls(peerPool, "snap", protocol.snap, (proc(peer: Peer): Future[seq[seq[byte]]] = fetchUsingGetTrieNodes(peer, stateRoot, paths)))
|
||||
|
||||
proc findPeersAndMakeSomeAttemptsToCallGetNodeData(peerPool: PeerPool, stateRoot: Hash256, nodeHashes: seq[Hash256]): Future[seq[Future[seq[seq[byte]]]]] =
|
||||
findPeersAndMakeSomeCalls(peerPool, "eth66", eth66, (proc(peer: Peer): Future[seq[seq[byte]]] = fetchUsingGetNodeData(peer, nodeHashes)))
|
||||
|
||||
proc fetchNodes(peerPool: PeerPool, stateRoot: common.Hash256, paths: seq[SnapTriePaths], nodeHashes: seq[common.Hash256]): Future[seq[seq[byte]]] {.async.} =
|
||||
let attempts = await findPeersAndMakeSomeAttemptsToCallGetTrieNodes(peerPool, stateRoot, paths)
|
||||
#let attempts = await findPeersAndMakeSomeAttemptsToCallGetNodeData(peerPool, stateRoot, nodeHashes)
|
||||
let completedAttempt = await one(attempts)
|
||||
let nodes: seq[seq[byte]] = completedAttempt.read
|
||||
info("AARDVARK: fetchNodes received nodes", nodes)
|
||||
return nodes
|
||||
]#
|
||||
# Cruft moved to json_rpc_data_source/currently_unused.nim
|
||||
|
||||
proc verifyFetchedAccount(stateRoot: common.Hash256, address: EthAddress, acc: Account, accProof: seq[seq[byte]]): Result[void, string] =
|
||||
let accKey = toSeq(keccakHash(address).data)
|
||||
|
|
|
@ -0,0 +1,133 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
import
|
||||
std/[sequtils, typetraits, options, times, json],
|
||||
chronicles,
|
||||
chronos,
|
||||
nimcrypto,
|
||||
stint,
|
||||
stew/byteutils,
|
||||
json_rpc/rpcclient,
|
||||
eth/common,
|
||||
eth/rlp,
|
||||
eth/trie/hexary_proof_verification,
|
||||
eth/p2p,
|
||||
eth/p2p/rlpx,
|
||||
eth/p2p/private/p2p_types,
|
||||
#../../../sync/protocol,
|
||||
../../../../db/[core_db, distinct_tries, incomplete_db, storage_types],
|
||||
../../data_sources,
|
||||
../../../../beacon/web3_eth_conv,
|
||||
web3/conversions,
|
||||
web3
|
||||
|
||||
when defined(legacy_eth66_enabled):
|
||||
from ../../../sync/protocol/eth66 import getNodeData
|
||||
|
||||
# Comment extracted from `json_rpc_data_source.nim` line 83
|
||||
# ---------------------------------------------------------
|
||||
|
||||
proc parseBlockBodyAndFetchUncles(rpcClient: RpcClient, r: JsonNode): Future[BlockBody] {.async.} =
|
||||
var body: BlockBody
|
||||
for tn in r["transactions"].getElems:
|
||||
body.transactions.add(parseTransaction(tn))
|
||||
for un in r["uncles"].getElems:
|
||||
let uncleHash: Hash256 = un.getStr.ethHash
|
||||
let uncleHeader = await fetchBlockHeaderWithHash(rpcClient, uncleHash)
|
||||
body.uncles.add(uncleHeader)
|
||||
return body
|
||||
|
||||
proc fetchBlockHeaderAndBodyWithHash*(rpcClient: RpcClient, h: Hash256): Future[(BlockHeader, BlockBody)] {.async.} =
|
||||
let t0 = now()
|
||||
let r = request("eth_getBlockByHash", %[%h.prefixHex, %true], some(rpcClient))
|
||||
durationSpentDoingFetches += now() - t0
|
||||
fetchCounter += 1
|
||||
if r.kind == JNull:
|
||||
error "requested block not available", blockHash=h
|
||||
raise newException(ValueError, "Error when retrieving block header and body")
|
||||
let header = parseBlockHeader(r)
|
||||
let body = await parseBlockBodyAndFetchUncles(rpcClient, r)
|
||||
return (header, body)
|
||||
|
||||
proc fetchBlockHeaderAndBodyWithNumber*(rpcClient: RpcClient, n: BlockNumber): Future[(BlockHeader, BlockBody)] {.async.} =
|
||||
let t0 = now()
|
||||
let r = request("eth_getBlockByNumber", %[%n.prefixHex, %true], some(rpcClient))
|
||||
durationSpentDoingFetches += now() - t0
|
||||
fetchCounter += 1
|
||||
if r.kind == JNull:
|
||||
error "requested block not available", blockNumber=n
|
||||
raise newException(ValueError, "Error when retrieving block header and body")
|
||||
let header = parseBlockHeader(r)
|
||||
let body = await parseBlockBodyAndFetchUncles(rpcClient, r)
|
||||
return (header, body)
|
||||
|
||||
|
||||
# Comment extracted from `json_rpc_data_source.nim` line 131
|
||||
# ----------------------------------------------------------
|
||||
|
||||
const bytesLimit = 2 * 1024 * 1024
|
||||
const maxNumberOfPeersToAttempt = 3
|
||||
|
||||
proc fetchUsingGetTrieNodes(peer: Peer, stateRoot: common.Hash256, paths: seq[SnapTriePaths]): Future[seq[seq[byte]]] {.async.} =
|
||||
let r = await peer.getTrieNodes(stateRoot, paths, bytesLimit)
|
||||
if r.isNone:
|
||||
raise newException(CatchableError, "AARDVARK: received None in GetTrieNodes response")
|
||||
else:
|
||||
return r.get.nodes
|
||||
|
||||
proc fetchUsingGetNodeData(peer: Peer, nodeHashes: seq[common.Hash256]): Future[seq[seq[byte]]] {.async.} =
|
||||
|
||||
let r: Option[seq[seq[byte]]] = none[seq[seq[byte]]]() # AARDVARK await peer.getNodeData(nodeHashes)
|
||||
if r.isNone:
|
||||
raise newException(CatchableError, "AARDVARK: received None in GetNodeData response")
|
||||
else:
|
||||
echo "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA fetchUsingGetNodeData received nodes: " & $(r.get.data)
|
||||
return r.get.data
|
||||
|
||||
# AARDVARK whatever
|
||||
return @[]
|
||||
|
||||
proc findPeersAndMakeSomeCalls[R](peerPool: PeerPool, protocolName: string, protocolType: typedesc, initiateAttempt: (proc(p: Peer): Future[R] {.gcsafe, raises: [].})): Future[seq[Future[R]]] {.async.} =
|
||||
var attempts: seq[Future[R]]
|
||||
while true:
|
||||
#info("AARDVARK: findPeersAndMakeSomeCalls about to loop through the peer pool", count=peerPool.connectedNodes.len)
|
||||
for nodeOfSomeSort, peer in peerPool.connectedNodes:
|
||||
if peer.supports(protocolType):
|
||||
info("AARDVARK: findPeersAndMakeSomeCalls calling peer", protocolName, peer)
|
||||
attempts.add(initiateAttempt(peer))
|
||||
if attempts.len >= maxNumberOfPeersToAttempt:
|
||||
break
|
||||
#else:
|
||||
# info("AARDVARK: peer does not support protocol", protocolName, peer)
|
||||
if attempts.len == 0:
|
||||
warn("AARDVARK: findPeersAndMakeSomeCalls did not find any peers; waiting and trying again", protocolName, totalPeerPoolSize=peerPool.connectedNodes.len)
|
||||
await sleepAsync(chronos.seconds(5))
|
||||
else:
|
||||
if attempts.len < maxNumberOfPeersToAttempt:
|
||||
warn("AARDVARK: findPeersAndMakeSomeCalls did not find enough peers, but found some", protocolName, totalPeerPoolSize=peerPool.connectedNodes.len, found=attempts.len)
|
||||
break
|
||||
return attempts
|
||||
|
||||
proc findPeersAndMakeSomeAttemptsToCallGetTrieNodes(peerPool: PeerPool, stateRoot: common.Hash256, paths: seq[SnapTriePaths]): Future[seq[Future[seq[seq[byte]]]]] =
|
||||
findPeersAndMakeSomeCalls(peerPool, "snap", protocol.snap, (proc(peer: Peer): Future[seq[seq[byte]]] = fetchUsingGetTrieNodes(peer, stateRoot, paths)))
|
||||
|
||||
proc findPeersAndMakeSomeAttemptsToCallGetNodeData(peerPool: PeerPool, stateRoot: Hash256, nodeHashes: seq[Hash256]): Future[seq[Future[seq[seq[byte]]]]] =
|
||||
findPeersAndMakeSomeCalls(peerPool, "eth66", eth66, (proc(peer: Peer): Future[seq[seq[byte]]] = fetchUsingGetNodeData(peer, nodeHashes)))
|
||||
|
||||
proc fetchNodes(peerPool: PeerPool, stateRoot: common.Hash256, paths: seq[SnapTriePaths], nodeHashes: seq[common.Hash256]): Future[seq[seq[byte]]] {.async.} =
|
||||
let attempts = await findPeersAndMakeSomeAttemptsToCallGetTrieNodes(peerPool, stateRoot, paths)
|
||||
#let attempts = await findPeersAndMakeSomeAttemptsToCallGetNodeData(peerPool, stateRoot, nodeHashes)
|
||||
let completedAttempt = await one(attempts)
|
||||
let nodes: seq[seq[byte]] = completedAttempt.read
|
||||
info("AARDVARK: fetchNodes received nodes", nodes)
|
||||
return nodes
|
||||
|
||||
# End
|
|
@ -21,6 +21,12 @@ import
|
|||
../protocol/trace_config, # gossip noise control
|
||||
../../core/[chain, tx_pool, tx_pool/tx_item]
|
||||
|
||||
# There is only one eth protocol version possible at compile time. This
|
||||
# might change in future.
|
||||
when ethVersion == 68:
|
||||
import
|
||||
std/sequtils
|
||||
|
||||
logScope:
|
||||
topics = "eth-wire"
|
||||
|
||||
|
@ -221,7 +227,6 @@ proc sendNewTxHashes(ctx: EthWireRef,
|
|||
txHashes: seq[Hash256],
|
||||
peers: seq[Peer]): Future[void] {.async.} =
|
||||
try:
|
||||
|
||||
for peer in peers:
|
||||
# Add to known tx hashes and get hashes still to send to peer
|
||||
var hashesToSend: seq[Hash256]
|
||||
|
@ -229,7 +234,15 @@ proc sendNewTxHashes(ctx: EthWireRef,
|
|||
|
||||
# Broadcast to peer if at least 1 new tx hash to announce
|
||||
if hashesToSend.len > 0:
|
||||
await peer.newPooledTransactionHashes(hashesToSend)
|
||||
# Currently only one protocol version is available as compiled
|
||||
when ethVersion == 68:
|
||||
await newPooledTransactionHashes(
|
||||
peer,
|
||||
1u8.repeat hashesToSend.len, # type
|
||||
0.repeat hashesToSend.len, # sizes
|
||||
hashesToSend)
|
||||
else:
|
||||
await newPooledTransactionHashes(peer, hashesToSend)
|
||||
|
||||
except TransportError:
|
||||
debug "Transport got closed during sendNewTxHashes"
|
||||
|
@ -241,7 +254,6 @@ proc sendTransactions(ctx: EthWireRef,
|
|||
txs: seq[Transaction],
|
||||
peers: seq[Peer]): Future[void] {.async.} =
|
||||
try:
|
||||
|
||||
for peer in peers:
|
||||
# This is used to avoid re-sending along pooledTxHashes
|
||||
# announcements/re-broadcasts
|
||||
|
@ -536,40 +548,53 @@ method handleAnnouncedTxs*(ctx: EthWireRef,
|
|||
except CatchableError as exc:
|
||||
return err(exc.msg)
|
||||
|
||||
method handleAnnouncedTxsHashes*(ctx: EthWireRef,
|
||||
peer: Peer,
|
||||
txHashes: openArray[Hash256]):
|
||||
Result[void, string] =
|
||||
if ctx.enableTxPool != Enabled:
|
||||
when trMissingOrDisabledGossipOk:
|
||||
notEnabled("handleAnnouncedTxsHashes")
|
||||
when ethVersion == 68:
|
||||
method handleAnnouncedTxsHashes*(
|
||||
ctx: EthWireRef;
|
||||
peer: Peer;
|
||||
txTypes: Blob;
|
||||
txSizes: openArray[int];
|
||||
txHashes: openArray[Hash256];
|
||||
): Result[void, string] =
|
||||
## `Eth68` method
|
||||
notImplemented "handleAnnouncedTxsHashes()/eth68"
|
||||
|
||||
else:
|
||||
method handleAnnouncedTxsHashes*(ctx: EthWireRef,
|
||||
peer: Peer,
|
||||
txHashes: openArray[Hash256]):
|
||||
Result[void, string] =
|
||||
## Pre-eth68 method
|
||||
if ctx.enableTxPool != Enabled:
|
||||
when trMissingOrDisabledGossipOk:
|
||||
notEnabled("handleAnnouncedTxsHashes")
|
||||
return ok()
|
||||
|
||||
if txHashes.len == 0:
|
||||
return ok()
|
||||
|
||||
if ctx.lastCleanup - getTime() > POOLED_STORAGE_TIME_LIMIT:
|
||||
ctx.cleanupKnownByPeer()
|
||||
|
||||
ctx.addToKnownByPeer(txHashes, peer)
|
||||
var reqHashes = newSeqOfCap[Hash256](txHashes.len)
|
||||
for txHash in txHashes:
|
||||
if txHash in ctx.pending or ctx.inPool(txHash):
|
||||
continue
|
||||
reqHashes.add txHash
|
||||
|
||||
if reqHashes.len == 0:
|
||||
return ok()
|
||||
|
||||
debug "handleAnnouncedTxsHashes: received new tx hashes",
|
||||
number = reqHashes.len
|
||||
|
||||
for txHash in reqHashes:
|
||||
ctx.pending.incl txHash
|
||||
|
||||
asyncSpawn ctx.fetchTransactions(reqHashes, peer)
|
||||
return ok()
|
||||
|
||||
if txHashes.len == 0:
|
||||
return ok()
|
||||
|
||||
if ctx.lastCleanup - getTime() > POOLED_STORAGE_TIME_LIMIT:
|
||||
ctx.cleanupKnownByPeer()
|
||||
|
||||
ctx.addToKnownByPeer(txHashes, peer)
|
||||
var reqHashes = newSeqOfCap[Hash256](txHashes.len)
|
||||
for txHash in txHashes:
|
||||
if txHash in ctx.pending or ctx.inPool(txHash):
|
||||
continue
|
||||
reqHashes.add txHash
|
||||
|
||||
if reqHashes.len == 0:
|
||||
return ok()
|
||||
|
||||
debug "handleAnnouncedTxsHashes: received new tx hashes",
|
||||
number = reqHashes.len
|
||||
|
||||
for txHash in reqHashes:
|
||||
ctx.pending.incl txHash
|
||||
|
||||
asyncSpawn ctx.fetchTransactions(reqHashes, peer)
|
||||
return ok()
|
||||
|
||||
method handleNewBlock*(ctx: EthWireRef,
|
||||
peer: Peer,
|
||||
blk: EthBlock,
|
||||
|
@ -615,7 +640,9 @@ method handleNewBlockHashes*(ctx: EthWireRef,
|
|||
except CatchableError as exc:
|
||||
return err(exc.msg)
|
||||
|
||||
when defined(legacy_eth66_enabled):
|
||||
# There is only one eth protocol version possible at compile time. This
|
||||
# might change in future.
|
||||
when ethVersion == 66:
|
||||
method getStorageNodes*(ctx: EthWireRef,
|
||||
hashes: openArray[Hash256]):
|
||||
Result[seq[Blob], string] {.gcsafe.} =
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * MIT license (license terms in the root directory or at
|
||||
# https://opensource.org/licenses/MIT).
|
||||
|
@ -8,13 +8,43 @@
|
|||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
when defined(legacy_eth66_enabled):
|
||||
import ./protocol/eth66 as proto_eth
|
||||
type eth* = eth66
|
||||
else:
|
||||
## Provision of `eth` and `snap` protocol version parameters
|
||||
##
|
||||
## `Eth` related parameters:
|
||||
## `ethVersions`: seq[int] -- constant list of all available versions
|
||||
## `eth` -- type symbol of default version
|
||||
## `proto_eth` -- export of default version directives
|
||||
##
|
||||
## `Snap` related parameters:
|
||||
## `snap` -- type symbol of default version
|
||||
## `proto_snap` -- export of default version directives
|
||||
## ..aliases.. -- type names, syntactic sugar (see below)
|
||||
##
|
||||
|
||||
include
|
||||
./protocol/eth/eth_versions
|
||||
|
||||
assert 0 < ethVersions.len
|
||||
|
||||
# multi protocol not functional, yet
|
||||
when 1 < buildEthVersions.len:
|
||||
{.warning: "No multi eth-protocol yet (using latest version only)".}
|
||||
|
||||
when 68 in ethVersions:
|
||||
{.warning: "Protocol eth68 is not fully functional yet".}
|
||||
import ./protocol/eth68 as proto_eth
|
||||
type eth* = eth68
|
||||
|
||||
elif 67 in ethVersions:
|
||||
import ./protocol/eth67 as proto_eth
|
||||
type eth* = eth67
|
||||
|
||||
elif 66 in ethVersions:
|
||||
import ./protocol/eth66 as proto_eth
|
||||
type eth* = eth66
|
||||
|
||||
# ---------------
|
||||
|
||||
import
|
||||
./protocol/snap1 as proto_snap
|
||||
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
# Copyright (c) 2024 Status Research & Development GmbH. Licensed under
|
||||
# either of:
|
||||
# - Apache License, version 2.0
|
||||
# - MIT license
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
# GNU-Makefile include
|
||||
#
|
||||
# When running make, the instructions here will define the variable
|
||||
# "NIM_ETH_PARAMS" which should be appended to the nim compiler options.
|
||||
#
|
||||
# This makefile snippet supports multi-protocol arguments as in
|
||||
# "ENABLE_ETH_VERSION=66:67:999" where available protocol versions are
|
||||
# extracted and processed. In this case, protocol version 999 will be
|
||||
# silently ignored.
|
||||
|
||||
# default wire protocol, triggered by empty/unset variable or zero value
|
||||
ifeq ($(if $(ENABLE_ETH_VERSION),$(ENABLE_ETH_VERSION),0),0)
|
||||
NIM_ETH_PARAMS := $(NIM_ETH_PARAMS) -d:eth67_enabled
|
||||
endif
|
||||
|
||||
# parse list for supported items
|
||||
ifneq ($(findstring :66:,:$(ENABLE_ETH_VERSION):),)
|
||||
NIM_ETH_PARAMS := $(NIM_ETH_PARAMS) -d:eth66_enabled
|
||||
endif
|
||||
|
||||
ifneq ($(findstring :67:,:$(ENABLE_ETH_VERSION):),)
|
||||
NIM_ETH_PARAMS := $(NIM_ETH_PARAMS) -d:eth67_enabled
|
||||
endif
|
||||
|
||||
ifneq ($(findstring :68:,:$(ENABLE_ETH_VERSION):),)
|
||||
NIM_ETH_PARAMS := $(NIM_ETH_PARAMS) -d:eth68_enabled
|
||||
endif
|
||||
|
||||
# There must be at least one protocol version.
|
||||
ifeq ($(NIM_ETH_PARAMS),)
|
||||
$(error Unacceptable protocol versions in "ENABLE_ETH_VERSION=$(ENABLE_ETH_VERSION)")
|
||||
endif
|
||||
|
||||
# ------------
|
||||
|
||||
# chunked messages enabled by default, use ENABLE_CHUNKED_RLPX=0 to disable
|
||||
ifneq ($(if $(ENABLE_CHUNKED_RLPX),$(ENABLE_CHUNKED_RLPX),1),0)
|
||||
NIM_ETH_PARAMS := $(NIM_ETH_PARAMS) -d:chunked_rlpx_enabled
|
||||
endif
|
||||
|
||||
# End
|
|
@ -16,6 +16,9 @@ import
|
|||
eth/[common, p2p, p2p/private/p2p_types],
|
||||
../../types
|
||||
|
||||
include
|
||||
./eth_versions # early compile time list of proto versions
|
||||
|
||||
logScope:
|
||||
topics = "eth-wire"
|
||||
|
||||
|
@ -93,12 +96,24 @@ method handleAnnouncedTxs*(ctx: EthWireBase,
|
|||
{.base, gcsafe.} =
|
||||
notImplemented("handleAnnouncedTxs")
|
||||
|
||||
method handleAnnouncedTxsHashes*(ctx: EthWireBase,
|
||||
peer: Peer,
|
||||
txHashes: openArray[Hash256]):
|
||||
Result[void, string]
|
||||
{.base, gcsafe.} =
|
||||
notImplemented("handleAnnouncedTxsHashes")
|
||||
# Most recent setting, only the latest version is active
|
||||
when 68 in ethVersions:
|
||||
method handleAnnouncedTxsHashes*(
|
||||
ctx: EthWireBase;
|
||||
peer: Peer;
|
||||
txTypes: Blob;
|
||||
txSizes: openArray[int];
|
||||
txHashes: openArray[Hash256];
|
||||
): Result[void, string]
|
||||
{.base, gcsafe.} =
|
||||
notImplemented("handleAnnouncedTxsHashes/eth68")
|
||||
else:
|
||||
method handleAnnouncedTxsHashes*(ctx: EthWireBase,
|
||||
peer: Peer,
|
||||
txHashes: openArray[Hash256]):
|
||||
Result[void, string]
|
||||
{.base, gcsafe.} =
|
||||
notImplemented("handleAnnouncedTxsHashes")
|
||||
|
||||
method handleNewBlockHashes*(ctx: EthWireBase,
|
||||
peer: Peer,
|
||||
|
@ -107,7 +122,8 @@ method handleNewBlockHashes*(ctx: EthWireBase,
|
|||
{.base, gcsafe.} =
|
||||
notImplemented("handleNewBlockHashes")
|
||||
|
||||
when defined(legacy_eth66_enabled):
|
||||
# Legacy setting, currently the latest version is active only
|
||||
when 66 in ethVersions and ethVersions.len == 1:
|
||||
method getStorageNodes*(ctx: EthWireBase,
|
||||
hashes: openArray[Hash256]):
|
||||
Result[seq[Blob], string]
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2024 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * MIT license (license terms in the root directory or at
|
||||
# https://opensource.org/licenses/MIT).
|
||||
# * Apache v2 license (license terms in the root directory or at
|
||||
# https://www.apache.org/licenses/LICENSE-2.0).
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Provision of `eth` versions list at compile time
|
||||
##
|
||||
## Note: This file allows reading the `ethVersions` list at each level of the
|
||||
## source code import hierarchy. It needs to be *included* though if
|
||||
## `ethVersions` needs to be available at compile time (as of
|
||||
## NIM 1.6.18). This allows constructing directives like:
|
||||
## `when 66 in ethVersions: ..`
|
||||
##
|
||||
|
||||
const
|
||||
buildEthVersions = block:
|
||||
var rc: seq[int]
|
||||
when defined(eth66_enabled): rc.add 66
|
||||
when defined(eth67_enabled): rc.add 67
|
||||
when defined(eth68_enabled): rc.add 68
|
||||
rc
|
||||
|
||||
# Default protocol only
|
||||
when buildEthVersions.len == 0:
|
||||
const ethVersions* = @[67]
|
||||
## Compile time list of available/supported eth versions
|
||||
else:
|
||||
# One or more protocols
|
||||
const ethVersions* = buildEthVersions
|
||||
## Compile time list of available/supported eth versions
|
||||
|
||||
# End
|
|
@ -0,0 +1,299 @@
|
|||
# Nimbus - Ethereum Wire Protocol
|
||||
#
|
||||
# Copyright (c) 2018-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## This module implements Ethereum Wire Protocol version 68, `eth/68`.
|
||||
## Specification:
|
||||
## `eth/68 <https://github.com/ethereum/devp2p/blob/master/caps/eth.md>`_
|
||||
|
||||
import
|
||||
stint,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p, p2p/private/p2p_types],
|
||||
stew/byteutils,
|
||||
./trace_config,
|
||||
./eth/eth_types,
|
||||
../types,
|
||||
../../utils/utils
|
||||
|
||||
export
|
||||
eth_types
|
||||
|
||||
logScope:
|
||||
topics = "eth68"
|
||||
|
||||
const
|
||||
ethVersion* = 68
|
||||
prettyEthProtoName* = "[eth/" & $ethVersion & "]"
|
||||
|
||||
# Pickeled tracer texts
|
||||
trEthRecvReceived* =
|
||||
"<< " & prettyEthProtoName & " Received "
|
||||
trEthRecvReceivedBlockHeaders* =
|
||||
trEthRecvReceived & "BlockHeaders (0x04)"
|
||||
trEthRecvReceivedBlockBodies* =
|
||||
trEthRecvReceived & "BlockBodies (0x06)"
|
||||
|
||||
trEthRecvProtocolViolation* =
|
||||
"<< " & prettyEthProtoName & " Protocol violation, "
|
||||
trEthRecvError* =
|
||||
"<< " & prettyEthProtoName & " Error "
|
||||
trEthRecvTimeoutWaiting* =
|
||||
"<< " & prettyEthProtoName & " Timeout waiting "
|
||||
trEthRecvDiscarding* =
|
||||
"<< " & prettyEthProtoName & " Discarding "
|
||||
|
||||
trEthSendSending* =
|
||||
">> " & prettyEthProtoName & " Sending "
|
||||
trEthSendSendingGetBlockHeaders* =
|
||||
trEthSendSending & "GetBlockHeaders (0x03)"
|
||||
trEthSendSendingGetBlockBodies* =
|
||||
trEthSendSending & "GetBlockBodies (0x05)"
|
||||
|
||||
trEthSendReplying* =
|
||||
">> " & prettyEthProtoName & " Replying "
|
||||
|
||||
trEthSendDelaying* =
|
||||
">> " & prettyEthProtoName & " Delaying "
|
||||
|
||||
trEthRecvNewBlock* =
|
||||
"<< " & prettyEthProtoName & " Received NewBlock"
|
||||
trEthRecvNewBlockHashes* =
|
||||
"<< " & prettyEthProtoName & " Received NewBlockHashes"
|
||||
trEthSendNewBlock* =
|
||||
">> " & prettyEthProtoName & " Sending NewBlock"
|
||||
trEthSendNewBlockHashes* =
|
||||
">> " & prettyEthProtoName & " Sending NewBlockHashes"
|
||||
|
||||
template handleHandlerError(x: untyped) =
|
||||
if x.isErr:
|
||||
raise newException(EthP2PError, x.error)
|
||||
|
||||
p2pProtocol eth68(version = ethVersion,
|
||||
rlpxName = "eth",
|
||||
peerState = EthPeerState,
|
||||
networkState = EthWireBase,
|
||||
useRequestIds = true):
|
||||
|
||||
onPeerConnected do (peer: Peer):
|
||||
let
|
||||
network = peer.network
|
||||
ctx = peer.networkState
|
||||
statusRes = ctx.getStatus()
|
||||
|
||||
handleHandlerError(statusRes)
|
||||
let status = statusRes.get
|
||||
|
||||
trace trEthSendSending & "Status (0x00)", peer,
|
||||
td = status.totalDifficulty,
|
||||
bestHash = short(status.bestBlockHash),
|
||||
networkId = network.networkId,
|
||||
genesis = short(status.genesisHash),
|
||||
forkHash = status.forkId.forkHash.toHex,
|
||||
forkNext = status.forkId.forkNext
|
||||
|
||||
let m = await peer.status(ethVersion,
|
||||
network.networkId,
|
||||
status.totalDifficulty,
|
||||
status.bestBlockHash,
|
||||
status.genesisHash,
|
||||
status.forkId,
|
||||
timeout = chronos.seconds(10))
|
||||
|
||||
when trEthTraceHandshakesOk:
|
||||
trace "Handshake: Local and remote networkId",
|
||||
local=network.networkId, remote=m.networkId
|
||||
trace "Handshake: Local and remote genesisHash",
|
||||
local=short(status.genesisHash), remote=short(m.genesisHash)
|
||||
trace "Handshake: Local and remote forkId",
|
||||
local=(status.forkId.forkHash.toHex & "/" & $status.forkId.forkNext),
|
||||
remote=(m.forkId.forkHash.toHex & "/" & $m.forkId.forkNext)
|
||||
|
||||
if m.networkId != network.networkId:
|
||||
trace "Peer for a different network (networkId)", peer,
|
||||
expectNetworkId=network.networkId, gotNetworkId=m.networkId
|
||||
raise newException(
|
||||
UselessPeerError, "Eth handshake for different network")
|
||||
|
||||
if m.genesisHash != status.genesisHash:
|
||||
trace "Peer for a different network (genesisHash)", peer,
|
||||
expectGenesis=short(status.genesisHash), gotGenesis=short(m.genesisHash)
|
||||
raise newException(
|
||||
UselessPeerError, "Eth handshake for different network")
|
||||
|
||||
trace "Peer matches our network", peer
|
||||
peer.state.initialized = true
|
||||
peer.state.bestDifficulty = m.totalDifficulty
|
||||
peer.state.bestBlockHash = m.bestHash
|
||||
|
||||
handshake:
|
||||
# User message 0x00: Status.
|
||||
proc status(peer: Peer,
|
||||
ethVersionArg: uint,
|
||||
networkId: NetworkId,
|
||||
totalDifficulty: DifficultyInt,
|
||||
bestHash: Hash256,
|
||||
genesisHash: Hash256,
|
||||
forkId: ChainForkId) =
|
||||
trace trEthRecvReceived & "Status (0x00)", peer,
|
||||
networkId, totalDifficulty, bestHash=short(bestHash), genesisHash=short(genesisHash),
|
||||
forkHash=forkId.forkHash.toHex, forkNext=forkId.forkNext
|
||||
|
||||
# User message 0x01: NewBlockHashes.
|
||||
proc newBlockHashes(peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) =
|
||||
when trEthTraceGossipOk:
|
||||
trace trEthRecvReceived & "NewBlockHashes (0x01)", peer,
|
||||
hashes=hashes.len
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let res = ctx.handleNewBlockHashes(peer, hashes)
|
||||
handleHandlerError(res)
|
||||
|
||||
# User message 0x02: Transactions.
|
||||
proc transactions(peer: Peer, transactions: openArray[Transaction]) =
|
||||
when trEthTraceGossipOk:
|
||||
trace trEthRecvReceived & "Transactions (0x02)", peer,
|
||||
transactions=transactions.len
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let res = ctx.handleAnnouncedTxs(peer, transactions)
|
||||
handleHandlerError(res)
|
||||
|
||||
requestResponse:
|
||||
# User message 0x03: GetBlockHeaders.
|
||||
proc getBlockHeaders(peer: Peer, request: BlocksRequest) =
|
||||
when trEthTracePacketsOk:
|
||||
trace trEthRecvReceived & "GetBlockHeaders (0x03)", peer,
|
||||
count=request.maxResults
|
||||
|
||||
if request.maxResults > uint64(maxHeadersFetch):
|
||||
debug "GetBlockHeaders (0x03) requested too many headers",
|
||||
peer, requested=request.maxResults, max=maxHeadersFetch
|
||||
await peer.disconnect(BreachOfProtocol)
|
||||
return
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let headers = ctx.getBlockHeaders(request)
|
||||
handleHandlerError(headers)
|
||||
if headers.get.len > 0:
|
||||
trace trEthSendReplying & "with BlockHeaders (0x04)", peer,
|
||||
sent=headers.get.len, requested=request.maxResults
|
||||
else:
|
||||
trace trEthSendReplying & "EMPTY BlockHeaders (0x04)", peer,
|
||||
sent=0, requested=request.maxResults
|
||||
|
||||
await response.send(headers.get)
|
||||
|
||||
# User message 0x04: BlockHeaders.
|
||||
proc blockHeaders(p: Peer, headers: openArray[BlockHeader])
|
||||
|
||||
requestResponse:
|
||||
# User message 0x05: GetBlockBodies.
|
||||
proc getBlockBodies(peer: Peer, hashes: openArray[Hash256]) =
|
||||
trace trEthRecvReceived & "GetBlockBodies (0x05)", peer,
|
||||
hashes=hashes.len
|
||||
if hashes.len > maxBodiesFetch:
|
||||
debug "GetBlockBodies (0x05) requested too many bodies",
|
||||
peer, requested=hashes.len, max=maxBodiesFetch
|
||||
await peer.disconnect(BreachOfProtocol)
|
||||
return
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let bodies = ctx.getBlockBodies(hashes)
|
||||
handleHandlerError(bodies)
|
||||
if bodies.get.len > 0:
|
||||
trace trEthSendReplying & "with BlockBodies (0x06)", peer,
|
||||
sent=bodies.get.len, requested=hashes.len
|
||||
else:
|
||||
trace trEthSendReplying & "EMPTY BlockBodies (0x06)", peer,
|
||||
sent=0, requested=hashes.len
|
||||
|
||||
await response.send(bodies.get)
|
||||
|
||||
# User message 0x06: BlockBodies.
|
||||
proc blockBodies(peer: Peer, blocks: openArray[BlockBody])
|
||||
|
||||
# User message 0x07: NewBlock.
|
||||
proc newBlock(peer: Peer, blk: EthBlock, totalDifficulty: DifficultyInt) =
|
||||
# (Note, needs to use `EthBlock` instead of its alias `NewBlockAnnounce`
|
||||
# because either `p2pProtocol` or RLPx doesn't work with an alias.)
|
||||
when trEthTraceGossipOk:
|
||||
trace trEthRecvReceived & "NewBlock (0x07)", peer,
|
||||
totalDifficulty,
|
||||
blockNumber = blk.header.blockNumber,
|
||||
blockDifficulty = blk.header.difficulty
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let res = ctx.handleNewBlock(peer, blk, totalDifficulty)
|
||||
handleHandlerError(res)
|
||||
|
||||
# User message 0x08: NewPooledTransactionHashes.
|
||||
proc newPooledTransactionHashes(
|
||||
peer: Peer,
|
||||
txTypes: Blob,
|
||||
txSizes: openArray[int],
|
||||
txHashes: openArray[Hash256]
|
||||
) =
|
||||
when trEthTraceGossipOk:
|
||||
trace trEthRecvReceived & "NewPooledTransactionHashes (0x08)", peer,
|
||||
txTypes=txTypes.toHex, txSizes,
|
||||
hashes=txHashes.len
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let res = ctx.handleAnnouncedTxsHashes(peer, txTypes, txSizes, txHashes)
|
||||
handleHandlerError(res)
|
||||
|
||||
requestResponse:
|
||||
# User message 0x09: GetPooledTransactions.
|
||||
proc getPooledTransactions(peer: Peer, txHashes: openArray[Hash256]) =
|
||||
trace trEthRecvReceived & "GetPooledTransactions (0x09)", peer,
|
||||
hashes=txHashes.len
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let txs = ctx.getPooledTxs(txHashes)
|
||||
handleHandlerError(txs)
|
||||
if txs.get.len > 0:
|
||||
trace trEthSendReplying & "with PooledTransactions (0x0a)", peer,
|
||||
sent=txs.get.len, requested=txHashes.len
|
||||
else:
|
||||
trace trEthSendReplying & "EMPTY PooledTransactions (0x0a)", peer,
|
||||
sent=0, requested=txHashes.len
|
||||
|
||||
await response.send(txs.get)
|
||||
|
||||
# User message 0x0a: PooledTransactions.
|
||||
proc pooledTransactions(peer: Peer, transactions: openArray[Transaction])
|
||||
|
||||
# User message 0x0d: GetNodeData -- removed, was so 66ish
|
||||
# User message 0x0e: NodeData -- removed, was so 66ish
|
||||
|
||||
nextId 0x0f
|
||||
|
||||
requestResponse:
|
||||
# User message 0x0f: GetReceipts.
|
||||
proc getReceipts(peer: Peer, hashes: openArray[Hash256]) =
|
||||
trace trEthRecvReceived & "GetReceipts (0x0f)", peer,
|
||||
hashes=hashes.len
|
||||
|
||||
let ctx = peer.networkState()
|
||||
let rec = ctx.getReceipts(hashes)
|
||||
handleHandlerError(rec)
|
||||
if rec.get.len > 0:
|
||||
trace trEthSendReplying & "with Receipts (0x10)", peer,
|
||||
sent=rec.get.len, requested=hashes.len
|
||||
else:
|
||||
trace trEthSendReplying & "EMPTY Receipts (0x10)", peer,
|
||||
sent=0, requested=hashes.len
|
||||
|
||||
await response.send(rec.get)
|
||||
|
||||
# User message 0x10: Receipts.
|
||||
proc receipts(peer: Peer, receipts: openArray[seq[Receipt]])
|
|
@ -1,5 +1,5 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
|
@ -18,8 +18,12 @@ const
|
|||
VmName* = vmName()
|
||||
warningMsg = block:
|
||||
var rc = "*** Compiling with " & VmName
|
||||
when defined(legacy_eth66_enabled):
|
||||
rc &= ", legacy-eth/66"
|
||||
when defined(eth66_enabled):
|
||||
rc &= ", eth/66"
|
||||
when defined(eth67_enabled):
|
||||
rc &= ", eth/67"
|
||||
when defined(eth68_enabled):
|
||||
rc &= ", eth/68"
|
||||
when defined(chunked_rlpx_enabled):
|
||||
rc &= ", chunked-rlpx"
|
||||
when defined(boehmgc):
|
||||
|
|
|
@ -14,8 +14,8 @@ cliBuilder:
|
|||
./test_accounts_cache,
|
||||
./test_aristo,
|
||||
./test_coredb,
|
||||
./test_sync_snap,
|
||||
./test_rocksdb_timing,
|
||||
#./test_sync_snap, -- temporarily suspended
|
||||
#./test_rocksdb_timing, -- temporarily suspended
|
||||
./test_jwt_auth,
|
||||
./test_gas_meter,
|
||||
./test_memory,
|
||||
|
|
Loading…
Reference in New Issue