Commit 3ac4d2a4 authored by Barnabas Busa's avatar Barnabas Busa Committed by GitHub

feat: add grandine (#517)

parent 0615cd1b
participants:
- el_type: geth
cl_type: grandine
- el_type: nethermind
cl_type: grandine
- el_type: erigon
cl_type: grandine
- el_type: besu
cl_type: grandine
- el_type: reth
cl_type: grandine
- el_type: ethereumjs
cl_type: grandine
additional_services: []
...@@ -9,6 +9,8 @@ participants: ...@@ -9,6 +9,8 @@ participants:
cl_type: lighthouse cl_type: lighthouse
- el_type: reth - el_type: reth
cl_type: lodestar cl_type: lodestar
- el_type: ethereumjs - el_type: nimbus
cl_type: teku cl_type: teku
- el_type: ethereumjs
cl_type: grandine
additional_services: [] additional_services: []
...@@ -258,7 +258,7 @@ participants: ...@@ -258,7 +258,7 @@ participants:
# CL(Consensus Layer) Specific flags # CL(Consensus Layer) Specific flags
# The type of CL client that should be started # The type of CL client that should be started
# Valid values are nimbus, lighthouse, lodestar, teku, and prysm # Valid values are nimbus, lighthouse, lodestar, teku, prysm, and grandine
cl_type: lighthouse cl_type: lighthouse
# The Docker image that should be used for the CL client; leave blank to use the default for the client type # The Docker image that should be used for the CL client; leave blank to use the default for the client type
......
...@@ -3,6 +3,7 @@ lodestar = import_module("./lodestar/lodestar_launcher.star") ...@@ -3,6 +3,7 @@ lodestar = import_module("./lodestar/lodestar_launcher.star")
nimbus = import_module("./nimbus/nimbus_launcher.star") nimbus = import_module("./nimbus/nimbus_launcher.star")
prysm = import_module("./prysm/prysm_launcher.star") prysm = import_module("./prysm/prysm_launcher.star")
teku = import_module("./teku/teku_launcher.star") teku = import_module("./teku/teku_launcher.star")
grandine = import_module("./grandine/grandine_launcher.star")
constants = import_module("../package_io/constants.star") constants = import_module("../package_io/constants.star")
input_parser = import_module("../package_io/input_parser.star") input_parser = import_module("../package_io/input_parser.star")
...@@ -76,6 +77,14 @@ def launch( ...@@ -76,6 +77,14 @@ def launch(
), ),
"launch_method": teku.launch, "launch_method": teku.launch,
}, },
constants.CL_TYPE.grandine: {
"launcher": grandine.new_grandine_launcher(
el_cl_data,
jwt_file,
network_params.network,
),
"launch_method": grandine.launch,
},
} }
all_snooper_engine_contexts = [] all_snooper_engine_contexts = []
......
shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
cl_context = import_module("../../cl/cl_context.star")
node_metrics = import_module("../../node_metrics_info.star")
cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star")
constants = import_module("../../package_io/constants.star")
vc_shared = import_module("../../vc/shared.star")
# ---------------------------------- Beacon client -------------------------------------
# The Docker container runs as the "grandine" user so we can't write to root
BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/grandine/grandine-beacon-data"
# Port IDs
BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery"
BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery"
BEACON_HTTP_PORT_ID = "http"
BEACON_METRICS_PORT_ID = "metrics"
# Port ID reserved for an in-process validator HTTP API (registration currently
# disabled in get_beacon_config)
VALIDATOR_HTTP_PORT_ID = "http-validator"
# Port nums
BEACON_DISCOVERY_PORT_NUM = 9000
BEACON_HTTP_PORT_NUM = 4000
BEACON_METRICS_PORT_NUM = 8008
# The min/max CPU/memory that the beacon node can use
# (CPU in millicores, memory in MB — matching the units used in
# constants.RAM_CPU_OVERRIDES; maximums come from those per-network overrides)
BEACON_MIN_CPU = 50
BEACON_MIN_MEMORY = 1024
# Prometheus scrape path exposed on the metrics port
BEACON_METRICS_PATH = "/metrics"
# Mount point inside the container for the validator keystore artifact
VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER = "/validator-keys"
# NOTE(review): MIN_PEERS is not referenced anywhere in this file's visible
# code — possibly kept for parity with other launchers; confirm before removing
MIN_PEERS = 1
# Sentinel replaced by Kurtosis with the service's private IP at runtime
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
# Ports published on the beacon service (TCP+UDP discovery share one port number)
BEACON_USED_PORTS = {
    BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
        BEACON_DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
    ),
    BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
        BEACON_DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
    ),
    BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(
        BEACON_HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL
    ),
    BEACON_METRICS_PORT_ID: shared_utils.new_port_spec(
        BEACON_METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
    ),
}
# NOTE(review): ENTRYPOINT_ARGS is not referenced in this file's visible code
ENTRYPOINT_ARGS = ["sh", "-c"]
# Maps the package-wide log-level names to grandine's verbosity levels
VERBOSITY_LEVELS = {
    constants.GLOBAL_LOG_LEVEL.error: "ERROR",
    constants.GLOBAL_LOG_LEVEL.warn: "WARN",
    constants.GLOBAL_LOG_LEVEL.info: "INFO",
    constants.GLOBAL_LOG_LEVEL.debug: "DEBUG",
    constants.GLOBAL_LOG_LEVEL.trace: "TRACE",
}
def launch(
    plan,
    launcher,
    service_name,
    image,
    participant_log_level,
    global_log_level,
    bootnode_context,
    el_context,
    node_keystore_files,
    cl_min_cpu,
    cl_max_cpu,
    cl_min_mem,
    cl_max_mem,
    snooper_enabled,
    snooper_engine_context,
    blobber_enabled,
    blobber_extra_params,
    extra_params,
    extra_env_vars,
    extra_labels,
    persistent,
    cl_volume_size,
    cl_tolerations,
    participant_tolerations,
    global_tolerations,
    node_selectors,
    use_separate_vc,
):
    """Start a Grandine beacon node service and return its cl_context.

    Resolves the effective log level, tolerations, and resource limits
    (falling back to per-network defaults from constants.star whenever a
    requested limit is <= 0), adds the service to the plan, then queries
    /eth/v1/node/identity on the running node to capture its ENR, multiaddr,
    and peer id for downstream peering.
    """
    beacon_service_name = "{0}".format(service_name)

    log_level = input_parser.get_client_log_level_or_default(
        participant_log_level, global_log_level, VERBOSITY_LEVELS
    )
    tolerations = input_parser.get_client_tolerations(
        cl_tolerations, participant_tolerations, global_tolerations
    )

    # extra_params may be a proto repeated string; copy into a plain list
    extra_params = [param for param in extra_params]

    network_name = shared_utils.get_network_name(launcher.network)

    # Apply per-network defaults whenever a requested limit is unset (<= 0)
    cl_min_cpu = int(cl_min_cpu)
    if cl_min_cpu <= 0:
        cl_min_cpu = BEACON_MIN_CPU
    cl_max_cpu = int(cl_max_cpu)
    if cl_max_cpu <= 0:
        cl_max_cpu = constants.RAM_CPU_OVERRIDES[network_name]["grandine_max_cpu"]
    cl_min_mem = int(cl_min_mem)
    if cl_min_mem <= 0:
        cl_min_mem = BEACON_MIN_MEMORY
    cl_max_mem = int(cl_max_mem)
    if cl_max_mem <= 0:
        cl_max_mem = constants.RAM_CPU_OVERRIDES[network_name]["grandine_max_mem"]
    cl_volume_size = int(cl_volume_size)
    if cl_volume_size <= 0:
        cl_volume_size = constants.VOLUME_SIZE[network_name]["grandine_volume_size"]

    config = get_beacon_config(
        plan,
        launcher.el_cl_genesis_data,
        launcher.jwt_file,
        launcher.network,
        image,
        beacon_service_name,
        bootnode_context,
        el_context,
        log_level,
        node_keystore_files,
        cl_min_cpu,
        cl_max_cpu,
        cl_min_mem,
        cl_max_mem,
        snooper_enabled,
        snooper_engine_context,
        extra_params,
        extra_env_vars,
        extra_labels,
        use_separate_vc,
        persistent,
        cl_volume_size,
        tolerations,
        node_selectors,
    )

    beacon_service = plan.add_service(service_name, config)

    beacon_http_url = "http://{0}:{1}".format(
        beacon_service.ip_address,
        beacon_service.ports[BEACON_HTTP_PORT_ID].number,
    )
    beacon_metrics_url = "{0}:{1}".format(
        beacon_service.ip_address,
        beacon_service.ports[BEACON_METRICS_PORT_ID].number,
    )

    # Ask the running node for its own identity so peers can be wired up later
    identity_recipe = GetHttpRequestRecipe(
        endpoint="/eth/v1/node/identity",
        port_id=BEACON_HTTP_PORT_ID,
        extract={
            "enr": ".data.enr",
            "multiaddr": ".data.p2p_addresses[0]",
            "peer_id": ".data.peer_id",
        },
    )
    identity = plan.request(recipe=identity_recipe, service_name=service_name)

    metrics_info = node_metrics.new_node_metrics_info(
        service_name, BEACON_METRICS_PATH, beacon_metrics_url
    )

    return cl_context.new_cl_context(
        "grandine",
        identity["extract.enr"],
        beacon_service.ip_address,
        BEACON_HTTP_PORT_NUM,
        [metrics_info],
        beacon_service_name,
        multiaddr=identity["extract.multiaddr"],
        peer_id=identity["extract.peer_id"],
        snooper_enabled=snooper_enabled,
        snooper_engine_context=snooper_engine_context,
        validator_keystore_files_artifact_uuid=node_keystore_files.files_artifact_uuid
        if node_keystore_files
        else "",
    )
def get_beacon_config(
    plan,
    el_cl_genesis_data,
    jwt_file,
    network,
    image,
    service_name,
    bootnode_contexts,
    el_context,
    log_level,
    node_keystore_files,
    cl_min_cpu,
    cl_max_cpu,
    cl_min_mem,
    cl_max_mem,
    snooper_enabled,
    snooper_engine_context,
    extra_params,
    extra_env_vars,
    extra_labels,
    use_separate_vc,
    persistent,
    cl_volume_size,
    tolerations,
    node_selectors,
):
    """Assemble the Kurtosis ServiceConfig used to run a Grandine beacon node.

    Builds the grandine command line (network selection, engine-API endpoint,
    discovery/HTTP/metrics ports, and either bootnodes or a checkpoint-sync
    source depending on the network), mounts the genesis data and JWT secret,
    and — when this participant runs validators in-process (node_keystore_files
    is set and use_separate_vc is False) — also mounts the keystores and
    appends the validator flags.
    """
    # Grandine consumes teku-style keystore/secret directory layouts, hence
    # the teku_* relative paths on the keystore files artifact.
    validator_keys_dirpath = ""
    validator_secrets_dirpath = ""
    if node_keystore_files:
        validator_keys_dirpath = shared_utils.path_join(
            VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER,
            node_keystore_files.teku_keys_relative_dirpath,
        )
        validator_secrets_dirpath = shared_utils.path_join(
            VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER,
            node_keystore_files.teku_secrets_relative_dirpath,
        )

    # If snooper is enabled use the snooper engine context, otherwise use the
    # execution client context directly.
    if snooper_enabled:
        EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
            snooper_engine_context.ip_addr,
            snooper_engine_context.engine_rpc_port_num,
        )
    else:
        EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
            el_context.ip_addr,
            el_context.engine_rpc_port_num,
        )

    cmd = [
        # Public networks are selected by name; everything else is "custom"
        # and gets its config via --configuration-directory below.
        "--network={0}".format(
            network if network in constants.PUBLIC_NETWORKS else "custom"
        ),
        "--data-dir=" + BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER,
        "--http-address=0.0.0.0",
        "--http-port={0}".format(BEACON_HTTP_PORT_NUM),
        "--libp2p-port={0}".format(BEACON_DISCOVERY_PORT_NUM),
        "--discovery-port={0}".format(BEACON_DISCOVERY_PORT_NUM),
        "--jwt-secret=" + constants.JWT_MOUNT_PATH_ON_CONTAINER,
        "--eth1-rpc-urls=" + EXECUTION_ENGINE_ENDPOINT,
        # vvvvvvvvvvvvvvvvvvv REMOVE THESE WHEN CONNECTING TO EXTERNAL NET vvvvvvvvvvvvvvvvvvvvv
        "--disable-enr-auto-update",
        "--enr-address=" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
        "--enr-udp-port={0}".format(BEACON_DISCOVERY_PORT_NUM),
        "--enr-tcp-port={0}".format(BEACON_DISCOVERY_PORT_NUM),
        # ^^^^^^^^^^^^^^^^^^^ REMOVE THESE WHEN CONNECTING TO EXTERNAL NET ^^^^^^^^^^^^^^^^^^^^^
        # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
        "--metrics",
        "--metrics-address=0.0.0.0",
        "--metrics-port={0}".format(BEACON_METRICS_PORT_NUM),
        # ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
    ]

    # Flags appended only when validators run inside the beacon node process
    validator_flags = [
        "--keystore-dir=" + validator_keys_dirpath,
        "--keystore-password-file=" + validator_secrets_dirpath,
        "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT,
        "--graffiti=" + constants.CL_TYPE.grandine + "-" + el_context.client_name,
    ]

    if network not in constants.PUBLIC_NETWORKS:
        cmd.append(
            "--configuration-directory="
            + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER
        )
        # NOTE: this first branch already matches every shadowfork network, so
        # a previous duplicate `elif shadowfork in network` branch further down
        # was unreachable dead code and has been removed.
        if (
            network == constants.NETWORK_NAME.kurtosis
            or constants.NETWORK_NAME.shadowfork in network
        ):
            if bootnode_contexts != None:
                cmd.append(
                    "--boot-nodes="
                    + ",".join(
                        [
                            ctx.enr
                            for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES]
                        ]
                    )
                )
        elif network == constants.NETWORK_NAME.ephemery:
            cmd.append(
                "--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network]
            )
            cmd.append(
                "--boot-nodes="
                + shared_utils.get_devnet_enrs_list(
                    plan, el_cl_genesis_data.files_artifact_uuid
                )
            )
        else:  # Devnets
            # TODO Remove once checkpoint sync is working for verkle
            if constants.NETWORK_NAME.verkle not in network:
                cmd.append(
                    "--checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format(
                        network
                    )
                )
            cmd.append(
                "--boot-nodes="
                + shared_utils.get_devnet_enrs_list(
                    plan, el_cl_genesis_data.files_artifact_uuid
                )
            )
    else:  # Public networks
        cmd.append("--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network])

    if len(extra_params) > 0:
        # we do the list comprehension as the default extra_params is a proto repeated string
        cmd.extend([param for param in extra_params])

    files = {
        constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid,
        constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file,
    }

    beacon_validator_used_ports = {}
    beacon_validator_used_ports.update(BEACON_USED_PORTS)

    if node_keystore_files != None and not use_separate_vc:
        # NOTE(review): the validator HTTP API port (VALIDATOR_HTTP_PORT_ID) is
        # deliberately not registered here — confirm with upstream before
        # exposing it.
        cmd.extend(validator_flags)
        files[
            VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER
        ] = node_keystore_files.files_artifact_uuid

    if persistent:
        files[BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER] = Directory(
            persistent_key="data-{0}".format(service_name),
            size=cl_volume_size,
        )

    return ServiceConfig(
        image=image,
        ports=beacon_validator_used_ports,
        cmd=cmd,
        env_vars=extra_env_vars,
        files=files,
        private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
        ready_conditions=cl_node_ready_conditions.get_ready_conditions(
            BEACON_HTTP_PORT_ID
        ),
        min_cpu=cl_min_cpu,
        max_cpu=cl_max_cpu,
        min_memory=cl_min_mem,
        max_memory=cl_max_mem,
        labels=shared_utils.label_maker(
            constants.CL_TYPE.grandine,
            constants.CLIENT_TYPES.cl,
            image,
            el_context.client_name,
            extra_labels,
        ),
        # Container must run as root to write the data dir (see note at top of file)
        user=User(uid=0, gid=0),
        tolerations=tolerations,
        node_selectors=node_selectors,
    )
def new_grandine_launcher(
    el_cl_genesis_data,
    jwt_file,
    network,
):
    """Bundle the genesis data, JWT secret file, and network name into the
    immutable launcher struct consumed by launch()."""
    return struct(
        el_cl_genesis_data=el_cl_genesis_data,
        jwt_file=jwt_file,
        network=network,
    )
...@@ -15,6 +15,7 @@ CL_TYPE = struct( ...@@ -15,6 +15,7 @@ CL_TYPE = struct(
nimbus="nimbus", nimbus="nimbus",
prysm="prysm", prysm="prysm",
lodestar="lodestar", lodestar="lodestar",
grandine="grandine",
) )
VC_TYPE = struct( VC_TYPE = struct(
...@@ -147,6 +148,7 @@ VOLUME_SIZE = { ...@@ -147,6 +148,7 @@ VOLUME_SIZE = {
"teku_volume_size": 500000, # 500GB "teku_volume_size": 500000, # 500GB
"nimbus_volume_size": 500000, # 500GB "nimbus_volume_size": 500000, # 500GB
"lodestar_volume_size": 500000, # 500GB "lodestar_volume_size": 500000, # 500GB
"grandine_volume_size": 500000, # 500GB
}, },
"goerli": { "goerli": {
"geth_volume_size": 800000, # 800GB "geth_volume_size": 800000, # 800GB
...@@ -161,6 +163,7 @@ VOLUME_SIZE = { ...@@ -161,6 +163,7 @@ VOLUME_SIZE = {
"teku_volume_size": 300000, # 300GB "teku_volume_size": 300000, # 300GB
"nimbus_volume_size": 300000, # 300GB "nimbus_volume_size": 300000, # 300GB
"lodestar_volume_size": 300000, # 300GB "lodestar_volume_size": 300000, # 300GB
"grandine_volume_size": 300000, # 300GB
}, },
"sepolia": { "sepolia": {
"geth_volume_size": 300000, # 300GB "geth_volume_size": 300000, # 300GB
...@@ -175,6 +178,7 @@ VOLUME_SIZE = { ...@@ -175,6 +178,7 @@ VOLUME_SIZE = {
"teku_volume_size": 150000, # 150GB "teku_volume_size": 150000, # 150GB
"nimbus_volume_size": 150000, # 150GB "nimbus_volume_size": 150000, # 150GB
"lodestar_volume_size": 150000, # 150GB "lodestar_volume_size": 150000, # 150GB
"grandine_volume_size": 150000, # 150GB
}, },
"holesky": { "holesky": {
"geth_volume_size": 100000, # 100GB "geth_volume_size": 100000, # 100GB
...@@ -189,6 +193,7 @@ VOLUME_SIZE = { ...@@ -189,6 +193,7 @@ VOLUME_SIZE = {
"teku_volume_size": 100000, # 100GB "teku_volume_size": 100000, # 100GB
"nimbus_volume_size": 100000, # 100GB "nimbus_volume_size": 100000, # 100GB
"lodestar_volume_size": 100000, # 100GB "lodestar_volume_size": 100000, # 100GB
"grandine_volume_size": 100000, # 100GB
}, },
"devnets": { "devnets": {
"geth_volume_size": 100000, # 100GB "geth_volume_size": 100000, # 100GB
...@@ -203,6 +208,7 @@ VOLUME_SIZE = { ...@@ -203,6 +208,7 @@ VOLUME_SIZE = {
"teku_volume_size": 100000, # 100GB "teku_volume_size": 100000, # 100GB
"nimbus_volume_size": 100000, # 100GB "nimbus_volume_size": 100000, # 100GB
"lodestar_volume_size": 100000, # 100GB "lodestar_volume_size": 100000, # 100GB
"grandine_volume_size": 100000, # 100GB
}, },
"ephemery": { "ephemery": {
"geth_volume_size": 5000, # 5GB "geth_volume_size": 5000, # 5GB
...@@ -217,6 +223,7 @@ VOLUME_SIZE = { ...@@ -217,6 +223,7 @@ VOLUME_SIZE = {
"teku_volume_size": 1000, # 1GB "teku_volume_size": 1000, # 1GB
"nimbus_volume_size": 1000, # 1GB "nimbus_volume_size": 1000, # 1GB
"lodestar_volume_size": 1000, # 1GB "lodestar_volume_size": 1000, # 1GB
"grandine_volume_size": 1000, # 1GB
}, },
"kurtosis": { "kurtosis": {
"geth_volume_size": 5000, # 5GB "geth_volume_size": 5000, # 5GB
...@@ -231,6 +238,7 @@ VOLUME_SIZE = { ...@@ -231,6 +238,7 @@ VOLUME_SIZE = {
"teku_volume_size": 1000, # 1GB "teku_volume_size": 1000, # 1GB
"nimbus_volume_size": 1000, # 1GB "nimbus_volume_size": 1000, # 1GB
"lodestar_volume_size": 1000, # 1GB "lodestar_volume_size": 1000, # 1GB
"grandine_volume_size": 1000, # 1GB
}, },
} }
...@@ -260,6 +268,8 @@ RAM_CPU_OVERRIDES = { ...@@ -260,6 +268,8 @@ RAM_CPU_OVERRIDES = {
"nimbus_max_cpu": 4000, # 4 cores "nimbus_max_cpu": 4000, # 4 cores
"lodestar_max_mem": 16384, # 16GB "lodestar_max_mem": 16384, # 16GB
"lodestar_max_cpu": 4000, # 4 cores "lodestar_max_cpu": 4000, # 4 cores
"grandine_max_mem": 16384, # 16GB
"grandine_max_cpu": 4000, # 4 cores
}, },
"goerli": { "goerli": {
"geth_max_mem": 8192, # 8GB "geth_max_mem": 8192, # 8GB
...@@ -286,6 +296,8 @@ RAM_CPU_OVERRIDES = { ...@@ -286,6 +296,8 @@ RAM_CPU_OVERRIDES = {
"nimbus_max_cpu": 2000, # 2 cores "nimbus_max_cpu": 2000, # 2 cores
"lodestar_max_mem": 8192, # 8GB "lodestar_max_mem": 8192, # 8GB
"lodestar_max_cpu": 2000, # 2 cores "lodestar_max_cpu": 2000, # 2 cores
"grandine_max_mem": 8192, # 8GB
"grandine_max_cpu": 2000, # 2 cores
}, },
"sepolia": { "sepolia": {
"geth_max_mem": 4096, # 4GB "geth_max_mem": 4096, # 4GB
...@@ -312,6 +324,8 @@ RAM_CPU_OVERRIDES = { ...@@ -312,6 +324,8 @@ RAM_CPU_OVERRIDES = {
"nimbus_max_cpu": 1000, # 1 core "nimbus_max_cpu": 1000, # 1 core
"lodestar_max_mem": 4096, # 4GB "lodestar_max_mem": 4096, # 4GB
"lodestar_max_cpu": 1000, # 1 core "lodestar_max_cpu": 1000, # 1 core
"grandine_max_mem": 4096, # 4GB
"grandine_max_cpu": 1000, # 1 core
}, },
"holesky": { "holesky": {
"geth_max_mem": 8192, # 8GB "geth_max_mem": 8192, # 8GB
...@@ -338,6 +352,8 @@ RAM_CPU_OVERRIDES = { ...@@ -338,6 +352,8 @@ RAM_CPU_OVERRIDES = {
"nimbus_max_cpu": 2000, # 2 cores "nimbus_max_cpu": 2000, # 2 cores
"lodestar_max_mem": 8192, # 8GB "lodestar_max_mem": 8192, # 8GB
"lodestar_max_cpu": 2000, # 2 cores "lodestar_max_cpu": 2000, # 2 cores
"grandine_max_mem": 8192, # 8GB
"grandine_max_cpu": 2000, # 2 cores
}, },
"devnets": { "devnets": {
"geth_max_mem": 4096, # 4GB "geth_max_mem": 4096, # 4GB
...@@ -364,6 +380,8 @@ RAM_CPU_OVERRIDES = { ...@@ -364,6 +380,8 @@ RAM_CPU_OVERRIDES = {
"nimbus_max_cpu": 1000, # 1 core "nimbus_max_cpu": 1000, # 1 core
"lodestar_max_mem": 4096, # 4GB "lodestar_max_mem": 4096, # 4GB
"lodestar_max_cpu": 1000, # 1 core "lodestar_max_cpu": 1000, # 1 core
"grandine_max_mem": 4096, # 4GB
"grandine_max_cpu": 1000, # 1 core
}, },
"ephemery": { "ephemery": {
"geth_max_mem": 1024, # 1GB "geth_max_mem": 1024, # 1GB
...@@ -390,6 +408,8 @@ RAM_CPU_OVERRIDES = { ...@@ -390,6 +408,8 @@ RAM_CPU_OVERRIDES = {
"nimbus_max_cpu": 1000, # 1 core "nimbus_max_cpu": 1000, # 1 core
"lodestar_max_mem": 1024, # 1GB "lodestar_max_mem": 1024, # 1GB
"lodestar_max_cpu": 1000, # 1 core "lodestar_max_cpu": 1000, # 1 core
"grandine_max_mem": 1024, # 1GB
"grandine_max_cpu": 1000, # 1 core
}, },
"kurtosis": { "kurtosis": {
"geth_max_mem": 1024, # 1GB "geth_max_mem": 1024, # 1GB
...@@ -416,5 +436,7 @@ RAM_CPU_OVERRIDES = { ...@@ -416,5 +436,7 @@ RAM_CPU_OVERRIDES = {
"nimbus_max_cpu": 1000, # 1 core "nimbus_max_cpu": 1000, # 1 core
"lodestar_max_mem": 2048, # 2GB "lodestar_max_mem": 2048, # 2GB
"lodestar_max_cpu": 1000, # 1 core "lodestar_max_cpu": 1000, # 1 core
"grandine_max_mem": 2048, # 2GB
"grandine_max_cpu": 1000, # 1 core
}, },
} }
...@@ -20,6 +20,7 @@ DEFAULT_CL_IMAGES = { ...@@ -20,6 +20,7 @@ DEFAULT_CL_IMAGES = {
"nimbus": "statusim/nimbus-eth2:multiarch-latest", "nimbus": "statusim/nimbus-eth2:multiarch-latest",
"prysm": "gcr.io/prysmaticlabs/prysm/beacon-chain:latest", "prysm": "gcr.io/prysmaticlabs/prysm/beacon-chain:latest",
"lodestar": "chainsafe/lodestar:latest", "lodestar": "chainsafe/lodestar:latest",
"grandine": "ethpandaops/grandine:develop",
} }
DEFAULT_VC_IMAGES = { DEFAULT_VC_IMAGES = {
...@@ -28,6 +29,7 @@ DEFAULT_VC_IMAGES = { ...@@ -28,6 +29,7 @@ DEFAULT_VC_IMAGES = {
"nimbus": "statusim/nimbus-validator-client:multiarch-latest", "nimbus": "statusim/nimbus-validator-client:multiarch-latest",
"prysm": "gcr.io/prysmaticlabs/prysm/validator:latest", "prysm": "gcr.io/prysmaticlabs/prysm/validator:latest",
"teku": "consensys/teku:latest", "teku": "consensys/teku:latest",
"grandine": "sifrai/grandine:latest",
} }
MEV_BOOST_RELAY_DEFAULT_IMAGE = "flashbots/mev-boost-relay:0.27" MEV_BOOST_RELAY_DEFAULT_IMAGE = "flashbots/mev-boost-relay:0.27"
...@@ -381,6 +383,7 @@ def parse_network_params(input_args): ...@@ -381,6 +383,7 @@ def parse_network_params(input_args):
if cl_type in ( if cl_type in (
constants.CL_TYPE.nimbus, constants.CL_TYPE.nimbus,
constants.CL_TYPE.teku, constants.CL_TYPE.teku,
constants.CL_TYPE.grandine,
): ):
participant["use_separate_vc"] = False participant["use_separate_vc"] = False
else: else:
...@@ -391,6 +394,12 @@ def parse_network_params(input_args): ...@@ -391,6 +394,12 @@ def parse_network_params(input_args):
vc_type = cl_type vc_type = cl_type
participant["vc_type"] = vc_type participant["vc_type"] = vc_type
if (
cl_type == constants.CL_TYPE.grandine
and vc_type != constants.CL_TYPE.grandine
):
fail("grandine does not support running a different validator client")
vc_image = participant["vc_image"] vc_image = participant["vc_image"]
if vc_image == "": if vc_image == "":
if cl_image == "": if cl_image == "":
...@@ -753,6 +762,8 @@ def enrich_disable_peer_scoring(parsed_arguments_dict): ...@@ -753,6 +762,8 @@ def enrich_disable_peer_scoring(parsed_arguments_dict):
participant["cl_extra_params"].append("--Xp2p-gossip-scoring-enabled") participant["cl_extra_params"].append("--Xp2p-gossip-scoring-enabled")
if participant["cl_type"] == "lodestar": if participant["cl_type"] == "lodestar":
participant["cl_extra_params"].append("--disablePeerScoring") participant["cl_extra_params"].append("--disablePeerScoring")
if participant["cl_type"] == "grandine":
participant["cl_extra_params"].append("--disable-peer-scoring")
return parsed_arguments_dict return parsed_arguments_dict
...@@ -795,6 +806,8 @@ def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port, mev_typ ...@@ -795,6 +806,8 @@ def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port, mev_typ
participant["cl_extra_params"].append( participant["cl_extra_params"].append(
"--http-mev-relay={0}".format(mev_url) "--http-mev-relay={0}".format(mev_url)
) )
if participant["cl_type"] == "grandine":
participant["cl_extra_params"].append("--builder-url={0}".format(mev_url))
num_participants = len(parsed_arguments_dict["participants"]) num_participants = len(parsed_arguments_dict["participants"])
index_str = shared_utils.zfill_custom( index_str = shared_utils.zfill_custom(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment