Commit b72dad35 authored by Gyanendra Mishra, committed by GitHub

refactor!: merge eth-network-package onto eth2-package (#228)

Notes for reviewer (perhaps @leoporoli?)

1. `src/el`, `src/cl`, `src/node_metrics_info`, `src/participant.star`,
`src/participant_network.star`, `src/prelaunch_data_generator/`,
`src/snooper`, and `static_files` have just been lifted and shifted
2. `constants` is just a union of the two
3. `parse_input` is a best-effort union of the two; needs some 👀
4. `shared_utils.star` is a best-effort union of the two
5. `package_io`, `shared_utils`, etc. used to be at root level; they have been
shifted
6. `eth-network-package` has been changed to `eth2-package` throughout all
`.star` files (a mechanical rename; see the sketch below)
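
For reference, a rename of that shape can be done with a one-liner along these lines (illustrative only, GNU sed assumed; not necessarily the exact command used):

```sh
grep -rl "eth-network-package" --include="*.star" . | xargs sed -i "s/eth-network-package/eth2-package/g"
```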

Pending after PR (perhaps @leeederek?)
1. Get the READMEs to make sense together
2. Get the run.gif over here?

Note to testers

1. Clone the package
2. Check out this PR
3. Run `kurtosis run .` with args of choice
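
For example, with a minimal inline config (the args JSON here is hypothetical; any supported participant combination works):

```sh
kurtosis run . '{"participants": [{"el_client_type": "geth", "cl_client_type": "lighthouse"}]}'
```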
parent b943370b
@@ -10,9 +10,10 @@ executors:
 parameters:
   # To enable/disabled the check_latest_version workflow execution which will be triggered by this scheduled pipeline: https://app.circleci.com/settings/project/github/kurtosis-tech/eth2-package/triggers
+  # TODO revert this - setting this to true to get all existing tests to run on CI during merge
   should-enable-check-latest-version-workflow:
     type: boolean
-    default: false
+    default: true
   # To enable/disabled the check_code workflow execution which will be triggered by the PR's checkers
   should-enable-build-workflow:
     type: boolean
...
@@ -2,16 +2,17 @@
 parse_input = import_module(
     "github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
 )
+participant_network = import_module(
+    "github.com/kurtosis-tech/eth2-package/src/participant_network.star"
+)
 static_files = import_module(
     "github.com/kurtosis-tech/eth2-package/src/static_files/static_files.star"
 )
 genesis_constants = import_module(
-    "github.com/kurtosis-tech/eth-network-package/src/prelaunch_data_generator/genesis_constants/genesis_constants.star"
+    "github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/genesis_constants/genesis_constants.star"
 )
-eth_network_module = import_module(
-    "github.com/kurtosis-tech/eth-network-package/main.star"
-)
 transaction_spammer = import_module(
     "github.com/kurtosis-tech/eth2-package/src/transaction_spammer/transaction_spammer.star"
 )
@@ -66,13 +67,12 @@ PATH_TO_PARSED_BEACON_STATE = "/genesis/output/parsedBeaconState.json"
 def run(plan, args={}):
-    args_with_right_defaults, args_with_defaults_dict = parse_input.parse_input(
-        plan, args
-    )
+    args_with_right_defaults = parse_input.parse_input(plan, args)
     num_participants = len(args_with_right_defaults.participants)
     network_params = args_with_right_defaults.network_params
     mev_params = args_with_right_defaults.mev_params
+    parallel_keystore_generation = args_with_right_defaults.parallel_keystore_generation
 
     grafana_datasource_config_template = read_file(
         static_files.GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH
@@ -95,7 +95,20 @@ def run(plan, args={}):
         all_participants,
         cl_genesis_timestamp,
         genesis_validators_root,
-    ) = eth_network_module.run(plan, args_with_defaults_dict)
+    ) = participant_network.launch_participant_network(
+        plan,
+        args_with_right_defaults.participants,
+        network_params,
+        args_with_right_defaults.global_client_log_level,
+        parallel_keystore_generation,
+    )
+
+    plan.print(
+        "NODE JSON RPC URI: '{0}:{1}'".format(
+            all_participants[0].el_client_context.ip_addr,
+            all_participants[0].el_client_context.rpc_port_num,
+        )
+    )
 
     all_el_client_contexts = []
     all_cl_client_contexts = []
...
def new_cl_client_context(
client_name,
enr,
ip_addr,
http_port_num,
cl_nodes_metrics_info,
beacon_service_name,
validator_service_name="",
multiaddr="",
peer_id="",
snooper_enabled=False,
snooper_engine_context=None,
):
return struct(
client_name=client_name,
enr=enr,
ip_addr=ip_addr,
http_port_num=http_port_num,
cl_nodes_metrics_info=cl_nodes_metrics_info,
beacon_service_name=beacon_service_name,
validator_service_name=validator_service_name,
multiaddr=multiaddr,
peer_id=peer_id,
snooper_enabled=snooper_enabled,
snooper_engine_context=snooper_engine_context,
)
def get_ready_conditions(port_id):
recipe = GetHttpRequestRecipe(endpoint="/eth/v1/node/health", port_id=port_id)
ready_conditions = ReadyCondition(
recipe=recipe,
field="code",
assertion="IN",
target_value=[200, 206],
timeout="15m",
)
return ready_conditions
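# Illustrative usage (this is how the launchers below consume it):
#
#   ready_conditions = cl_node_ready_conditions.get_ready_conditions(HTTP_PORT_ID)
#
# plan.add_service will then wait until GET /eth/v1/node/health on that port
# returns 200 (healthy) or 206 (syncing), timing out after 15 minutes.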
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
cl_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_client_context.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
LIGHTHOUSE_BINARY_COMMAND = "lighthouse"
GENESIS_DATA_MOUNTPOINT_ON_CLIENTS = "/genesis"
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS = "/validator-keys"
RUST_BACKTRACE_ENVVAR_NAME = "RUST_BACKTRACE"
RUST_FULL_BACKTRACE_KEYWORD = "full"
# ---------------------------------- Beacon client -------------------------------------
CONSENSUS_DATA_DIRPATH_ON_BEACON_SERVICE_CONTAINER = "/consensus-data"
# Port IDs
BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery"
BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery"
BEACON_HTTP_PORT_ID = "http"
BEACON_METRICS_PORT_ID = "metrics"
# Port nums
BEACON_DISCOVERY_PORT_NUM = 9000
BEACON_HTTP_PORT_NUM = 4000
BEACON_METRICS_PORT_NUM = 5054
# The min/max CPU/memory that the beacon node can use
BEACON_MIN_CPU = 50
BEACON_MAX_CPU = 1000
BEACON_MIN_MEMORY = 256
BEACON_MAX_MEMORY = 1024
# ---------------------------------- Validator client -------------------------------------
VALIDATOR_HTTP_PORT_ID = "http"
VALIDATOR_METRICS_PORT_ID = "metrics"
VALIDATOR_HTTP_PORT_NUM = 5042
VALIDATOR_METRICS_PORT_NUM = 5064
VALIDATOR_HTTP_PORT_WAIT_DISABLED = None
METRICS_PATH = "/metrics"
VALIDATOR_SUFFIX_SERVICE_NAME = "validator"
# The min/max CPU/memory that the validator node can use
VALIDATOR_MIN_CPU = 50
VALIDATOR_MAX_CPU = 300
VALIDATOR_MIN_MEMORY = 128
VALIDATOR_MAX_MEMORY = 512
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
BEACON_USED_PORTS = {
BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
BEACON_DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
BEACON_DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(
BEACON_HTTP_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
),
BEACON_METRICS_PORT_ID: shared_utils.new_port_spec(
BEACON_METRICS_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
),
}
VALIDATOR_USED_PORTS = {
VALIDATOR_HTTP_PORT_ID: shared_utils.new_port_spec(
VALIDATOR_HTTP_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.NOT_PROVIDED_APPLICATION_PROTOCOL,
VALIDATOR_HTTP_PORT_WAIT_DISABLED,
),
VALIDATOR_METRICS_PORT_ID: shared_utils.new_port_spec(
VALIDATOR_METRICS_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
),
}
LIGHTHOUSE_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "error",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "info",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
bootnode_contexts,
el_client_context,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
extra_validator_params,
):
beacon_node_service_name = "{0}".format(service_name)
validator_node_service_name = "{0}-{1}".format(
service_name, VALIDATOR_SUFFIX_SERVICE_NAME
)
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, LIGHTHOUSE_LOG_LEVELS
)
bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU
bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU
bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY
bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY
# Launch Beacon node
beacon_config = get_beacon_config(
launcher.genesis_data,
image,
bootnode_contexts,
el_client_context,
log_level,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
)
beacon_service = plan.add_service(beacon_node_service_name, beacon_config)
beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(
beacon_service.ip_address, beacon_http_port.number
)
# Launch validator node if we have a keystore
validator_service = None
if node_keystore_files != None:
v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU
v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU
v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY
v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY
validator_config = get_validator_config(
launcher.genesis_data,
image,
log_level,
beacon_http_url,
node_keystore_files,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
extra_validator_params,
)
validator_service = plan.add_service(
validator_node_service_name, validator_config
)
# TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=BEACON_HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.discovery_addresses[0]",
"peer_id": ".data.peer_id",
},
)
response = plan.request(
recipe=beacon_node_identity_recipe, service_name=beacon_node_service_name
)
beacon_node_enr = response["extract.enr"]
beacon_multiaddr = response["extract.multiaddr"]
beacon_peer_id = response["extract.peer_id"]
beacon_metrics_port = beacon_service.ports[BEACON_METRICS_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
beacon_node_metrics_info = node_metrics.new_node_metrics_info(
beacon_node_service_name, METRICS_PATH, beacon_metrics_url
)
nodes_metrics_info = [beacon_node_metrics_info]
if validator_service:
validator_metrics_port = validator_service.ports[VALIDATOR_METRICS_PORT_ID]
validator_metrics_url = "{0}:{1}".format(
validator_service.ip_address, validator_metrics_port.number
)
validator_node_metrics_info = node_metrics.new_node_metrics_info(
validator_node_service_name, METRICS_PATH, validator_metrics_url
)
nodes_metrics_info.append(validator_node_metrics_info)
return cl_client_context.new_cl_client_context(
"lighthouse",
beacon_node_enr,
beacon_service.ip_address,
BEACON_HTTP_PORT_NUM,
nodes_metrics_info,
beacon_node_service_name,
validator_node_service_name,
beacon_multiaddr,
beacon_peer_id,
snooper_enabled,
snooper_engine_context,
)
def get_beacon_config(
genesis_data,
image,
boot_cl_client_ctxs,
el_client_context,
log_level,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_params,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
snooper_engine_context.ip_addr,
snooper_engine_context.engine_rpc_port_num,
)
else:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.engine_rpc_port_num,
)
# For some reason, Lighthouse takes in the parent directory of the config file (rather than the path to the config file itself)
genesis_config_parent_dirpath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNTPOINT_ON_CLIENTS,
shared_utils.path_dir(genesis_data.config_yml_rel_filepath),
)
jwt_secret_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNTPOINT_ON_CLIENTS, genesis_data.jwt_secret_rel_filepath
)
# NOTE: If connecting to the merge devnet remotely we DON'T want the following flags; when they're not set, the node's external IP address is auto-detected
# from the peers it communicates with but when they're set they basically say "override the autodetection and
# use what I specify instead." This requires having a know external IP address and port, which we definitely won't
# have with a network running in Kurtosis.
# "--disable-enr-auto-update",
# "--enr-address=" + externalIpAddress,
# fmt.Sprintf("--enr-udp-port=%v", BEACON_DISCOVERY_PORT_NUM),
# fmt.Sprintf("--enr-tcp-port=%v", beaconDiscoveryPortNum),
cmd = [
LIGHTHOUSE_BINARY_COMMAND,
"beacon_node",
"--debug-level=" + log_level,
"--datadir=" + CONSENSUS_DATA_DIRPATH_ON_BEACON_SERVICE_CONTAINER,
"--testnet-dir=" + genesis_config_parent_dirpath_on_client,
# vvvvvvvvvvvvvvvvvvv REMOVE THESE WHEN CONNECTING TO EXTERNAL NET vvvvvvvvvvvvvvvvvvvvv
"--disable-enr-auto-update",
"--enr-address=" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--enr-udp-port={0}".format(BEACON_DISCOVERY_PORT_NUM),
"--enr-tcp-port={0}".format(BEACON_DISCOVERY_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ REMOVE THESE WHEN CONNECTING TO EXTERNAL NET ^^^^^^^^^^^^^^^^^^^^^
"--listen-address=0.0.0.0",
"--port={0}".format(
BEACON_DISCOVERY_PORT_NUM
), # NOTE: Remove for connecting to external net!
"--http",
"--http-address=0.0.0.0",
"--http-port={0}".format(BEACON_HTTP_PORT_NUM),
"--http-allow-sync-stalled",
"--slots-per-restore-point={0}".format(32 if package_io.ARCHIVE_MODE else 8192),
# NOTE: This comes from:
# https://github.com/sigp/lighthouse/blob/7c88f582d955537f7ffff9b2c879dcf5bf80ce13/scripts/local_testnet/beacon_node.sh
# and the option says it's "useful for testing in smaller networks" (unclear what happens in larger networks)
"--disable-packet-filter",
"--execution-endpoints=" + EXECUTION_ENGINE_ENDPOINT,
"--jwt-secrets=" + jwt_secret_filepath,
"--suggested-fee-recipient=" + package_io.VALIDATING_REWARDS_ACCOUNT,
# Set per Pari's recommendation to reduce noise in the logs
"--subscribe-all-subnets",
# vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--metrics",
"--metrics-address=0.0.0.0",
"--metrics-allow-origin=*",
"--metrics-port={0}".format(BEACON_METRICS_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
if boot_cl_client_ctxs != None:
cmd.append(
"--boot-nodes="
+ ",".join(
[ctx.enr for ctx in boot_cl_client_ctxs[: package_io.MAX_ENR_ENTRIES]]
)
)
cmd.append(
"--trusted-peers="
+ ",".join(
[
ctx.peer_id
for ctx in boot_cl_client_ctxs[: package_io.MAX_ENR_ENTRIES]
]
)
)
if len(extra_params) > 0:
# this is a repeated<proto type>, so we convert it into a Starlark list
cmd.extend([param for param in extra_params])
recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity", port_id=BEACON_HTTP_PORT_ID
)
ready_conditions = ReadyCondition(
recipe=recipe,
field="code",
assertion="IN",
target_value=[200, 206],
timeout="15m",
)
return ServiceConfig(
image=image,
ports=BEACON_USED_PORTS,
cmd=cmd,
files={GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: genesis_data.files_artifact_uuid},
env_vars={RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD},
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=ready_conditions,
min_cpu=bn_min_cpu,
max_cpu=bn_max_cpu,
min_memory=bn_min_mem,
max_memory=bn_max_mem,
)
def get_validator_config(
genesis_data,
image,
log_level,
beacon_client_http_url,
node_keystore_files,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
extra_params,
):
# For some reason, Lighthouse takes in the parent directory of the config file (rather than the path to the config file itself)
genesis_config_parent_dirpath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNTPOINT_ON_CLIENTS,
shared_utils.path_dir(genesis_data.config_yml_rel_filepath),
)
validator_keys_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS,
node_keystore_files.raw_keys_relative_dirpath,
)
validator_secrets_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS,
node_keystore_files.raw_secrets_relative_dirpath,
)
cmd = [
"lighthouse",
"validator_client",
"--debug-level=" + log_level,
"--testnet-dir=" + genesis_config_parent_dirpath_on_client,
"--validators-dir=" + validator_keys_dirpath,
# NOTE: When secrets-dir is specified, we can't add the --data-dir flag
"--secrets-dir=" + validator_secrets_dirpath,
# The node won't have a slashing protection database and will fail to start otherwise
"--init-slashing-protection",
"--http",
"--unencrypted-http-transport",
"--http-address=0.0.0.0",
"--http-port={0}".format(VALIDATOR_HTTP_PORT_NUM),
"--beacon-nodes=" + beacon_client_http_url,
# "--enable-doppelganger-protection", // Disabled to not have to wait 2 epochs before validator can start
# burn address - If unset, the validator will scream in its logs
"--suggested-fee-recipient=" + package_io.VALIDATING_REWARDS_ACCOUNT,
# vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--metrics",
"--metrics-address=0.0.0.0",
"--metrics-allow-origin=*",
"--metrics-port={0}".format(VALIDATOR_METRICS_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
if len(extra_params) > 0:
cmd.extend([param for param in extra_params])
return ServiceConfig(
image=image,
ports=VALIDATOR_USED_PORTS,
cmd=cmd,
files={
GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: genesis_data.files_artifact_uuid,
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS: node_keystore_files.files_artifact_uuid,
},
env_vars={RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD},
min_cpu=v_min_cpu,
max_cpu=v_max_cpu,
min_memory=v_min_mem,
max_memory=v_max_mem,
)
def new_lighthouse_launcher(cl_genesis_data):
return struct(
genesis_data=cl_genesis_data,
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
cl_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_client_context.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
cl_node_ready_conditions = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_node_ready_conditions.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/genesis"
# ---------------------------------- Beacon client -------------------------------------
CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/consensus-data"
# Port IDs
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
HTTP_PORT_ID = "http"
METRICS_PORT_ID = "metrics"
VALIDATOR_METRICS_PORT_ID = "validator-metrics"
# Port nums
DISCOVERY_PORT_NUM = 9000
HTTP_PORT_NUM = 4000
METRICS_PORT_NUM = 8008
# The min/max CPU/memory that the beacon node can use
BEACON_MIN_CPU = 50
BEACON_MAX_CPU = 1000
BEACON_MIN_MEMORY = 256
BEACON_MAX_MEMORY = 1024
# ---------------------------------- Validator client -------------------------------------
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/validator-keys"
# The min/max CPU/memory that the validator node can use
VALIDATOR_MIN_CPU = 50
VALIDATOR_MAX_CPU = 300
VALIDATOR_MIN_MEMORY = 128
VALIDATOR_MAX_MEMORY = 512
VALIDATOR_SUFFIX_SERVICE_NAME = "validator"
METRICS_PATH = "/metrics"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
BEACON_USED_PORTS = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
VALIDATOR_USED_PORTS = {
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
LODESTAR_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "error",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "info",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
bootnode_contexts,
el_client_context,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
extra_validator_params,
):
beacon_node_service_name = "{0}".format(service_name)
validator_node_service_name = "{0}-{1}".format(
service_name, VALIDATOR_SUFFIX_SERVICE_NAME
)
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, LODESTAR_LOG_LEVELS
)
bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU
bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU
bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY
bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY
# Launch Beacon node
beacon_config = get_beacon_config(
launcher.cl_genesis_data,
image,
bootnode_contexts,
el_client_context,
log_level,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
)
beacon_service = plan.add_service(beacon_node_service_name, beacon_config)
beacon_http_port = beacon_service.ports[HTTP_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(
beacon_service.ip_address, beacon_http_port.number
)
# Launch validator node if we have a keystore
if node_keystore_files != None:
v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU
v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU
v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY
v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY
validator_config = get_validator_config(
validator_node_service_name,
launcher.cl_genesis_data,
image,
log_level,
beacon_http_url,
node_keystore_files,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
extra_validator_params,
)
plan.add_service(validator_node_service_name, validator_config)
# TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.p2p_addresses[-1]",
"peer_id": ".data.peer_id",
},
)
response = plan.request(
recipe=beacon_node_identity_recipe, service_name=beacon_node_service_name
)
beacon_node_enr = response["extract.enr"]
beacon_multiaddr = response["extract.multiaddr"]
beacon_peer_id = response["extract.peer_id"]
beacon_metrics_port = beacon_service.ports[METRICS_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
beacon_node_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, beacon_metrics_url
)
nodes_metrics_info = [beacon_node_metrics_info]
return cl_client_context.new_cl_client_context(
"lodestar",
beacon_node_enr,
beacon_service.ip_address,
HTTP_PORT_NUM,
nodes_metrics_info,
beacon_node_service_name,
validator_node_service_name,
beacon_multiaddr,
beacon_peer_id,
snooper_enabled,
snooper_engine_context,
)
def get_beacon_config(
genesis_data,
image,
bootnode_contexts,
el_client_context,
log_level,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_params,
):
el_client_rpc_url_str = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.rpc_port_num,
)
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
snooper_engine_context.ip_addr,
snooper_engine_context.engine_rpc_port_num,
)
else:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.engine_rpc_port_num,
)
genesis_config_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.config_yml_rel_filepath,
)
genesis_ssz_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.genesis_ssz_rel_filepath,
)
jwt_secret_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.jwt_secret_rel_filepath,
)
cmd = [
"beacon",
"--logLevel=" + log_level,
"--port={0}".format(DISCOVERY_PORT_NUM),
"--discoveryPort={0}".format(DISCOVERY_PORT_NUM),
"--dataDir=" + CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER,
"--paramsFile=" + genesis_config_filepath,
"--genesisStateFile=" + genesis_ssz_filepath,
"--eth1.depositContractDeployBlock=0",
"--network.connectToDiscv5Bootnodes=true",
"--discv5=true",
"--eth1=true",
"--eth1.providerUrls=" + el_client_rpc_url_str,
"--execution.urls=" + EXECUTION_ENGINE_ENDPOINT,
"--rest=true",
"--rest.address=0.0.0.0",
"--rest.namespace=*",
"--rest.port={0}".format(HTTP_PORT_NUM),
"--nat=true",
"--enr.ip=" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--enr.tcp={0}".format(DISCOVERY_PORT_NUM),
"--enr.udp={0}".format(DISCOVERY_PORT_NUM),
# Set per Pari's recommendation to reduce noise in the logs
"--subscribeAllSubnets=true",
"--jwt-secret={0}".format(jwt_secret_filepath),
# vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--metrics",
"--metrics.address=0.0.0.0",
"--metrics.port={0}".format(METRICS_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
if bootnode_contexts != None:
cmd.append(
"--bootnodes="
+ ",".join(
[ctx.enr for ctx in bootnode_contexts[: package_io.MAX_ENR_ENTRIES]]
)
)
if len(extra_params) > 0:
# this is a repeated<proto type>, so we convert it into a Starlark list
cmd.extend([param for param in extra_params])
return ServiceConfig(
image=image,
ports=BEACON_USED_PORTS,
cmd=cmd,
files={
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: genesis_data.files_artifact_uuid
},
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(HTTP_PORT_ID),
min_cpu=bn_min_cpu,
max_cpu=bn_max_cpu,
min_memory=bn_min_mem,
max_memory=bn_max_mem,
)
def get_validator_config(
service_name,
genesis_data,
image,
log_level,
beacon_client_http_url,
node_keystore_files,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
extra_params,
):
root_dirpath = shared_utils.path_join(
CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER, service_name
)
genesis_config_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.config_yml_rel_filepath,
)
validator_keys_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
node_keystore_files.raw_keys_relative_dirpath,
)
validator_secrets_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
node_keystore_files.raw_secrets_relative_dirpath,
)
cmd = [
"validator",
"--logLevel=" + log_level,
"--dataDir=" + root_dirpath,
"--paramsFile=" + genesis_config_filepath,
"--beaconNodes=" + beacon_client_http_url,
"--keystoresDir=" + validator_keys_dirpath,
"--secretsDir=" + validator_secrets_dirpath,
"--suggestedFeeRecipient=" + package_io.VALIDATING_REWARDS_ACCOUNT,
# vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--metrics",
"--metrics.address=0.0.0.0",
"--metrics.port={0}".format(METRICS_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
if len(extra_params) > 0:
# this is a repeated<proto type>, so we convert it into a Starlark list
cmd.extend([param for param in extra_params])
return ServiceConfig(
image=image,
ports=VALIDATOR_USED_PORTS,
cmd=cmd,
files={
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: genesis_data.files_artifact_uuid,
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: node_keystore_files.files_artifact_uuid,
},
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=v_min_cpu,
max_cpu=v_max_cpu,
min_memory=v_min_mem,
max_memory=v_max_mem,
)
def new_lodestar_launcher(cl_genesis_data):
return struct(
cl_genesis_data=cl_genesis_data,
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
cl_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_client_context.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
cl_node_ready_conditions = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_node_ready_conditions.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
GENESIS_DATA_MOUNTPOINT_ON_CLIENT = "/genesis-data"
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENT = "/validator-keys"
# Port IDs
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
HTTP_PORT_ID = "http"
METRICS_PORT_ID = "metrics"
# Port nums
DISCOVERY_PORT_NUM = 9000
HTTP_PORT_NUM = 4000
METRICS_PORT_NUM = 8008
# The min/max CPU/memory that the beacon node can use
BEACON_MIN_CPU = 50
BEACON_MAX_CPU = 1000
BEACON_MIN_MEMORY = 128
BEACON_MAX_MEMORY = 1024
# Nimbus requires that its data directory already exists (because it expects you to bind-mount it), so we
# have to create it
CONSENSUS_DATA_DIRPATH_IN_SERVICE_CONTAINER = "$HOME/consensus-data"
# Nimbus wants the data dir to have these perms
CONSENSUS_DATA_DIR_PERMS_STR = "0700"
# The entrypoint the image normally starts with (we need to override the entrypoint to create the
# consensus data directory on the image before it starts)
DEFAULT_IMAGE_ENTRYPOINT = "/home/user/nimbus-eth2/build/nimbus_beacon_node"
# Nimbus needs write access to the validator keys/secrets directories, and b/c the module container runs as root
# while the Nimbus container does not, we can't just point the Nimbus binary to the paths in the shared dir because
# it won't be able to open them. To get around this, we copy the validator keys/secrets to a path inside the Nimbus
# container that is owned by the container's user
VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER = "$HOME/validator-keys"
VALIDATOR_SECRETS_DIRPATH_ON_SERVICE_CONTAINER = "$HOME/validator-secrets"
METRICS_PATH = "/metrics"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
NIMBUS_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE",
}
ENTRYPOINT_ARGS = ["sh", "-c"]
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
bootnode_contexts,
el_client_context,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
extra_validator_params,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, NIMBUS_LOG_LEVELS
)
extra_params = [param for param in extra_beacon_params] + [
param for param in extra_validator_params
]
bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU
bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU
bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY
bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY
# Set the beacon node's min/max CPU/memory to the max of the beacon-node and validator values; if none are defined, the default beacon values above are used
bn_min_cpu = int(v_min_cpu) if (int(v_min_cpu) > bn_min_cpu) else bn_min_cpu
bn_max_cpu = int(v_max_cpu) if (int(v_max_cpu) > bn_max_cpu) else bn_max_cpu
bn_min_mem = int(v_min_mem) if (int(v_min_mem) > bn_min_mem) else bn_min_mem
bn_max_mem = int(v_max_mem) if (int(v_max_mem) > bn_max_mem) else bn_max_mem
config = get_config(
launcher.cl_genesis_data,
image,
bootnode_contexts,
el_client_context,
log_level,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_params,
)
nimbus_service = plan.add_service(service_name, config)
cl_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.discovery_addresses[0]",
"peer_id": ".data.peer_id",
},
)
response = plan.request(recipe=cl_node_identity_recipe, service_name=service_name)
node_enr = response["extract.enr"]
multiaddr = response["extract.multiaddr"]
peer_id = response["extract.peer_id"]
metrics_port = nimbus_service.ports[METRICS_PORT_ID]
metrics_url = "{0}:{1}".format(nimbus_service.ip_address, metrics_port.number)
nimbus_node_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, metrics_url
)
nodes_metrics_info = [nimbus_node_metrics_info]
return cl_client_context.new_cl_client_context(
"nimbus",
node_enr,
nimbus_service.ip_address,
HTTP_PORT_NUM,
nodes_metrics_info,
service_name,
multiaddr=multiaddr,
peer_id=peer_id,
snooper_enabled=snooper_enabled,
snooper_engine_context=snooper_engine_context,
)
def get_config(
genesis_data,
image,
bootnode_contexts,
el_client_context,
log_level,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_params,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
snooper_engine_context.ip_addr,
snooper_engine_context.engine_rpc_port_num,
)
else:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.engine_rpc_port_num,
)
# For some reason, Nimbus takes in the parent directory of the config file (rather than the path to the config file itself)
genesis_config_parent_dirpath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNTPOINT_ON_CLIENT,
shared_utils.path_dir(genesis_data.config_yml_rel_filepath),
)
jwt_secret_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNTPOINT_ON_CLIENT, genesis_data.jwt_secret_rel_filepath
)
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
if node_keystore_files != None:
validator_keys_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENT,
node_keystore_files.nimbus_keys_relative_dirpath,
)
validator_secrets_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENT,
node_keystore_files.raw_secrets_relative_dirpath,
)
# Sources for these flags:
# 1) https://github.com/status-im/nimbus-eth2/blob/stable/scripts/launch_local_testnet.sh
# 2) https://github.com/status-im/nimbus-eth2/blob/67ab477a27e358d605e99bffeb67f98d18218eca/scripts/launch_local_testnet.sh#L417
# WARNING: Do NOT set the --max-peers flag here, as doing so to the exact number of nodes seems to mess things up!
# See: https://github.com/kurtosis-tech/eth2-merge-kurtosis-module/issues/26
validator_copy = [
"mkdir",
CONSENSUS_DATA_DIRPATH_IN_SERVICE_CONTAINER,
"-m",
CONSENSUS_DATA_DIR_PERMS_STR,
"&&",
# TODO(old) COMMENT THIS OUT?
"cp",
"-R",
validator_keys_dirpath,
VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER,
"&&",
"cp",
"-R",
validator_secrets_dirpath,
VALIDATOR_SECRETS_DIRPATH_ON_SERVICE_CONTAINER,
"&&",
# If we don't do this chmod, Nimbus will spend a crazy amount of time manually correcting them
# before it starts
"chmod",
"600",
VALIDATOR_SECRETS_DIRPATH_ON_SERVICE_CONTAINER + "/*",
"&&",
]
validator_flags = [
"--validators-dir=" + VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER,
"--secrets-dir=" + VALIDATOR_SECRETS_DIRPATH_ON_SERVICE_CONTAINER,
"--suggested-fee-recipient=" + package_io.VALIDATING_REWARDS_ACCOUNT,
]
beacon_start = [
DEFAULT_IMAGE_ENTRYPOINT,
"--non-interactive=true",
"--log-level=" + log_level,
"--udp-port={0}".format(DISCOVERY_PORT_NUM),
"--tcp-port={0}".format(DISCOVERY_PORT_NUM),
"--network=" + genesis_config_parent_dirpath_on_client,
"--data-dir=" + CONSENSUS_DATA_DIRPATH_IN_SERVICE_CONTAINER,
"--web3-url=" + EXECUTION_ENGINE_ENDPOINT,
"--nat=extip:" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--enr-auto-update=false",
"--history={0}".format("archive" if package_io.ARCHIVE_MODE else "prune"),
"--rest",
"--rest-address=0.0.0.0",
"--rest-allow-origin=*",
"--rest-port={0}".format(HTTP_PORT_NUM),
# There's a bug where if we don't set this flag, the Nimbus nodes won't work:
# https://discord.com/channels/641364059387854899/674288681737256970/922890280120750170
# https://github.com/status-im/nimbus-eth2/issues/2451
"--doppelganger-detection=false",
# Set per Pari's recommendation to reduce noise in the logs
"--subscribe-all-subnets=true",
# Nimbus can handle a max of 256 threads; if the host has more, Nimbus crashes. Set to 4 so it doesn't crash on build servers
"--num-threads=4",
"--jwt-secret={0}".format(jwt_secret_filepath),
# vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--metrics",
"--metrics-address=0.0.0.0",
"--metrics-port={0}".format(METRICS_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
# Depending on whether we're using a node keystore, we'll need to add the validator flags
cmd = []
if node_keystore_files != None:
cmd.extend(validator_copy)
cmd.extend(beacon_start)
cmd.extend(validator_flags)
else:
cmd.extend(beacon_start)
if bootnode_contexts == None:
# Copied from https://github.com/status-im/nimbus-eth2/blob/67ab477a27e358d605e99bffeb67f98d18218eca/scripts/launch_local_testnet.sh#L417
# See explanation there
cmd.append("--subscribe-all-subnets")
else:
for ctx in bootnode_contexts[: package_io.MAX_ENR_ENTRIES]:
cmd.append("--bootstrap-node=" + ctx.enr)
cmd.append("--direct-peer=" + ctx.multiaddr)
if len(extra_params) > 0:
cmd.extend([param for param in extra_params])
files = {
GENESIS_DATA_MOUNTPOINT_ON_CLIENT: genesis_data.files_artifact_uuid,
}
if node_keystore_files:
files[
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENT
] = node_keystore_files.files_artifact_uuid
cmd_str = " ".join(cmd)
return ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=[cmd_str],
entrypoint=ENTRYPOINT_ARGS,
files=files,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(HTTP_PORT_ID),
min_cpu=bn_min_cpu,
max_cpu=bn_max_cpu,
min_memory=bn_min_mem,
max_memory=bn_max_mem,
)
def new_nimbus_launcher(cl_genesis_data):
return struct(
cl_genesis_data=cl_genesis_data,
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
cl_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_client_context.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
cl_node_ready_conditions = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_node_ready_conditions.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
IMAGE_SEPARATOR_DELIMITER = ","
EXPECTED_NUM_IMAGES = 2
# ---------------------------------- Beacon client -------------------------------------
CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/consensus-data"
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/genesis"
# Port IDs
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
RPC_PORT_ID = "rpc"
HTTP_PORT_ID = "http"
BEACON_MONITORING_PORT_ID = "monitoring"
# Port nums
DISCOVERY_TCP_PORT_NUM = 13000
DISCOVERY_UDP_PORT_NUM = 12000
RPC_PORT_NUM = 4000
HTTP_PORT_NUM = 3500
BEACON_MONITORING_PORT_NUM = 8080
# The min/max CPU/memory that the beacon node can use
BEACON_MIN_CPU = 50
BEACON_MAX_CPU = 1000
BEACON_MIN_MEMORY = 256
BEACON_MAX_MEMORY = 1024
# ---------------------------------- Validator client -------------------------------------
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/validator-keys"
PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/prysm-password"
# Port IDs
VALIDATOR_MONITORING_PORT_NUM = 8081
VALIDATOR_MONITORING_PORT_ID = "monitoring"
METRICS_PATH = "/metrics"
VALIDATOR_SUFFIX_SERVICE_NAME = "validator"
# The min/max CPU/memory that the validator node can use
VALIDATOR_MIN_CPU = 50
VALIDATOR_MAX_CPU = 300
VALIDATOR_MIN_MEMORY = 64
VALIDATOR_MAX_MEMORY = 256
MIN_PEERS = 1
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
BEACON_NODE_USED_PORTS = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_TCP_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_UDP_PORT_NUM, shared_utils.UDP_PROTOCOL
),
RPC_PORT_ID: shared_utils.new_port_spec(RPC_PORT_NUM, shared_utils.TCP_PROTOCOL),
HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL),
BEACON_MONITORING_PORT_ID: shared_utils.new_port_spec(
BEACON_MONITORING_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
VALIDATOR_NODE_USED_PORTS = {
VALIDATOR_MONITORING_PORT_ID: shared_utils.new_port_spec(
VALIDATOR_MONITORING_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
PRYSM_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "error",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "info",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace",
}
def launch(
plan,
launcher,
service_name,
images,
participant_log_level,
global_log_level,
bootnode_contexts,
el_client_context,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
extra_validator_params,
):
split_images = images.split(IMAGE_SEPARATOR_DELIMITER)
if len(split_images) != EXPECTED_NUM_IMAGES:
fail(
"Expected {0} images but got {1}".format(
EXPECTED_NUM_IMAGES, len(split_images)
)
)
beacon_image, validator_image = split_images
if beacon_image.strip() == "":
fail("An empty beacon image was provided")
if validator_image.strip() == "":
fail("An empty validator image was provided")
beacon_node_service_name = "{0}".format(service_name)
validator_node_service_name = "{0}-{1}".format(
service_name, VALIDATOR_SUFFIX_SERVICE_NAME
)
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, PRYSM_LOG_LEVELS
)
bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU
bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU
bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY
bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY
beacon_config = get_beacon_config(
launcher.genesis_data,
beacon_image,
bootnode_contexts,
el_client_context,
log_level,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
)
beacon_service = plan.add_service(beacon_node_service_name, beacon_config)
beacon_http_port = beacon_service.ports[HTTP_PORT_ID]
beacon_http_endpoint = "{0}:{1}".format(beacon_service.ip_address, HTTP_PORT_NUM)
beacon_rpc_endpoint = "{0}:{1}".format(beacon_service.ip_address, RPC_PORT_NUM)
# Launch validator node if we have a keystore file
validator_service = None
if node_keystore_files != None:
v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU
v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU
v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY
v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY
validator_config = get_validator_config(
launcher.genesis_data,
validator_image,
validator_node_service_name,
log_level,
beacon_rpc_endpoint,
beacon_http_endpoint,
node_keystore_files,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
extra_validator_params,
launcher.prysm_password_relative_filepath,
launcher.prysm_password_artifact_uuid,
)
validator_service = plan.add_service(
validator_node_service_name, validator_config
)
# TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.discovery_addresses[0]",
"peer_id": ".data.peer_id",
},
)
response = plan.request(
recipe=beacon_node_identity_recipe, service_name=beacon_node_service_name
)
beacon_node_enr = response["extract.enr"]
beacon_multiaddr = response["extract.multiaddr"]
beacon_peer_id = response["extract.peer_id"]
beacon_metrics_port = beacon_service.ports[BEACON_MONITORING_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
beacon_node_metrics_info = node_metrics.new_node_metrics_info(
beacon_node_service_name, METRICS_PATH, beacon_metrics_url
)
nodes_metrics_info = [beacon_node_metrics_info]
if validator_service:
validator_metrics_port = validator_service.ports[VALIDATOR_MONITORING_PORT_ID]
validator_metrics_url = "{0}:{1}".format(
validator_service.ip_address, validator_metrics_port.number
)
validator_node_metrics_info = node_metrics.new_node_metrics_info(
validator_node_service_name, METRICS_PATH, validator_metrics_url
)
nodes_metrics_info.append(validator_node_metrics_info)
return cl_client_context.new_cl_client_context(
"prysm",
beacon_node_enr,
beacon_service.ip_address,
HTTP_PORT_NUM,
nodes_metrics_info,
beacon_node_service_name,
validator_node_service_name,
beacon_multiaddr,
beacon_peer_id,
snooper_enabled,
snooper_engine_context,
)
def get_beacon_config(
genesis_data,
beacon_image,
bootnode_contexts,
el_client_context,
log_level,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_params,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
snooper_engine_context.ip_addr,
snooper_engine_context.engine_rpc_port_num,
)
else:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.engine_rpc_port_num,
)
genesis_config_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.config_yml_rel_filepath,
)
genesis_ssz_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.genesis_ssz_rel_filepath,
)
jwt_secret_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.jwt_secret_rel_filepath,
)
cmd = [
"--accept-terms-of-use=true", # it's mandatory in order to run the node
"--datadir=" + CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER,
"--chain-config-file=" + genesis_config_filepath,
"--genesis-state=" + genesis_ssz_filepath,
"--execution-endpoint=" + EXECUTION_ENGINE_ENDPOINT,
"--rpc-host=0.0.0.0",
"--rpc-port={0}".format(RPC_PORT_NUM),
"--grpc-gateway-host=0.0.0.0",
"--grpc-gateway-corsdomain=*",
"--grpc-gateway-port={0}".format(HTTP_PORT_NUM),
"--p2p-host-ip=" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--p2p-tcp-port={0}".format(DISCOVERY_TCP_PORT_NUM),
"--p2p-udp-port={0}".format(DISCOVERY_UDP_PORT_NUM),
"--min-sync-peers={0}".format(MIN_PEERS),
"--verbosity=" + log_level,
"--slots-per-archive-point={0}".format(32 if package_io.ARCHIVE_MODE else 8192),
"--suggested-fee-recipient=" + package_io.VALIDATING_REWARDS_ACCOUNT,
# Set per Pari's recommendation to reduce noise
"--subscribe-all-subnets=true",
"--jwt-secret={0}".format(jwt_secret_filepath),
# vvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--disable-monitoring=false",
"--monitoring-host=0.0.0.0",
"--monitoring-port={0}".format(BEACON_MONITORING_PORT_NUM)
# ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
if bootnode_contexts != None:
for ctx in bootnode_contexts[: package_io.MAX_ENR_ENTRIES]:
cmd.append("--peer=" + ctx.multiaddr)
cmd.append("--bootstrap-node=" + ctx.enr)
cmd.append("--p2p-static-id=true")
if len(extra_params) > 0:
# we do the for loop as otherwise it's a proto repeated array
cmd.extend([param for param in extra_params])
return ServiceConfig(
image=beacon_image,
ports=BEACON_NODE_USED_PORTS,
cmd=cmd,
files={
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: genesis_data.files_artifact_uuid,
},
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(HTTP_PORT_ID),
min_cpu=bn_min_cpu,
max_cpu=bn_max_cpu,
min_memory=bn_min_mem,
max_memory=bn_max_mem,
)
def get_validator_config(
genesis_data,
validator_image,
service_name,
log_level,
beacon_rpc_endpoint,
beacon_http_endpoint,
node_keystore_files,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
extra_params,
prysm_password_relative_filepath,
prysm_password_artifact_uuid,
):
consensus_data_dirpath = shared_utils.path_join(
CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER, service_name
)
genesis_config_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.config_yml_rel_filepath,
)
validator_keys_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
node_keystore_files.prysm_relative_dirpath,
)
validator_secrets_dirpath = shared_utils.path_join(
PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
prysm_password_relative_filepath,
)
cmd = [
"--accept-terms-of-use=true", # it's mandatory in order to run the node
"--chain-config-file=" + genesis_config_filepath,
"--beacon-rpc-gateway-provider=" + beacon_http_endpoint,
"--beacon-rpc-provider=" + beacon_rpc_endpoint,
"--wallet-dir=" + validator_keys_dirpath,
"--wallet-password-file=" + validator_secrets_dirpath,
"--datadir=" + consensus_data_dirpath,
"--monitoring-port={0}".format(VALIDATOR_MONITORING_PORT_NUM),
"--verbosity=" + log_level,
"--suggested-fee-recipient=" + package_io.VALIDATING_REWARDS_ACCOUNT,
# TODO(old) SOMETHING ABOUT JWT
# vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--disable-monitoring=false",
"--monitoring-host=0.0.0.0",
"--monitoring-port={0}".format(VALIDATOR_MONITORING_PORT_NUM)
# ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
if len(extra_params) > 0:
# we do the for loop as otherwise it's a proto repeated array
cmd.extend([param for param in extra_params])
return ServiceConfig(
image=validator_image,
ports=VALIDATOR_NODE_USED_PORTS,
cmd=cmd,
files={
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: genesis_data.files_artifact_uuid,
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: node_keystore_files.files_artifact_uuid,
PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: prysm_password_artifact_uuid,
},
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=v_min_cpu,
max_cpu=v_max_cpu,
min_memory=v_min_mem,
max_memory=v_max_mem,
)
def new_prysm_launcher(
genesis_data, prysm_password_relative_filepath, prysm_password_artifact_uuid
):
return struct(
genesis_data=genesis_data,
prysm_password_artifact_uuid=prysm_password_artifact_uuid,
prysm_password_relative_filepath=prysm_password_relative_filepath,
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
cl_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_client_context.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
cl_node_ready_conditions = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/cl_node_ready_conditions.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
TEKU_BINARY_FILEPATH_IN_IMAGE = "/opt/teku/bin/teku"
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/genesis"
# The Docker container runs as the "teku" user so we can't write to root
CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/opt/teku/consensus-data"
# These will get mounted as root and Teku needs directory write permissions, so we'll copy this
# into the Teku user's home directory to get around it
VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER = "/validator-keys"
# Port IDs
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
HTTP_PORT_ID = "http"
METRICS_PORT_ID = "metrics"
# Port nums
DISCOVERY_PORT_NUM = 9000
HTTP_PORT_NUM = 4000
METRICS_PORT_NUM = 8008
# The min/max CPU/memory that the beacon node can use
BEACON_MIN_CPU = 50
BEACON_MAX_CPU = 1000
BEACON_MIN_MEMORY = 512
BEACON_MAX_MEMORY = 1024
# 1) The Teku container runs as the "teku" user
# 2) Teku requires write access to the validator secrets directory, so it can write a lockfile into it as it uses the keys
# 3) The module container runs as 'root'
# With these three things combined, it means that when the module container tries to write the validator keys/secrets into
# the shared directory, it does so as 'root'. When Teku tries to consume the same files, it will get a failure because it
# doesn't have permission to write to the 'validator-secrets' directory.
# To get around this, we copy the files AGAIN from the mounted directory into the Teku user's home directory
DEST_VALIDATOR_KEYS_DIRPATH_IN_SERVICE_CONTAINER = "$HOME/validator-keys"
DEST_VALIDATOR_SECRETS_DIRPATH_IN_SERVICE_CONTAINER = "$HOME/validator-secrets"
MIN_PEERS = 1
METRICS_PATH = "/metrics"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
ENTRYPOINT_ARGS = ["sh", "-c"]
TEKU_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
bootnode_context,
el_client_context,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
v_min_cpu,
v_max_cpu,
v_min_mem,
v_max_mem,
snooper_enabled,
snooper_engine_context,
extra_beacon_params,
extra_validator_params,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, TEKU_LOG_LEVELS
)
extra_params = [param for param in extra_beacon_params] + [
param for param in extra_validator_params
]
bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU
bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU
bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY
bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY
# If the validator's min/max CPU/memory limits exceed the beacon node's, raise the beacon node's limits to match; otherwise keep the beacon node's (default) values
bn_min_cpu = int(v_min_cpu) if (int(v_min_cpu) > bn_min_cpu) else bn_min_cpu
bn_max_cpu = int(v_max_cpu) if (int(v_max_cpu) > bn_max_cpu) else bn_max_cpu
bn_min_mem = int(v_min_mem) if (int(v_min_mem) > bn_min_mem) else bn_min_mem
bn_max_mem = int(v_max_mem) if (int(v_max_mem) > bn_max_mem) else bn_max_mem
config = get_config(
launcher.cl_genesis_data,
image,
bootnode_context,
el_client_context,
log_level,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_params,
)
teku_service = plan.add_service(service_name, config)
node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.discovery_addresses[0]",
"peer_id": ".data.peer_id",
},
)
response = plan.request(recipe=node_identity_recipe, service_name=service_name)
node_enr = response["extract.enr"]
multiaddr = response["extract.multiaddr"]
peer_id = response["extract.peer_id"]
teku_metrics_port = teku_service.ports[METRICS_PORT_ID]
teku_metrics_url = "{0}:{1}".format(
teku_service.ip_address, teku_metrics_port.number
)
teku_node_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, teku_metrics_url
)
nodes_metrics_info = [teku_node_metrics_info]
return cl_client_context.new_cl_client_context(
"teku",
node_enr,
teku_service.ip_address,
HTTP_PORT_NUM,
nodes_metrics_info,
service_name,
multiaddr=multiaddr,
peer_id=peer_id,
snooper_enabled=snooper_enabled,
snooper_engine_context=snooper_engine_context,
)
def get_config(
genesis_data,
image,
bootnode_contexts,
el_client_context,
log_level,
node_keystore_files,
bn_min_cpu,
bn_max_cpu,
bn_min_mem,
bn_max_mem,
snooper_enabled,
snooper_engine_context,
extra_params,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
snooper_engine_context.ip_addr,
snooper_engine_context.engine_rpc_port_num,
)
else:
EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.engine_rpc_port_num,
)
genesis_config_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.config_yml_rel_filepath,
)
genesis_ssz_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.genesis_ssz_rel_filepath,
)
jwt_secret_filepath = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
genesis_data.jwt_secret_rel_filepath,
)
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
if node_keystore_files:
validator_keys_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER,
node_keystore_files.teku_keys_relative_dirpath,
)
validator_secrets_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER,
node_keystore_files.teku_secrets_relative_dirpath,
)
validator_copy = [
# Needed because the generated keys are owned by root and the Teku image runs as the 'teku' user
"cp",
"-R",
validator_keys_dirpath,
DEST_VALIDATOR_KEYS_DIRPATH_IN_SERVICE_CONTAINER,
"&&",
# Needed because the generated keys are owned by root and the Teku image runs as the 'teku' user
"cp",
"-R",
validator_secrets_dirpath,
DEST_VALIDATOR_SECRETS_DIRPATH_IN_SERVICE_CONTAINER,
"&&",
]
validator_flags = [
"--validator-keys={0}:{1}".format(
DEST_VALIDATOR_KEYS_DIRPATH_IN_SERVICE_CONTAINER,
DEST_VALIDATOR_SECRETS_DIRPATH_IN_SERVICE_CONTAINER,
),
"--validators-proposer-default-fee-recipient="
+ package_io.VALIDATING_REWARDS_ACCOUNT,
]
beacon_start = [
TEKU_BINARY_FILEPATH_IN_IMAGE,
"--logging=" + log_level,
"--log-destination=CONSOLE",
"--network=" + genesis_config_filepath,
"--initial-state=" + genesis_ssz_filepath,
"--data-path=" + CONSENSUS_DATA_DIRPATH_ON_SERVICE_CONTAINER,
"--data-storage-mode={0}".format(
"ARCHIVE" if package_io.ARCHIVE_MODE else "PRUNE"
),
"--p2p-enabled=true",
# Set per Pari's recommendation, to reduce noise in the logs
"--p2p-subscribe-all-subnets-enabled=true",
"--p2p-peer-lower-bound={0}".format(MIN_PEERS),
"--p2p-advertised-ip=" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--p2p-discovery-site-local-addresses-enabled",
"--rest-api-enabled=true",
"--rest-api-docs-enabled=true",
"--rest-api-interface=0.0.0.0",
"--rest-api-port={0}".format(HTTP_PORT_NUM),
"--rest-api-host-allowlist=*",
"--data-storage-non-canonical-blocks-enabled=true",
"--ee-jwt-secret-file={0}".format(jwt_secret_filepath),
"--ee-endpoint=" + EXECUTION_ENGINE_ENDPOINT,
# vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--metrics-enabled",
"--metrics-interface=0.0.0.0",
"--metrics-host-allowlist='*'",
"--metrics-categories=BEACON,PROCESS,LIBP2P,JVM,NETWORK,PROCESS",
"--metrics-port={0}".format(METRICS_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
# Depending on whether we're using a node keystore, we'll need to add the validator flags
cmd = []
if node_keystore_files != None:
cmd.extend(validator_copy)
cmd.extend(beacon_start)
cmd.extend(validator_flags)
else:
cmd.extend(beacon_start)
if bootnode_contexts != None:
cmd.append(
"--p2p-discovery-bootnodes="
+ ",".join(
[ctx.enr for ctx in bootnode_contexts[: package_io.MAX_ENR_ENTRIES]]
)
)
cmd.append(
"--p2p-static-peers="
+ ",".join(
[
ctx.multiaddr
for ctx in bootnode_contexts[: package_io.MAX_ENR_ENTRIES]
]
)
)
if len(extra_params) > 0:
# we do the list comprehension as the default extra_params is a proto repeated string
cmd.extend([param for param in extra_params])
files = {
GENESIS_DATA_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: genesis_data.files_artifact_uuid,
}
if node_keystore_files:
files[
VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER
] = node_keystore_files.files_artifact_uuid
cmd_str = " ".join(cmd)
return ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=[cmd_str],
entrypoint=ENTRYPOINT_ARGS,
files=files,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(HTTP_PORT_ID),
min_cpu=bn_min_cpu,
max_cpu=bn_max_cpu,
min_memory=bn_min_mem,
max_memory=bn_max_mem,
)
def new_teku_launcher(cl_genesis_data):
return struct(cl_genesis_data=cl_genesis_data)
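# --- Illustrative usage sketch; not part of the package. The service name, image, and
# keyword-argument style are assumptions about how participant_network.star wires this up. ---
#
#   launcher = new_teku_launcher(cl_genesis_data)
#   cl_context = launch(
#       plan, launcher, "cl-client-0-teku", "consensys/teku:latest",
#       participant_log_level="", global_log_level="info",
#       bootnode_context=None,  # or a list of CL contexts for non-bootnode participants
#       el_client_context=el_context, node_keystore_files=keystores,
#       bn_min_cpu=0, bn_max_cpu=0, bn_min_mem=0, bn_max_mem=0,  # 0 -> package defaults
#       v_min_cpu=0, v_max_cpu=0, v_min_mem=0, v_max_mem=0,
#       snooper_enabled=False, snooper_engine_context=None,
#       extra_beacon_params=[], extra_validator_params=[],
#   )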
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
el_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_client_context.star"
)
el_admin_node_info = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_admin_node_info.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/opt/besu/execution-data"
KZG_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/opt/besu/genesis/output/trusted_setup.txt"
GENESIS_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/opt/besu/genesis"
METRICS_PATH = "/metrics"
RPC_PORT_NUM = 8545
WS_PORT_NUM = 8546
DISCOVERY_PORT_NUM = 30303
ENGINE_HTTP_RPC_PORT_NUM = 8551
METRICS_PORT_NUM = 9001
# The min/max CPU/memory that the execution node can use
EXECUTION_MIN_CPU = 100
EXECUTION_MAX_CPU = 1000
EXECUTION_MIN_MEMORY = 512
EXECUTION_MAX_MEMORY = 2048
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_HTTP_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
RPC_PORT_ID: shared_utils.new_port_spec(RPC_PORT_NUM, shared_utils.TCP_PROTOCOL),
WS_PORT_ID: shared_utils.new_port_spec(WS_PORT_NUM, shared_utils.TCP_PROTOCOL),
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
ENGINE_HTTP_RPC_PORT_ID: shared_utils.new_port_spec(
ENGINE_HTTP_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL
),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
ENTRYPOINT_ARGS = ["sh", "-c"]
BESU_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
existing_el_clients,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, BESU_LOG_LEVELS
)
el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU
el_max_cpu = int(el_max_cpu) if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU
el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY
el_max_mem = int(el_max_mem) if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY
config, jwt_secret_json_filepath_on_client = get_config(
launcher.network_id,
launcher.el_genesis_data,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
jwt_secret = shared_utils.read_file_from_service(
plan, service_name, jwt_secret_json_filepath_on_client
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
besu_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, metrics_url
)
return el_client_context.new_el_client_context(
"besu",
"", # besu has no ENR
enode,
service.ip_address,
RPC_PORT_NUM,
WS_PORT_NUM,
ENGINE_HTTP_RPC_PORT_NUM,
jwt_secret,
service_name,
[besu_metrics_info],
)
def get_config(
network_id,
genesis_data,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
genesis_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_DIRPATH_ON_CLIENT_CONTAINER,
genesis_data.besu_genesis_json_relative_filepath,
)
jwt_secret_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_DIRPATH_ON_CLIENT_CONTAINER,
genesis_data.jwt_secret_relative_filepath,
)
cmd = [
"besu",
"--logging=" + log_level,
"--data-path=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--genesis-file=" + genesis_json_filepath_on_client,
"--network-id=" + network_id,
"--host-allowlist=*",
"--rpc-http-enabled=true",
"--rpc-http-host=0.0.0.0",
"--rpc-http-port={0}".format(RPC_PORT_NUM),
"--rpc-http-api=ADMIN,CLIQUE,ETH,NET,DEBUG,TXPOOL,ENGINE,TRACE,WEB3",
"--rpc-http-cors-origins=*",
"--rpc-ws-enabled=true",
"--rpc-ws-host=0.0.0.0",
"--rpc-ws-port={0}".format(WS_PORT_NUM),
"--rpc-ws-api=ADMIN,CLIQUE,ETH,NET,DEBUG,TXPOOL,ENGINE,TRACE,WEB3",
"--p2p-enabled=true",
"--p2p-host=" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--p2p-port={0}".format(DISCOVERY_PORT_NUM),
"--engine-rpc-enabled=true",
"--engine-jwt-secret={0}".format(jwt_secret_json_filepath_on_client),
"--engine-host-allowlist=*",
"--engine-rpc-port={0}".format(ENGINE_HTTP_RPC_PORT_NUM),
"--sync-mode=FULL",
"--data-storage-format=BONSAI",
"--kzg-trusted-setup=" + KZG_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--metrics-enabled=true",
"--metrics-host=0.0.0.0",
"--metrics-port={0}".format(METRICS_PORT_NUM),
]
if len(existing_el_clients) > 0:
cmd.append(
"--bootnodes="
+ ",".join(
[
ctx.enode
for ctx in existing_el_clients[: package_io.MAX_ENODE_ENTRIES]
]
)
)
if len(extra_params) > 0:
# we do this as extra_params isn't a normal [] but a proto repeated array
cmd.extend([param for param in extra_params])
cmd_str = " ".join(cmd)
return (
ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=[cmd_str],
files={
GENESIS_DATA_DIRPATH_ON_CLIENT_CONTAINER: genesis_data.files_artifact_uuid
},
entrypoint=ENTRYPOINT_ARGS,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=el_min_cpu,
max_cpu=el_max_cpu,
min_memory=el_min_mem,
max_memory=el_max_mem,
env_vars=extra_env_vars,
),
jwt_secret_json_filepath_on_client,
)
def new_besu_launcher(network_id, el_genesis_data):
return struct(network_id=network_id, el_genesis_data=el_genesis_data)
def get_enode_enr_for_node(plan, service_name, port_id):
recipe = PostHttpRequestRecipe(
endpoint="",
body='{"method":"admin_nodeInfo","params":[],"id":1,"jsonrpc":"2.0"}',
content_type="application/json",
port_id=port_id,
extract={
"enode": """.result.enode | split("?") | .[0]""",
"enr": ".result.enr",
},
)
response = plan.wait(
recipe=recipe,
field="extract.enode",
assertion="!=",
target_value="",
timeout="15m",
service_name=service_name,
)
return (response["extract.enode"], response["extract.enr"])
def get_enode_for_node(plan, service_name, port_id):
recipe = PostHttpRequestRecipe(
endpoint="",
body='{"method":"admin_nodeInfo","params":[],"id":1,"jsonrpc":"2.0"}',
content_type="application/json",
port_id=port_id,
extract={
"enode": """.result.enode | split("?") | .[0]""",
},
)
response = plan.wait(
recipe=recipe,
field="extract.enode",
assertion="!=",
target_value="",
timeout="15m",
service_name=service_name,
)
return response["extract.enode"]
def new_el_client_context(
client_name,
enr,
enode,
ip_addr,
rpc_port_num,
ws_port_num,
engine_rpc_port_num,
jwt_secret,
service_name="",
el_metrics_info=None,
):
return struct(
service_name=service_name,
client_name=client_name,
enr=enr,
enode=enode,
ip_addr=ip_addr,
rpc_port_num=rpc_port_num,
ws_port_num=ws_port_num,
engine_rpc_port_num=engine_rpc_port_num,
jwt_secret=jwt_secret,
el_metrics_info=el_metrics_info,
)
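# Illustrative (mirrors how the EL launchers in this package consume these contexts
# when building peer lists):
#
#   "--bootnodes=" + ",".join(
#       [ctx.enode for ctx in el_contexts[: package_io.MAX_ENODE_ENTRIES]]
#   )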
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
el_admin_node_info = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_admin_node_info.star"
)
el_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_client_context.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/home/erigon/execution-data"
GENESIS_DATA_MOUNT_DIRPATH = "/genesis"
METRICS_PATH = "/metrics"
WS_RPC_PORT_NUM = 8545
DISCOVERY_PORT_NUM = 30303
ENGINE_RPC_PORT_NUM = 8551
METRICS_PORT_NUM = 9001
# The min/max CPU/memory that the execution node can use
EXECUTION_MIN_CPU = 100
EXECUTION_MAX_CPU = 1000
EXECUTION_MIN_MEMORY = 512
EXECUTION_MAX_MEMORY = 2048
# Port IDs
WS_RPC_PORT_ID = "ws-rpc"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
WS_RPC_PORT_ID: shared_utils.new_port_spec(
WS_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL
),
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
ENGINE_RPC_PORT_ID: shared_utils.new_port_spec(
ENGINE_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL
),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
ENTRYPOINT_ARGS = ["sh", "-c"]
ERIGON_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "1",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "2",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "3",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "4",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "5",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
existing_el_clients,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, ERIGON_LOG_LEVELS
)
el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU
el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU
el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY
el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY
config, jwt_secret_json_filepath_on_client = get_config(
launcher.network_id,
launcher.el_genesis_data,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
)
service = plan.add_service(service_name, config)
enode, enr = el_admin_node_info.get_enode_enr_for_node(
plan, service_name, WS_RPC_PORT_ID
)
jwt_secret = shared_utils.read_file_from_service(
plan, service_name, jwt_secret_json_filepath_on_client
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
erigon_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, metrics_url
)
return el_client_context.new_el_client_context(
"erigon",
enr,
enode,
service.ip_address,
WS_RPC_PORT_NUM,
WS_RPC_PORT_NUM,
ENGINE_RPC_PORT_NUM,
jwt_secret,
service_name,
[erigon_metrics_info],
)
def get_config(
network_id,
genesis_data,
image,
existing_el_clients,
verbosity_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
genesis_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.erigon_genesis_json_relative_filepath
)
jwt_secret_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.jwt_secret_relative_filepath
)
init_datadir_cmd_str = "erigon init --datadir={0} {1}".format(
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
genesis_json_filepath_on_client,
)
# TODO remove this based on https://github.com/kurtosis-tech/eth2-merge-kurtosis-module/issues/152
if len(existing_el_clients) == 0:
fail("Erigon needs at least one node to exist, which it treats as the bootnode")
boot_node_1 = existing_el_clients[0]
cmd = [
"erigon",
"--log.console.verbosity=" + verbosity_level,
"--datadir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--port={0}".format(DISCOVERY_PORT_NUM),
"--networkid=" + network_id,
"--http.api=eth,erigon,engine,web3,net,debug,trace,txpool,admin",
"--http.vhosts=*",
"--ws",
"--allow-insecure-unlock",
"--nat=extip:" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--http",
"--http.addr=0.0.0.0",
"--http.corsdomain=*",
"--http.port={0}".format(WS_RPC_PORT_NUM),
"--authrpc.jwtsecret={0}".format(jwt_secret_json_filepath_on_client),
"--authrpc.addr=0.0.0.0",
"--authrpc.port={0}".format(ENGINE_RPC_PORT_NUM),
"--authrpc.vhosts=*",
"--metrics",
"--metrics.addr=0.0.0.0",
"--metrics.port={0}".format(METRICS_PORT_NUM),
]
if len(existing_el_clients) > 0:
cmd.append(
"--bootnodes="
+ ",".join(
[
ctx.enode
for ctx in existing_el_clients[: package_io.MAX_ENODE_ENTRIES]
]
)
)
cmd.append(
"--staticpeers="
+ ",".join(
[
ctx.enode
for ctx in existing_el_clients[: package_io.MAX_ENODE_ENTRIES]
]
)
)
if len(extra_params) > 0:
# this is a repeated<proto type>, we convert it into Starlark
cmd.extend([param for param in extra_params])
command_arg = [init_datadir_cmd_str, " ".join(cmd)]
command_arg_str = " && ".join(command_arg)
return (
ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=[command_arg_str],
files={GENESIS_DATA_MOUNT_DIRPATH: genesis_data.files_artifact_uuid},
entrypoint=ENTRYPOINT_ARGS,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=el_min_cpu,
max_cpu=el_max_cpu,
min_memory=el_min_mem,
max_memory=el_max_mem,
env_vars=extra_env_vars,
),
jwt_secret_json_filepath_on_client,
)
def new_erigon_launcher(network_id, el_genesis_data):
return struct(
network_id=network_id,
el_genesis_data=el_genesis_data,
)
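# For illustration (paths abridged; the exact genesis filename comes from the files
# artifact): the container ultimately runs a single "sh -c" string of the form
#   erigon init --datadir=/home/erigon/execution-data /genesis/<genesis>.json && \
#   erigon --log.console.verbosity=3 --datadir=/home/erigon/execution-data --port=30303 ...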
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
el_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_client_context.star"
)
el_admin_node_info = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_admin_node_info.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
RPC_PORT_NUM = 8545
WS_PORT_NUM = 8546
WS_PORT_ENGINE_NUM = 8547
DISCOVERY_PORT_NUM = 30303
ENGINE_RPC_PORT_NUM = 8551
METRICS_PORT_NUM = 9001
# The min/max CPU/memory that the execution node can use
EXECUTION_MIN_CPU = 100
EXECUTION_MAX_CPU = 1000
EXECUTION_MIN_MEMORY = 256
EXECUTION_MAX_MEMORY = 1024
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
WS_PORT_ENGINE_ID = "ws-engine"
METRICS_PORT_ID = "metrics"
GENESIS_DATA_MOUNT_DIRPATH = "/genesis"
PREFUNDED_KEYS_MOUNT_DIRPATH = "/prefunded-keys"
METRICS_PATH = "/metrics"
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/execution-data"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
RPC_PORT_ID: shared_utils.new_port_spec(RPC_PORT_NUM, shared_utils.TCP_PROTOCOL),
WS_PORT_ID: shared_utils.new_port_spec(WS_PORT_NUM, shared_utils.TCP_PROTOCOL),
WS_PORT_ENGINE_ID: shared_utils.new_port_spec(
WS_PORT_ENGINE_NUM, shared_utils.TCP_PROTOCOL
),
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
ENGINE_RPC_PORT_ID: shared_utils.new_port_spec(
ENGINE_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL
),
# METRICS_PORT_ID: shared_utils.new_port_spec(METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL)
}
ENTRYPOINT_ARGS = []
VERBOSITY_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "error",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "info",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
# If empty then the node will be launched as a bootnode
existing_el_clients,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU
el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU
el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY
el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY
config, jwt_secret_json_filepath_on_client = get_config(
launcher.el_genesis_data,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
jwt_secret = shared_utils.read_file_from_service(
plan, service_name, jwt_secret_json_filepath_on_client
)
# TODO: Passing empty string for metrics_url for now https://github.com/kurtosis-tech/eth2-package/issues/127
# metrics_url = "http://{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
ethjs_metrics_info = None
return el_client_context.new_el_client_context(
"ethereumjs",
"", # ethereumjs has no enr
enode,
service.ip_address,
RPC_PORT_NUM,
WS_PORT_NUM,
ENGINE_RPC_PORT_NUM,
jwt_secret,
service_name,
[ethjs_metrics_info],
)
def get_config(
genesis_data,
image,
existing_el_clients,
verbosity_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
genesis_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.geth_genesis_json_relative_filepath
)
jwt_secret_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.jwt_secret_relative_filepath
)
cmd = [
"--gethGenesis=" + genesis_json_filepath_on_client,
"--dataDir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--port={0}".format(DISCOVERY_PORT_NUM),
"--rpc",
"--rpcAddr=0.0.0.0",
"--rpcPort={0}".format(RPC_PORT_NUM),
"--rpcCors=*",
"--rpcEngine",
"--rpcEngineAddr=0.0.0.0",
"--rpcEnginePort={0}".format(ENGINE_RPC_PORT_NUM),
"--ws",
"--wsAddr=0.0.0.0",
"--wsPort={0}".format(WS_PORT_NUM),
"--wsEnginePort={0}".format(WS_PORT_ENGINE_NUM),
"--wsEngineAddr=0.0.0.0",
"--jwt-secret={0}".format(jwt_secret_json_filepath_on_client),
"--extIP={0}".format(PRIVATE_IP_ADDRESS_PLACEHOLDER),
"--sync=full",
"--isSingleNode=true",
"--logLevel={0}".format(verbosity_level),
]
if len(existing_el_clients) > 0:
cmd.append(
"--bootnodes="
+ ",".join(
[
ctx.enode
for ctx in existing_el_clients[: package_io.MAX_ENODE_ENTRIES]
]
)
)
if len(extra_params) > 0:
# this is a repeated<proto type>, we convert it into Starlark
cmd.extend([param for param in extra_params])
return (
ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=cmd,
files={
GENESIS_DATA_MOUNT_DIRPATH: genesis_data.files_artifact_uuid,
},
entrypoint=ENTRYPOINT_ARGS,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=el_min_cpu,
max_cpu=el_max_cpu,
min_memory=el_min_mem,
max_memory=el_max_mem,
env_vars=extra_env_vars,
),
jwt_secret_json_filepath_on_client,
)
def new_ethereumjs_launcher(el_genesis_data):
return struct(
el_genesis_data=el_genesis_data,
)
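# Design note (observation): unlike the "sh -c"-based launchers, ENTRYPOINT_ARGS is empty
# here, so the flag list in cmd is handed directly to the image's default ethereumjs
# entrypoint -- no shell means no "&&" chaining, hence cmd=cmd (a list) rather than
# cmd=[cmd_str].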
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
el_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_client_context.star"
)
el_admin_node_info = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_admin_node_info.star"
)
genesis_constants = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/genesis_constants/genesis_constants.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
RPC_PORT_NUM = 8545
WS_PORT_NUM = 8546
DISCOVERY_PORT_NUM = 30303
ENGINE_RPC_PORT_NUM = 8551
METRICS_PORT_NUM = 9001
# The min/max CPU/memory that the execution node can use
EXECUTION_MIN_CPU = 100
EXECUTION_MAX_CPU = 1000
EXECUTION_MIN_MEMORY = 256
EXECUTION_MAX_MEMORY = 1024
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
ENGINE_WS_PORT_ID = "engineWs"
METRICS_PORT_ID = "metrics"
# TODO(old) Scale this dynamically based on CPUs available and Geth nodes mining
NUM_MINING_THREADS = 1
GENESIS_DATA_MOUNT_DIRPATH = "/genesis"
PREFUNDED_KEYS_MOUNT_DIRPATH = "/prefunded-keys"
METRICS_PATH = "/debug/metrics/prometheus"
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/execution-data"
KEYSTORE_DIRPATH_ON_CLIENT_CONTAINER = (
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER + "/keystore"
)
GETH_ACCOUNT_PASSWORD = (
"password" # Password that the Geth accounts will be locked with
)
GETH_ACCOUNT_PASSWORDS_FILE = "/tmp/password.txt" # Unlocking the imported accounts requires a password file with one line per account
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
RPC_PORT_ID: shared_utils.new_port_spec(RPC_PORT_NUM, shared_utils.TCP_PROTOCOL),
WS_PORT_ID: shared_utils.new_port_spec(WS_PORT_NUM, shared_utils.TCP_PROTOCOL),
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
ENGINE_RPC_PORT_ID: shared_utils.new_port_spec(
ENGINE_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL
),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "1",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "2",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "3",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "4",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "5",
}
BUILDER_IMAGE_STR = "builder"
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
# If empty then the node will be launched as a bootnode
existing_el_clients,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU
el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU
el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY
el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY
config, jwt_secret_json_filepath_on_client = get_config(
launcher.network_id,
launcher.el_genesis_data,
launcher.prefunded_geth_keys_artifact_uuid,
launcher.prefunded_account_info,
launcher.genesis_validators_root,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
launcher.electra_fork_epoch,
)
service = plan.add_service(service_name, config)
enode, enr = el_admin_node_info.get_enode_enr_for_node(
plan, service_name, RPC_PORT_ID
)
jwt_secret = shared_utils.read_file_from_service(
plan, service_name, jwt_secret_json_filepath_on_client
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
geth_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, metrics_url
)
return el_client_context.new_el_client_context(
"geth",
enr,
enode,
service.ip_address,
RPC_PORT_NUM,
WS_PORT_NUM,
ENGINE_RPC_PORT_NUM,
jwt_secret,
service_name,
[geth_metrics_info],
)
def get_config(
network_id,
genesis_data,
prefunded_geth_keys_artifact_uuid,
prefunded_account_info,
genesis_validators_root,
image,
existing_el_clients,
verbosity_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
electra_fork_epoch,
):
genesis_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.geth_genesis_json_relative_filepath
)
jwt_secret_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.jwt_secret_relative_filepath
)
account_addresses_to_unlock = []
for prefunded_account in prefunded_account_info:
account_addresses_to_unlock.append(prefunded_account.address)
for index, extra_param in enumerate(extra_params):
if package_io.GENESIS_VALIDATORS_ROOT_PLACEHOLDER in extra_param:
extra_params[index] = extra_param.replace(
package_io.GENESIS_VALIDATORS_ROOT_PLACEHOLDER, genesis_validators_root
)
accounts_to_unlock_str = ",".join(account_addresses_to_unlock)
init_datadir_cmd_str = "geth init {0} --datadir={1} {2}".format(
"--cache.preimages" if electra_fork_epoch != None else "",
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
genesis_json_filepath_on_client,
)
# We need to put the keys into the right spot
copy_keys_into_keystore_cmd_str = "cp -r {0}/* {1}/".format(
PREFUNDED_KEYS_MOUNT_DIRPATH,
KEYSTORE_DIRPATH_ON_CLIENT_CONTAINER,
)
create_passwords_file_cmd_str = (
"{"
+ ' for i in $(seq 1 {0}); do echo "{1}" >> {2}; done; '.format(
len(prefunded_account_info),
GETH_ACCOUNT_PASSWORD,
GETH_ACCOUNT_PASSWORDS_FILE,
)
+ "}"
)
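# Rendered example (illustration): with 3 prefunded accounts the snippet above becomes
#   { for i in $(seq 1 3); do echo "password" >> /tmp/password.txt; done; }
# i.e. one password line per account to unlock.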
cmd = [
"geth",
"--verbosity=" + verbosity_level,
"--unlock=" + accounts_to_unlock_str,
"--password=" + GETH_ACCOUNT_PASSWORDS_FILE,
"--datadir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--networkid=" + network_id,
"--http",
"--http.addr=0.0.0.0",
"--http.vhosts=*",
"--http.corsdomain=*",
# WARNING: The admin info endpoint is enabled so that we can easily get ENR/enode, which means
# that users should NOT store private information in these Kurtosis nodes!
"--http.api=admin,engine,net,eth,web3,debug",
"--ws",
"--ws.addr=0.0.0.0",
"--ws.port={0}".format(WS_PORT_NUM),
"--ws.api=admin,engine,net,eth,web3,debug",
"--ws.origins=*",
"--allow-insecure-unlock",
"--nat=extip:" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--verbosity=" + verbosity_level,
"--authrpc.port={0}".format(ENGINE_RPC_PORT_NUM),
"--authrpc.addr=0.0.0.0",
"--authrpc.vhosts=*",
"--authrpc.jwtsecret={0}".format(jwt_secret_json_filepath_on_client),
"--syncmode=full",
"--rpc.allow-unprotected-txs",
"--metrics",
"--metrics.addr=0.0.0.0",
"--metrics.port={0}".format(METRICS_PORT_NUM),
]
if BUILDER_IMAGE_STR in image:
cmd[10] = "--http.api=admin,engine,net,eth,web3,debug,flashbots"
cmd[14] = "--ws.api=admin,engine,net,eth,web3,debug,flashbots"
if len(existing_el_clients) > 0:
cmd.append(
"--bootnodes="
+ ",".join(
[
ctx.enode
for ctx in existing_el_clients[: package_io.MAX_ENODE_ENTRIES]
]
)
)
if len(extra_params) > 0:
# this is a repeated<proto type>, we convert it into Starlark
cmd.extend([param for param in extra_params])
cmd_str = " ".join(cmd)
subcommand_strs = [
init_datadir_cmd_str,
copy_keys_into_keystore_cmd_str,
create_passwords_file_cmd_str,
cmd_str,
]
command_str = " && ".join(subcommand_strs)
return (
ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=[command_str],
files={
GENESIS_DATA_MOUNT_DIRPATH: genesis_data.files_artifact_uuid,
PREFUNDED_KEYS_MOUNT_DIRPATH: prefunded_geth_keys_artifact_uuid,
},
entrypoint=ENTRYPOINT_ARGS,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=el_min_cpu,
max_cpu=el_max_cpu,
min_memory=el_min_mem,
max_memory=el_max_mem,
env_vars=extra_env_vars,
),
jwt_secret_json_filepath_on_client,
)
def new_geth_launcher(
network_id,
el_genesis_data,
prefunded_geth_keys_artifact_uuid,
prefunded_account_info,
genesis_validators_root="",
electra_fork_epoch=None,
):
return struct(
network_id=network_id,
el_genesis_data=el_genesis_data,
prefunded_account_info=prefunded_account_info,
prefunded_geth_keys_artifact_uuid=prefunded_geth_keys_artifact_uuid,
genesis_validators_root=genesis_validators_root,
electra_fork_epoch=electra_fork_epoch,
)
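# --- Illustrative usage sketch; argument values are assumptions (PRE_FUNDED_ACCOUNTS is
# the assumed name of the account list in genesis_constants). ---
#
#   geth_launcher = new_geth_launcher(
#       network_id, el_genesis_data, prefunded_geth_keys_artifact_uuid,
#       genesis_constants.PRE_FUNDED_ACCOUNTS,
#   )
#   el_context = launch(plan, geth_launcher, "el-client-0-geth",
#       "ethereum/client-go:latest", "", "info", [], 0, 0, 0, 0, [], {})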
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
el_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_client_context.star"
)
el_admin_node_info = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_admin_node_info.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/execution-data"
KZG_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/genesis/output/trusted_setup.txt"
GENESIS_DATA_MOUNT_DIRPATH = "/genesis"
METRICS_PATH = "/metrics"
RPC_PORT_NUM = 8545
WS_PORT_NUM = 8546
DISCOVERY_PORT_NUM = 30303
ENGINE_RPC_PORT_NUM = 8551
METRICS_PORT_NUM = 9001
# The min/max CPU/memory that the execution node can use
EXECUTION_MIN_CPU = 100
EXECUTION_MAX_CPU = 1000
EXECUTION_MIN_MEMORY = 512
EXECUTION_MAX_MEMORY = 2048
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
RPC_PORT_ID: shared_utils.new_port_spec(RPC_PORT_NUM, shared_utils.TCP_PROTOCOL),
WS_PORT_ID: shared_utils.new_port_spec(WS_PORT_NUM, shared_utils.TCP_PROTOCOL),
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
ENGINE_RPC_PORT_ID: shared_utils.new_port_spec(
ENGINE_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL
),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
NETHERMIND_LOG_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
existing_el_clients,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, NETHERMIND_LOG_LEVELS
)
el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU
el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU
el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY
el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY
config, jwt_secret_json_filepath_on_client = get_config(
launcher.el_genesis_data,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
jwt_secret = shared_utils.read_file_from_service(
plan, service_name, jwt_secret_json_filepath_on_client
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
nethermind_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, metrics_url
)
return el_client_context.new_el_client_context(
"nethermind",
"", # nethermind has no ENR in the eth2-merge-kurtosis-module either
# Nethermind node info endpoint doesn't return ENR field https://docs.nethermind.io/nethermind/ethereum-client/json-rpc/admin
enode,
service.ip_address,
RPC_PORT_NUM,
WS_PORT_NUM,
ENGINE_RPC_PORT_NUM,
jwt_secret,
service_name,
[nethermind_metrics_info],
)
def get_config(
genesis_data,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
genesis_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH,
genesis_data.nethermind_genesis_json_relative_filepath,
)
jwt_secret_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.jwt_secret_relative_filepath
)
cmd = [
"--log=" + log_level,
"--datadir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--Init.ChainSpecPath=" + genesis_json_filepath_on_client,
"--Init.WebSocketsEnabled=true",
"--Init.KzgSetupPath=" + KZG_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--config=none.cfg",
"--JsonRpc.Enabled=true",
"--JsonRpc.EnabledModules=net,eth,consensus,subscribe,web3,admin",
"--JsonRpc.Host=0.0.0.0",
"--JsonRpc.Port={0}".format(RPC_PORT_NUM),
"--JsonRpc.WebSocketsPort={0}".format(WS_PORT_NUM),
"--JsonRpc.EngineHost=0.0.0.0",
"--JsonRpc.EnginePort={0}".format(ENGINE_RPC_PORT_NUM),
"--Network.ExternalIp={0}".format(PRIVATE_IP_ADDRESS_PLACEHOLDER),
"--Network.DiscoveryPort={0}".format(DISCOVERY_PORT_NUM),
"--Network.P2PPort={0}".format(DISCOVERY_PORT_NUM),
"--JsonRpc.JwtSecretFile={0}".format(jwt_secret_json_filepath_on_client),
"--Network.OnlyStaticPeers=true",
"--Metrics.Enabled=true",
"--Metrics.ExposePort={0}".format(METRICS_PORT_NUM),
]
if len(existing_el_clients) > 0:
cmd.append(
"--Network.StaticPeers="
+ ",".join(
[
ctx.enode
for ctx in existing_el_clients[: package_io.MAX_ENODE_ENTRIES]
]
)
)
if len(extra_params) > 0:
# this is a repeated<proto type>, we convert it into Starlark
cmd.extend([param for param in extra_params])
return (
ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=cmd,
files={
GENESIS_DATA_MOUNT_DIRPATH: genesis_data.files_artifact_uuid,
},
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=el_min_cpu,
max_cpu=el_max_cpu,
min_memory=el_min_mem,
max_memory=el_max_mem,
env_vars=extra_env_vars,
),
jwt_secret_json_filepath_on_client,
)
def new_nethermind_launcher(el_genesis_data):
return struct(el_genesis_data=el_genesis_data)
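# Design note (observation): this ServiceConfig sets no entrypoint, so the cmd flag list
# is passed straight to the Nethermind image's default entrypoint, while most other EL
# launchers in this package wrap their command in "sh -c".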
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
el_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_client_context.star"
)
el_admin_node_info = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_admin_node_info.star"
)
node_metrics = import_module(
"github.com/kurtosis-tech/eth2-package/src/node_metrics_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
RPC_PORT_NUM = 8545
WS_PORT_NUM = 8546
DISCOVERY_PORT_NUM = 30303
ENGINE_RPC_PORT_NUM = 8551
METRICS_PORT_NUM = 9001
# The min/max CPU/memory that the execution node can use
EXECUTION_MIN_CPU = 100
EXECUTION_MAX_CPU = 1000
EXECUTION_MIN_MEMORY = 256
EXECUTION_MAX_MEMORY = 1024
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
# Paths
METRICS_PATH = "/metrics"
GENESIS_DATA_MOUNT_DIRPATH = "/genesis"
PREFUNDED_KEYS_MOUNT_DIRPATH = "/prefunded-keys"
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/execution-data"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
USED_PORTS = {
RPC_PORT_ID: shared_utils.new_port_spec(RPC_PORT_NUM, shared_utils.TCP_PROTOCOL),
WS_PORT_ID: shared_utils.new_port_spec(WS_PORT_NUM, shared_utils.TCP_PROTOCOL),
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
DISCOVERY_PORT_NUM, shared_utils.UDP_PROTOCOL
),
ENGINE_RPC_PORT_ID: shared_utils.new_port_spec(
ENGINE_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL
),
METRICS_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "v",
package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "vv",
package_io.GLOBAL_CLIENT_LOG_LEVEL.info: "vvv",
package_io.GLOBAL_CLIENT_LOG_LEVEL.debug: "vvvv",
package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "vvvvv",
}
def launch(
plan,
launcher,
service_name,
image,
participant_log_level,
global_log_level,
# If empty then the node will be launched as a bootnode
existing_el_clients,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU
el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU
el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY
el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY
config, jwt_secret_json_filepath_on_client = get_config(
launcher.el_genesis_data,
image,
existing_el_clients,
log_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
jwt_secret = shared_utils.read_file_from_service(
plan, service_name, jwt_secret_json_filepath_on_client
)
metric_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
reth_metrics_info = node_metrics.new_node_metrics_info(
service_name, METRICS_PATH, metric_url
)
return el_client_context.new_el_client_context(
"reth",
"", # reth has no enr
enode,
service.ip_address,
RPC_PORT_NUM,
WS_PORT_NUM,
ENGINE_RPC_PORT_NUM,
jwt_secret,
service_name,
[reth_metrics_info],
)
def get_config(
genesis_data,
image,
existing_el_clients,
verbosity_level,
el_min_cpu,
el_max_cpu,
el_min_mem,
el_max_mem,
extra_params,
extra_env_vars,
):
genesis_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.geth_genesis_json_relative_filepath
)
jwt_secret_json_filepath_on_client = shared_utils.path_join(
GENESIS_DATA_MOUNT_DIRPATH, genesis_data.jwt_secret_relative_filepath
)
init_datadir_cmd_str = "reth init --datadir={0} --chain={1}".format(
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
genesis_json_filepath_on_client,
)
cmd = [
"reth",
"node",
"-{0}".format(verbosity_level),
"--datadir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
"--chain=" + genesis_json_filepath_on_client,
"--http",
"--http.port={0}".format(RPC_PORT_NUM),
"--http.addr=0.0.0.0",
"--http.corsdomain=*",
# WARNING: The admin info endpoint is enabled so that we can easily get ENR/enode, which means
# that users should NOT store private information in these Kurtosis nodes!
"--http.api=admin,net,eth",
"--ws",
"--ws.addr=0.0.0.0",
"--ws.port={0}".format(WS_PORT_NUM),
"--ws.api=net,eth",
"--ws.origins=*",
"--nat=extip:" + PRIVATE_IP_ADDRESS_PLACEHOLDER,
"--authrpc.port={0}".format(ENGINE_RPC_PORT_NUM),
"--authrpc.jwtsecret={0}".format(jwt_secret_json_filepath_on_client),
"--authrpc.addr=0.0.0.0",
"--metrics=0.0.0.0:{0}".format(METRICS_PORT_NUM),
]
if len(existing_el_clients) > 0:
cmd.append(
"--bootnodes="
+ ",".join(
[
ctx.enode
for ctx in existing_el_clients[: package_io.MAX_ENODE_ENTRIES]
]
)
)
if len(extra_params) > 0:
# this is a repeated<proto type>, we convert it into Starlark
cmd.extend([param for param in extra_params])
cmd_str = " ".join(cmd)
subcommand_strs = [
init_datadir_cmd_str,
cmd_str,
]
command_str = " && ".join(subcommand_strs)
return (
ServiceConfig(
image=image,
ports=USED_PORTS,
cmd=[command_str],
files={
GENESIS_DATA_MOUNT_DIRPATH: genesis_data.files_artifact_uuid,
},
entrypoint=ENTRYPOINT_ARGS,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
min_cpu=el_min_cpu,
max_cpu=el_max_cpu,
min_memory=el_min_mem,
max_memory=el_max_mem,
env_vars=extra_env_vars,
),
jwt_secret_json_filepath_on_client,
)
def new_reth_launcher(el_genesis_data):
return struct(
el_genesis_data=el_genesis_data,
)
@@ -45,7 +45,7 @@ def get_config(mev_boost_launcher, network_id, mev_boost_image):
cmd=command,
env_vars={
# TODO(maybe) remove the hardcoding
# This is set to match this file https://github.com/kurtosis-tech/eth2-package/blob/main/static_files/genesis-generation-config/cl/config.yaml.tmpl#L11
# latest-notes
# does this need genesis time to be set as well
"GENESIS_FORK_VERSION": "0x10000038",
...
# this is a dictionary as this will get serialized to JSON
def new_node_metrics_info(name, path, url):
return {"name": name, "path": path, "url": url}
EL_CLIENT_TYPE = struct(
geth="geth",
erigon="erigon",
nethermind="nethermind",
besu="besu",
reth="reth",
ethereumjs="ethereumjs",
)
CL_CLIENT_TYPE = struct(
@@ -17,3 +22,13 @@ GLOBAL_CLIENT_LOG_LEVEL = struct(
debug="debug",
trace="trace",
)
VALIDATING_REWARDS_ACCOUNT = "0x878705ba3f8Bc32FCf7F4CAa1A35E72AF65CF766"
MAX_ENR_ENTRIES = 20
MAX_ENODE_ENTRIES = 20
GENESIS_VALIDATORS_ROOT_PLACEHOLDER = "GENESIS_VALIDATORS_ROOT_PLACEHOLDER"
DEFAULT_SNOOPER_IMAGE = "parithoshj/json_rpc_snoop:v1.0.0-x86"
ARCHIVE_MODE = True
DEFAULT_EL_IMAGES = {
"geth": "ethereum/client-go:latest",
"erigon": "thorax/erigon:devel",
"nethermind": "nethermind/nethermind:latest",
"besu": "hyperledger/besu:develop",
"reth": "ghcr.io/paradigmxyz/reth",
"ethereumjs": "ethpandaops/ethereumjs:master",
}
DEFAULT_CL_IMAGES = {
"lighthouse": "sigp/lighthouse:latest",
"teku": "consensys/teku:latest",
"nimbus": "statusim/nimbus-eth2:multiarch-latest",
"prysm": "prysmaticlabs/prysm-beacon-chain:latest,prysmaticlabs/prysm-validator:latest",
"lodestar": "chainsafe/lodestar:latest",
}
NETHERMIND_NODE_NAME = "nethermind"
NIMBUS_NODE_NAME = "nimbus"
# Placeholder value for the deneb fork epoch if electra is being run
# TODO: This is a hack, and should be removed once electra is rebased on deneb
HIGH_DENEB_VALUE_FORK_VERKLE = 20000
# MEV Params
FLASHBOTS_MEV_BOOST_PORT = 18550
MEV_BOOST_SERVICE_NAME_PREFIX = "mev-boost-"
DEFAULT_ADDITIONAL_SERVICES = [
"tx_spammer",
"blob_spammer",
@@ -19,25 +45,16 @@ ATTR_TO_BE_SKIPPED_AT_ROOT = (
)
package_io_constants = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
genesis_constants = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/genesis_constants/genesis_constants.star"
)
def parse_input(plan, input_args):
result = parse_network_params(input_args)
# add default eth2 input params
result["mev_type"] = None
@@ -66,78 +83,292 @@ def parse_input(plan, input_args):
result["tx_spammer_params"] = get_default_tx_spammer_params() result["tx_spammer_params"] = get_default_tx_spammer_params()
return ( return struct(
struct( participants=[
participants=[ struct(
struct( el_client_type=participant["el_client_type"],
el_client_type=participant["el_client_type"], el_client_image=participant["el_client_image"],
el_client_image=participant["el_client_image"], el_client_log_level=participant["el_client_log_level"],
el_client_log_level=participant["el_client_log_level"], cl_client_type=participant["cl_client_type"],
cl_client_type=participant["cl_client_type"], cl_client_image=participant["cl_client_image"],
cl_client_image=participant["cl_client_image"], cl_client_log_level=participant["cl_client_log_level"],
cl_client_log_level=participant["cl_client_log_level"], beacon_extra_params=participant["beacon_extra_params"],
beacon_extra_params=participant["beacon_extra_params"], el_extra_params=participant["el_extra_params"],
el_extra_params=participant["el_extra_params"], el_extra_env_vars=participant["el_extra_env_vars"],
validator_extra_params=participant["validator_extra_params"], validator_extra_params=participant["validator_extra_params"],
builder_network_params=participant["builder_network_params"], builder_network_params=participant["builder_network_params"],
validator_count=participant["validator_count"], el_min_cpu=participant["el_min_cpu"],
) el_max_cpu=participant["el_max_cpu"],
for participant in result["participants"] el_min_mem=participant["el_min_mem"],
el_max_mem=participant["el_max_mem"],
bn_min_cpu=participant["bn_min_cpu"],
bn_max_cpu=participant["bn_max_cpu"],
bn_min_mem=participant["bn_min_mem"],
bn_max_mem=participant["bn_max_mem"],
v_min_cpu=participant["v_min_cpu"],
v_max_cpu=participant["v_max_cpu"],
v_min_mem=participant["v_min_mem"],
v_max_mem=participant["v_max_mem"],
validator_count=participant["validator_count"],
snooper_enabled=participant["snooper_enabled"],
count=participant["count"],
)
for participant in result["participants"]
],
network_params=struct(
preregistered_validator_keys_mnemonic=result["network_params"][
"preregistered_validator_keys_mnemonic"
], ],
network_params=struct( num_validator_keys_per_node=result["network_params"][
preregistered_validator_keys_mnemonic=result["network_params"][ "num_validator_keys_per_node"
"preregistered_validator_keys_mnemonic" ],
], network_id=result["network_params"]["network_id"],
num_validator_keys_per_node=result["network_params"][ deposit_contract_address=result["network_params"][
"num_validator_keys_per_node" "deposit_contract_address"
], ],
network_id=result["network_params"]["network_id"], seconds_per_slot=result["network_params"]["seconds_per_slot"],
deposit_contract_address=result["network_params"][ slots_per_epoch=result["network_params"]["slots_per_epoch"],
"deposit_contract_address" genesis_delay=result["network_params"]["genesis_delay"],
], capella_fork_epoch=result["network_params"]["capella_fork_epoch"],
seconds_per_slot=result["network_params"]["seconds_per_slot"], deneb_fork_epoch=result["network_params"]["deneb_fork_epoch"],
slots_per_epoch=result["network_params"]["slots_per_epoch"], electra_fork_epoch=result["network_params"]["electra_fork_epoch"],
genesis_delay=result["network_params"]["genesis_delay"],
capella_fork_epoch=result["network_params"]["capella_fork_epoch"],
deneb_fork_epoch=result["network_params"]["deneb_fork_epoch"],
electra_fork_epoch=result["network_params"]["electra_fork_epoch"],
),
mev_params=struct(
mev_relay_image=result["mev_params"]["mev_relay_image"],
mev_builder_image=result["mev_params"]["mev_builder_image"],
mev_boost_image=result["mev_params"]["mev_boost_image"],
mev_relay_api_extra_args=result["mev_params"][
"mev_relay_api_extra_args"
],
mev_relay_housekeeper_extra_args=result["mev_params"][
"mev_relay_housekeeper_extra_args"
],
mev_relay_website_extra_args=result["mev_params"][
"mev_relay_website_extra_args"
],
mev_builder_extra_args=result["mev_params"]["mev_builder_extra_args"],
mev_flood_image=result["mev_params"]["mev_flood_image"],
mev_flood_extra_args=result["mev_params"]["mev_flood_extra_args"],
mev_flood_seconds_per_bundle=result["mev_params"][
"mev_flood_seconds_per_bundle"
],
launch_custom_flood=result["mev_params"]["launch_custom_flood"],
),
tx_spammer_params=struct(
tx_spammer_extra_args=result["tx_spammer_params"][
"tx_spammer_extra_args"
],
),
launch_additional_services=result["launch_additional_services"],
additional_services=result["additional_services"],
wait_for_finalization=result["wait_for_finalization"],
global_client_log_level=result["global_client_log_level"],
mev_type=result["mev_type"],
), ),
result, mev_params=struct(
mev_relay_image=result["mev_params"]["mev_relay_image"],
mev_builder_image=result["mev_params"]["mev_builder_image"],
mev_boost_image=result["mev_params"]["mev_boost_image"],
mev_relay_api_extra_args=result["mev_params"]["mev_relay_api_extra_args"],
mev_relay_housekeeper_extra_args=result["mev_params"][
"mev_relay_housekeeper_extra_args"
],
mev_relay_website_extra_args=result["mev_params"][
"mev_relay_website_extra_args"
],
mev_builder_extra_args=result["mev_params"]["mev_builder_extra_args"],
mev_flood_image=result["mev_params"]["mev_flood_image"],
mev_flood_extra_args=result["mev_params"]["mev_flood_extra_args"],
mev_flood_seconds_per_bundle=result["mev_params"][
"mev_flood_seconds_per_bundle"
],
launch_custom_flood=result["mev_params"]["launch_custom_flood"],
),
tx_spammer_params=struct(
tx_spammer_extra_args=result["tx_spammer_params"]["tx_spammer_extra_args"],
),
launch_additional_services=result["launch_additional_services"],
additional_services=result["additional_services"],
wait_for_finalization=result["wait_for_finalization"],
global_client_log_level=result["global_client_log_level"],
mev_type=result["mev_type"],
snooper_enabled=result["snooper_enabled"],
parallel_keystore_generation=result["parallel_keystore_generation"],
) )
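# Illustrative usage (a sketch; the exact defaults live in default_input_args() and the
# DEFAULT_*_IMAGES maps above):
#
#   parsed = parse_input(plan, {"participants": [{"el_client_type": "geth", "cl_client_type": "teku"}]})
#   parsed.participants[0].el_client_image  # -> "ethereum/client-go:latest"
#   parsed.participants[0].count            # -> 1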
def parse_network_params(input_args):
result = default_input_args()
for attr in input_args:
value = input_args[attr]
        # if it's inserted, we use the inserted value
if attr not in ATTR_TO_BE_SKIPPED_AT_ROOT and attr in input_args:
result[attr] = value
elif attr == "network_params":
for sub_attr in input_args["network_params"]:
sub_value = input_args["network_params"][sub_attr]
result["network_params"][sub_attr] = sub_value
elif attr == "participants":
participants = []
for participant in input_args["participants"]:
new_participant = default_participant()
for sub_attr, sub_value in participant.items():
# if the value is set in input we set it in participant
new_participant[sub_attr] = sub_value
for _ in range(0, new_participant["count"]):
participant_copy = deep_copy_participant(new_participant)
participants.append(participant_copy)
result["participants"] = participants
total_participant_count = 0
actual_num_validators = 0
# validation of the above defaults
for index, participant in enumerate(result["participants"]):
el_client_type = participant["el_client_type"]
cl_client_type = participant["cl_client_type"]
        if cl_client_type == NIMBUS_NODE_NAME and (
result["network_params"]["seconds_per_slot"] < 12
):
fail("nimbus can't be run with slot times below 12 seconds")
el_image = participant["el_client_image"]
if el_image == "":
default_image = DEFAULT_EL_IMAGES.get(el_client_type, "")
if default_image == "":
fail(
"{0} received an empty image name and we don't have a default for it".format(
el_client_type
)
)
participant["el_client_image"] = default_image
cl_image = participant["cl_client_image"]
if cl_image == "":
default_image = DEFAULT_CL_IMAGES.get(cl_client_type, "")
if default_image == "":
fail(
"{0} received an empty image name and we don't have a default for it".format(
cl_client_type
)
)
participant["cl_client_image"] = default_image
snooper_enabled = participant["snooper_enabled"]
if snooper_enabled == False:
default_snooper_enabled = result["snooper_enabled"]
if default_snooper_enabled:
participant["snooper_enabled"] = default_snooper_enabled
validator_count = participant["validator_count"]
if validator_count == None:
default_validator_count = result["network_params"][
"num_validator_keys_per_node"
]
participant["validator_count"] = default_validator_count
actual_num_validators += participant["validator_count"]
beacon_extra_params = participant.get("beacon_extra_params", [])
participant["beacon_extra_params"] = beacon_extra_params
validator_extra_params = participant.get("validator_extra_params", [])
participant["validator_extra_params"] = validator_extra_params
total_participant_count += participant["count"]
if result["network_params"]["network_id"].strip() == "":
fail("network_id is empty or spaces it needs to be of non zero length")
if result["network_params"]["deposit_contract_address"].strip() == "":
fail(
"deposit_contract_address is empty or spaces it needs to be of non zero length"
)
if result["network_params"]["preregistered_validator_keys_mnemonic"].strip() == "":
fail(
"preregistered_validator_keys_mnemonic is empty or spaces it needs to be of non zero length"
)
if result["network_params"]["slots_per_epoch"] == 0:
fail("slots_per_epoch is 0 needs to be > 0 ")
if result["network_params"]["seconds_per_slot"] == 0:
fail("seconds_per_slot is 0 needs to be > 0 ")
if result["network_params"]["genesis_delay"] == 0:
fail("genesis_delay is 0 needs to be > 0 ")
if result["network_params"]["deneb_fork_epoch"] == 0:
fail("deneb_fork_epoch is 0 needs to be > 0 ")
if result["network_params"]["electra_fork_epoch"] != None:
# if electra is defined, then deneb needs to be set very high
result["network_params"]["deneb_fork_epoch"] = HIGH_DENEB_VALUE_FORK_VERKLE
if (
result["network_params"]["capella_fork_epoch"] > 0
and result["network_params"]["electra_fork_epoch"] != None
):
fail("electra can only happen with capella genesis not bellatrix")
    required_num_validators = 2 * result["network_params"]["slots_per_epoch"]
    if required_num_validators > actual_num_validators:
        fail(
            "required_num_validators - {0} is greater than actual_num_validators - {1}".format(
                required_num_validators, actual_num_validators
            )
        )
return result
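# Illustrative example (not part of the package): with hypothetical input args like
#
#   {
#       "participants": [{"el_client_type": "geth", "cl_client_type": "lighthouse", "count": 2}],
#       "network_params": {"seconds_per_slot": 12},
#   }
#
# parse_network_params expands the single entry with count=2 into two identical
# participant dicts, fills every unset participant and network field from the
# defaults below, and then validates the resulting configuration.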
def get_client_log_level_or_default(
participant_log_level, global_log_level, client_log_levels
):
log_level = participant_log_level
if log_level == "":
log_level = client_log_levels.get(global_log_level, "")
if log_level == "":
fail(
"No participant log level defined, and the client log level has no mapping for global log level '{0}'".format(
global_log_level
)
)
return log_level
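# Example (hypothetical mapping): with participant_log_level="" and
# global_log_level="info", a client whose mapping is {"info": "INFO"} resolves
# to "INFO"; an unmapped global level makes this fail loudly rather than guess.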
def default_input_args():
network_params = default_network_params()
participants = [default_participant()]
return {
"participants": participants,
"network_params": network_params,
"wait_for_finalization": False,
"global_client_log_level": "info",
"snooper_enabled": False,
"parallel_keystore_generation": False,
}
def default_network_params():
# this is temporary till we get params working
return {
"preregistered_validator_keys_mnemonic": "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete",
"num_validator_keys_per_node": 64,
"network_id": "3151908",
"deposit_contract_address": "0x4242424242424242424242424242424242424242",
"seconds_per_slot": 12,
"slots_per_epoch": 32,
"genesis_delay": 120,
"capella_fork_epoch": 0,
"deneb_fork_epoch": 500,
"electra_fork_epoch": None,
}
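# With these defaults, the single default participant contributes
# 1 * 64 = 64 validator keys, which exactly meets the
# 2 * slots_per_epoch = 2 * 32 = 64 minimum enforced in parse_network_params.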
def default_participant():
return {
"el_client_type": "geth",
"el_client_image": "",
"el_client_log_level": "",
"cl_client_type": "lighthouse",
"cl_client_image": "",
"cl_client_log_level": "",
"beacon_extra_params": [],
"el_extra_params": [],
"el_extra_env_vars": {},
"validator_extra_params": [],
"builder_network_params": None,
"el_min_cpu": 0,
"el_max_cpu": 0,
"el_min_mem": 0,
"el_max_mem": 0,
"bn_min_cpu": 0,
"bn_max_cpu": 0,
"bn_min_mem": 0,
"bn_max_mem": 0,
"v_min_cpu": 0,
"v_max_cpu": 0,
"v_min_mem": 0,
"v_max_mem": 0,
"validator_count": None,
"snooper_enabled": False,
"count": 1,
}
def get_default_mev_params():
    return {
        "mev_relay_image": "flashbots/mev-boost-relay:latest",
...@@ -194,45 +425,53 @@ def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port, mev_typ
    num_participants = len(parsed_arguments_dict["participants"])
    if mev_type == "full":
        mev_participant = default_participant()
        mev_participant.update(
            {
                # TODO replace with actual when flashbots/builder is published
                "el_client_image": parsed_arguments_dict["mev_params"][
                    "mev_builder_image"
                ],
                # THIS overrides the beacon image
                "cl_client_image": "sigp/lighthouse",
                "beacon_extra_params": [
                    "--always-prepare-payload",
                    "--prepare-payload-lookahead",
                    "12000",
                ],
                # TODO(maybe) make parts of this more passable like the mev-relay-endpoint & forks
                "el_extra_params": [
                    "--builder",
                    "--builder.remote_relay_endpoint=http://mev-relay-api:9062",
                    "--builder.beacon_endpoints=http://cl-{0}-lighthouse-geth:4000".format(
                        num_participants + 1
                    ),
                    "--builder.bellatrix_fork_version=0x30000038",
                    "--builder.genesis_fork_version=0x10000038",
                    "--builder.genesis_validators_root={0}".format(
                        package_io_constants.GENESIS_VALIDATORS_ROOT_PLACEHOLDER
                    ),
                    '--miner.extradata="Illuminate Dmocratize Dstribute"',
                    "--builder.algotype=greedy",
                ]
                + parsed_arguments_dict["mev_params"]["mev_builder_extra_args"],
                "el_extra_env_vars": {
                    "BUILDER_TX_SIGNING_KEY": "0x"
                    + genesis_constants.PRE_FUNDED_ACCOUNTS[0].private_key
                },
                "validator_count": 0,
            }
        )
        parsed_arguments_dict["participants"].append(mev_participant)
    return parsed_arguments_dict
def deep_copy_participant(participant):
part = {}
for k, v in participant.items():
if type(v) == type([]):
part[k] = list(v)
else:
part[k] = v
return part
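# Note: this copy is one level deep - top-level lists are duplicated so each
# expanded participant can be mutated independently, but nested dicts (e.g.
# el_extra_env_vars) remain shared by reference.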
def new_participant(
el_client_type,
cl_client_type,
el_client_context,
cl_client_context,
snooper_engine_context,
):
return struct(
el_client_type=el_client_type,
cl_client_type=cl_client_type,
el_client_context=el_client_context,
cl_client_context=cl_client_context,
snooper_engine_context=snooper_engine_context,
)
cl_validator_keystores = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/cl_validator_keystores/cl_validator_keystore_generator.star"
)
el_genesis_data_generator = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/el_genesis/el_genesis_data_generator.star"
)
cl_genesis_data_generator = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/cl_genesis/cl_genesis_data_generator.star"
)
static_files = import_module(
"github.com/kurtosis-tech/eth2-package/static_files/static_files.star"
)
geth = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/geth/geth_launcher.star"
)
besu = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/besu/besu_launcher.star"
)
erigon = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/erigon/erigon_launcher.star"
)
nethermind = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/nethermind/nethermind_launcher.star"
)
reth = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/reth/reth_launcher.star"
)
ethereumjs = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/ethereumjs/ethereumjs_launcher.star"
)
lighthouse = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/lighthouse/lighthouse_launcher.star"
)
lodestar = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/lodestar/lodestar_launcher.star"
)
nimbus = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/nimbus/nimbus_launcher.star"
)
prysm = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/prysm/prysm_launcher.star"
)
teku = import_module(
"github.com/kurtosis-tech/eth2-package/src/cl/teku/teku_launcher.star"
)
snooper = import_module(
"github.com/kurtosis-tech/eth2-package/src/snooper/snooper_engine_launcher.star"
)
genesis_constants = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/genesis_constants/genesis_constants.star"
)
participant_module = import_module(
"github.com/kurtosis-tech/eth2-package/src/participant.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
BOOT_PARTICIPANT_INDEX = 0
# The time that the CL genesis generation step takes to complete, based off what we've seen
# This is in seconds
CL_GENESIS_DATA_GENERATION_TIME = 5
# Each CL node takes about this time to start up and start processing blocks, so when we create the CL
# genesis data we need to set the genesis timestamp in the future so that nodes don't miss important slots
# (e.g. Altair fork)
# TODO(old) Make this client-specific (currently this is Nimbus)
# This is in seconds
CL_NODE_STARTUP_TIME = 5
CL_CLIENT_CONTEXT_BOOTNODE = None
GLOBAL_INDEX_ZFILL = {
"zfill_values": [(1, 1), (2, 10), (3, 100), (4, 1000), (5, 10000)]
}
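# e.g. fewer than 10 participants -> width 1 ("1"), fewer than 100 -> width 2
# ("01"), fewer than 1000 -> width 3 ("001"), as computed by zfill_calculator below.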
def launch_participant_network(
plan,
participants,
network_params,
global_log_level,
parallel_keystore_generation=False,
):
num_participants = len(participants)
plan.print("Generating cl validator key stores")
cl_validator_data = None
if not parallel_keystore_generation:
cl_validator_data = cl_validator_keystores.generate_cl_validator_keystores(
plan, network_params.preregistered_validator_keys_mnemonic, participants
)
else:
cl_validator_data = (
            cl_validator_keystores.generate_cl_validator_keystores_in_parallel(
plan, network_params.preregistered_validator_keys_mnemonic, participants
)
)
plan.print(json.indent(json.encode(cl_validator_data)))
# We need to send the same genesis time to both the EL and the CL to ensure that timestamp based forking works as expected
final_genesis_timestamp = get_final_genesis_timestamp(
plan, CL_GENESIS_DATA_GENERATION_TIME + num_participants * CL_NODE_STARTUP_TIME
)
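    # e.g. a 4-participant network pads genesis by 5 + 4 * 5 = 25 seconds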
plan.print("Generating EL data")
el_genesis_generation_config_template = read_file(
static_files.EL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH
)
el_genesis_data = el_genesis_data_generator.generate_el_genesis_data(
plan,
el_genesis_generation_config_template,
final_genesis_timestamp,
network_params.network_id,
network_params.deposit_contract_address,
network_params.genesis_delay,
network_params.seconds_per_slot,
network_params.capella_fork_epoch,
network_params.deneb_fork_epoch,
network_params.electra_fork_epoch,
)
plan.print(json.indent(json.encode(el_genesis_data)))
plan.print("Uploading GETH prefunded keys")
geth_prefunded_keys_artifact_name = plan.upload_files(
static_files.GETH_PREFUNDED_KEYS_DIRPATH, name="geth-prefunded-keys"
)
plan.print("Uploaded GETH files succesfully")
plan.print("Generating CL data")
genesis_generation_config_yml_template = read_file(
static_files.CL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH
)
genesis_generation_mnemonics_yml_template = read_file(
static_files.CL_GENESIS_GENERATION_MNEMONICS_TEMPLATE_FILEPATH
)
total_number_of_validator_keys = 0
for participant in participants:
total_number_of_validator_keys += participant.validator_count
cl_genesis_data = cl_genesis_data_generator.generate_cl_genesis_data(
plan,
genesis_generation_config_yml_template,
genesis_generation_mnemonics_yml_template,
el_genesis_data,
final_genesis_timestamp,
network_params.network_id,
network_params.deposit_contract_address,
network_params.seconds_per_slot,
network_params.preregistered_validator_keys_mnemonic,
total_number_of_validator_keys,
network_params.genesis_delay,
network_params.capella_fork_epoch,
network_params.deneb_fork_epoch,
network_params.electra_fork_epoch,
)
plan.print(json.indent(json.encode(cl_genesis_data)))
plan.print("Generated CL genesis data succesfully, launching EL & CL Participants")
genesis_validators_root = cl_genesis_data.genesis_validators_root
el_launchers = {
package_io.EL_CLIENT_TYPE.geth: {
"launcher": geth.new_geth_launcher(
network_params.network_id,
el_genesis_data,
geth_prefunded_keys_artifact_name,
genesis_constants.PRE_FUNDED_ACCOUNTS,
genesis_validators_root,
network_params.electra_fork_epoch,
),
"launch_method": geth.launch,
},
package_io.EL_CLIENT_TYPE.besu: {
"launcher": besu.new_besu_launcher(
network_params.network_id, el_genesis_data
),
"launch_method": besu.launch,
},
package_io.EL_CLIENT_TYPE.erigon: {
"launcher": erigon.new_erigon_launcher(
network_params.network_id, el_genesis_data
),
"launch_method": erigon.launch,
},
package_io.EL_CLIENT_TYPE.nethermind: {
"launcher": nethermind.new_nethermind_launcher(el_genesis_data),
"launch_method": nethermind.launch,
},
package_io.EL_CLIENT_TYPE.reth: {
"launcher": reth.new_reth_launcher(el_genesis_data),
"launch_method": reth.launch,
},
package_io.EL_CLIENT_TYPE.ethereumjs: {
"launcher": ethereumjs.new_ethereumjs_launcher(el_genesis_data),
"launch_method": ethereumjs.launch,
},
}
all_el_client_contexts = []
for index, participant in enumerate(participants):
cl_client_type = participant.cl_client_type
el_client_type = participant.el_client_type
if el_client_type not in el_launchers:
fail(
"Unsupported launcher '{0}', need one of '{1}'".format(
el_client_type, ",".join([el.name for el in el_launchers.keys()])
)
)
el_launcher, launch_method = (
el_launchers[el_client_type]["launcher"],
el_launchers[el_client_type]["launch_method"],
)
# Zero-pad the index using the calculated zfill value
index_str = zfill_custom(index + 1, zfill_calculator(participants))
el_service_name = "el-{0}-{1}-{2}".format(
index_str, el_client_type, cl_client_type
)
el_client_context = launch_method(
plan,
el_launcher,
el_service_name,
participant.el_client_image,
participant.el_client_log_level,
global_log_level,
all_el_client_contexts,
participant.el_min_cpu,
participant.el_max_cpu,
participant.el_min_mem,
participant.el_max_mem,
participant.el_extra_params,
participant.el_extra_env_vars,
)
all_el_client_contexts.append(el_client_context)
plan.print("Succesfully added {0} EL participants".format(num_participants))
plan.print("Launching CL network")
cl_launchers = {
package_io.CL_CLIENT_TYPE.lighthouse: {
"launcher": lighthouse.new_lighthouse_launcher(cl_genesis_data),
"launch_method": lighthouse.launch,
},
package_io.CL_CLIENT_TYPE.lodestar: {
"launcher": lodestar.new_lodestar_launcher(cl_genesis_data),
"launch_method": lodestar.launch,
},
package_io.CL_CLIENT_TYPE.nimbus: {
"launcher": nimbus.new_nimbus_launcher(cl_genesis_data),
"launch_method": nimbus.launch,
},
package_io.CL_CLIENT_TYPE.prysm: {
"launcher": prysm.new_prysm_launcher(
cl_genesis_data,
cl_validator_data.prysm_password_relative_filepath,
cl_validator_data.prysm_password_artifact_uuid,
),
"launch_method": prysm.launch,
},
package_io.CL_CLIENT_TYPE.teku: {
"launcher": teku.new_teku_launcher(cl_genesis_data),
"launch_method": teku.launch,
},
}
all_snooper_engine_contexts = []
all_cl_client_contexts = []
preregistered_validator_keys_for_nodes = cl_validator_data.per_node_keystores
for index, participant in enumerate(participants):
cl_client_type = participant.cl_client_type
el_client_type = participant.el_client_type
if cl_client_type not in cl_launchers:
fail(
"Unsupported launcher '{0}', need one of '{1}'".format(
cl_client_type, ",".join([cl.name for cl in cl_launchers.keys()])
)
)
cl_launcher, launch_method = (
cl_launchers[cl_client_type]["launcher"],
cl_launchers[cl_client_type]["launch_method"],
)
index_str = zfill_custom(index + 1, zfill_calculator(participants))
cl_service_name = "cl-{0}-{1}-{2}".format(
index_str, cl_client_type, el_client_type
)
new_cl_node_validator_keystores = None
if participant.validator_count != 0:
new_cl_node_validator_keystores = preregistered_validator_keys_for_nodes[
index
]
el_client_context = all_el_client_contexts[index]
cl_client_context = None
snooper_engine_context = None
if participant.snooper_enabled:
snooper_service_name = "snooper-{0}-{1}-{2}".format(
index_str, cl_client_type, el_client_type
)
snooper_image = package_io.DEFAULT_SNOOPER_IMAGE
snooper_engine_context = snooper.launch(
plan,
snooper_service_name,
snooper_image,
el_client_context,
)
            plan.print(
                "Successfully added {0} snooper participants".format(
                    snooper_engine_context
                )
            )
all_snooper_engine_contexts.append(snooper_engine_context)
if index == 0:
cl_client_context = launch_method(
plan,
cl_launcher,
cl_service_name,
participant.cl_client_image,
participant.cl_client_log_level,
global_log_level,
CL_CLIENT_CONTEXT_BOOTNODE,
el_client_context,
new_cl_node_validator_keystores,
participant.bn_min_cpu,
participant.bn_max_cpu,
participant.bn_min_mem,
participant.bn_max_mem,
participant.v_min_cpu,
participant.v_max_cpu,
participant.v_min_mem,
participant.v_max_mem,
participant.snooper_enabled,
snooper_engine_context,
participant.beacon_extra_params,
participant.validator_extra_params,
)
else:
boot_cl_client_ctx = all_cl_client_contexts
cl_client_context = launch_method(
plan,
cl_launcher,
cl_service_name,
participant.cl_client_image,
participant.cl_client_log_level,
global_log_level,
boot_cl_client_ctx,
el_client_context,
new_cl_node_validator_keystores,
participant.bn_min_cpu,
participant.bn_max_cpu,
participant.bn_min_mem,
participant.bn_max_mem,
participant.v_min_cpu,
participant.v_max_cpu,
participant.v_min_mem,
participant.v_max_mem,
participant.snooper_enabled,
snooper_engine_context,
participant.beacon_extra_params,
participant.validator_extra_params,
)
all_cl_client_contexts.append(cl_client_context)
plan.print("Succesfully added {0} CL participants".format(num_participants))
all_participants = []
for index, participant in enumerate(participants):
el_client_type = participant.el_client_type
cl_client_type = participant.cl_client_type
el_client_context = all_el_client_contexts[index]
cl_client_context = all_cl_client_contexts[index]
if participant.snooper_enabled:
snooper_engine_context = all_snooper_engine_contexts[index]
participant_entry = participant_module.new_participant(
el_client_type,
cl_client_type,
el_client_context,
cl_client_context,
snooper_engine_context,
)
all_participants.append(participant_entry)
return all_participants, final_genesis_timestamp, genesis_validators_root
def zfill_calculator(participants):
    for zf, par in GLOBAL_INDEX_ZFILL["zfill_values"]:
        if len(participants) < par:
            return zf - 1
def zfill_custom(value, width):
return ("0" * (width - len(str(value)))) + str(value)
# this is a python procedure so that Kurtosis can do idempotent runs
# time.now() runs every time, bringing non-determinism
# note that the timestamp it returns is a string
def get_final_genesis_timestamp(plan, padding):
result = plan.run_python(
run="""
import time
import sys
padding = int(sys.argv[1])
print(int(time.time()+padding), end="")
""",
args=[str(padding)],
)
return result.output
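# Example: with padding=25 and time.time()=1700000000, the task prints
# "1700000025" - callers must treat this as an opaque string, not an int.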
def new_cl_genesis_data(
files_artifact_uuid,
jwt_secret_rel_filepath,
config_yml_rel_filepath,
genesis_ssz_rel_filepath,
genesis_validators_root="",
):
return struct(
files_artifact_uuid=files_artifact_uuid,
jwt_secret_rel_filepath=jwt_secret_rel_filepath,
config_yml_rel_filepath=config_yml_rel_filepath,
genesis_ssz_rel_filepath=genesis_ssz_rel_filepath,
genesis_validators_root=genesis_validators_root,
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
cl_genesis_data = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/cl_genesis/cl_genesis_data.star"
)
prelaunch_data_generator_launcher = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/prelaunch_data_generator_launcher/prelaunch_data_generator_launcher.star"
)
# Needed to copy the JWT secret and the EL genesis.json file
EL_GENESIS_DIRPATH_ON_GENERATOR = "/el-genesis"
CONFIG_DIRPATH_ON_GENERATOR = "/config"
GENESIS_CONFIG_YML_FILENAME = "config.yaml" # WARNING: Do not change this! It will get copied to the CL genesis data, and the CL clients are hardcoded to look for this filename
MNEMONICS_YML_FILENAME = "mnemonics.yaml"
OUTPUT_DIRPATH_ON_GENERATOR = "/output"
TRANCHES_DIRANME = "tranches"
GENESIS_STATE_FILENAME = "genesis.ssz"
DEPLOY_BLOCK_FILENAME = "deploy_block.txt"
DEPOSIT_CONTRACT_BLOCK_HASH_FILENAME = "deposit_contract_block_hash.txt"
DEPOSIT_CONTRACT_FILENAME = "deposit_contract.txt"
PARSED_BEACON_STATE_FILENAME = "parsedBeaconState.json"
TRUSTED_SETUP_FILENAME = "trusted_setup.txt"
# Generation constants
CL_GENESIS_GENERATION_BINARY_FILEPATH_ON_CONTAINER = (
"/usr/local/bin/eth2-testnet-genesis"
)
CL_PARSED_BEACON_STATE_GENERATOR_BINARY = "/usr/local/bin/zcli"
DEPLOY_BLOCK = "0"
ETH1_BLOCK = "0x0000000000000000000000000000000000000000000000000000000000000000"
def generate_cl_genesis_data(
plan,
genesis_generation_config_yml_template,
genesis_generation_mnemonics_yml_template,
el_genesis_data,
genesis_unix_timestamp,
network_id,
deposit_contract_address,
seconds_per_slot,
preregistered_validator_keys_mnemonic,
total_num_validator_keys_to_preregister,
genesis_delay,
capella_fork_epoch,
deneb_fork_epoch,
electra_fork_epoch,
):
template_data = new_cl_genesis_config_template_data(
network_id,
seconds_per_slot,
genesis_unix_timestamp,
total_num_validator_keys_to_preregister,
preregistered_validator_keys_mnemonic,
deposit_contract_address,
genesis_delay,
capella_fork_epoch,
deneb_fork_epoch,
electra_fork_epoch,
)
genesis_generation_mnemonics_template_and_data = shared_utils.new_template_and_data(
genesis_generation_mnemonics_yml_template, template_data
)
genesis_generation_config_template_and_data = shared_utils.new_template_and_data(
genesis_generation_config_yml_template, template_data
)
template_and_data_by_rel_dest_filepath = {}
template_and_data_by_rel_dest_filepath[
MNEMONICS_YML_FILENAME
] = genesis_generation_mnemonics_template_and_data
template_and_data_by_rel_dest_filepath[
GENESIS_CONFIG_YML_FILENAME
] = genesis_generation_config_template_and_data
genesis_generation_config_artifact_name = plan.render_templates(
template_and_data_by_rel_dest_filepath, "genesis-generation-config-cl"
)
# TODO(old) Make this the actual data generator - comment copied from the original module
launcher_service_name = (
prelaunch_data_generator_launcher.launch_prelaunch_data_generator(
plan,
{
CONFIG_DIRPATH_ON_GENERATOR: genesis_generation_config_artifact_name,
EL_GENESIS_DIRPATH_ON_GENERATOR: el_genesis_data.files_artifact_uuid,
},
"cl-genesis-data",
capella_fork_epoch,
electra_fork_epoch,
)
)
all_dirpaths_to_create_on_generator = [
CONFIG_DIRPATH_ON_GENERATOR,
OUTPUT_DIRPATH_ON_GENERATOR,
]
all_dirpath_creation_commands = []
for dirpath_to_create_on_generator in all_dirpaths_to_create_on_generator:
all_dirpath_creation_commands.append(
"mkdir -p {0}".format(dirpath_to_create_on_generator)
)
dir_creation_cmd = [
"bash",
"-c",
(" && ").join(all_dirpath_creation_commands),
]
dir_creation_cmd_result = plan.exec(
recipe=ExecRecipe(command=dir_creation_cmd), service_name=launcher_service_name
)
# Copy files to output
    all_filepaths_to_copy_to_output_directory = [
shared_utils.path_join(
CONFIG_DIRPATH_ON_GENERATOR, GENESIS_CONFIG_YML_FILENAME
),
shared_utils.path_join(CONFIG_DIRPATH_ON_GENERATOR, MNEMONICS_YML_FILENAME),
shared_utils.path_join(
EL_GENESIS_DIRPATH_ON_GENERATOR,
el_genesis_data.jwt_secret_relative_filepath,
),
]
    for filepath_on_generator in all_filepaths_to_copy_to_output_directory:
cmd = [
"cp",
filepath_on_generator,
OUTPUT_DIRPATH_ON_GENERATOR,
]
cmd_result = plan.exec(
recipe=ExecRecipe(command=cmd), service_name=launcher_service_name
)
# Generate files that need dynamic content
content_to_write_to_output_filename = {
DEPLOY_BLOCK: DEPLOY_BLOCK_FILENAME,
deposit_contract_address: DEPOSIT_CONTRACT_FILENAME,
}
for content, destFilename in content_to_write_to_output_filename.items():
destFilepath = shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, destFilename)
cmd = [
"sh",
"-c",
"echo {0} > {1}".format(
content,
destFilepath,
),
]
cmd_result = plan.exec(
recipe=ExecRecipe(command=cmd), service_name=launcher_service_name
)
cl_genesis_generation_cmd = [
CL_GENESIS_GENERATION_BINARY_FILEPATH_ON_CONTAINER,
"merge" if capella_fork_epoch > 0 else "capella",
"--config",
shared_utils.path_join(
OUTPUT_DIRPATH_ON_GENERATOR, GENESIS_CONFIG_YML_FILENAME
),
"--mnemonics",
shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, MNEMONICS_YML_FILENAME),
"--eth1-config",
shared_utils.path_join(
EL_GENESIS_DIRPATH_ON_GENERATOR,
el_genesis_data.geth_genesis_json_relative_filepath,
),
"--tranches-dir",
        shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, TRANCHES_DIRNAME),
"--state-output",
shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, GENESIS_STATE_FILENAME),
]
plan.exec(
recipe=ExecRecipe(command=cl_genesis_generation_cmd),
service_name=launcher_service_name,
)
parsed_beacon_state_file_generation = [
CL_PARSED_BEACON_STATE_GENERATOR_BINARY,
"pretty",
"bellatrix" if capella_fork_epoch > 0 else "capella",
"BeaconState",
shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, GENESIS_STATE_FILENAME),
">",
shared_utils.path_join(
OUTPUT_DIRPATH_ON_GENERATOR, PARSED_BEACON_STATE_FILENAME
),
]
parsed_beacon_state_file_generation_str = " ".join(
parsed_beacon_state_file_generation
)
plan.exec(
recipe=ExecRecipe(
command=["/bin/sh", "-c", parsed_beacon_state_file_generation_str]
),
service_name=launcher_service_name,
)
# Generate the deposit contract block hash file
deposit_block_hash_generation_cmd = [
"jq",
"-r",
"'.eth1_data.block_hash'",
shared_utils.path_join(
OUTPUT_DIRPATH_ON_GENERATOR, PARSED_BEACON_STATE_FILENAME
),
">",
shared_utils.path_join(
OUTPUT_DIRPATH_ON_GENERATOR, DEPOSIT_CONTRACT_BLOCK_HASH_FILENAME
),
]
deposit_block_hash_file_generation_str = " ".join(deposit_block_hash_generation_cmd)
plan.exec(
recipe=ExecRecipe(
command=["/bin/sh", "-c", deposit_block_hash_file_generation_str]
),
service_name=launcher_service_name,
)
genesis_validators_root = get_genesis_validators_root(
plan,
launcher_service_name,
shared_utils.path_join(
OUTPUT_DIRPATH_ON_GENERATOR, PARSED_BEACON_STATE_FILENAME
),
)
shared_utils.download_trusted_setup(
plan,
launcher_service_name,
shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, TRUSTED_SETUP_FILENAME),
)
cl_genesis_data_artifact_name = plan.store_service_files(
launcher_service_name, OUTPUT_DIRPATH_ON_GENERATOR, name="cl-genesis-data"
)
jwt_secret_rel_filepath = shared_utils.path_join(
shared_utils.path_base(OUTPUT_DIRPATH_ON_GENERATOR),
shared_utils.path_base(el_genesis_data.jwt_secret_relative_filepath),
)
genesis_config_rel_filepath = shared_utils.path_join(
shared_utils.path_base(OUTPUT_DIRPATH_ON_GENERATOR),
GENESIS_CONFIG_YML_FILENAME,
)
genesis_ssz_rel_filepath = shared_utils.path_join(
shared_utils.path_base(OUTPUT_DIRPATH_ON_GENERATOR),
GENESIS_STATE_FILENAME,
)
result = cl_genesis_data.new_cl_genesis_data(
cl_genesis_data_artifact_name,
jwt_secret_rel_filepath,
genesis_config_rel_filepath,
genesis_ssz_rel_filepath,
genesis_validators_root,
)
# TODO(gyani) remove the container when the job is done - this is a resource leaker
return result
def new_cl_genesis_config_template_data(
network_id,
seconds_per_slot,
unix_timestamp,
num_validator_keys_to_preregister,
preregistered_validator_keys_mnemonic,
deposit_contract_address,
genesis_delay,
capella_fork_epoch,
deneb_fork_epoch,
electra_fork_epoch,
):
return {
"NetworkId": network_id,
"SecondsPerSlot": seconds_per_slot,
"UnixTimestamp": unix_timestamp,
"NumValidatorKeysToPreregister": num_validator_keys_to_preregister,
"PreregisteredValidatorKeysMnemonic": preregistered_validator_keys_mnemonic,
"DepositContractAddress": deposit_contract_address,
"GenesisDelay": genesis_delay,
"CapellaForkEpoch": capella_fork_epoch,
"DenebForkEpoch": deneb_fork_epoch,
"ElectraForkEpoch": electra_fork_epoch,
}
def get_genesis_validators_root(plan, service_name, beacon_state_file_path):
response = plan.exec(
service_name=service_name,
recipe=ExecRecipe(
command=[
"/bin/sh",
"-c",
"cat {0} | grep genesis_validators_root | grep -oE '0x[0-9a-fA-F]+' | tr -d '\n'".format(
beacon_state_file_path
),
],
),
)
return response["output"]
prelaunch_data_generator_launcher = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/prelaunch_data_generator_launcher/prelaunch_data_generator_launcher.star"
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
keystore_files_module = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/cl_validator_keystores/keystore_files.star"
)
keystores_result = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/cl_validator_keystores/generate_keystores_result.star"
)
NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR = "/node-{0}-keystores"
# Prysm keystores are encrypted with a password
PRYSM_PASSWORD = "password"
PRYSM_PASSWORD_FILEPATH_ON_GENERATOR = "/tmp/prysm-password.txt"
KEYSTORES_GENERATION_TOOL_NAME = "eth2-val-tools"
SUCCESSFUL_EXEC_CMD_EXIT_CODE = 0
RAW_KEYS_DIRNAME = "keys"
RAW_SECRETS_DIRNAME = "secrets"
NIMBUS_KEYS_DIRNAME = "nimbus-keys"
PRYSM_DIRNAME = "prysm"
TEKU_KEYS_DIRNAME = "teku-keys"
TEKU_SECRETS_DIRNAME = "teku-secrets"
KEYSTORE_GENERATION_FINISHED_FILEPATH_FORMAT = "/tmp/keystores_generated-{0}-{1}"
# Generates keystores for the given number of nodes from the given mnemonic,
# where each keystore contains approximately num_keys / num_nodes keys
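# Example: two participants with validator_count=64 each are generated from the
# same mnemonic with --source-min/--source-max ranges [0, 64) and [64, 128).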
def generate_cl_validator_keystores(plan, mnemonic, participants):
service_name = prelaunch_data_generator_launcher.launch_prelaunch_data_generator(
plan,
{},
"cl-validator-keystore",
capella_fork_epoch=0, # It doesn't matter how the validator keys are generated
electra_fork_epoch=None, # It doesn't matter how the validator keys are generated
)
all_output_dirpaths = []
all_sub_command_strs = []
running_total_validator_count = 0
for idx, participant in enumerate(participants):
output_dirpath = NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR.format(idx)
if participant.validator_count == 0:
all_output_dirpaths.append(output_dirpath)
continue
start_index = running_total_validator_count
running_total_validator_count += participant.validator_count
stop_index = start_index + participant.validator_count
generate_keystores_cmd = '{0} keystores --insecure --prysm-pass {1} --out-loc {2} --source-mnemonic "{3}" --source-min {4} --source-max {5}'.format(
KEYSTORES_GENERATION_TOOL_NAME,
PRYSM_PASSWORD,
output_dirpath,
mnemonic,
start_index,
stop_index,
)
all_sub_command_strs.append(generate_keystores_cmd)
all_output_dirpaths.append(output_dirpath)
command_str = " && ".join(all_sub_command_strs)
command_result = plan.exec(
recipe=ExecRecipe(command=["sh", "-c", command_str]), service_name=service_name
)
plan.verify(command_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
# Store outputs into files artifacts
keystore_files = []
running_total_validator_count = 0
for idx, participant in enumerate(participants):
output_dirpath = all_output_dirpaths[idx]
if participant.validator_count == 0:
keystore_files.append(None)
continue
padded_idx = zfill_custom(idx + 1, len(str(len(participants))))
keystore_start_index = running_total_validator_count
running_total_validator_count += participant.validator_count
keystore_stop_index = (keystore_start_index + participant.validator_count) - 1
artifact_name = "{0}-{1}-{2}-{3}-{4}".format(
padded_idx,
participant.cl_client_type,
participant.el_client_type,
keystore_start_index,
keystore_stop_index,
)
artifact_name = plan.store_service_files(
service_name, output_dirpath, name=artifact_name
)
# This is necessary because the way Kurtosis currently implements artifact-storing is
base_dirname_in_artifact = shared_utils.path_base(output_dirpath)
to_add = keystore_files_module.new_keystore_files(
artifact_name,
shared_utils.path_join(base_dirname_in_artifact, RAW_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, RAW_SECRETS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, NIMBUS_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, PRYSM_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_SECRETS_DIRNAME),
)
keystore_files.append(to_add)
write_prysm_password_file_cmd = [
"sh",
"-c",
"echo '{0}' > {1}".format(
PRYSM_PASSWORD,
PRYSM_PASSWORD_FILEPATH_ON_GENERATOR,
),
]
write_prysm_password_file_cmd_result = plan.exec(
recipe=ExecRecipe(command=write_prysm_password_file_cmd),
service_name=service_name,
)
plan.verify(
write_prysm_password_file_cmd_result["code"],
"==",
SUCCESSFUL_EXEC_CMD_EXIT_CODE,
)
prysm_password_artifact_name = plan.store_service_files(
service_name, PRYSM_PASSWORD_FILEPATH_ON_GENERATOR, name="prysm-password"
)
result = keystores_result.new_generate_keystores_result(
prysm_password_artifact_name,
shared_utils.path_base(PRYSM_PASSWORD_FILEPATH_ON_GENERATOR),
keystore_files,
)
# TODO replace this with a task so that we can get the container removed
# we are removing a call to remove_service for idempotency
return result
# this is like the one above but runs things in parallel - for large networks that run on k8s or on very large Docker hosts
def generate_cl_validator_keystores_in_parallel(plan, mnemonic, participants):
    service_names = prelaunch_data_generator_launcher.launch_prelaunch_data_generator_parallel(
        plan,
        {},
        ["cl-validator-keystore-" + str(idx) for idx in range(0, len(participants))],
        capella_fork_epoch=0,  # It doesn't matter how the validator keys are generated
        electra_fork_epoch=None,  # It doesn't matter how the validator keys are generated
    )
all_output_dirpaths = []
all_generation_commands = []
finished_files_to_verify = []
running_total_validator_count = 0
for idx, participant in enumerate(participants):
output_dirpath = NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR.format(idx)
if participant.validator_count == 0:
all_output_dirpaths.append(output_dirpath)
continue
        start_index = running_total_validator_count
        running_total_validator_count += participant.validator_count
        stop_index = start_index + participant.validator_count
generation_finished_filepath = (
KEYSTORE_GENERATION_FINISHED_FILEPATH_FORMAT.format(start_index, stop_index)
)
finished_files_to_verify.append(generation_finished_filepath)
generate_keystores_cmd = 'nohup {0} keystores --insecure --prysm-pass {1} --out-loc {2} --source-mnemonic "{3}" --source-min {4} --source-max {5} && touch {6}'.format(
KEYSTORES_GENERATION_TOOL_NAME,
PRYSM_PASSWORD,
output_dirpath,
mnemonic,
start_index,
stop_index,
generation_finished_filepath,
)
all_generation_commands.append(generate_keystores_cmd)
all_output_dirpaths.append(output_dirpath)
# spin up all jobs
for idx in range(0, len(participants)):
service_name = service_names[idx]
generation_command = all_generation_commands[idx]
plan.exec(
recipe=ExecRecipe(
command=["sh", "-c", generation_command + " >/dev/null 2>&1 &"]
),
service_name=service_name,
)
# verify that files got created
for idx in range(0, len(participants)):
service_name = service_names[idx]
output_dirpath = all_output_dirpaths[idx]
generation_finished_filepath = finished_files_to_verify[idx]
verificaiton_command = ["ls", generation_finished_filepath]
plan.wait(
recipe=ExecRecipe(command=verificaiton_command),
service_name=service_name,
field="code",
assertion="==",
target_value=0,
timeout="5m",
interval="0.5s",
)
# Store outputs into files artifacts
keystore_files = []
running_total_validator_count = 0
for idx, participant in enumerate(participants):
if participant.validator_count == 0:
keystore_files.append(None)
continue
service_name = service_names[idx]
output_dirpath = all_output_dirpaths[idx]
        padded_idx = zfill_custom(idx + 1, len(str(len(participants))))
        keystore_start_index = running_total_validator_count
        running_total_validator_count += participant.validator_count
keystore_stop_index = (keystore_start_index + participant.validator_count) - 1
artifact_name = "{0}-{1}-{2}-{3}-{4}".format(
padded_idx,
participant.cl_client_type,
participant.el_client_type,
keystore_start_index,
keystore_stop_index,
)
artifact_name = plan.store_service_files(
service_name, output_dirpath, name=artifact_name
)
# This is necessary because the way Kurtosis currently implements artifact-storing is
base_dirname_in_artifact = shared_utils.path_base(output_dirpath)
to_add = keystore_files_module.new_keystore_files(
artifact_name,
shared_utils.path_join(base_dirname_in_artifact, RAW_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, RAW_SECRETS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, NIMBUS_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, PRYSM_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_SECRETS_DIRNAME),
)
keystore_files.append(to_add)
write_prysm_password_file_cmd = [
"sh",
"-c",
"echo '{0}' > {1}".format(
PRYSM_PASSWORD,
PRYSM_PASSWORD_FILEPATH_ON_GENERATOR,
),
]
write_prysm_password_file_cmd_result = plan.exec(
recipe=ExecRecipe(command=write_prysm_password_file_cmd),
service_name=service_names[0],
)
plan.verify(
write_prysm_password_file_cmd_result["code"],
"==",
SUCCESSFUL_EXEC_CMD_EXIT_CODE,
)
prysm_password_artifact_name = plan.store_service_files(
service_names[0], PRYSM_PASSWORD_FILEPATH_ON_GENERATOR, name="prysm-password"
)
result = keystores_result.new_generate_keystores_result(
prysm_password_artifact_name,
shared_utils.path_base(PRYSM_PASSWORD_FILEPATH_ON_GENERATOR),
keystore_files,
)
    # we don't clean up the containers as it's a costly operation
return result
def zfill_custom(value, width):
return ("0" * (width - len(str(value)))) + str(value)
# Package object containing information about the keystores that were generated for validators
# during genesis creation
def new_generate_keystores_result(
prysm_password_artifact_uuid, prysm_password_relative_filepath, per_node_keystores
):
return struct(
# Files artifact UUID where the Prysm password is stored
prysm_password_artifact_uuid=prysm_password_artifact_uuid,
# Relative to root of files artifact
prysm_password_relative_filepath=prysm_password_relative_filepath,
# Contains keystores-per-client-type for each node in the network
per_node_keystores=per_node_keystores,
)
# One of these will be created per node we're trying to start
def new_keystore_files(
files_artifact_uuid,
raw_keys_relative_dirpath,
raw_secrets_relative_dirpath,
nimbus_keys_relative_dirpath,
prysm_relative_dirpath,
teku_keys_relative_dirpath,
teku_secrets_relative_dirpath,
):
return struct(
files_artifact_uuid=files_artifact_uuid,
# ------------ All directories below are relative to the root of the files artifact ----------------
raw_keys_relative_dirpath=raw_keys_relative_dirpath,
raw_secrets_relative_dirpath=raw_secrets_relative_dirpath,
nimbus_keys_relative_dirpath=nimbus_keys_relative_dirpath,
prysm_relative_dirpath=prysm_relative_dirpath,
teku_keys_relative_dirpath=teku_keys_relative_dirpath,
teku_secrets_relative_dirpath=teku_secrets_relative_dirpath,
)
def new_el_genesis_data(
files_artifact_uuid,
jwt_secret_relative_filepath,
geth_genesis_json_relative_filepath,
erigon_genesis_json_relative_filepath,
nethermind_genesis_json_relative_filepath,
besu_genesis_json_relative_filepath,
):
return struct(
files_artifact_uuid=files_artifact_uuid,
jwt_secret_relative_filepath=jwt_secret_relative_filepath,
geth_genesis_json_relative_filepath=geth_genesis_json_relative_filepath,
erigon_genesis_json_relative_filepath=erigon_genesis_json_relative_filepath,
nethermind_genesis_json_relative_filepath=nethermind_genesis_json_relative_filepath,
besu_genesis_json_relative_filepath=besu_genesis_json_relative_filepath,
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
el_genesis = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/el_genesis/el_genesis_data.star"
)
prelaunch_data_generator_launcher = import_module(
"github.com/kurtosis-tech/eth2-package/src/prelaunch_data_generator/prelaunch_data_generator_launcher/prelaunch_data_generator_launcher.star"
)
CONFIG_DIRPATH_ON_GENERATOR = "/config"
GENESIS_CONFIG_FILENAME = "genesis-config.yaml"
OUTPUT_DIRPATH_ON_GENERATOR = "/output"
GETH_GENESIS_FILENAME = "genesis.json"
ERIGON_GENESIS_FILENAME = "erigon.json"
NETHERMIND_GENESIS_FILENAME = "nethermind.json"
BESU_GENESIS_FILENAME = "besu.json"
TRUSTED_SETUP_FILENAME = "trusted_setup.txt"
JWT_SECRET_FILENAME = "jwtsecret"
SUCCESSFUL_EXEC_CMD_EXIT_CODE = 0
# Mapping of output genesis filename -> generator to create the file
all_genesis_generation_cmds = {
GETH_GENESIS_FILENAME: lambda genesis_config_filepath_on_generator: [
"python3",
"/apps/el-gen/genesis_geth.py",
genesis_config_filepath_on_generator,
],
ERIGON_GENESIS_FILENAME: lambda genesis_config_filepath_on_generator: [
"python3",
"/apps/el-gen/genesis_geth.py",
genesis_config_filepath_on_generator,
],
NETHERMIND_GENESIS_FILENAME: lambda genesis_config_filepath_on_generator: [
"python3",
"/apps/el-gen/genesis_chainspec.py",
genesis_config_filepath_on_generator,
],
BESU_GENESIS_FILENAME: lambda genesis_config_filepath_on_generator: [
"python3",
"/apps/el-gen/genesis_besu.py",
genesis_config_filepath_on_generator,
],
}
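# Example: all_genesis_generation_cmds[BESU_GENESIS_FILENAME]("/config/genesis-config.yaml")
# yields ["python3", "/apps/el-gen/genesis_besu.py", "/config/genesis-config.yaml"],
# which generate_el_genesis_data then pipes into /output/besu.json.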
def generate_el_genesis_data(
plan,
genesis_generation_config_template,
genesis_unix_timestamp,
network_id,
deposit_contract_address,
genesis_delay,
seconds_per_slot,
capella_fork_epoch,
deneb_fork_epoch,
electra_fork_epoch,
):
template_data = genesis_generation_config_template_data(
network_id,
deposit_contract_address,
genesis_unix_timestamp,
genesis_delay,
seconds_per_slot,
capella_fork_epoch,
deneb_fork_epoch,
electra_fork_epoch,
)
genesis_config_file_template_and_data = shared_utils.new_template_and_data(
genesis_generation_config_template, template_data
)
template_and_data_by_rel_dest_filepath = {}
template_and_data_by_rel_dest_filepath[
GENESIS_CONFIG_FILENAME
] = genesis_config_file_template_and_data
genesis_generation_config_artifact_name = plan.render_templates(
template_and_data_by_rel_dest_filepath, name="genesis-generation-config-el"
)
# TODO(old) Make this the actual data generator - comment copied from the original module
launcher_service_name = (
prelaunch_data_generator_launcher.launch_prelaunch_data_generator(
plan,
{
CONFIG_DIRPATH_ON_GENERATOR: genesis_generation_config_artifact_name,
},
"el-genesis-data",
capella_fork_epoch,
electra_fork_epoch,
)
)
all_dirpaths_to_create_on_generator = [
CONFIG_DIRPATH_ON_GENERATOR,
OUTPUT_DIRPATH_ON_GENERATOR,
]
all_dirpath_creation_commands = []
for dirpath_to_create_on_generator in all_dirpaths_to_create_on_generator:
all_dirpath_creation_commands.append(
"mkdir -p {0}".format(dirpath_to_create_on_generator),
)
dir_creation_cmd = [
"bash",
"-c",
" && ".join(all_dirpath_creation_commands),
]
dir_creation_cmd_result = plan.exec(
recipe=ExecRecipe(command=dir_creation_cmd), service_name=launcher_service_name
)
plan.verify(dir_creation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
genesis_config_filepath_on_generator = shared_utils.path_join(
CONFIG_DIRPATH_ON_GENERATOR, GENESIS_CONFIG_FILENAME
)
genesis_filename_to_relative_filepath_in_artifact = {}
for output_filename, generation_cmd in all_genesis_generation_cmds.items():
cmd = generation_cmd(genesis_config_filepath_on_generator)
output_filepath_on_generator = shared_utils.path_join(
OUTPUT_DIRPATH_ON_GENERATOR, output_filename
)
cmd.append(">")
cmd.append(output_filepath_on_generator)
cmd_to_execute = ["bash", "-c", " ".join(cmd)]
cmd_to_execute_result = plan.exec(
recipe=ExecRecipe(command=cmd_to_execute),
service_name=launcher_service_name,
)
plan.verify(cmd_to_execute_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
genesis_filename_to_relative_filepath_in_artifact[
output_filename
] = shared_utils.path_join(
shared_utils.path_base(OUTPUT_DIRPATH_ON_GENERATOR),
output_filename,
)
shared_utils.download_trusted_setup(
plan,
launcher_service_name,
shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, TRUSTED_SETUP_FILENAME),
)
jwt_secret_filepath_on_generator = shared_utils.path_join(
OUTPUT_DIRPATH_ON_GENERATOR, JWT_SECRET_FILENAME
)
jwt_secret_generation_cmd = [
"bash",
"-c",
"openssl rand -hex 32 | tr -d \"\\n\" | sed 's/^/0x/' > {0}".format(
jwt_secret_filepath_on_generator,
),
]
jwt_secret_generation_cmd_result = plan.exec(
recipe=ExecRecipe(command=jwt_secret_generation_cmd),
service_name=launcher_service_name,
)
plan.verify(
jwt_secret_generation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE
)
el_genesis_data_artifact_name = plan.store_service_files(
launcher_service_name, OUTPUT_DIRPATH_ON_GENERATOR, name="el-genesis-data"
)
result = el_genesis.new_el_genesis_data(
el_genesis_data_artifact_name,
shared_utils.path_join(
shared_utils.path_base(OUTPUT_DIRPATH_ON_GENERATOR), JWT_SECRET_FILENAME
),
genesis_filename_to_relative_filepath_in_artifact[GETH_GENESIS_FILENAME],
genesis_filename_to_relative_filepath_in_artifact[ERIGON_GENESIS_FILENAME],
genesis_filename_to_relative_filepath_in_artifact[NETHERMIND_GENESIS_FILENAME],
genesis_filename_to_relative_filepath_in_artifact[BESU_GENESIS_FILENAME],
)
# TODO(gyani) remove the container when the job is done - this is a resource leaker
return result
def genesis_generation_config_template_data(
network_id,
deposit_contract_address,
unix_timestamp,
genesis_delay,
seconds_per_slot,
capella_fork_epoch,
deneb_fork_epoch,
electra_fork_epoch,
):
return {
"NetworkId": network_id,
"DepositContractAddress": deposit_contract_address,
"UnixTimestamp": unix_timestamp,
"GenesisDelay": genesis_delay,
"SecondsPerSlot": seconds_per_slot,
"CapellaForkEpoch": capella_fork_epoch,
"DenebForkEpoch": deneb_fork_epoch,
"ElectraForkEpoch": electra_fork_epoch,
}
def new_prefunded_account(address, private_key):
return struct(address=address, private_key=private_key)
# This information was generated by:
# 1) Installing Wagyu: https://github.com/AleoHQ/wagyu
# 2) Running `wagyu ethereum import-hd -m MNEMONIC_FROM_GENESIS -d PREFUNDED_ACCOUNT_DERIVATION_PATH`
# 3) Copying the outputted information
PRE_FUNDED_ACCOUNTS = [
# UTC--2021-12-22T19-14-08.590377700Z--878705ba3f8bc32fcf7f4caa1a35e72af65cf766
# m/44'/60'/0'/0/0
new_prefunded_account(
"0x878705ba3f8Bc32FCf7F4CAa1A35E72AF65CF766",
"ef5177cd0b6b21c87db5a0bf35d4084a8a57a9d6a064f86d51ac85f2b873a4e2",
),
# UTC--2021-12-22T19-14-13.423928600Z--4e9a3d9d1cd2a2b2371b8b3f489ae72259886f1a
# m/44'/60'/0'/0/1
new_prefunded_account(
"0x4E9A3d9D1cd2A2b2371b8b3F489aE72259886f1A",
"48fcc39ae27a0e8bf0274021ae6ebd8fe4a0e12623d61464c498900b28feb567",
),
# UTC--2021-12-22T19-14-16.977667900Z--df8466f277964bb7a0ffd819403302c34dcd530a
# m/44'/60'/0'/0/2
new_prefunded_account(
"0xdF8466f277964Bb7a0FFD819403302C34DCD530A",
"7988b3a148716ff800414935b305436493e1f25237a2a03e5eebc343735e2f31",
),
# UTC--2021-12-22T19-14-21.531351400Z--5c613e39fc0ad91afda24587e6f52192d75fba50
# m/44'/60'/0'/0/3
new_prefunded_account(
"0x5c613e39Fc0Ad91AfDA24587e6f52192d75FBA50",
"b3c409b6b0b3aa5e65ab2dc1930534608239a478106acf6f3d9178e9f9b00b35",
),
# UTC--2021-12-22T19-14-25.369306000Z--375ae6107f8cc4cf34842b71c6f746a362ad8eac
# m/44'/60'/0'/0/4
new_prefunded_account(
"0x375ae6107f8cC4cF34842B71C6F746a362Ad8EAc",
"df9bb6de5d3dc59595bcaa676397d837ff49441d211878c024eabda2cd067c9f",
),
# UTC--2021-12-22T19-14-33.473095100Z--1f6298457c5d76270325b724da5d1953923a6b88
# m/44'/60'/0'/0/5
new_prefunded_account(
"0x1F6298457C5d76270325B724Da5d1953923a6B88",
"7da08f856b5956d40a72968f93396f6acff17193f013e8053f6fbb6c08c194d6",
),
# Use geth account import to generate the key inside genesis-prefunded-keys/geth
# use password "password"
# UTC--2023-05-19T11-17-22.403583626Z--fe08e6f330f4e5e624ad759625b71b2e52594feb
# m/44'/60'/0'/0/6
new_prefunded_account(
"0xFE08e6f330F4E5E624Ad759625B71B2e52594FEB",
"17fdf89989597e8bcac6cdfcc001b6241c64cece2c358ffc818b72ca70f5e1ce",
),
]
SERVICE_NAME_PREFIX = "prelaunch-data-generator-"
# We use Docker exec commands to run the commands we need, so we override the default entrypoint
ENTRYPOINT_ARGS = [
"sleep",
"999999",
]
# Launches a prelaunch data generator image, used by the various genesis-generation steps
def launch_prelaunch_data_generator(
plan,
files_artifact_mountpoints,
service_name_suffix,
capella_fork_epoch,
electra_fork_epoch,
):
config = get_config(
files_artifact_mountpoints, capella_fork_epoch, electra_fork_epoch
)
service_name = "{0}{1}".format(
SERVICE_NAME_PREFIX,
service_name_suffix,
)
plan.add_service(service_name, config)
return service_name
def launch_prelaunch_data_generator_parallel(
plan,
files_artifact_mountpoints,
service_name_suffixes,
capella_fork_epoch,
electra_fork_epoch,
):
config = get_config(
files_artifact_mountpoints, capella_fork_epoch, electra_fork_epoch
)
service_names = [
"{0}{1}".format(
SERVICE_NAME_PREFIX,
service_name_suffix,
)
for service_name_suffix in service_name_suffixes
]
services_to_add = {service_name: config for service_name in service_names}
plan.add_services(services_to_add)
return service_names
def get_config(files_artifact_mountpoints, capella_fork_epoch, electra_fork_epoch):
if capella_fork_epoch > 0 and electra_fork_epoch == None: # we are running capella
img = "ethpandaops/ethereum-genesis-generator:1.3.4"
elif (
capella_fork_epoch == 0 and electra_fork_epoch == None
): # we are running dencun
img = "ethpandaops/ethereum-genesis-generator:2.0.0-rc.6"
else: # we are running electra
img = "ethpandaops/ethereum-genesis-generator:3.0.0-rc.2"
return ServiceConfig(
image=img,
entrypoint=ENTRYPOINT_ARGS,
files=files_artifact_mountpoints,
)
...@@ -46,3 +46,27 @@ def new_port_spec(
        application_protocol=application_protocol,
        wait=wait,
    )
def read_file_from_service(plan, service_name, filename):
output = plan.exec(
service_name=service_name,
recipe=ExecRecipe(
command=["/bin/sh", "-c", "cat {} | tr -d '\n'".format(filename)]
),
)
return output["output"]
def download_trusted_setup(plan, service_name, output_filepath):
plan.exec(
service_name=service_name,
recipe=ExecRecipe(
command=[
"wget",
"-O",
output_filepath,
"https://raw.githubusercontent.com/ethereum/c-kzg-4844/main/src/trusted_setup.txt",
]
),
)
def new_snooper_engine_client_context(ip_addr, engine_rpc_port_num):
return struct(
ip_addr=ip_addr,
engine_rpc_port_num=engine_rpc_port_num,
)
shared_utils = import_module(
"github.com/kurtosis-tech/eth2-package/src/shared_utils/shared_utils.star"
)
input_parser = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/parse_input.star"
)
el_client_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_client_context.star"
)
el_admin_node_info = import_module(
"github.com/kurtosis-tech/eth2-package/src/el/el_admin_node_info.star"
)
package_io = import_module(
"github.com/kurtosis-tech/eth2-package/src/package_io/constants.star"
)
snooper_engine_context = import_module(
"github.com/kurtosis-tech/eth2-package/src/snooper/snooper_engine_context.star"
)
SNOOPER_ENGINE_RPC_PORT_NUM = 8561
SNOOPER_ENGINE_RPC_PORT_ID = "http"
SNOOPER_BINARY_COMMAND = "./json_rpc_snoop"
PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER"
SNOOPER_USED_PORTS = {
SNOOPER_ENGINE_RPC_PORT_ID: shared_utils.new_port_spec(
SNOOPER_ENGINE_RPC_PORT_NUM, shared_utils.TCP_PROTOCOL, wait="5s"
),
}
def launch(plan, service_name, image, el_client_context):
snooper_service_name = "{0}".format(service_name)
snooper_config = get_config(image, service_name, el_client_context)
snooper_service = plan.add_service(snooper_service_name, snooper_config)
snooper_http_port = snooper_service.ports[SNOOPER_ENGINE_RPC_PORT_ID]
return snooper_engine_context.new_snooper_engine_client_context(
snooper_service.ip_address, SNOOPER_ENGINE_RPC_PORT_NUM
)
def get_config(image, service_name, el_client_context):
engine_rpc_port_num = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.engine_rpc_port_num,
)
cmd = [
SNOOPER_BINARY_COMMAND,
"-b=0.0.0.0",
"-p={0}".format(SNOOPER_ENGINE_RPC_PORT_NUM),
"{0}".format(engine_rpc_port_num),
]
return ServiceConfig(
image=image,
ports=SNOOPER_USED_PORTS,
cmd=cmd,
private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER,
)
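# Example resulting command (illustrative EL address; 8551 is a typical engine
# RPC port): ./json_rpc_snoop -b=0.0.0.0 -p=8561 http://172.16.0.5:8551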
# Extends the mainnet preset
# This *could* be 'minimal', but it's not recommended to use because not every client supports 'minimal'
PRESET_BASE: 'mainnet'
CONFIG_NAME: testnet # needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis
# Genesis
# ---------------------------------------------------------------
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: {{ .NumValidatorKeysToPreregister }}
MIN_GENESIS_TIME: {{ .UnixTimestamp }}
GENESIS_FORK_VERSION: 0x10000038
GENESIS_DELAY: {{ .GenesisDelay }}
# Forking
# ---------------------------------------------------------------
# Some forks are disabled for now:
# - These may be re-assigned to another fork-version later
# - Temporarily set to max uint64 value: 2**64 - 1
# Note: The module runs a merged chain so ALTAIR_FORK_EPOCH, BELLATRIX_FORK_EPOCH and TERMINAL_TOTAL_DIFFICULTY
# are all hardcoded to zero.
# Altair
ALTAIR_FORK_VERSION: 0x20000038
ALTAIR_FORK_EPOCH: 0
# Merge
BELLATRIX_FORK_VERSION: 0x30000038
BELLATRIX_FORK_EPOCH: 0
TERMINAL_TOTAL_DIFFICULTY: 0
# 0x0000...000 indicates that we use TERMINAL_TOTAL_DIFFICULTY instead of a block hash to trigger the merge
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
# NOTE: This is commented out because Nimbus warns us that it's an unrecognized parameter
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Capella
CAPELLA_FORK_VERSION: 0x40000038
CAPELLA_FORK_EPOCH: {{ .CapellaForkEpoch }}
# Deneb
DENEB_FORK_VERSION: 0x50000038
DENEB_FORK_EPOCH: {{ .DenebForkEpoch }}
# Electra
ELECTRA_FORK_VERSION: 0x60000038
ELECTRA_FORK_EPOCH: {{ .ElectraForkEpoch }}
# Time parameters
# ---------------------------------------------------------------
# 12 seconds
SECONDS_PER_SLOT: {{ .SecondsPerSlot }}
# Mainnet default: 2**8 (= 256) epochs (~27 hours); set to 1 here so withdrawals are quick on the test network
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 1
# Mainnet default: 2**8 (= 256) epochs (~27 hours); set to 1 here so voluntary exits are quick on the test network
SHARD_COMMITTEE_PERIOD: 1
# It's very important that SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE covers a generous amount of time;
# otherwise jitter will cause the Beacon nodes to think they're far behind the Eth1 nodes and give up syncing
SECONDS_PER_ETH1_BLOCK: {{ .SecondsPerSlot }}
ETH1_FOLLOW_DISTANCE: 12
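# Worked example: with the 12-second default noted above, beacon nodes only
# consider Eth1 blocks that are at least
# SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE = 12 * 12 = 144 seconds old.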
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# [Deneb:EIP7514] 2**3 (= 8)
MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8
# Fork choice
# ---------------------------------------------------------------
# 40%
PROPOSER_SCORE_BOOST: 40
# Deposit contract
# ---------------------------------------------------------------
DEPOSIT_CHAIN_ID: {{ .NetworkId }}
DEPOSIT_NETWORK_ID: {{ .NetworkId }}
DEPOSIT_CONTRACT_ADDRESS: {{ .DepositContractAddress }}
# Networking
# ---------------------------------------------------------------
# `10 * 2**20` (= 10485760, 10 MiB)
GOSSIP_MAX_SIZE: 10485760
# `2**10` (= 1024)
MAX_REQUEST_BLOCKS: 1024
# `2**8` (= 256)
EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
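# Worked out with the mainnet defaults the formula refers to (not the reduced
# overrides above): 256 + 65536 // 2 = 256 + 32768 = 33024 epochs.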
# `10 * 2**20` (= 10485760, 10 MiB)
MAX_CHUNK_SIZE: 10485760
# 5s
TTFB_TIMEOUT: 5
# 10s
RESP_TIMEOUT: 10
ATTESTATION_PROPAGATION_SLOT_RANGE: 32
# 500ms
MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500
MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
# 2 subnets per node
SUBNETS_PER_NODE: 2
# 2**6 (= 64)
ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
ATTESTATION_SUBNET_PREFIX_BITS: 6
# Deneb
# `2**7` (= 128)
MAX_REQUEST_BLOCKS_DENEB: 128
# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
MAX_REQUEST_BLOB_SIDECARS: 768
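# i.e. 128 * 6 = 768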
# `2**12` (= 4096 epochs, ~18 days)
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
# `6`
BLOB_SIDECAR_SUBNET_COUNT: 6
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6
- mnemonic: "{{ .PreregisteredValidatorKeysMnemonic }}" # a 24 word BIP 39 mnemonic
# Number of validator keys to preregister inside the outputted CL genesis.ssz
count: {{ .NumValidatorKeysToPreregister }}
# NOTE: This mnemonic is only used to derive the EL premine accounts below; it has NO relevance to the mnemonics & validator keys in the CL genesis!
mnemonic: "stumble horn valley travel milk void screen bulk wink hood cup item glove setup wrong toward erase invite saddle this poverty basket index lab"
el_premine:
"m/44'/60'/0'/0/0": 1000000000ETH
"m/44'/60'/0'/0/1": 1000000000ETH
"m/44'/60'/0'/0/2": 1000000000ETH
"m/44'/60'/0'/0/3": 1000000000ETH
"m/44'/60'/0'/0/4": 1000000000ETH
"m/44'/60'/0'/0/5": 1000000000ETH
"m/44'/60'/0'/0/6": 1000000000ETH
el_premine_addrs: {}
chain_id: {{ .NetworkId }}
deposit_contract_address: "{{ .DepositContractAddress }}"
genesis_timestamp: {{ .UnixTimestamp }}
# Note: The module runs a merged chain so terminal_total_difficulty is hardcoded to zero.
terminal_total_difficulty: 0
genesis_delay: {{ .GenesisDelay }}
slot_duration_in_seconds: {{ .SecondsPerSlot }}
capella_fork_epoch: {{ .CapellaForkEpoch }}
deneb_fork_epoch: {{ .DenebForkEpoch }}
electra_fork_epoch: {{ .ElectraForkEpoch }}
{"address":"878705ba3f8bc32fcf7f4caa1a35e72af65cf766","crypto":{"cipher":"aes-128-ctr","ciphertext":"f02daebbf456faf787c5cd61a33ce780857c1ca10b00972aa451f0e9688e4ead","cipherparams":{"iv":"ef1668814155862f0653f15dae845e58"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"55e5ee70d3e882d2f00a073eda252ff01437abf51d7bfa76c06dcc73f7e8f1a3"},"mac":"d8d04625d0769fe286756734f946c78663961b74f0caaff1d768f0d255632f04"},"id":"5fb9083a-a221-412b-b0e0-921e22cc9645","version":3}
{"address":"4e9a3d9d1cd2a2b2371b8b3f489ae72259886f1a","crypto":{"cipher":"aes-128-ctr","ciphertext":"ab715382b1e1f13d927b2e3d22e087a51ccb72b32f9bac71727ec8438ecb6d54","cipherparams":{"iv":"dee12212262986854a0bfd9a5c766ced"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"7112f2fed838981fde1ca00cdd1d95630981bd22f772666fafd0778525cf1cc4"},"mac":"2a75795bb0513859355ce2086668398ee821a2e708edd856f8a85cf638fede9a"},"id":"f849b7fe-aff7-454f-91e1-838de2a8da6b","version":3}
{"address":"df8466f277964bb7a0ffd819403302c34dcd530a","crypto":{"cipher":"aes-128-ctr","ciphertext":"322e59ab95797f2ea9a1162e3f28e2ff7e27415b6e9d7d990a197e09dc9043d7","cipherparams":{"iv":"6179d5971b93a09799ace7371801e371"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"5ac65d7e25467366764d4539e8ae0c78d31dce4002042c06d3971b103c95f2a3"},"mac":"b5d060f2c0a5f8446dd4d718eee66c7eeff3feb90aafa8201fd7501c8f5c180a"},"id":"c5fda7a7-816a-4740-8804-afdc0d410cfb","version":3}
{"address":"5c613e39fc0ad91afda24587e6f52192d75fba50","crypto":{"cipher":"aes-128-ctr","ciphertext":"4ba38c15225d92f2cbac5eafb7cf5ef358332037cd9730dce595a7a4cc3a39d0","cipherparams":{"iv":"6a83dc5b43b0c9c8948905ccc697455a"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"cdfa581366dbc4f566404b0ac64d9d0c1a8a9d24202fe3c428212f020fd6cdbb"},"mac":"95a2e987cbe87d4b532f830df8c5cabc8a7bbd4e70eda672252ed4d8b967e660"},"id":"09d1b784-fb8f-4d25-8720-a683bb0c13ab","version":3}
{"address":"375ae6107f8cc4cf34842b71c6f746a362ad8eac","crypto":{"cipher":"aes-128-ctr","ciphertext":"ab13f28ad41bcb73f5ae982a5bde37ba737515fef848ea365911be3d97682530","cipherparams":{"iv":"fb4d9bfab1d9c5d47e46052ea80275e1"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"56ba7f86161cb419f27c19b5845b6dd8033927ff0d362863e5723e3568d5d0c7"},"mac":"ad9197d47c57c601313d49fb15392a29c8a16418d1bf6f39ac3b822bd5205593"},"id":"353e4c37-a37f-4b2a-8729-81460c6a92d4","version":3}
{"address":"1f6298457c5d76270325b724da5d1953923a6b88","crypto":{"cipher":"aes-128-ctr","ciphertext":"42348bd719f9225cc91184a3daf7005a89cec8be7d907c92c57ac01f29b61e2d","cipherparams":{"iv":"ee8d92dde2c3dc230f1f6e765641e0ce"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"74992f3587a487202cd76d6aa625af6b0e5b2f68eb0ea3647edb4541ce24adb9"},"mac":"27cd4f0aa624fce848aebccbd80efda35d615da2d274cc39e5185170d2ff4017"},"id":"27bfc138-d358-4c21-b040-93458f11e4c4","version":3}
{"address":"fe08e6f330f4e5e624ad759625b71b2e52594feb","crypto":{"cipher":"aes-128-ctr","ciphertext":"df41d4a4eef8ce781354c4058eac980bd349c12b40efeba38189e3d20f65eccd","cipherparams":{"iv":"c8ee56c122b8b70ab026a80ddaeaf987"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"d284320c522d4cec86d2965f8827ca63429f95c1f0a02f410393754f44b49480"},"mac":"c8d798da5f3de89fccf0a9d502f9bfb89876bee6c86d3d9176f043d9a8310579"},"id":"c9320f41-e707-49a2-9271-660c59e3bbdc","version":3}
# The path on the module container where static files are housed
STATIC_FILES_DIRPATH = "github.com/kurtosis-tech/eth2-package/static_files"
# Geth + CL genesis generation
GENESIS_GENERATION_CONFIG_DIRPATH = STATIC_FILES_DIRPATH + "/genesis-generation-config"
EL_GENESIS_GENERATION_CONFIG_DIRPATH = GENESIS_GENERATION_CONFIG_DIRPATH + "/el"
EL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH = (
EL_GENESIS_GENERATION_CONFIG_DIRPATH + "/genesis-config.yaml.tmpl"
)
CL_GENESIS_GENERATION_CONFIG_DIRPATH = GENESIS_GENERATION_CONFIG_DIRPATH + "/cl"
CL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH = (
CL_GENESIS_GENERATION_CONFIG_DIRPATH + "/config.yaml.tmpl"
)
CL_GENESIS_GENERATION_MNEMONICS_TEMPLATE_FILEPATH = (
CL_GENESIS_GENERATION_CONFIG_DIRPATH + "/mnemonics.yaml.tmpl"
)
# Prefunded keys
PREFUNDED_KEYS_DIRPATH = STATIC_FILES_DIRPATH + "/genesis-prefunded-keys"
GETH_PREFUNDED_KEYS_DIRPATH = PREFUNDED_KEYS_DIRPATH + "/geth"
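# A hedged sketch of how these template paths are typically consumed (the
# artifact name and the `template_data` struct are assumptions, not the
# package's actual values):
#
#   template = read_file(CL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH)
#   rendered = plan.render_templates(
#       config = {
#           "config.yaml": struct(template = template, data = template_data),
#       },
#       name = "cl-genesis-config",  # hypothetical artifact name
#   )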