Commit 0b2a2ae0 authored by Barnabas Busa, committed by GitHub

feat: add support for pull through cache (#833)

parent 2633d15b
......@@ -31,3 +31,6 @@ additional_services:
ethereum_metrics_exporter_enabled: true
snooper_enabled: true
keymanager_enabled: true
docker_cache_params:
enabled: true
url: "docker.ethquokkaops.io"
......@@ -637,19 +637,24 @@ additional_services:
# Configuration place for dora the explorer - https://github.com/ethpandaops/dora
dora_params:
# Dora docker image to use
# Leave blank to use the default image according to your network params
image: ""
# Defaults to the latest image
image: "ethpandaops/dora:latest"
# A list of optional extra env_vars the dora container should spin up with
env: {}
# Configuration place for transaction spammer - https://github.com/MariusVanDerWijden/tx-fuzz
tx_spammer_params:
# TX Spammer docker image to use
# Defaults to the latest master image
image: "ethpandaops/tx-fuzz:master"
# A list of optional extra params that will be passed to the TX Spammer container for modifying its behaviour
tx_spammer_extra_args: []
# Configuration place for goomy the blob spammer - https://github.com/ethpandaops/goomy-blob
goomy_blob_params:
# Goomy Blob docker image to use
# Defaults to the latest
image: "ethpandaops/goomy-blob:latest"
# A list of optional params that will be passed to the blob-spammer command for modifying its behaviour
goomy_blob_args: []
......@@ -664,6 +669,9 @@ prometheus_params:
max_cpu: 1000
min_mem: 128
max_mem: 2048
# Prometheus docker image to use
# Defaults to the latest image
image: "prom/prometheus:latest"
# Configuration place for grafana
grafana_params:
......@@ -676,12 +684,15 @@ grafana_params:
max_cpu: 1000
min_mem: 128
max_mem: 2048
# Grafana docker image to use
# Defaults to the latest image
image: "grafana/grafana:latest"
# Configuration place for the assertoor testing tool - https://github.com/ethpandaops/assertoor
assertoor_params:
# Assertoor docker image to use
# Leave blank to use the default image according to your network params
image: ""
# Defaults to the latest image
image: "ethpandaops/assertoor:latest"
# Check chain stability
# This check monitors the chain and succeeds if:
......@@ -771,6 +782,20 @@ disable_peer_scoring: false
# Defaults to false
persistent: false
# Docker cache url enables all docker images to be pulled through a custom docker registry
# Disabled by default
# Defaults to empty cache url
# Images pulled from dockerhub will be prefixed with "/dh/" by default (docker.io)
# Images pulled from github registry will be prefixed with "/gh/" by default (ghcr.io)
# Images pulled from google registry will be prefixed with "/gcr/" by default (gcr.io)
# If you want to use a local image in combination with the cache, do not put "/" in your local image name
docker_cache_params:
enabled: false
url: ""
dockerhub_prefix: "/dh/"
github_prefix: "/gh/"
google_prefix: "/gcr/"
# Supports three values
# Default: "null" - no mev boost, mev builder, mev flood or relays are spun up
# "mock" - mock-builder & mev-boost are spun up
......
......@@ -92,6 +92,7 @@ def run(plan, args={}):
global_node_selectors = args_with_right_defaults.global_node_selectors
keymanager_enabled = args_with_right_defaults.keymanager_enabled
apache_port = args_with_right_defaults.apache_port
docker_cache_params = args_with_right_defaults.docker_cache_params
prefunded_accounts = genesis_constants.PRE_FUNDED_ACCOUNTS
if (
......@@ -158,9 +159,8 @@ def run(plan, args={}):
network_id,
) = participant_network.launch_participant_network(
plan,
args_with_right_defaults.participants,
args_with_right_defaults,
network_params,
args_with_right_defaults.global_log_level,
jwt_file,
keymanager_file,
persistent,
......@@ -169,10 +169,6 @@ def run(plan, args={}):
global_node_selectors,
keymanager_enabled,
parallel_keystore_generation,
args_with_right_defaults.checkpoint_sync_enabled,
args_with_right_defaults.checkpoint_sync_url,
args_with_right_defaults.port_publisher,
args_with_right_defaults.mev_type,
)
plan.print(
......@@ -459,6 +455,7 @@ def run(plan, args={}):
network_params.seconds_per_slot,
network_params.genesis_delay,
global_node_selectors,
args_with_right_defaults.tx_spammer_params,
)
plan.print("Successfully launched blob spammer")
elif additional_service == "goomy_blob":
......@@ -488,6 +485,7 @@ def run(plan, args={}):
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched execution layer forkmon")
elif additional_service == "beacon_metrics_gazer":
......@@ -500,6 +498,7 @@ def run(plan, args={}):
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
)
launch_prometheus_grafana = True
......@@ -516,6 +515,7 @@ def run(plan, args={}):
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched blockscout")
elif additional_service == "dora":
......@@ -550,6 +550,7 @@ def run(plan, args={}):
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched dugtrio")
elif additional_service == "blutgang":
......@@ -566,6 +567,7 @@ def run(plan, args={}):
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched blutgang")
elif additional_service == "blobscan":
......@@ -580,6 +582,7 @@ def run(plan, args={}):
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched blobscan")
elif additional_service == "forky":
......@@ -598,6 +601,7 @@ def run(plan, args={}):
final_genesis_timestamp,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched forky")
elif additional_service == "tracoor":
......@@ -616,6 +620,7 @@ def run(plan, args={}):
final_genesis_timestamp,
args_with_right_defaults.port_publisher,
index,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched tracoor")
elif additional_service == "apache":
......@@ -627,6 +632,7 @@ def run(plan, args={}):
all_participants,
args_with_right_defaults.participants,
global_node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print("Successfully launched apache")
elif additional_service == "full_beaconchain_explorer":
......@@ -673,6 +679,7 @@ def run(plan, args={}):
fuzz_target,
args_with_right_defaults.custom_flood_params,
global_node_selectors,
args_with_right_defaults.docker_cache_params,
)
else:
fail("Invalid additional service %s" % (additional_service))
......
......@@ -11,6 +11,7 @@ APACHE_ENR_LIST_FILENAME = "bootstrap_nodes.txt"
APACHE_CONFIG_MOUNT_DIRPATH_ON_SERVICE = "/usr/local/apache2/htdocs/"
IMAGE_NAME = "library/httpd:latest"
# The min/max CPU/memory that apache can use
MIN_CPU = 100
MAX_CPU = 300
......@@ -33,6 +34,7 @@ def launch_apache(
participant_contexts,
participant_configs,
global_node_selectors,
docker_cache_params,
):
config_files_artifact_name = plan.upload_files(
src=static_files.APACHE_CONFIG_FILEPATH, name="apache-config"
......@@ -93,6 +95,7 @@ def launch_apache(
public_ports,
bootstrap_info_files_artifact_name,
global_node_selectors,
docker_cache_params,
)
plan.add_service(SERVICE_NAME, config)
......@@ -104,6 +107,7 @@ def get_config(
public_ports,
bootstrap_info_files_artifact_name,
node_selectors,
docker_cache_params,
):
files = {
constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data,
......@@ -145,7 +149,10 @@ def get_config(
cmd_str = " ".join(cmd)
return ServiceConfig(
image="httpd:latest",
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=USED_PORTS,
cmd=[cmd_str],
public_ports=public_ports,
......
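Note that the apache launcher's constant is now "library/httpd:latest" rather than the bare "httpd:latest": the cache helper only recognises a Docker Hub image by the "/" in its name, and the pull-through registry presumably addresses official Docker Hub images under their full "library/" namespace. A hedged example of the resulting reference, assuming the cache settings shown at the top of this diff:

    shared_utils.docker_cache_image_calc(docker_cache_params, "library/httpd:latest")
    # -> "docker.ethquokkaops.io/dh/library/httpd:latest" with the cache enabled, "library/httpd:latest" otherwise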
......@@ -119,12 +119,10 @@ def get_config(
ASSERTOOR_CONFIG_FILENAME,
)
if assertoor_params.image != "":
IMAGE_NAME = assertoor_params.image
elif network_params.electra_fork_epoch < constants.ELECTRA_FORK_EPOCH:
if network_params.electra_fork_epoch < constants.ELECTRA_FORK_EPOCH:
IMAGE_NAME = "ethpandaops/assertoor:electra-support"
else:
IMAGE_NAME = "ethpandaops/assertoor:latest"
return ServiceConfig(
image=IMAGE_NAME,
......
......@@ -37,12 +37,14 @@ def launch_beacon_metrics_gazer(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
config = get_config(
cl_contexts[0].beacon_http_url,
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
beacon_metrics_gazer_service = plan.add_service(SERVICE_NAME, config)
......@@ -64,6 +66,7 @@ def get_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
config_file_path = shared_utils.path_join(
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -78,7 +81,10 @@ def get_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=USED_PORTS,
public_ports=public_ports,
files={
......
IMAGE_NAME = "ethpandaops/tx-fuzz:master"
SERVICE_NAME = "blob-spammer"
ENTRYPOINT_ARGS = ["/bin/sh", "-c"]
......@@ -19,6 +18,7 @@ def launch_blob_spammer(
seconds_per_slot,
genesis_delay,
global_node_selectors,
tx_spammer_params,
):
config = get_config(
prefunded_addresses,
......@@ -28,6 +28,7 @@ def launch_blob_spammer(
seconds_per_slot,
genesis_delay,
global_node_selectors,
tx_spammer_params.image,
)
plan.add_service(SERVICE_NAME, config)
......@@ -40,10 +41,11 @@ def get_config(
seconds_per_slot,
genesis_delay,
node_selectors,
image,
):
dencunTime = (deneb_fork_epoch * 32 * seconds_per_slot) + genesis_delay
return ServiceConfig(
image=IMAGE_NAME,
image=image,
entrypoint=ENTRYPOINT_ARGS,
cmd=[
" && ".join(
......
......@@ -69,6 +69,7 @@ def launch_blobscan(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
node_selectors = global_node_selectors
beacon_node_rpc_uri = "{0}".format(cl_contexts[0].beacon_http_url)
......@@ -83,6 +84,9 @@ def launch_blobscan(
max_memory=POSTGRES_MAX_MEMORY,
persistent=persistent,
node_selectors=node_selectors,
image=shared_utils.docker_cache_image_calc(
docker_cache_params, "library/postgres:alpine"
),
)
redis_output = redis.run(
......@@ -94,6 +98,9 @@ def launch_blobscan(
max_memory=REDIS_MAX_MEMORY,
persistent=persistent,
node_selectors=node_selectors,
image=shared_utils.docker_cache_image_calc(
docker_cache_params, "library/redis:alpine"
),
)
api_config = get_api_config(
......@@ -104,6 +111,7 @@ def launch_blobscan(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
blobscan_config = plan.add_service(API_SERVICE_NAME, api_config)
......@@ -119,6 +127,7 @@ def launch_blobscan(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
plan.add_service(WEB_SERVICE_NAME, web_config)
......@@ -128,6 +137,7 @@ def launch_blobscan(
execution_node_rpc_uri,
network_params.network,
node_selectors,
docker_cache_params,
)
plan.add_service(INDEXER_SERVICE_NAME, indexer_config)
......@@ -140,6 +150,7 @@ def get_api_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
IMAGE_NAME = "blossomlabs/blobscan-api:latest"
......@@ -151,7 +162,10 @@ def get_api_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=API_PORTS,
public_ports=public_ports,
env_vars={
......@@ -192,6 +206,7 @@ def get_web_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
# TODO: https://github.com/kurtosis-tech/kurtosis/issues/1861
# Configure NEXT_PUBLIC_BEACON_BASE_URL and NEXT_PUBLIC_EXPLORER_BASE env vars
......@@ -206,7 +221,10 @@ def get_web_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=WEB_PORTS,
public_ports=public_ports,
env_vars={
......@@ -231,11 +249,15 @@ def get_indexer_config(
execution_node_rpc,
network_name,
node_selectors,
docker_cache_params,
):
IMAGE_NAME = "blossomlabs/blobscan-indexer:master"
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
env_vars={
"BEACON_NODE_ENDPOINT": beacon_node_rpc,
"BLOBSCAN_API_ENDPOINT": blobscan_api_url,
......
......@@ -4,6 +4,7 @@ postgres = import_module("github.com/kurtosis-tech/postgres-package/main.star")
IMAGE_NAME_BLOCKSCOUT = "blockscout/blockscout:6.8.0"
IMAGE_NAME_BLOCKSCOUT_VERIF = "ghcr.io/blockscout/smart-contract-verifier:v1.9.0"
POSTGRES_IMAGE = "library/postgres:alpine"
SERVICE_NAME_BLOCKSCOUT = "blockscout"
......@@ -44,6 +45,7 @@ def launch_blockscout(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
postgres_output = postgres.run(
plan,
......@@ -52,6 +54,7 @@ def launch_blockscout(
extra_configs=["max_connections=1000"],
persistent=persistent,
node_selectors=global_node_selectors,
image=shared_utils.docker_cache_image_calc(docker_cache_params, POSTGRES_IMAGE),
)
el_context = el_contexts[0]
......@@ -64,6 +67,7 @@ def launch_blockscout(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
verif_service_name = "{}-verif".format(SERVICE_NAME_BLOCKSCOUT)
verif_service = plan.add_service(verif_service_name, config_verif)
......@@ -79,6 +83,7 @@ def launch_blockscout(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
blockscout_service = plan.add_service(SERVICE_NAME_BLOCKSCOUT, config_backend)
plan.print(blockscout_service)
......@@ -90,7 +95,9 @@ def launch_blockscout(
return blockscout_url
def get_config_verif(node_selectors, port_publisher, additional_service_index):
def get_config_verif(
node_selectors, port_publisher, additional_service_index, docker_cache_params
):
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
......@@ -99,7 +106,10 @@ def get_config_verif(node_selectors, port_publisher, additional_service_index):
)
return ServiceConfig(
image=IMAGE_NAME_BLOCKSCOUT_VERIF,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME_BLOCKSCOUT_VERIF,
),
ports=VERIF_USED_PORTS,
public_ports=public_ports,
env_vars={
......@@ -123,6 +133,7 @@ def get_config_backend(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
database_url = "{protocol}://{user}:{password}@{hostname}:{port}/{database}".format(
protocol="postgresql",
......@@ -141,7 +152,10 @@ def get_config_backend(
)
return ServiceConfig(
image=IMAGE_NAME_BLOCKSCOUT,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME_BLOCKSCOUT,
),
ports=USED_PORTS,
public_ports=public_ports,
cmd=[
......
......@@ -41,6 +41,7 @@ def launch_blutgang(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
all_el_client_info = []
for index, participant in enumerate(participant_contexts):
......@@ -76,6 +77,7 @@ def launch_blutgang(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
plan.add_service(SERVICE_NAME, config)
......@@ -87,6 +89,7 @@ def get_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
config_file_path = shared_utils.path_join(
BLUTGANG_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -105,7 +108,10 @@ def get_config(
public_ports = shared_utils.get_port_specs(public_port_assignments)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=USED_PORTS,
public_ports=public_ports,
files={
......
......@@ -20,9 +20,8 @@ def launch(
el_cl_data,
jwt_file,
keymanager_file,
participants,
args_with_right_defaults,
all_el_contexts,
global_log_level,
global_node_selectors,
global_tolerations,
persistent,
......@@ -30,9 +29,6 @@ def launch(
validator_data,
prysm_password_relative_filepath,
prysm_password_artifact_uuid,
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
):
plan.print("Launching CL network")
......@@ -94,7 +90,7 @@ def launch(
else None
)
network_name = shared_utils.get_network_name(network_params.network)
for index, participant in enumerate(participants):
for index, participant in enumerate(args_with_right_defaults.participants):
cl_type = participant.cl_type
el_type = participant.el_type
node_selectors = input_parser.get_client_node_selectors(
......@@ -118,7 +114,9 @@ def launch(
cl_launchers[cl_type]["launch_method"],
)
index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants))))
index_str = shared_utils.zfill_custom(
index + 1, len(str(len(args_with_right_defaults.participants)))
)
cl_service_name = "cl-{0}-{1}-{2}".format(index_str, cl_type, el_type)
new_cl_node_validator_keystores = None
......@@ -140,6 +138,7 @@ def launch(
snooper_service_name,
el_context,
node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print(
"Successfully added {0} snooper participants".format(
......@@ -147,15 +146,15 @@ def launch(
)
)
if checkpoint_sync_enabled:
if checkpoint_sync_url == "":
if args_with_right_defaults.checkpoint_sync_enabled:
if args_with_right_defaults.checkpoint_sync_url == "":
if (
network_params.network in constants.PUBLIC_NETWORKS
or network_params.network == constants.NETWORK_NAME.ephemery
):
checkpoint_sync_url = constants.CHECKPOINT_SYNC_URL[
network_params.network
]
args_with_right_defaults.checkpoint_sync_url = (
constants.CHECKPOINT_SYNC_URL[network_params.network]
)
else:
fail(
"Checkpoint sync URL is required if you enabled checkpoint_sync for custom networks. Please provide a valid URL."
......@@ -169,7 +168,7 @@ def launch(
cl_launcher,
cl_service_name,
participant,
global_log_level,
args_with_right_defaults.global_log_level,
cl_context_BOOTNODE,
el_context,
full_name,
......@@ -178,9 +177,9 @@ def launch(
persistent,
tolerations,
node_selectors,
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
args_with_right_defaults.checkpoint_sync_enabled,
args_with_right_defaults.checkpoint_sync_url,
args_with_right_defaults.port_publisher,
index,
)
else:
......@@ -190,7 +189,7 @@ def launch(
cl_launcher,
cl_service_name,
participant,
global_log_level,
args_with_right_defaults.global_log_level,
boot_cl_client_ctx,
el_context,
full_name,
......@@ -199,9 +198,9 @@ def launch(
persistent,
tolerations,
node_selectors,
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
args_with_right_defaults.checkpoint_sync_enabled,
args_with_right_defaults.checkpoint_sync_url,
args_with_right_defaults.port_publisher,
index,
)
......
......@@ -326,7 +326,7 @@ def get_beacon_config(
"labels": shared_utils.label_maker(
client=constants.CL_TYPE.grandine,
client_type=constants.CLIENT_TYPES.cl,
image=participant.cl_image,
image=participant.cl_image[-constants.MAX_LABEL_LENGTH :],
connected_client=el_context.client_name,
extra_labels=participant.cl_extra_labels,
supernode=participant.supernode,
......
......@@ -323,7 +323,7 @@ def get_beacon_config(
"labels": shared_utils.label_maker(
client=constants.CL_TYPE.lighthouse,
client_type=constants.CLIENT_TYPES.cl,
image=participant.cl_image,
image=participant.cl_image[-constants.MAX_LABEL_LENGTH :],
connected_client=el_context.client_name,
extra_labels=participant.cl_extra_labels,
supernode=participant.supernode,
......
......@@ -316,7 +316,7 @@ def get_beacon_config(
"labels": shared_utils.label_maker(
client=constants.CL_TYPE.lodestar,
client_type=constants.CLIENT_TYPES.cl,
image=participant.cl_image,
image=participant.cl_image[-constants.MAX_LABEL_LENGTH :],
connected_client=el_context.client_name,
extra_labels=participant.cl_extra_labels,
supernode=participant.supernode,
......
......@@ -337,7 +337,7 @@ def get_beacon_config(
"labels": shared_utils.label_maker(
client=constants.CL_TYPE.nimbus,
client_type=constants.CLIENT_TYPES.cl,
image=participant.cl_image,
image=participant.cl_image[-constants.MAX_LABEL_LENGTH :],
connected_client=el_context.client_name,
extra_labels=participant.cl_extra_labels,
supernode=participant.supernode,
......
......@@ -303,7 +303,7 @@ def get_beacon_config(
"labels": shared_utils.label_maker(
client=constants.CL_TYPE.prysm,
client_type=constants.CLIENT_TYPES.cl,
image=participant.cl_image,
image=participant.cl_image[-constants.MAX_LABEL_LENGTH :],
connected_client=el_context.client_name,
extra_labels=participant.cl_extra_labels,
supernode=participant.supernode,
......
......@@ -346,7 +346,7 @@ def get_beacon_config(
"labels": shared_utils.label_maker(
client=constants.CL_TYPE.teku,
client_type=constants.CLIENT_TYPES.cl,
image=participant.cl_image,
image=participant.cl_image[-constants.MAX_LABEL_LENGTH :],
connected_client=el_context.client_name,
extra_labels=participant.cl_extra_labels,
supernode=participant.supernode,
......
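The repeated change from image=participant.cl_image to image=participant.cl_image[-constants.MAX_LABEL_LENGTH :] trims the image reference used in service labels to its last 63 characters (MAX_LABEL_LENGTH), since Kubernetes limits label values to 63 characters and cached image references get noticeably longer; the sanitisation added to label_maker further below complements this. An illustration, assuming the cache URL shown earlier:

    image = "docker.ethquokkaops.io/gh/blockscout/smart-contract-verifier:v1.9.0"
    image[-constants.MAX_LABEL_LENGTH :]  # keeps only the trailing 63 characters for the label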
......@@ -120,12 +120,10 @@ def get_config(
0,
)
if dora_params.image != "":
IMAGE_NAME = dora_params.image
elif network_params.electra_fork_epoch < constants.ELECTRA_FORK_EPOCH:
if network_params.electra_fork_epoch < constants.ELECTRA_FORK_EPOCH:
IMAGE_NAME = "ethpandaops/dora:master"
else:
IMAGE_NAME = "ethpandaops/dora:latest"
return ServiceConfig(
image=IMAGE_NAME,
......
......@@ -34,6 +34,7 @@ def launch_dugtrio(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
all_cl_client_info = []
for index, participant in enumerate(participant_contexts):
......@@ -66,6 +67,7 @@ def launch_dugtrio(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
plan.add_service(SERVICE_NAME, config)
......@@ -77,6 +79,7 @@ def get_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
config_file_path = shared_utils.path_join(
DUGTRIO_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -91,7 +94,10 @@ def get_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=USED_PORTS,
public_ports=public_ports,
files={
......
......@@ -233,7 +233,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.EL_TYPE.besu,
client_type=constants.CLIENT_TYPES.el,
image=participant.el_image,
image=participant.el_image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_client_name,
extra_labels=participant.el_extra_labels,
supernode=participant.supernode,
......
......@@ -230,7 +230,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.EL_TYPE.erigon,
client_type=constants.CLIENT_TYPES.el,
image=participant.el_image,
image=participant.el_image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_client_name,
extra_labels=participant.el_extra_labels,
supernode=participant.supernode,
......
......@@ -216,7 +216,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.EL_TYPE.ethereumjs,
client_type=constants.CLIENT_TYPES.el,
image=participant.el_image,
image=participant.el_image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_client_name,
extra_labels=participant.el_extra_labels,
supernode=participant.supernode,
......
......@@ -312,7 +312,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.EL_TYPE.geth,
client_type=constants.CLIENT_TYPES.el,
image=participant.el_image,
image=participant.el_image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_client_name,
extra_labels=participant.el_extra_labels,
supernode=participant.supernode,
......
......@@ -223,7 +223,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.EL_TYPE.nethermind,
client_type=constants.CLIENT_TYPES.el,
image=participant.el_image,
image=participant.el_image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_client_name,
extra_labels=participant.el_extra_labels,
supernode=participant.supernode,
......
......@@ -209,7 +209,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.EL_TYPE.nimbus,
client_type=constants.CLIENT_TYPES.el,
image=participant.el_image,
image=participant.el_image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_client_name,
extra_labels=participant.el_extra_labels,
supernode=participant.supernode,
......
......@@ -263,7 +263,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.EL_TYPE.reth,
client_type=constants.CLIENT_TYPES.el,
image=participant.el_image,
image=participant.el_image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_client_name,
extra_labels=participant.el_extra_labels,
supernode=participant.supernode,
......
......@@ -32,6 +32,7 @@ def launch_el_forkmon(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
all_el_client_info = []
for client in el_contexts:
......@@ -59,6 +60,7 @@ def launch_el_forkmon(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
plan.add_service(SERVICE_NAME, config)
......@@ -69,6 +71,7 @@ def get_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
config_file_path = shared_utils.path_join(
EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, EL_FORKMON_CONFIG_FILENAME
......@@ -82,7 +85,10 @@ def get_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=USED_PORTS,
public_ports=public_ports,
files={
......
......@@ -23,11 +23,15 @@ def launch(
el_context,
cl_context,
node_selectors,
docker_cache_params,
):
exporter_service = plan.add_service(
ethereum_metrics_exporter_service_name,
ServiceConfig(
image=DEFAULT_ETHEREUM_METRICS_EXPORTER_IMAGE,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
DEFAULT_ETHEREUM_METRICS_EXPORTER_IMAGE,
),
ports={
HTTP_PORT_ID: shared_utils.new_port_spec(
METRICS_PORT_NUMBER,
......
......@@ -38,6 +38,7 @@ def launch_forky(
final_genesis_timestamp,
port_publisher,
additional_service_index,
docker_cache_params,
):
all_cl_client_info = []
all_el_client_info = []
......@@ -88,6 +89,7 @@ def launch_forky(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
plan.add_service(SERVICE_NAME, config)
......@@ -100,6 +102,7 @@ def get_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
config_file_path = shared_utils.path_join(
FORKY_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -116,7 +119,10 @@ def get_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=USED_PORTS,
public_ports=public_ports,
files={
......
SERVICE_NAME = "goomy-blob-spammer"
IMAGE_NAME = "ethpandaops/goomy-blob:master"
ENTRYPOINT_ARGS = ["/bin/sh", "-c"]
......@@ -24,7 +23,7 @@ def launch_goomy_blob(
el_contexts,
cl_context,
seconds_per_slot,
goomy_blob_params.goomy_blob_args,
goomy_blob_params,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
......@@ -35,7 +34,7 @@ def get_config(
el_contexts,
cl_context,
seconds_per_slot,
goomy_blob_args,
goomy_blob_params,
node_selectors,
):
goomy_cli_args = []
......@@ -47,7 +46,7 @@ def get_config(
)
)
goomy_args = " ".join(goomy_blob_args)
goomy_args = " ".join(goomy_blob_params.goomy_blob_args)
if goomy_args == "":
goomy_args = "combined -b 2 -t 2 --max-pending 3"
goomy_cli_args.append(goomy_args)
......@@ -57,7 +56,7 @@ def get_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=goomy_blob_params.image,
entrypoint=ENTRYPOINT_ARGS,
cmd=[cmd],
min_cpu=MIN_CPU,
......
......@@ -3,8 +3,6 @@ static_files = import_module("../static_files/static_files.star")
SERVICE_NAME = "grafana"
IMAGE_NAME = "grafana/grafana:latest-ubuntu"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER_UINT16 = 3000
......@@ -128,7 +126,7 @@ def get_config(
grafana_params,
):
return ServiceConfig(
image=IMAGE_NAME,
image=grafana_params.image,
ports=USED_PORTS,
env_vars={
CONFIG_DIRPATH_ENV_VAR: GRAFANA_CONFIG_DIRPATH_ON_SERVICE,
......
shared_utils = import_module("../../../shared_utils/shared_utils.star")
PYTHON_IMAGE = "ethpandaops/python-web3"
CUSTOM_FLOOD_SERVICE_NAME = "mev-custom-flood"
......@@ -15,13 +16,16 @@ def spam_in_background(
el_uri,
params,
global_node_selectors,
docker_cache_params,
):
sender_script = plan.upload_files(src="./sender.py", name="mev-custom-flood-sender")
plan.add_service(
name=CUSTOM_FLOOD_SERVICE_NAME,
config=ServiceConfig(
image=PYTHON_IMAGE,
image=shared_utils.docker_cache_image_calc(
docker_cache_params, PYTHON_IMAGE
),
files={"/tmp": sender_script},
cmd=["/bin/sh", "-c", "touch /tmp/sender.log && tail -f /tmp/sender.log"],
env_vars={
......
......@@ -17,7 +17,6 @@ def launch(plan, prague_time):
mv /ephemery-release/metadata/* /network-configs/ ;\
cat /network-configs/genesis_validators_root.txt ;\
'",
image="badouralix/curl-jq",
store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")],
)
genesis_validators_root = el_cl_genesis_data_uuid.output
......
......@@ -17,19 +17,25 @@ CL_GENESIS_DATA_GENERATION_TIME = 5
CL_NODE_STARTUP_TIME = 5
def launch(plan, network_params, participants, parallel_keystore_generation):
num_participants = len(participants)
def launch(
plan, network_params, args_with_right_defaults, parallel_keystore_generation
):
num_participants = len(args_with_right_defaults.participants)
plan.print("Generating cl validator key stores")
validator_data = None
if not parallel_keystore_generation:
validator_data = validator_keystores.generate_validator_keystores(
plan, network_params.preregistered_validator_keys_mnemonic, participants
plan,
network_params.preregistered_validator_keys_mnemonic,
args_with_right_defaults.participants,
args_with_right_defaults.docker_cache_params,
)
else:
validator_data = validator_keystores.generate_valdiator_keystores_in_parallel(
plan,
network_params.preregistered_validator_keys_mnemonic,
participants,
args_with_right_defaults.participants,
args_with_right_defaults.docker_cache_params,
)
plan.print(json.indent(json.encode(validator_data)))
......@@ -46,30 +52,34 @@ def launch(plan, network_params, participants, parallel_keystore_generation):
total_number_of_validator_keys = network_params.preregistered_validator_count
if network_params.preregistered_validator_count == 0:
for participant in participants:
for participant in args_with_right_defaults.participants:
total_number_of_validator_keys += participant.validator_count
plan.print("Generating EL CL data")
# we are running capella genesis - deprecated
if network_params.deneb_fork_epoch > 0:
ethereum_genesis_generator_image = (
constants.ETHEREUM_GENESIS_GENERATOR.capella_genesis
ethereum_genesis_generator_image = shared_utils.docker_cache_image_calc(
args_with_right_defaults.docker_cache_params,
constants.ETHEREUM_GENESIS_GENERATOR.capella_genesis,
)
# we are running deneb genesis - default behavior
elif network_params.deneb_fork_epoch == 0:
ethereum_genesis_generator_image = (
constants.ETHEREUM_GENESIS_GENERATOR.deneb_genesis
ethereum_genesis_generator_image = shared_utils.docker_cache_image_calc(
args_with_right_defaults.docker_cache_params,
constants.ETHEREUM_GENESIS_GENERATOR.deneb_genesis,
)
# we are running electra - experimental
elif network_params.electra_fork_epoch != None:
if network_params.electra_fork_epoch == 0:
ethereum_genesis_generator_image = (
constants.ETHEREUM_GENESIS_GENERATOR.verkle_genesis
ethereum_genesis_generator_image = shared_utils.docker_cache_image_calc(
args_with_right_defaults.docker_cache_params,
constants.ETHEREUM_GENESIS_GENERATOR.verkle_genesis,
)
else:
ethereum_genesis_generator_image = (
constants.ETHEREUM_GENESIS_GENERATOR.verkle_support_genesis
ethereum_genesis_generator_image = shared_utils.docker_cache_image_calc(
args_with_right_defaults.docker_cache_params,
constants.ETHEREUM_GENESIS_GENERATOR.verkle_support_genesis,
)
else:
fail(
......
......@@ -35,7 +35,6 @@ def shadowfork_prep(
+ "/geth/"
+ shadowfork_block
+ "/_snapshot_eth_getBlockByNumber.json",
image="badouralix/curl-jq",
store=[StoreSpec(src="/shadowfork", name="latest_blocks")],
)
......
......@@ -113,6 +113,15 @@ FULU_FORK_EPOCH = 100000001
EIP7594_FORK_VERSION = "0x80000038"
EIP7594_FORK_EPOCH = 100000002
MAX_LABEL_LENGTH = 63
CONTAINER_REGISTRY = struct(
dockerhub="/",
ghcr="ghcr.io",
gcr="gcr.io",
)
ETHEREUM_GENESIS_GENERATOR = struct(
capella_genesis="ethpandaops/ethereum-genesis-generator:2.0.12", # Deprecated (no support for minimal config)
deneb_genesis="ethpandaops/ethereum-genesis-generator:3.4.1", # Default
......
......@@ -175,7 +175,15 @@ SUBCATEGORY_PARAMS = {
"image",
"env",
],
"docker_cache_params": [
"enabled",
"url",
"dockerhub_prefix",
"github_prefix",
"google_prefix",
],
"tx_spammer_params": [
"image",
"tx_spammer_extra_args",
],
"goomy_blob_params": [
......@@ -188,6 +196,7 @@ SUBCATEGORY_PARAMS = {
"max_mem",
"storage_tsdb_retention_time",
"storage_tsdb_retention_size",
"image",
],
"grafana_params": [
"additional_dashboards",
......@@ -195,6 +204,7 @@ SUBCATEGORY_PARAMS = {
"max_cpu",
"min_mem",
"max_mem",
"image",
],
"assertoor_params": [
"image",
......
......@@ -30,9 +30,8 @@ beacon_snooper = import_module("./snooper/snooper_beacon_launcher.star")
def launch_participant_network(
plan,
participants,
args_with_right_defaults,
network_params,
global_log_level,
jwt_file,
keymanager_file,
persistent,
......@@ -41,14 +40,10 @@ def launch_participant_network(
global_node_selectors,
keymanager_enabled,
parallel_keystore_generation,
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
mev_builder_type,
):
network_id = network_params.network_id
latest_block = ""
num_participants = len(participants)
num_participants = len(args_with_right_defaults.participants)
prague_time = 0
shadowfork_block = "latest"
total_number_of_validator_keys = 0
......@@ -70,7 +65,7 @@ def launch_participant_network(
plan,
network_params,
shadowfork_block,
participants,
args_with_right_defaults.participants,
global_tolerations,
global_node_selectors,
)
......@@ -82,7 +77,7 @@ def launch_participant_network(
final_genesis_timestamp,
validator_data,
) = launch_kurtosis.launch(
plan, network_params, participants, parallel_keystore_generation
plan, network_params, args_with_right_defaults, parallel_keystore_generation
)
el_cl_genesis_config_template = read_file(
......@@ -137,15 +132,15 @@ def launch_participant_network(
network_params,
el_cl_data,
jwt_file,
participants,
global_log_level,
args_with_right_defaults.participants,
args_with_right_defaults.global_log_level,
global_node_selectors,
global_tolerations,
persistent,
network_id,
num_participants,
port_publisher,
mev_builder_type,
args_with_right_defaults.port_publisher,
args_with_right_defaults.mev_type,
)
# Launch all consensus layer clients
......@@ -170,9 +165,8 @@ def launch_participant_network(
el_cl_data,
jwt_file,
keymanager_file,
participants,
args_with_right_defaults,
all_el_contexts,
global_log_level,
global_node_selectors,
global_tolerations,
persistent,
......@@ -180,9 +174,6 @@ def launch_participant_network(
validator_data,
prysm_password_relative_filepath,
prysm_password_artifact_uuid,
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
)
ethereum_metrics_exporter_context = None
......@@ -200,12 +191,14 @@ def launch_participant_network(
]
current_vc_index = 0
for index, participant in enumerate(participants):
for index, participant in enumerate(args_with_right_defaults.participants):
el_type = participant.el_type
cl_type = participant.cl_type
vc_type = participant.vc_type
remote_signer_type = participant.remote_signer_type
index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants))))
index_str = shared_utils.zfill_custom(
index + 1, len(str(len(args_with_right_defaults.participants)))
)
for sub_index in range(participant.vc_count):
vc_index_str = shared_utils.zfill_custom(
sub_index + 1, len(str(participant.vc_count))
......@@ -231,6 +224,7 @@ def launch_participant_network(
el_context,
cl_context,
node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print(
"Successfully added {0} ethereum metrics exporter participants".format(
......@@ -320,6 +314,7 @@ def launch_participant_network(
snooper_service_name,
cl_context,
node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print(
"Successfully added {0} snooper participants".format(
......@@ -359,7 +354,7 @@ def launch_participant_network(
participant=participant,
global_tolerations=global_tolerations,
node_selectors=node_selectors,
port_publisher=port_publisher,
port_publisher=args_with_right_defaults.port_publisher,
remote_signer_index=current_vc_index,
)
......@@ -376,7 +371,7 @@ def launch_participant_network(
service_name="vc-{0}".format(full_name),
vc_type=vc_type,
image=participant.vc_image,
global_log_level=global_log_level,
global_log_level=args_with_right_defaults.global_log_level,
cl_context=cl_context,
el_context=el_context,
remote_signer_context=remote_signer_context,
......@@ -392,7 +387,7 @@ def launch_participant_network(
preset=network_params.preset,
network=network_params.network,
electra_fork_epoch=network_params.electra_fork_epoch,
port_publisher=port_publisher,
port_publisher=args_with_right_defaults.port_publisher,
vc_index=current_vc_index,
)
all_vc_contexts.append(vc_context)
......@@ -403,7 +398,7 @@ def launch_participant_network(
all_participants = []
for index, participant in enumerate(participants):
for index, participant in enumerate(args_with_right_defaults.participants):
el_type = participant.el_type
cl_type = participant.cl_type
vc_type = participant.vc_type
......
......@@ -74,7 +74,6 @@ def generate_el_cl_genesis_data(
name="read-prague-time",
description="Reading prague time from genesis",
run="jq .config.pragueTime /data/genesis.json | tr -d '\n'",
image="badouralix/curl-jq",
files={"/data": genesis.files_artifacts[0]},
)
......
......@@ -38,8 +38,9 @@ def launch_prelaunch_data_generator(
plan,
files_artifact_mountpoints,
service_name_suffix,
docker_cache_params,
):
config = get_config(files_artifact_mountpoints)
config = get_config(files_artifact_mountpoints, docker_cache_params)
service_name = "{0}{1}".format(
SERVICE_NAME_PREFIX,
......@@ -51,11 +52,9 @@ def launch_prelaunch_data_generator(
def launch_prelaunch_data_generator_parallel(
plan, files_artifact_mountpoints, service_name_suffixes
plan, files_artifact_mountpoints, service_name_suffixes, docker_cache_params
):
config = get_config(
files_artifact_mountpoints,
)
config = get_config(files_artifact_mountpoints, docker_cache_params)
service_names = [
"{0}{1}".format(
SERVICE_NAME_PREFIX,
......@@ -68,9 +67,12 @@ def launch_prelaunch_data_generator_parallel(
return service_names
def get_config(files_artifact_mountpoints):
def get_config(files_artifact_mountpoints, docker_cache_params):
return ServiceConfig(
image=ETH_VAL_TOOLS_IMAGE,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
ETH_VAL_TOOLS_IMAGE,
),
entrypoint=ENTRYPOINT_ARGS,
files=files_artifact_mountpoints,
)
......@@ -79,8 +81,10 @@ def get_config(files_artifact_mountpoints):
# Generates keystores for the given number of nodes from the given mnemonic, where each keystore contains approximately
#
# num_keys / num_nodes keys
def generate_validator_keystores(plan, mnemonic, participants):
service_name = launch_prelaunch_data_generator(plan, {}, "cl-validator-keystore")
def generate_validator_keystores(plan, mnemonic, participants, docker_cache_params):
service_name = launch_prelaunch_data_generator(
plan, {}, "cl-validator-keystore", docker_cache_params
)
all_output_dirpaths = []
all_sub_command_strs = []
......
......@@ -46,6 +46,7 @@ def launch_prometheus(
node_selectors=global_node_selectors,
storage_tsdb_retention_time=prometheus_params.storage_tsdb_retention_time,
storage_tsdb_retention_size=prometheus_params.storage_tsdb_retention_size,
image=prometheus_params.image,
)
return prometheus_url
......
......@@ -83,9 +83,9 @@ def label_maker(
labels = {
"ethereum-package.client": client,
"ethereum-package.client-type": client_type,
"ethereum-package.client-image": image.replace("/", "-")
.replace(":", "_")
.split("@")[0], # drop the sha256 part of the image from the label
"ethereum-package.client-image": ensure_alphanumeric_bounds(
image.replace("/", "-").replace(":", "_").replace(".", "-").split("@")[0]
), # drop the sha256 part of the image from the label
"ethereum-package.sha256": sha256,
"ethereum-package.connected-client": connected_client,
}
......@@ -346,3 +346,49 @@ def get_cpu_mem_resource_limits(
else constants.VOLUME_SIZE[network_name][client_type + "_volume_size"]
)
return min_cpu, max_cpu, min_mem, max_mem, volume_size
def docker_cache_image_calc(docker_cache_params, image):
if docker_cache_params.enabled:
if docker_cache_params.url in image:
return image
if constants.CONTAINER_REGISTRY.ghcr in image:
return (
docker_cache_params.url
+ docker_cache_params.github_prefix
+ "/".join(image.split("/")[1:])
)
elif constants.CONTAINER_REGISTRY.gcr in image:
return (
docker_cache_params.url
+ docker_cache_params.google_prefix
+ "/".join(image.split("/")[1:])
)
elif constants.CONTAINER_REGISTRY.dockerhub in image:
return (
docker_cache_params.url + docker_cache_params.dockerhub_prefix + image
)
return image
def is_alphanumeric(c):
return ("a" <= c and c <= "z") or ("A" <= c and c <= "Z") or ("0" <= c and c <= "9")
def ensure_alphanumeric_bounds(s):
# Trim from the start
start = 0
for i in range(len(s)):
if is_alphanumeric(s[i]):
start = i
break
# Trim from the end
end = len(s)
for i in range(len(s) - 1, -1, -1):
if is_alphanumeric(s[i]):
end = i + 1
break
return s[start:end]
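A minimal sketch (not part of this commit) of how the two helpers above behave, assuming docker_cache_image_calc and ensure_alphanumeric_bounds are called from this module and that docker_cache_params is a struct with the fields parsed by input_parser:

    def _docker_cache_example():
        cache = struct(
            enabled=True,
            url="docker.ethquokkaops.io",
            dockerhub_prefix="/dh/",
            github_prefix="/gh/",
            google_prefix="/gcr/",
        )
        # ghcr.io images are routed through the GitHub prefix; the registry host is dropped
        gh = docker_cache_image_calc(
            cache, "ghcr.io/blockscout/smart-contract-verifier:v1.9.0"
        )  # -> "docker.ethquokkaops.io/gh/blockscout/smart-contract-verifier:v1.9.0"
        # any other image containing "/" is treated as a Docker Hub image and prefixed whole
        dh = docker_cache_image_calc(cache, "library/postgres:alpine")
        # -> "docker.ethquokkaops.io/dh/library/postgres:alpine"
        # a local image without "/" in its name is returned untouched
        local = docker_cache_image_calc(cache, "my-local-geth")  # -> "my-local-geth"
        # label values must start and end with an alphanumeric character
        trimmed = ensure_alphanumeric_bounds("-dora_latest-")  # -> "dora_latest"
        return gh, dh, local, trimmed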
......@@ -21,10 +21,21 @@ MIN_MEMORY = 10
MAX_MEMORY = 600
def launch(plan, service_name, cl_context, node_selectors):
def launch(
plan,
service_name,
cl_context,
node_selectors,
docker_cache_params,
):
snooper_service_name = "{0}".format(service_name)
snooper_config = get_config(service_name, cl_context, node_selectors)
snooper_config = get_config(
service_name,
cl_context,
node_selectors,
docker_cache_params,
)
snooper_service = plan.add_service(snooper_service_name, snooper_config)
snooper_http_port = snooper_service.ports[SNOOPER_BEACON_RPC_PORT_ID]
......@@ -33,7 +44,12 @@ def launch(plan, service_name, cl_context, node_selectors):
)
def get_config(service_name, cl_context, node_selectors):
def get_config(
service_name,
cl_context,
node_selectors,
docker_cache_params,
):
beacon_rpc_port_num = "{0}".format(
cl_context.beacon_http_url,
)
......@@ -45,7 +61,9 @@ def get_config(service_name, cl_context, node_selectors):
]
return ServiceConfig(
image=constants.DEFAULT_SNOOPER_IMAGE,
image=shared_utils.docker_cache_image_calc(
docker_cache_params, constants.DEFAULT_SNOOPER_IMAGE
),
ports=SNOOPER_USED_PORTS,
cmd=cmd,
min_cpu=MIN_CPU,
......
......@@ -22,10 +22,15 @@ MIN_MEMORY = 10
MAX_MEMORY = 600
def launch(plan, service_name, el_context, node_selectors):
def launch(plan, service_name, el_context, node_selectors, docker_cache_params):
snooper_service_name = "{0}".format(service_name)
snooper_config = get_config(service_name, el_context, node_selectors)
snooper_config = get_config(
service_name,
el_context,
node_selectors,
docker_cache_params,
)
snooper_service = plan.add_service(snooper_service_name, snooper_config)
snooper_http_port = snooper_service.ports[SNOOPER_ENGINE_RPC_PORT_ID]
......@@ -34,7 +39,7 @@ def launch(plan, service_name, el_context, node_selectors):
)
def get_config(service_name, el_context, node_selectors):
def get_config(service_name, el_context, node_selectors, docker_cache_params):
engine_rpc_port_num = "http://{0}:{1}".format(
el_context.ip_addr,
el_context.engine_rpc_port_num,
......@@ -47,7 +52,9 @@ def get_config(service_name, el_context, node_selectors):
]
return ServiceConfig(
image=constants.DEFAULT_SNOOPER_IMAGE,
image=shared_utils.docker_cache_image_calc(
docker_cache_params, constants.DEFAULT_SNOOPER_IMAGE
),
ports=SNOOPER_USED_PORTS,
cmd=cmd,
min_cpu=MIN_CPU,
......
......@@ -36,6 +36,7 @@ def launch_tracoor(
final_genesis_timestamp,
port_publisher,
additional_service_index,
docker_cache_params,
):
all_client_info = []
for index, participant in enumerate(participant_contexts):
......@@ -81,6 +82,7 @@ def launch_tracoor(
global_node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
)
plan.add_service(SERVICE_NAME, config)
......@@ -93,6 +95,7 @@ def get_config(
node_selectors,
port_publisher,
additional_service_index,
docker_cache_params,
):
config_file_path = shared_utils.path_join(
TRACOOR_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -107,7 +110,10 @@ def get_config(
)
return ServiceConfig(
image=IMAGE_NAME,
image=shared_utils.docker_cache_image_calc(
docker_cache_params,
IMAGE_NAME,
),
ports=USED_PORTS,
public_ports=public_ports,
files={
......
......@@ -18,7 +18,7 @@ def launch_transaction_spammer(
config = get_config(
prefunded_addresses,
el_uri,
tx_spammer_params.tx_spammer_extra_args,
tx_spammer_params,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
......@@ -27,22 +27,20 @@ def launch_transaction_spammer(
def get_config(
prefunded_addresses,
el_uri,
tx_spammer_extra_args,
tx_spammer_params,
node_selectors,
):
tx_spammer_image = "ethpandaops/tx-fuzz:master"
cmd = [
"spam",
"--rpc={}".format(el_uri),
"--sk={0}".format(prefunded_addresses[3].private_key),
]
if len(tx_spammer_extra_args) > 0:
cmd.extend([param for param in tx_spammer_extra_args])
if len(tx_spammer_params.tx_spammer_extra_args) > 0:
cmd.extend([param for param in tx_spammer_params.tx_spammer_extra_args])
return ServiceConfig(
image=tx_spammer_image,
image=tx_spammer_params.image,
cmd=cmd,
min_cpu=MIN_CPU,
max_cpu=MAX_CPU,
......
......@@ -119,7 +119,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.VC_TYPE.lighthouse,
client_type=constants.CLIENT_TYPES.validator,
image=image,
image=image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_context.client_name,
extra_labels=participant.vc_extra_labels,
supernode=participant.supernode,
......
......@@ -135,7 +135,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.VC_TYPE.lodestar,
client_type=constants.CLIENT_TYPES.validator,
image=image,
image=image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_context.client_name,
extra_labels=participant.vc_extra_labels,
supernode=participant.supernode,
......
......@@ -107,7 +107,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.VC_TYPE.nimbus,
client_type=constants.CLIENT_TYPES.validator,
image=image,
image=image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_context.client_name,
extra_labels=participant.vc_extra_labels,
supernode=participant.supernode,
......
......@@ -125,7 +125,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.VC_TYPE.prysm,
client_type=constants.CLIENT_TYPES.validator,
image=image,
image=image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_context.client_name,
extra_labels=participant.vc_extra_labels,
supernode=participant.supernode,
......
......@@ -121,7 +121,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.VC_TYPE.teku,
client_type=constants.CLIENT_TYPES.validator,
image=image,
image=image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_context.client_name,
extra_labels=participant.vc_extra_labels,
supernode=participant.supernode,
......
......@@ -65,7 +65,7 @@ def get_config(
"labels": shared_utils.label_maker(
client=constants.VC_TYPE.vero,
client_type=constants.CLIENT_TYPES.validator,
image=image,
image=image[-constants.MAX_LABEL_LENGTH :],
connected_client=cl_context.client_name,
extra_labels=participant.vc_extra_labels,
supernode=participant.supernode,
......