Commit 316d42fb authored by Barnabas Busa, committed by GitHub

feat!: add node selectors features (#491)

Co-authored-by: pk910 <github@pk910.de>
Co-authored-by: parithosh <parithosh.jayanthi@ethereum.org>
Co-authored-by: Gyanendra Mishra <anomaly.the@gmail.com>
parent 5602a02b
participants:
- el_client_type: reth
cl_client_type: teku
cl_split_mode_enabled: true
node_selectors: {
"kubernetes.io/hostname": testing-1,
}
- el_client_type: reth
cl_client_type: teku
cl_split_mode_enabled: true
global_node_selectors: {
"kubernetes.io/hostname": testing-2,
}
......@@ -264,6 +264,12 @@ participants:
# Defaults to empty
tolerations: []
# Node selector
# Only works with Kubernetes
# Example: node_selectors: { "disktype": "ssd" }
# Defaults to empty
node_selectors: {}
# A list of optional extra params that will be passed to the CL client Beacon container for modifying its behaviour
# If the client combines the Beacon & validator nodes (e.g. Teku, Nimbus), then this list will be passed to the combined Beacon-validator node
beacon_extra_params: []
......@@ -605,6 +611,12 @@ xatu_sentry_params:
# toleration_seconds: 3600
# Defaults to empty
global_tolerations: []
# Global node selector that will be passed to all containers (unless overridden by a more specific node selector)
# Only works with Kubernetes
# Example: global_node_selectors: { "disktype": "ssd" }
# Defaults to empty
global_node_selectors: {}
```
#### Example configurations
......
......@@ -61,6 +61,7 @@ def run(plan, args={}):
persistent = args_with_right_defaults.persistent
xatu_sentry_params = args_with_right_defaults.xatu_sentry_params
global_tolerations = args_with_right_defaults.global_tolerations
global_node_selectors = args_with_right_defaults.global_node_selectors
grafana_datasource_config_template = read_file(
static_files.GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH
......@@ -95,6 +96,7 @@ def run(plan, args={}):
persistent,
xatu_sentry_params,
global_tolerations,
global_node_selectors,
parallel_keystore_generation,
)
......@@ -137,7 +139,9 @@ def run(plan, args={}):
if "broadcaster" in args_with_right_defaults.additional_services:
args_with_right_defaults.additional_services.remove("broadcaster")
broadcaster_service = broadcaster.launch_broadcaster(
plan, all_el_client_contexts
plan,
all_el_client_contexts,
global_node_selectors,
)
fuzz_target = "http://{0}:{1}".format(
broadcaster_service.ip_address,
......@@ -170,6 +174,7 @@ def run(plan, args={}):
beacon_uri,
raw_jwt_secret,
args_with_right_defaults.global_client_log_level,
global_node_selectors,
)
mev_endpoints.append(endpoint)
elif (
......@@ -195,6 +200,7 @@ def run(plan, args={}):
fuzz_target,
contract_owner.private_key,
normal_user.private_key,
global_node_selectors,
)
epoch_recipe = GetHttpRequestRecipe(
endpoint="/eth/v2/beacon/blocks/head",
......@@ -218,6 +224,7 @@ def run(plan, args={}):
builder_uri,
network_params.seconds_per_slot,
persistent,
global_node_selectors,
)
mev_flood.spam_in_background(
plan,
......@@ -238,7 +245,8 @@ def run(plan, args={}):
)
if args_with_right_defaults.participants[index].validator_count != 0:
mev_boost_launcher = mev_boost.new_mev_boost_launcher(
MEV_BOOST_SHOULD_CHECK_RELAY, mev_endpoints
MEV_BOOST_SHOULD_CHECK_RELAY,
mev_endpoints,
)
mev_boost_service_name = "{0}-{1}-{2}-{3}".format(
input_parser.MEV_BOOST_SERVICE_NAME_PREFIX,
......@@ -252,6 +260,7 @@ def run(plan, args={}):
mev_boost_service_name,
network_params.network_id,
mev_params.mev_boost_image,
global_node_selectors,
)
all_mevboost_contexts.append(mev_boost_context)
......@@ -275,6 +284,7 @@ def run(plan, args={}):
fuzz_target,
tx_spammer_params,
network_params.electra_fork_epoch,
global_node_selectors,
)
plan.print("Successfully launched transaction spammer")
elif additional_service == "blob_spammer":
......@@ -287,6 +297,7 @@ def run(plan, args={}):
network_params.deneb_fork_epoch,
network_params.seconds_per_slot,
network_params.genesis_delay,
global_node_selectors,
)
plan.print("Successfully launched blob spammer")
elif additional_service == "goomy_blob":
......@@ -299,6 +310,7 @@ def run(plan, args={}):
all_cl_client_contexts[0],
network_params.seconds_per_slot,
goomy_blob_params,
global_node_selectors,
)
plan.print("Successfully launched goomy the blob spammer")
# We need a way to do time.sleep
......@@ -309,7 +321,10 @@ def run(plan, args={}):
static_files.EL_FORKMON_CONFIG_TEMPLATE_FILEPATH
)
el_forkmon.launch_el_forkmon(
plan, el_forkmon_config_template, all_el_client_contexts
plan,
el_forkmon_config_template,
all_el_client_contexts,
global_node_selectors,
)
plan.print("Successfully launched execution layer forkmon")
elif additional_service == "beacon_metrics_gazer":
......@@ -319,6 +334,7 @@ def run(plan, args={}):
plan,
all_cl_client_contexts,
network_params,
global_node_selectors,
)
)
launch_prometheus_grafana = True
......@@ -329,7 +345,10 @@ def run(plan, args={}):
elif additional_service == "blockscout":
plan.print("Launching blockscout")
blockscout_sc_verif_url = blockscout.launch_blockscout(
plan, all_el_client_contexts, persistent
plan,
all_el_client_contexts,
persistent,
global_node_selectors,
)
plan.print("Successfully launched blockscout")
elif additional_service == "dora":
......@@ -342,6 +361,7 @@ def run(plan, args={}):
el_cl_data_files_artifact_uuid,
network_params.electra_fork_epoch,
network_params.network,
global_node_selectors,
)
plan.print("Successfully launched dora")
elif additional_service == "blobscan":
......@@ -352,6 +372,7 @@ def run(plan, args={}):
all_el_client_contexts,
network_params.network_id,
persistent,
global_node_selectors,
)
plan.print("Successfully launched blobscan")
elif additional_service == "full_beaconchain_explorer":
......@@ -365,6 +386,7 @@ def run(plan, args={}):
all_cl_client_contexts,
all_el_client_contexts,
persistent,
global_node_selectors,
)
plan.print("Successfully launched full-beaconchain-explorer")
elif additional_service == "prometheus_grafana":
......@@ -383,6 +405,7 @@ def run(plan, args={}):
args_with_right_defaults.participants,
network_params,
assertoor_params,
global_node_selectors,
)
plan.print("Successfully launched assertoor")
elif additional_service == "custom_flood":
......@@ -392,6 +415,7 @@ def run(plan, args={}):
genesis_constants.PRE_FUNDED_ACCOUNTS[0].address,
fuzz_target,
args_with_right_defaults.custom_flood_params,
global_node_selectors,
)
else:
fail("Invalid additional service %s" % (additional_service))
......@@ -404,6 +428,7 @@ def run(plan, args={}):
prometheus_additional_metrics_jobs,
all_ethereum_metrics_exporter_contexts,
all_xatu_sentry_contexts,
global_node_selectors,
)
plan.print("Launching grafana...")
......@@ -412,6 +437,7 @@ def run(plan, args={}):
grafana_datasource_config_template,
grafana_dashboards_config_template,
prometheus_private_url,
global_node_selectors,
additional_dashboards=args_with_right_defaults.grafana_additional_dashboards,
)
plan.print("Successfully launched grafana")
......
......@@ -11,6 +11,7 @@ participants:
cl_tolerations: []
validator_tolerations: []
tolerations: []
node_selectors: {}
beacon_extra_params: []
beacon_extra_labels: {}
validator_extra_params: []
......@@ -87,3 +88,4 @@ grafana_additional_dashboards: []
persistent: false
xatu_sentry_enabled: false
global_tolerations: []
global_node_selectors: {}
......@@ -36,6 +36,7 @@ def launch_assertoor(
participant_configs,
network_params,
assertoor_params,
global_node_selectors,
):
all_client_info = []
validator_client_info = []
......@@ -91,6 +92,7 @@ def launch_assertoor(
tests_config_artifacts_name,
network_params,
assertoor_params,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
......@@ -101,6 +103,7 @@ def get_config(
tests_config_artifacts_name,
network_params,
assertoor_params,
node_selectors,
):
config_file_path = shared_utils.path_join(
ASSERTOOR_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -127,6 +130,7 @@ def get_config(
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......
......@@ -31,10 +31,16 @@ MIN_MEMORY = 20
MAX_MEMORY = 300
def launch_beacon_metrics_gazer(plan, cl_client_contexts, network_params):
def launch_beacon_metrics_gazer(
plan,
cl_client_contexts,
network_params,
global_node_selectors,
):
config = get_config(
cl_client_contexts[0].ip_addr,
cl_client_contexts[0].http_port_num,
global_node_selectors,
)
beacon_metrics_gazer_service = plan.add_service(SERVICE_NAME, config)
......@@ -51,7 +57,7 @@ def launch_beacon_metrics_gazer(plan, cl_client_contexts, network_params):
)
def get_config(ip_addr, http_port_num):
def get_config(ip_addr, http_port_num, node_selectors):
config_file_path = shared_utils.path_join(
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
BEACON_METRICS_GAZER_CONFIG_FILENAME,
......@@ -76,4 +82,5 @@ def get_config(ip_addr, http_port_num):
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -18,6 +18,7 @@ def launch_blob_spammer(
deneb_fork_epoch,
seconds_per_slot,
genesis_delay,
global_node_selectors,
):
config = get_config(
prefunded_addresses,
......@@ -26,6 +27,7 @@ def launch_blob_spammer(
deneb_fork_epoch,
seconds_per_slot,
genesis_delay,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
......@@ -37,6 +39,7 @@ def get_config(
deneb_fork_epoch,
seconds_per_slot,
genesis_delay,
node_selectors,
):
dencunTime = (deneb_fork_epoch * 32 * seconds_per_slot) + genesis_delay
return ServiceConfig(
......@@ -68,4 +71,5 @@ def get_config(
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -35,11 +35,22 @@ MIN_MEMORY = 10
MAX_MEMORY = 300
def launch(plan, service_name, node_keystore_files, beacon_http_url, extra_params):
def launch(
plan,
service_name,
node_keystore_files,
beacon_http_url,
extra_params,
node_selectors,
):
blobber_service_name = "{0}".format(service_name)
blobber_config = get_config(
service_name, node_keystore_files, beacon_http_url, extra_params
service_name,
node_keystore_files,
beacon_http_url,
extra_params,
node_selectors,
)
blobber_service = plan.add_service(blobber_service_name, blobber_config)
......@@ -49,7 +60,13 @@ def launch(plan, service_name, node_keystore_files, beacon_http_url, extra_param
)
def get_config(service_name, node_keystore_files, beacon_http_url, extra_params):
def get_config(
service_name,
node_keystore_files,
beacon_http_url,
extra_params,
node_selectors,
):
validator_root_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS,
node_keystore_files.raw_root_dirpath,
......@@ -78,4 +95,5 @@ def get_config(service_name, node_keystore_files, beacon_http_url, extra_params)
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -59,7 +59,9 @@ def launch_blobscan(
el_client_contexts,
chain_id,
persistent,
global_node_selectors,
):
node_selectors = global_node_selectors
beacon_node_rpc_uri = "http://{0}:{1}".format(
cl_client_contexts[0].ip_addr, cl_client_contexts[0].http_port_num
)
......@@ -75,24 +77,43 @@ def launch_blobscan(
min_memory=POSTGRES_MIN_MEMORY,
max_memory=POSTGRES_MAX_MEMORY,
persistent=persistent,
node_selectors=node_selectors,
)
api_config = get_api_config(
postgres_output.url,
beacon_node_rpc_uri,
chain_id,
node_selectors,
)
api_config = get_api_config(postgres_output.url, beacon_node_rpc_uri, chain_id)
blobscan_config = plan.add_service(API_SERVICE_NAME, api_config)
blobscan_api_url = "http://{0}:{1}".format(
blobscan_config.ip_address, blobscan_config.ports[HTTP_PORT_ID].number
)
web_config = get_web_config(postgres_output.url, beacon_node_rpc_uri, chain_id)
web_config = get_web_config(
postgres_output.url,
beacon_node_rpc_uri,
chain_id,
node_selectors,
)
plan.add_service(WEB_SERVICE_NAME, web_config)
indexer_config = get_indexer_config(
beacon_node_rpc_uri, execution_node_rpc_uri, blobscan_api_url
beacon_node_rpc_uri,
execution_node_rpc_uri,
blobscan_api_url,
node_selectors,
)
plan.add_service(INDEXER_SERVICE_NAME, indexer_config)
def get_api_config(database_url, beacon_node_rpc, chain_id):
def get_api_config(
database_url,
beacon_node_rpc,
chain_id,
node_selectors,
):
IMAGE_NAME = "blossomlabs/blobscan:stable"
return ServiceConfig(
......@@ -121,10 +142,11 @@ def get_api_config(database_url, beacon_node_rpc, chain_id):
max_cpu=API_MAX_CPU,
min_memory=API_MIN_MEMORY,
max_memory=API_MAX_MEMORY,
node_selectors=node_selectors,
)
def get_web_config(database_url, beacon_node_rpc, chain_id):
def get_web_config(database_url, beacon_node_rpc, chain_id, node_selectors):
# TODO: https://github.com/kurtosis-tech/kurtosis/issues/1861
# Configure NEXT_PUBLIC_BEACON_BASE_URL and NEXT_PUBLIC_EXPLORER_BASE env vars
# once retrieving external URLs from services are supported in Kurtosis.
......@@ -145,10 +167,16 @@ def get_web_config(database_url, beacon_node_rpc, chain_id):
max_cpu=WEB_MAX_CPU,
min_memory=WEB_MIN_MEMORY,
max_memory=WEB_MAX_MEMORY,
node_selectors=node_selectors,
)
def get_indexer_config(beacon_node_rpc, execution_node_rpc, blobscan_api_url):
def get_indexer_config(
beacon_node_rpc,
execution_node_rpc,
blobscan_api_url,
node_selectors,
):
IMAGE_NAME = "blossomlabs/blobscan-indexer:master"
return ServiceConfig(
......@@ -165,4 +193,5 @@ def get_indexer_config(beacon_node_rpc, execution_node_rpc, blobscan_api_url):
max_cpu=INDEX_MAX_CPU,
min_memory=INDEX_MIN_MEMORY,
max_memory=INDEX_MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -38,13 +38,19 @@ VERIF_USED_PORTS = {
}
def launch_blockscout(plan, el_client_contexts, persistent):
def launch_blockscout(
plan,
el_client_contexts,
persistent,
global_node_selectors,
):
postgres_output = postgres.run(
plan,
service_name="{}-postgres".format(SERVICE_NAME_BLOCKSCOUT),
database="blockscout",
extra_configs=["max_connections=1000"],
persistent=persistent,
node_selectors=global_node_selectors,
)
el_client_context = el_client_contexts[0]
......@@ -53,7 +59,7 @@ def launch_blockscout(plan, el_client_contexts, persistent):
)
el_client_name = el_client_context.client_name
config_verif = get_config_verif()
config_verif = get_config_verif(global_node_selectors)
verif_service_name = "{}-verif".format(SERVICE_NAME_BLOCKSCOUT)
verif_service = plan.add_service(verif_service_name, config_verif)
verif_url = "http://{}:{}/api".format(
......@@ -61,7 +67,11 @@ def launch_blockscout(plan, el_client_contexts, persistent):
)
config_backend = get_config_backend(
postgres_output, el_client_rpc_url, verif_url, el_client_name
postgres_output,
el_client_rpc_url,
verif_url,
el_client_name,
global_node_selectors,
)
blockscout_service = plan.add_service(SERVICE_NAME_BLOCKSCOUT, config_backend)
plan.print(blockscout_service)
......@@ -73,7 +83,7 @@ def launch_blockscout(plan, el_client_contexts, persistent):
return blockscout_url
def get_config_verif():
def get_config_verif(node_selectors):
return ServiceConfig(
image=IMAGE_NAME_BLOCKSCOUT_VERIF,
ports=VERIF_USED_PORTS,
......@@ -86,10 +96,13 @@ def get_config_verif():
max_cpu=BLOCKSCOUT_VERIF_MAX_CPU,
min_memory=BLOCKSCOUT_VERIF_MIN_MEMORY,
max_memory=BLOCKSCOUT_VERIF_MAX_MEMORY,
node_selectors=node_selectors,
)
def get_config_backend(postgres_output, el_client_rpc_url, verif_url, el_client_name):
def get_config_backend(
postgres_output, el_client_rpc_url, verif_url, el_client_name, node_selectors
):
database_url = "{protocol}://{user}:{password}@{hostname}:{port}/{database}".format(
protocol="postgresql",
user=postgres_output.user,
......@@ -128,4 +141,5 @@ def get_config_backend(postgres_output, el_client_rpc_url, verif_url, el_client_
max_cpu=BLOCKSCOUT_MAX_CPU,
min_memory=BLOCKSCOUT_MIN_MEMORY,
max_memory=BLOCKSCOUT_MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -9,12 +9,15 @@ MIN_MEMORY = 128
MAX_MEMORY = 2048
def launch_broadcaster(plan, all_el_client_contexts):
config = get_config(all_el_client_contexts)
def launch_broadcaster(plan, all_el_client_contexts, global_node_selectors):
config = get_config(all_el_client_contexts, global_node_selectors)
return plan.add_service(SERVICE_NAME, config)
def get_config(all_el_client_contexts):
def get_config(
all_el_client_contexts,
node_selectors,
):
return ServiceConfig(
image=IMAGE_NAME,
cmd=[
......@@ -25,4 +28,5 @@ def get_config(all_el_client_contexts):
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -123,6 +123,7 @@ def launch(
validator_tolerations,
participant_tolerations,
global_tolerations,
node_selectors,
split_mode_enabled=False,
):
beacon_service_name = "{0}".format(service_name)
......@@ -181,6 +182,7 @@ def launch(
persistent,
cl_volume_size,
tolerations,
node_selectors,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
......@@ -197,6 +199,7 @@ def launch(
node_keystore_files,
beacon_http_url,
blobber_extra_params,
node_selectors,
)
blobber_service = plan.add_service(blobber_service_name, blobber_config)
......@@ -234,6 +237,7 @@ def launch(
extra_validator_labels,
persistent,
tolerations,
node_selectors,
)
validator_service = plan.add_service(validator_service_name, validator_config)
......@@ -313,6 +317,7 @@ def get_beacon_config(
persistent,
cl_volume_size,
tolerations,
node_selectors,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
......@@ -465,6 +470,7 @@ def get_beacon_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......@@ -484,6 +490,7 @@ def get_validator_config(
extra_labels,
persistent,
tolerations,
node_selectors,
):
validator_keys_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS,
......@@ -550,6 +557,7 @@ def get_validator_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -102,6 +102,7 @@ def launch(
validator_tolerations,
participant_tolerations,
global_tolerations,
node_selectors,
split_mode_enabled=False,
):
beacon_service_name = "{0}".format(service_name)
......@@ -159,6 +160,7 @@ def launch(
persistent,
cl_volume_size,
tolerations,
node_selectors,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
......@@ -213,6 +215,7 @@ def launch(
extra_validator_labels,
persistent,
tolerations,
node_selectors,
)
plan.add_service(validator_service_name, validator_config)
......@@ -284,6 +287,7 @@ def get_beacon_config(
persistent,
cl_volume_size,
tolerations,
node_selectors,
):
el_client_rpc_url_str = "http://{0}:{1}".format(
el_client_context.ip_addr,
......@@ -417,6 +421,7 @@ def get_beacon_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......@@ -436,6 +441,7 @@ def get_validator_config(
extra_labels,
persistent,
tolerations,
node_selectors,
):
root_dirpath = shared_utils.path_join(
VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER, service_name
......@@ -500,6 +506,7 @@ def get_validator_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -137,6 +137,7 @@ def launch(
validator_tolerations,
participant_tolerations,
global_tolerations,
node_selectors,
split_mode_enabled,
):
beacon_service_name = "{0}".format(service_name)
......@@ -196,6 +197,7 @@ def launch(
persistent,
cl_volume_size,
tolerations,
node_selectors,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
......@@ -255,6 +257,7 @@ def launch(
extra_validator_labels,
persistent,
tolerations,
node_selectors,
)
validator_service = plan.add_service(validator_service_name, validator_config)
......@@ -310,6 +313,7 @@ def get_beacon_config(
persistent,
cl_volume_size,
tolerations,
node_selectors,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -439,6 +443,7 @@ def get_beacon_config(
),
user=User(uid=0, gid=0),
tolerations=tolerations,
node_selectors=node_selectors,
)
......@@ -458,6 +463,7 @@ def get_validator_config(
extra_labels,
persistent,
tolerations,
node_selectors,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -512,6 +518,7 @@ def get_validator_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -114,6 +114,7 @@ def launch(
validator_tolerations,
participant_tolerations,
global_tolerations,
node_selectors,
split_mode_enabled=False,
):
split_images = images.split(IMAGE_SEPARATOR_DELIMITER)
......@@ -185,6 +186,7 @@ def launch(
persistent,
cl_volume_size,
tolerations,
node_selectors,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
......@@ -223,6 +225,7 @@ def launch(
launcher.prysm_password_artifact_uuid,
persistent,
tolerations,
node_selectors,
)
validator_service = plan.add_service(validator_service_name, validator_config)
......@@ -302,6 +305,7 @@ def get_beacon_config(
persistent,
cl_volume_size,
tolerations,
node_selectors,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
......@@ -433,6 +437,7 @@ def get_beacon_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......@@ -455,6 +460,7 @@ def get_validator_config(
prysm_password_artifact_uuid,
persistent,
tolerations,
node_selectors,
):
validator_keys_dirpath = shared_utils.path_join(
VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER,
......@@ -520,6 +526,7 @@ def get_validator_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -127,6 +127,7 @@ def launch(
validator_tolerations,
participant_tolerations,
global_tolerations,
node_selectors,
split_mode_enabled,
):
beacon_service_name = "{0}".format(service_name)
......@@ -145,13 +146,6 @@ def launch(
param for param in extra_validator_params
]
# Holesky has a bigger memory footprint, so it needs more memory
if launcher.network == "holesky":
holesky_beacon_memory_limit = 4096
bn_max_mem = (
int(bn_max_mem) if int(bn_max_mem) > 0 else holesky_beacon_memory_limit
)
network_name = shared_utils.get_network_name(launcher.network)
bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU
......@@ -196,6 +190,7 @@ def launch(
persistent,
cl_volume_size,
tolerations,
node_selectors,
)
beacon_service = plan.add_service(service_name, config)
......@@ -258,6 +253,7 @@ def launch(
extra_validator_labels,
persistent,
tolerations,
node_selectors,
)
validator_service = plan.add_service(validator_service_name, validator_config)
......@@ -313,6 +309,7 @@ def get_beacon_config(
persistent,
cl_volume_size,
tolerations,
node_selectors,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -492,6 +489,7 @@ def get_beacon_config(
),
user=User(uid=0, gid=0),
tolerations=tolerations,
node_selectors=node_selectors,
)
......@@ -512,6 +510,7 @@ def get_validator_config(
extra_labels,
persistent,
tolerations,
node_selectors,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -577,6 +576,7 @@ def get_validator_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -34,6 +34,7 @@ def launch_dora(
el_cl_data_files_artifact_uuid,
electra_fork_epoch,
network,
global_node_selectors,
):
all_cl_client_info = []
for index, client in enumerate(cl_client_contexts):
......@@ -62,6 +63,7 @@ def launch_dora(
el_cl_data_files_artifact_uuid,
electra_fork_epoch,
network,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
......@@ -72,6 +74,7 @@ def get_config(
el_cl_data_files_artifact_uuid,
electra_fork_epoch,
network,
node_selectors,
):
config_file_path = shared_utils.path_join(
DORA_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -97,6 +100,7 @@ def get_config(
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......
......@@ -76,16 +76,12 @@ def launch(
extra_labels,
persistent,
el_volume_size,
el_tolerations,
participant_tolerations,
global_tolerations,
tolerations,
node_selectors,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
tolerations = input_parser.get_client_tolerations(
el_tolerations, participant_tolerations, global_tolerations
)
network_name = shared_utils.get_network_name(launcher.network)
......@@ -130,6 +126,7 @@ def launch(
persistent,
el_volume_size,
tolerations,
node_selectors,
)
service = plan.add_service(service_name, config)
......@@ -174,6 +171,7 @@ def get_config(
persistent,
el_volume_size,
tolerations,
node_selectors,
):
cmd = [
"besu",
......@@ -272,6 +270,7 @@ def get_config(
),
user=User(uid=0, gid=0),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -76,16 +76,12 @@ def launch(
extra_labels,
persistent,
el_volume_size,
el_tolerations,
participant_tolerations,
global_tolerations,
tolerations,
node_selectors,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
tolerations = input_parser.get_client_tolerations(
el_tolerations, participant_tolerations, global_tolerations
)
network_name = shared_utils.get_network_name(launcher.network)
......@@ -132,6 +128,7 @@ def launch(
persistent,
el_volume_size,
tolerations,
node_selectors,
)
service = plan.add_service(service_name, config)
......@@ -180,6 +177,7 @@ def get_config(
persistent,
el_volume_size,
tolerations,
node_selectors,
):
init_datadir_cmd_str = "erigon init --datadir={0} {1}".format(
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
......@@ -294,6 +292,7 @@ def get_config(
),
user=User(uid=0, gid=0),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -78,16 +78,12 @@ def launch(
extra_labels,
persistent,
el_volume_size,
el_tolerations,
participant_tolerations,
global_tolerations,
tolerations,
node_selectors,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
tolerations = input_parser.get_client_tolerations(
el_tolerations, participant_tolerations, global_tolerations
)
network_name = shared_utils.get_network_name(launcher.network)
......@@ -132,6 +128,7 @@ def launch(
persistent,
el_volume_size,
tolerations,
node_selectors,
)
service = plan.add_service(service_name, config)
......@@ -175,6 +172,7 @@ def get_config(
persistent,
el_volume_size,
tolerations,
node_selectors,
):
cmd = [
"--dataDir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
......@@ -260,6 +258,7 @@ def get_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -87,16 +87,12 @@ def launch(
extra_labels,
persistent,
el_volume_size,
el_tolerations,
participant_tolerations,
global_tolerations,
tolerations,
node_selectors,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
tolerations = input_parser.get_client_tolerations(
el_tolerations, participant_tolerations, global_tolerations
)
network_name = shared_utils.get_network_name(launcher.network)
......@@ -146,6 +142,7 @@ def launch(
persistent,
el_volume_size,
tolerations,
node_selectors,
)
service = plan.add_service(service_name, config)
......@@ -197,6 +194,7 @@ def get_config(
persistent,
el_volume_size,
tolerations,
node_selectors,
):
# TODO: Remove this once electra fork has path based storage scheme implemented
if (
......@@ -379,6 +377,7 @@ def get_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -74,16 +74,12 @@ def launch(
extra_labels,
persistent,
el_volume_size,
el_tolerations,
participant_tolerations,
global_tolerations,
tolerations,
node_selectors,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
tolerations = input_parser.get_client_tolerations(
el_tolerations, participant_tolerations, global_tolerations
)
network_name = shared_utils.get_network_name(launcher.network)
......@@ -128,6 +124,7 @@ def launch(
persistent,
el_volume_size,
tolerations,
node_selectors,
)
service = plan.add_service(service_name, config)
......@@ -173,6 +170,7 @@ def get_config(
persistent,
el_volume_size,
tolerations,
node_selectors,
):
cmd = [
"--log=" + log_level,
......@@ -268,6 +266,7 @@ def get_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -77,16 +77,12 @@ def launch(
extra_labels,
persistent,
el_volume_size,
el_tolerations,
participant_tolerations,
global_tolerations,
tolerations,
node_selectors,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
)
tolerations = input_parser.get_client_tolerations(
el_tolerations, participant_tolerations, global_tolerations
)
network_name = shared_utils.get_network_name(launcher.network)
......@@ -131,6 +127,7 @@ def launch(
persistent,
el_volume_size,
tolerations,
node_selectors,
)
service = plan.add_service(service_name, config)
......@@ -175,6 +172,7 @@ def get_config(
persistent,
el_volume_size,
tolerations,
node_selectors,
):
init_datadir_cmd_str = "reth init --datadir={0} --chain={1}".format(
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
......@@ -274,6 +272,7 @@ def get_config(
extra_labels,
),
tolerations=tolerations,
node_selectors=node_selectors,
)
......
......@@ -30,6 +30,7 @@ def launch_el_forkmon(
plan,
config_template,
el_client_contexts,
global_node_selectors,
):
all_el_client_info = []
for client in el_client_contexts:
......@@ -52,12 +53,15 @@ def launch_el_forkmon(
template_and_data_by_rel_dest_filepath, "el-forkmon-config"
)
config = get_config(config_files_artifact_name)
config = get_config(
config_files_artifact_name,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
def get_config(config_files_artifact_name):
def get_config(config_files_artifact_name, node_selectors):
config_file_path = shared_utils.path_join(
EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, EL_FORKMON_CONFIG_FILENAME
)
......@@ -72,6 +76,7 @@ def get_config(config_files_artifact_name):
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......
......@@ -22,6 +22,7 @@ def launch(
ethereum_metrics_exporter_service_name,
el_client_context,
cl_client_context,
node_selectors,
):
exporter_service = plan.add_service(
ethereum_metrics_exporter_service_name,
......@@ -52,6 +53,7 @@ def launch(
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
),
)
......
......@@ -97,7 +97,9 @@ def launch_full_beacon(
cl_client_contexts,
el_client_contexts,
persistent,
global_node_selectors,
):
node_selectors = global_node_selectors
postgres_output = postgres.run(
plan,
service_name="beaconchain-postgres",
......@@ -110,6 +112,7 @@ def launch_full_beacon(
min_memory=POSTGRES_MIN_MEMORY,
max_memory=POSTGRES_MAX_MEMORY,
persistent=persistent,
node_selectors=node_selectors,
)
redis_output = redis.run(
plan,
......@@ -119,6 +122,7 @@ def launch_full_beacon(
max_cpu=REDIS_MAX_CPU,
min_memory=REDIS_MIN_MEMORY,
max_memory=REDIS_MAX_MEMORY,
node_selectors=node_selectors,
)
# TODO perhaps create a new service for the littlebigtable
little_bigtable = plan.add_service(
......@@ -134,6 +138,7 @@ def launch_full_beacon(
max_cpu=LITTLE_BIGTABLE_MAX_CPU,
min_memory=LITTLE_BIGTABLE_MIN_MEMORY,
max_memory=LITTLE_BIGTABLE_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -178,6 +183,7 @@ def launch_full_beacon(
max_cpu=INIT_MAX_CPU,
min_memory=INIT_MIN_MEMORY,
max_memory=INIT_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -224,6 +230,7 @@ def launch_full_beacon(
max_cpu=INDEXER_MAX_CPU,
min_memory=INDEXER_MIN_MEMORY,
max_memory=INDEXER_MAX_MEMORY,
node_selectors=node_selectors,
),
)
# Start the eth1indexer
......@@ -250,6 +257,7 @@ def launch_full_beacon(
max_cpu=ETH1INDEXER_MAX_CPU,
min_memory=ETH1INDEXER_MIN_MEMORY,
max_memory=ETH1INDEXER_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -269,6 +277,7 @@ def launch_full_beacon(
max_cpu=REWARDSEXPORTER_MAX_CPU,
min_memory=REWARDSEXPORTER_MIN_MEMORY,
max_memory=REWARDSEXPORTER_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -291,6 +300,7 @@ def launch_full_beacon(
max_cpu=STATISTICS_MAX_CPU,
min_memory=STATISTICS_MIN_MEMORY,
max_memory=STATISTICS_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -310,6 +320,7 @@ def launch_full_beacon(
max_cpu=FDU_MAX_CPU,
min_memory=FDU_MIN_MEMORY,
max_memory=FDU_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -337,6 +348,7 @@ def launch_full_beacon(
max_cpu=FRONTEND_MAX_CPU,
min_memory=FRONTEND_MIN_MEMORY,
max_memory=FRONTEND_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......
......@@ -17,6 +17,7 @@ def launch_goomy_blob(
cl_client_context,
seconds_per_slot,
goomy_blob_params,
global_node_selectors,
):
config = get_config(
prefunded_addresses,
......@@ -24,6 +25,7 @@ def launch_goomy_blob(
cl_client_context,
seconds_per_slot,
goomy_blob_params.goomy_blob_args,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
......@@ -34,6 +36,7 @@ def get_config(
cl_client_context,
seconds_per_slot,
goomy_blob_args,
node_selectors,
):
goomy_cli_args = []
for index, client in enumerate(el_client_contexts):
......@@ -77,4 +80,5 @@ def get_config(
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -50,6 +50,7 @@ def launch_grafana(
datasource_config_template,
dashboard_providers_config_template,
prometheus_private_url,
global_node_selectors,
additional_dashboards=[],
):
(
......@@ -73,6 +74,7 @@ def launch_grafana(
config = get_config(
grafana_config_artifacts_uuid,
merged_dashboards_artifact_name,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
......@@ -127,6 +129,7 @@ def get_grafana_config_dir_artifact_uuid(
def get_config(
grafana_config_artifacts_name,
grafana_dashboards_artifacts_name,
node_selectors,
):
return ServiceConfig(
image=IMAGE_NAME,
......@@ -146,6 +149,7 @@ def get_config(
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......
......@@ -23,8 +23,20 @@ MIN_MEMORY = 16
MAX_MEMORY = 256
def launch(plan, mev_boost_launcher, service_name, network_id, mev_boost_image):
config = get_config(mev_boost_launcher, network_id, mev_boost_image)
def launch(
plan,
mev_boost_launcher,
service_name,
network_id,
mev_boost_image,
global_node_selectors,
):
config = get_config(
mev_boost_launcher,
network_id,
mev_boost_image,
global_node_selectors,
)
mev_boost_service = plan.add_service(service_name, config)
......@@ -33,7 +45,12 @@ def launch(plan, mev_boost_launcher, service_name, network_id, mev_boost_image):
)
def get_config(mev_boost_launcher, network_id, mev_boost_image):
def get_config(
mev_boost_launcher,
network_id,
mev_boost_image,
node_selectors,
):
command = ["mev-boost"]
if mev_boost_launcher.should_check_relay:
......@@ -60,6 +77,7 @@ def get_config(mev_boost_launcher, network_id, mev_boost_image):
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......
......@@ -8,7 +8,14 @@ MIN_MEMORY = 128
MAX_MEMORY = 1024
def spam_in_background(plan, sender_key, receiver_key, el_uri, params):
def spam_in_background(
plan,
sender_key,
receiver_key,
el_uri,
params,
global_node_selectors,
):
sender_script = plan.upload_files(src="./sender.py", name="mev-custom-flood-sender")
plan.add_service(
......@@ -26,6 +33,7 @@ def spam_in_background(plan, sender_key, receiver_key, el_uri, params):
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=global_node_selectors,
),
)
......
......@@ -12,7 +12,14 @@ def prefixed_address(address):
return "0x" + address
def launch_mev_flood(plan, image, el_uri, contract_owner, normal_user):
def launch_mev_flood(
plan,
image,
el_uri,
contract_owner,
normal_user,
global_node_selectors,
):
plan.add_service(
name="mev-flood",
config=ServiceConfig(
......@@ -22,6 +29,7 @@ def launch_mev_flood(plan, image, el_uri, contract_owner, normal_user):
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=global_node_selectors,
),
)
......
......@@ -48,7 +48,9 @@ def launch_mev_relay(
builder_uri,
seconds_per_slot,
persistent,
global_node_selectors,
):
node_selectors = global_node_selectors
redis = redis_module.run(
plan,
service_name="mev-relay-redis",
......@@ -56,6 +58,7 @@ def launch_mev_relay(
max_cpu=REDIS_MAX_CPU,
min_memory=REDIS_MIN_MEMORY,
max_memory=REDIS_MAX_MEMORY,
node_selectors=node_selectors,
)
# making the password postgres as the relay expects it to be postgres
postgres = postgres_module.run(
......@@ -70,6 +73,7 @@ def launch_mev_relay(
max_cpu=POSTGRES_MAX_CPU,
min_memory=POSTGRES_MIN_MEMORY,
max_memory=POSTGRES_MAX_MEMORY,
node_selectors=node_selectors,
)
network_name = NETWORK_ID_TO_NAME.get(network_id, network_id)
......@@ -110,6 +114,7 @@ def launch_mev_relay(
max_cpu=RELAY_MAX_CPU,
min_memory=RELAY_MIN_MEMORY,
max_memory=RELAY_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -145,6 +150,7 @@ def launch_mev_relay(
max_cpu=RELAY_MAX_CPU,
min_memory=RELAY_MIN_MEMORY,
max_memory=RELAY_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......@@ -177,6 +183,7 @@ def launch_mev_relay(
max_cpu=RELAY_MAX_CPU,
min_memory=RELAY_MIN_MEMORY,
max_memory=RELAY_MAX_MEMORY,
node_selectors=node_selectors,
),
)
......
......@@ -10,7 +10,14 @@ MIN_MEMORY = 128
MAX_MEMORY = 1024
def launch_mock_mev(plan, el_uri, beacon_uri, jwt_secret, global_client_log_level):
def launch_mock_mev(
plan,
el_uri,
beacon_uri,
jwt_secret,
global_client_log_level,
global_node_selectors,
):
mock_builder = plan.add_service(
name=MOCK_MEV_SERVICE_NAME,
config=ServiceConfig(
......@@ -31,6 +38,7 @@ def launch_mock_mev(plan, el_uri, beacon_uri, jwt_secret, global_client_log_leve
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=global_node_selectors,
),
)
return "http://{0}@{1}:{2}".format(
......
......@@ -81,7 +81,15 @@ def input_parser(plan, input_args):
result["assertoor_params"] = get_default_assertoor_params()
result["xatu_sentry_params"] = get_default_xatu_sentry_params()
result["persistent"] = False
result["parallel_keystore_generation"] = False
result["global_tolerations"] = []
result["global_node_selectors"] = {}
if constants.NETWORK_NAME.shadowfork in result["network_params"]["network"]:
shadow_base = result["network_params"]["network"].split("-shadowfork")[0]
result["network_params"][
"deposit_contract_address"
] = constants.DEPOSIT_CONTRACT_ADDRESS[shadow_base]
if constants.NETWORK_NAME.shadowfork in result["network_params"]["network"]:
shadow_base = result["network_params"]["network"].split("-shadowfork")[0]
......@@ -162,6 +170,7 @@ def input_parser(plan, input_args):
cl_tolerations=participant["cl_tolerations"],
tolerations=participant["tolerations"],
validator_tolerations=participant["validator_tolerations"],
node_selectors=participant["node_selectors"],
beacon_extra_params=participant["beacon_extra_params"],
beacon_extra_labels=participant["beacon_extra_labels"],
validator_extra_params=participant["validator_extra_params"],
......@@ -288,6 +297,7 @@ def input_parser(plan, input_args):
xatu_server_tls=result["xatu_sentry_params"]["xatu_server_tls"],
),
global_tolerations=result["global_tolerations"],
global_node_selectors=result["global_node_selectors"],
)
......@@ -518,6 +528,15 @@ def get_client_tolerations(
return toleration_list
def get_client_node_selectors(participant_node_selectors, global_node_selectors):
    """Resolve the effective node selectors for a single client.

    A participant-level ``node_selectors`` mapping, when non-empty, takes
    precedence over the network-wide ``global_node_selectors``; when both are
    empty/falsy the result is an empty dict (no selector constraints).

    Args:
        participant_node_selectors: per-participant selector mapping (may be
            empty or None).
        global_node_selectors: package-wide fallback selector mapping (may be
            empty or None).

    Returns:
        dict: the selector mapping to pass to the container's ServiceConfig.
    """
    # Participant-specific selectors override the global ones entirely
    # (no merging) — this mirrors the override semantics used for tolerations.
    if participant_node_selectors:
        return participant_node_selectors
    return global_node_selectors if global_node_selectors else {}
def default_input_args():
network_params = default_network_params()
participants = [default_participant()]
......@@ -575,6 +594,7 @@ def default_participant():
"cl_tolerations": [],
"validator_tolerations": [],
"tolerations": [],
"node_selectors": {},
"beacon_extra_params": [],
"beacon_extra_labels": {},
"validator_extra_params": [],
......@@ -603,6 +623,8 @@ def default_participant():
},
"blobber_enabled": False,
"blobber_extra_params": [],
"global_tolerations": [],
"global_node_selectors": {},
}
......
......@@ -68,6 +68,7 @@ def launch_participant_network(
persistent,
xatu_sentry_params,
global_tolerations,
global_node_selectors,
parallel_keystore_generation=False,
):
network_id = network_params.network_id
......@@ -113,13 +114,17 @@ def launch_participant_network(
store=[StoreSpec(src="/shadowfork", name="latest_blocks")],
)
# maybe we can do the copy in the same step as the fetch?
for index, participant in enumerate(participants):
tolerations = input_parser.get_client_tolerations(
participant.el_tolerations,
participant.tolerations,
global_tolerations,
)
node_selectors = input_parser.get_client_node_selectors(
participant.node_selectors,
global_node_selectors,
)
cl_client_type = participant.cl_client_type
el_client_type = participant.el_client_type
......@@ -160,12 +165,8 @@ def launch_participant_network(
],
),
},
env_vars={
"RCLONE_CONFIG_MYS3_TYPE": "s3",
"RCLONE_CONFIG_MYS3_PROVIDER": "DigitalOcean",
"RCLONE_CONFIG_MYS3_ENDPOINT": "https://ams3.digitaloceanspaces.com",
},
tolerations=tolerations,
node_selectors=node_selectors,
),
)
for index, participant in enumerate(participants):
......@@ -423,7 +424,13 @@ def launch_participant_network(
for index, participant in enumerate(participants):
cl_client_type = participant.cl_client_type
el_client_type = participant.el_client_type
node_selectors = input_parser.get_client_node_selectors(
participant.node_selectors,
global_node_selectors,
)
tolerations = input_parser.get_client_tolerations(
participant.el_tolerations, participant.tolerations, global_tolerations
)
if el_client_type not in el_launchers:
fail(
"Unsupported launcher '{0}', need one of '{1}'".format(
......@@ -460,9 +467,8 @@ def launch_participant_network(
participant.el_extra_labels,
persistent,
participant.el_client_volume_size,
participant.el_tolerations,
participant.tolerations,
global_tolerations,
tolerations,
node_selectors,
)
# Add participant el additional prometheus metrics
......@@ -538,6 +544,10 @@ def launch_participant_network(
for index, participant in enumerate(participants):
cl_client_type = participant.cl_client_type
el_client_type = participant.el_client_type
node_selectors = input_parser.get_client_node_selectors(
participant.node_selectors,
global_node_selectors,
)
if cl_client_type not in cl_launchers:
fail(
......@@ -574,6 +584,7 @@ def launch_participant_network(
plan,
snooper_service_name,
el_client_context,
node_selectors,
)
plan.print(
"Successfully added {0} snooper participants".format(
......@@ -615,6 +626,7 @@ def launch_participant_network(
participant.validator_tolerations,
participant.tolerations,
global_tolerations,
node_selectors,
participant.cl_split_mode_enabled,
)
else:
......@@ -651,6 +663,7 @@ def launch_participant_network(
participant.validator_tolerations,
participant.tolerations,
global_tolerations,
node_selectors,
participant.cl_split_mode_enabled,
)
......@@ -676,6 +689,7 @@ def launch_participant_network(
ethereum_metrics_exporter_service_name,
el_client_context,
cl_client_context,
node_selectors,
)
plan.print(
"Successfully added {0} ethereum metrics exporter participants".format(
......@@ -699,6 +713,7 @@ def launch_participant_network(
xatu_sentry_params,
network_params,
pair_name,
node_selectors,
)
plan.print(
"Successfully added {0} xatu sentry participants".format(
......
......@@ -26,6 +26,7 @@ def launch_prometheus(
additional_metrics_jobs,
ethereum_metrics_exporter_contexts,
xatu_sentry_contexts,
global_node_selectors,
):
metrics_jobs = get_metrics_jobs(
el_client_contexts,
......@@ -35,7 +36,13 @@ def launch_prometheus(
xatu_sentry_contexts,
)
prometheus_url = prometheus.run(
plan, metrics_jobs, MIN_CPU, MAX_CPU, MIN_MEMORY, MAX_MEMORY
plan,
metrics_jobs,
MIN_CPU,
MAX_CPU,
MIN_MEMORY,
MAX_MEMORY,
node_selectors=global_node_selectors,
)
return prometheus_url
......
......@@ -25,10 +25,10 @@ MIN_MEMORY = 10
MAX_MEMORY = 300
def launch(plan, service_name, el_client_context):
def launch(plan, service_name, el_client_context, node_selectors):
snooper_service_name = "{0}".format(service_name)
snooper_config = get_config(service_name, el_client_context)
snooper_config = get_config(service_name, el_client_context, node_selectors)
snooper_service = plan.add_service(snooper_service_name, snooper_config)
snooper_http_port = snooper_service.ports[SNOOPER_ENGINE_RPC_PORT_ID]
......@@ -37,7 +37,7 @@ def launch(plan, service_name, el_client_context):
)
def get_config(service_name, el_client_context):
def get_config(service_name, el_client_context, node_selectors):
engine_rpc_port_num = "http://{0}:{1}".format(
el_client_context.ip_addr,
el_client_context.engine_rpc_port_num,
......@@ -58,4 +58,5 @@ def get_config(service_name, el_client_context):
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -9,18 +9,30 @@ MAX_MEMORY = 300
def launch_transaction_spammer(
plan, prefunded_addresses, el_uri, tx_spammer_params, electra_fork_epoch
plan,
prefunded_addresses,
el_uri,
tx_spammer_params,
electra_fork_epoch,
global_node_selectors,
):
config = get_config(
prefunded_addresses,
el_uri,
tx_spammer_params.tx_spammer_extra_args,
electra_fork_epoch,
global_node_selectors,
)
plan.add_service(SERVICE_NAME, config)
def get_config(prefunded_addresses, el_uri, tx_spammer_extra_args, electra_fork_epoch):
def get_config(
prefunded_addresses,
el_uri,
tx_spammer_extra_args,
electra_fork_epoch,
node_selectors,
):
# Temp hack to use the old tx-fuzz image until we can get the new one working
if electra_fork_epoch != None:
tx_spammer_image = "ethpandaops/tx-fuzz:kaustinen-281adbc"
......@@ -43,4 +55,5 @@ def get_config(prefunded_addresses, el_uri, tx_spammer_extra_args, electra_fork_
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
)
......@@ -22,6 +22,7 @@ def launch(
xatu_sentry_params,
network_params,
pair_name,
node_selectors,
):
config_template = read_file(static_files.XATU_SENTRY_CONFIG_TEMPLATE_FILEPATH)
......@@ -81,6 +82,7 @@ def launch(
max_cpu=MAX_CPU,
min_memory=MIN_MEMORY,
max_memory=MAX_MEMORY,
node_selectors=node_selectors,
),
)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment