Commit b61a128b authored by Barnabas Busa, committed by GitHub

fix!: remove vc_count (#844)

parent 3bb88e04
@@ -12,6 +12,5 @@ additional_services:
- prometheus_grafana
mev_params:
mev_boost_image: ghcr.io/commit-boost/pbs:latest
mev_relay_image: flashbots/mev-boost-relay:latest
network_params:
seconds_per_slot: 3
@@ -13,6 +13,7 @@ participants:
cl_type: grandine
additional_services:
- assertoor
- dora
assertoor_params:
run_stability_check: false
run_block_proposal_check: true
@@ -304,10 +304,6 @@ participants:
# - vero: ghcr.io/serenita-org/vero:master
vc_image: ""
# The number of validator clients to run for this participant
# Defaults to 1
vc_count: 1
# The log level string that this participant's validator client should log at
# If this is emptystring then the global `logLevel` parameter's value will be translated into a string appropriate for the client (e.g. if
# global `logLevel` = `info` then Teku would receive `INFO`, Prysm would receive `info`, etc.)
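Note: the `fix!` prefix marks this as a breaking change, so configs that set `vc_count` must be migrated. A hedged sketch of the likely migration path, with hypothetical values and written as a Starlark/Python dict rather than the package's usual YAML:

```python
# Pre-#844: one participant fanning its keys across two validator clients.
old_args = {
    "participants": [
        {"el_type": "geth", "cl_type": "lighthouse", "vc_count": 2},
    ],
}

# Post-#844: the same topology as two explicit participants, each running
# exactly one validator client.
new_args = {
    "participants": [
        {"el_type": "geth", "cl_type": "lighthouse"},
        {"el_type": "geth", "cl_type": "lighthouse"},
    ],
}
```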
@@ -133,7 +133,10 @@ def run(plan, args={}):
args_with_right_defaults.mev_params.mev_builder_extra_data,
global_node_selectors,
)
elif args_with_right_defaults.mev_type == constants.FLASHBOTS_MEV_TYPE:
elif (
args_with_right_defaults.mev_type == constants.FLASHBOTS_MEV_TYPE
or args_with_right_defaults.mev_type == constants.COMMIT_BOOST_MEV_TYPE
):
plan.print("Generating flashbots builder config file")
flashbots_builder_config_file = flashbots_mev_rbuilder.new_builder_config(
plan,
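Note: the new branch treats `commit-boost` as a second consumer of the flashbots rbuilder config. A tuple membership test would express the same condition more compactly (a style sketch, not part of the commit):

```python
# Equivalent to the two-way `or` comparison added above.
if args_with_right_defaults.mev_type in (
    constants.FLASHBOTS_MEV_TYPE,
    constants.COMMIT_BOOST_MEV_TYPE,
):
    plan.print("Generating flashbots builder config file")
```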
@@ -31,7 +31,6 @@ participants:
vc_type: lighthouse
vc_image: sigp/lighthouse:latest-unstable
vc_log_level: ""
vc_count: 1
vc_extra_env_vars: {}
vc_extra_labels: {}
vc_extra_params: []
@@ -120,7 +120,7 @@ def launch(
cl_service_name = "cl-{0}-{1}-{2}".format(index_str, cl_type, el_type)
new_cl_node_validator_keystores = None
if participant.validator_count != 0 and participant.vc_count != 0:
if participant.validator_count != 0:
new_cl_node_validator_keystores = preregistered_validator_keys_for_nodes[
index
]
@@ -145,16 +145,16 @@ def launch(
snooper_engine_context
)
)
checkpoint_sync_url = args_with_right_defaults.checkpoint_sync_url
if args_with_right_defaults.checkpoint_sync_enabled:
if args_with_right_defaults.checkpoint_sync_url == "":
if (
network_params.network in constants.PUBLIC_NETWORKS
or network_params.network == constants.NETWORK_NAME.ephemery
):
args_with_right_defaults.checkpoint_sync_url = (
constants.CHECKPOINT_SYNC_URL[network_params.network]
)
checkpoint_sync_url = constants.CHECKPOINT_SYNC_URL[
network_params.network
]
else:
fail(
"Checkpoint sync URL is required if you enabled checkpoint_sync for custom networks. Please provide a valid URL."
@@ -178,7 +178,7 @@ def launch(
tolerations,
node_selectors,
args_with_right_defaults.checkpoint_sync_enabled,
args_with_right_defaults.checkpoint_sync_url,
checkpoint_sync_url,
args_with_right_defaults.port_publisher,
index,
)
@@ -199,7 +199,7 @@ def launch(
tolerations,
node_selectors,
args_with_right_defaults.checkpoint_sync_enabled,
args_with_right_defaults.checkpoint_sync_url,
checkpoint_sync_url,
args_with_right_defaults.port_publisher,
index,
)
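Note: instead of mutating `args_with_right_defaults.checkpoint_sync_url` in place, the resolved URL now lives in a local that both launch calls receive. The resolution logic, restated as a standalone helper (hypothetical refactor; the commit keeps it inline):

```python
def resolve_checkpoint_sync_url(args_with_right_defaults, network_params):
    # Start from whatever the user configured.
    url = args_with_right_defaults.checkpoint_sync_url
    if args_with_right_defaults.checkpoint_sync_enabled and url == "":
        if (
            network_params.network in constants.PUBLIC_NETWORKS
            or network_params.network == constants.NETWORK_NAME.ephemery
        ):
            # Known networks have a default checkpoint-sync endpoint.
            url = constants.CHECKPOINT_SYNC_URL[network_params.network]
        else:
            fail(
                "Checkpoint sync URL is required if you enabled checkpoint_sync for custom networks. Please provide a valid URL."
            )
    return url
```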
@@ -136,7 +136,10 @@ def get_config(
constants.METRICS_PORT_ID: METRICS_PORT_NUM,
}
if launcher.builder_type == "flashbots":
if (
launcher.builder_type == constants.FLASHBOTS_MEV_TYPE
or launcher.builder_type == constants.COMMIT_BOOST_MEV_TYPE
):
used_port_assignments[constants.RBUILDER_PORT_ID] = RBUILDER_PORT_NUM
used_ports = shared_utils.get_port_specs(used_port_assignments)
@@ -161,7 +164,10 @@ def get_config(
"--http.addr=0.0.0.0",
"--http.corsdomain=*",
"--http.api=admin,net,eth,web3,debug,txpool,trace{0}".format(
",flashbots" if launcher.builder_type == "flashbots" else ""
",flashbots"
if launcher.builder_type == constants.FLASHBOTS_MEV_TYPE
or launcher.builder_type == constants.COMMIT_BOOST_MEV_TYPE
else ""
),
"--ws",
"--ws.addr=0.0.0.0",
@@ -220,11 +226,14 @@ def get_config(
)
env_vars = {}
image = participant.el_image
if launcher.builder_type == "mev-rs":
if launcher.builder_type == constants.MEV_RS_MEV_TYPE:
files[
mev_rs_builder.MEV_BUILDER_MOUNT_DIRPATH_ON_SERVICE
] = mev_rs_builder.MEV_BUILDER_FILES_ARTIFACT_NAME
elif launcher.builder_type == "flashbots":
elif (
launcher.builder_type == constants.FLASHBOTS_MEV_TYPE
or launcher.builder_type == constants.COMMIT_BOOST_MEV_TYPE
):
image = constants.DEFAULT_FLASHBOTS_BUILDER_IMAGE
cl_client_name = service_name.split("-")[4]
cmd.append("--engine.experimental")
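Note: replacing the string literals `"flashbots"` and `"mev-rs"` with constants also makes the commit-boost case reuse the rbuilder image, port, and `flashbots` RPC namespace. The same two-way comparison now appears three times in this file; a small helper (hypothetical, not in the commit) would centralize it:

```python
def uses_rbuilder(builder_type):
    # True for the builder types that run the flashbots rbuilder.
    return builder_type in (
        constants.FLASHBOTS_MEV_TYPE,
        constants.COMMIT_BOOST_MEV_TYPE,
    )
```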
@@ -238,7 +238,6 @@ def input_parser(plan, input_args):
vc_type=participant["vc_type"],
vc_image=participant["vc_image"],
vc_log_level=participant["vc_log_level"],
vc_count=participant["vc_count"],
vc_tolerations=participant["vc_tolerations"],
cl_extra_params=participant["cl_extra_params"],
cl_extra_labels=participant["cl_extra_labels"],
@@ -642,30 +641,6 @@ def parse_network_params(plan, input_args):
remote_signer_type, ""
)
if result["parallel_keystore_generation"] and participant["vc_count"] != 1:
fail(
"parallel_keystore_generation is only supported for 1 validator client per participant (for now)"
)
# If the num validator keys per node is not divisible by vc_count of a participant, fail
if (
participant["vc_count"] > 0
and result["network_params"]["num_validator_keys_per_node"]
% participant["vc_count"]
!= 0
):
fail(
"num_validator_keys_per_node: {0} is not divisible by vc_count: {1} for participant: {2}".format(
result["network_params"]["num_validator_keys_per_node"],
participant["vc_count"],
str(index + 1)
+ "-"
+ participant["el_type"]
+ "-"
+ participant["cl_type"],
)
)
snooper_enabled = participant["snooper_enabled"]
if snooper_enabled == None:
participant["snooper_enabled"] = result["snooper_enabled"]
@@ -972,7 +947,6 @@ def default_participant():
"vc_type": "",
"vc_image": "",
"vc_log_level": "",
"vc_count": 1,
"vc_extra_env_vars": {},
"vc_extra_labels": {},
"vc_extra_params": [],
@@ -28,7 +28,6 @@ PARTICIPANT_CATEGORIES = {
"use_separate_vc",
"vc_type",
"vc_image",
"vc_count",
"vc_log_level",
"vc_extra_env_vars",
"vc_extra_labels",
@@ -96,7 +95,6 @@ PARTICIPANT_MATRIX_PARAMS = {
"use_separate_vc",
"vc_type",
"vc_image",
"vc_count",
"vc_log_level",
"vc_extra_env_vars",
"vc_extra_labels",
@@ -112,7 +110,6 @@ PARTICIPANT_MATRIX_PARAMS = {
"vc": [
"vc_type",
"vc_image",
"vc_count",
"vc_log_level",
"vc_extra_env_vars",
"vc_extra_labels",
@@ -191,6 +191,9 @@ def launch_participant_network(
]
current_vc_index = 0
if not args_with_right_defaults.participants:
fail("No participants configured")
for index, participant in enumerate(args_with_right_defaults.participants):
el_type = participant.el_type
cl_type = participant.cl_type
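Note: besides failing fast on an empty participant list, the rewrite below switches every context lookup to a bounds-checked form, so a shorter-than-expected context list yields `None` instead of an index error. The recurring idiom, as a helper sketch (hypothetical; the commit inlines it at each site):

```python
def get_or_none(contexts, index):
    # Mirrors `contexts[index] if index < len(contexts) else None` used throughout.
    return contexts[index] if index < len(contexts) else None
```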
@@ -199,38 +202,34 @@ def launch_participant_network(
index_str = shared_utils.zfill_custom(
index + 1, len(str(len(args_with_right_defaults.participants)))
)
for sub_index in range(participant.vc_count):
vc_index_str = shared_utils.zfill_custom(
sub_index + 1, len(str(participant.vc_count))
)
el_context = all_el_contexts[index]
cl_context = all_cl_contexts[index]
el_context = all_el_contexts[index] if index < len(all_el_contexts) else None
cl_context = all_cl_contexts[index] if index < len(all_cl_contexts) else None
node_selectors = input_parser.get_client_node_selectors(
participant.node_selectors,
global_node_selectors,
)
if participant.ethereum_metrics_exporter_enabled:
pair_name = "{0}-{1}-{2}".format(index_str, cl_type, el_type)
node_selectors = input_parser.get_client_node_selectors(
participant.node_selectors,
global_node_selectors,
)
if participant.ethereum_metrics_exporter_enabled:
pair_name = "{0}-{1}-{2}".format(index_str, cl_type, el_type)
ethereum_metrics_exporter_service_name = (
"ethereum-metrics-exporter-{0}".format(pair_name)
)
ethereum_metrics_exporter_service_name = (
"ethereum-metrics-exporter-{0}".format(pair_name)
)
ethereum_metrics_exporter_context = ethereum_metrics_exporter.launch(
plan,
pair_name,
ethereum_metrics_exporter_service_name,
el_context,
cl_context,
node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print(
"Successfully added {0} ethereum metrics exporter participants".format(
ethereum_metrics_exporter_context
)
ethereum_metrics_exporter_context = ethereum_metrics_exporter.launch(
plan,
pair_name,
ethereum_metrics_exporter_service_name,
el_context,
cl_context,
node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print(
"Successfully added {0} ethereum metrics exporter participants".format(
ethereum_metrics_exporter_context
)
)
all_ethereum_metrics_exporter_contexts.append(
ethereum_metrics_exporter_context
@@ -238,165 +237,150 @@ def launch_participant_network(
xatu_sentry_context = None
if participant.xatu_sentry_enabled:
pair_name = "{0}-{1}-{2}".format(index_str, cl_type, el_type)
if participant.xatu_sentry_enabled:
pair_name = "{0}-{1}-{2}".format(index_str, cl_type, el_type)
xatu_sentry_service_name = "xatu-sentry-{0}".format(pair_name)
xatu_sentry_service_name = "xatu-sentry-{0}".format(pair_name)
xatu_sentry_context = xatu_sentry.launch(
plan,
xatu_sentry_service_name,
cl_context,
xatu_sentry_params,
network_params,
pair_name,
node_selectors,
)
plan.print(
"Successfully added {0} xatu sentry participants".format(
xatu_sentry_context
)
xatu_sentry_context = xatu_sentry.launch(
plan,
xatu_sentry_service_name,
cl_context,
xatu_sentry_params,
network_params,
pair_name,
node_selectors,
)
plan.print(
"Successfully added {0} xatu sentry participants".format(
xatu_sentry_context
)
)
all_xatu_sentry_contexts.append(xatu_sentry_context)
plan.print(
"Successfully added {0} CL participants".format(num_participants)
)
plan.print("Successfully added {0} CL participants".format(num_participants))
plan.print("Start adding validators for participant #{0}".format(index_str))
if participant.use_separate_vc == None:
# This should only be the case for the MEV participant,
# the regular participants default to False/True
all_vc_contexts.append(None)
all_remote_signer_contexts.append(None)
all_snooper_beacon_contexts.append(None)
continue
if (
cl_type in _cls_that_need_separate_vc
and not participant.use_separate_vc
):
fail("{0} needs a separate validator client!".format(cl_type))
if not participant.use_separate_vc:
all_vc_contexts.append(None)
all_remote_signer_contexts.append(None)
all_snooper_beacon_contexts.append(None)
continue
plan.print("Start adding validators for participant #{0}".format(index_str))
if participant.use_separate_vc == None:
# This should only be the case for the MEV participant,
# the regular participants default to False/True
all_vc_contexts.append(None)
all_remote_signer_contexts.append(None)
all_snooper_beacon_contexts.append(None)
continue
plan.print(
"Using separate validator client for participant #{0}".format(index_str)
)
if cl_type in _cls_that_need_separate_vc and not participant.use_separate_vc:
fail("{0} needs a separate validator client!".format(cl_type))
vc_keystores = None
if participant.validator_count != 0:
if participant.vc_count == 1:
vc_keystores = preregistered_validator_keys_for_nodes[index]
else:
vc_keystores = preregistered_validator_keys_for_nodes[
index + sub_index
]
vc_context = None
remote_signer_context = None
snooper_beacon_context = None
if participant.snooper_enabled:
snooper_service_name = "snooper-beacon-{0}-{1}-{2}{3}".format(
index_str,
cl_type,
vc_type,
"-" + vc_index_str if participant.vc_count != 1 else "",
)
snooper_beacon_context = beacon_snooper.launch(
plan,
snooper_service_name,
cl_context,
node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print(
"Successfully added {0} snooper participants".format(
snooper_beacon_context
)
)
all_snooper_beacon_contexts.append(snooper_beacon_context)
full_name = (
"{0}-{1}-{2}-{3}{4}".format(
index_str,
el_type,
cl_type,
vc_type,
"-" + vc_index_str if participant.vc_count != 1 else "",
)
if participant.cl_type != participant.vc_type
else "{0}-{1}-{2}{3}".format(
index_str,
el_type,
cl_type,
"-" + vc_index_str if participant.vc_count != 1 else "",
)
)
if not participant.use_separate_vc:
all_vc_contexts.append(None)
all_remote_signer_contexts.append(None)
all_snooper_beacon_contexts.append(None)
continue
if participant.use_remote_signer:
remote_signer_context = remote_signer.launch(
plan=plan,
launcher=remote_signer.new_remote_signer_launcher(
el_cl_genesis_data=el_cl_data
),
service_name="signer-{0}".format(full_name),
remote_signer_type=remote_signer_type,
image=participant.remote_signer_image,
full_name="{0}-remote_signer".format(full_name),
vc_type=vc_type,
node_keystore_files=vc_keystores,
participant=participant,
global_tolerations=global_tolerations,
node_selectors=node_selectors,
port_publisher=args_with_right_defaults.port_publisher,
remote_signer_index=current_vc_index,
)
plan.print(
"Using separate validator client for participant #{0}".format(index_str)
)
all_remote_signer_contexts.append(remote_signer_context)
if remote_signer_context and remote_signer_context.metrics_info:
remote_signer_context.metrics_info[
"config"
] = participant.prometheus_config
vc_keystores = None
if participant.validator_count != 0:
vc_keystores = preregistered_validator_keys_for_nodes[index]
vc_context = vc.launch(
vc_context = None
remote_signer_context = None
snooper_beacon_context = None
if participant.snooper_enabled:
snooper_service_name = "snooper-beacon-{0}-{1}-{2}".format(
index_str,
cl_type,
vc_type,
)
snooper_beacon_context = beacon_snooper.launch(
plan,
snooper_service_name,
cl_context,
node_selectors,
args_with_right_defaults.docker_cache_params,
)
plan.print(
"Successfully added {0} snooper participants".format(
snooper_beacon_context
)
)
all_snooper_beacon_contexts.append(snooper_beacon_context)
full_name = (
"{0}-{1}-{2}-{3}".format(
index_str,
el_type,
cl_type,
vc_type,
)
if participant.cl_type != participant.vc_type
else "{0}-{1}-{2}".format(
index_str,
el_type,
cl_type,
)
)
if participant.use_remote_signer:
remote_signer_context = remote_signer.launch(
plan=plan,
launcher=vc.new_vc_launcher(el_cl_genesis_data=el_cl_data),
keymanager_file=keymanager_file,
service_name="vc-{0}".format(full_name),
launcher=remote_signer.new_remote_signer_launcher(
el_cl_genesis_data=el_cl_data
),
service_name="signer-{0}".format(full_name),
remote_signer_type=remote_signer_type,
image=participant.remote_signer_image,
full_name="{0}-remote_signer".format(full_name),
vc_type=vc_type,
image=participant.vc_image,
global_log_level=args_with_right_defaults.global_log_level,
cl_context=cl_context,
el_context=el_context,
remote_signer_context=remote_signer_context,
full_name=full_name,
snooper_enabled=participant.snooper_enabled,
snooper_beacon_context=snooper_beacon_context,
node_keystore_files=vc_keystores,
participant=participant,
prysm_password_relative_filepath=prysm_password_relative_filepath,
prysm_password_artifact_uuid=prysm_password_artifact_uuid,
global_tolerations=global_tolerations,
node_selectors=node_selectors,
preset=network_params.preset,
network=network_params.network,
electra_fork_epoch=network_params.electra_fork_epoch,
port_publisher=args_with_right_defaults.port_publisher,
vc_index=current_vc_index,
remote_signer_index=current_vc_index,
)
all_vc_contexts.append(vc_context)
if vc_context and vc_context.metrics_info:
vc_context.metrics_info["config"] = participant.prometheus_config
current_vc_index += 1
all_remote_signer_contexts.append(remote_signer_context)
if remote_signer_context and remote_signer_context.metrics_info:
remote_signer_context.metrics_info["config"] = participant.prometheus_config
vc_context = vc.launch(
plan=plan,
launcher=vc.new_vc_launcher(el_cl_genesis_data=el_cl_data),
keymanager_file=keymanager_file,
service_name="vc-{0}".format(full_name),
vc_type=vc_type,
image=participant.vc_image,
global_log_level=args_with_right_defaults.global_log_level,
cl_context=cl_context,
el_context=el_context,
remote_signer_context=remote_signer_context,
full_name=full_name,
snooper_enabled=participant.snooper_enabled,
snooper_beacon_context=snooper_beacon_context,
node_keystore_files=vc_keystores,
participant=participant,
prysm_password_relative_filepath=prysm_password_relative_filepath,
prysm_password_artifact_uuid=prysm_password_artifact_uuid,
global_tolerations=global_tolerations,
node_selectors=node_selectors,
preset=network_params.preset,
network=network_params.network,
electra_fork_epoch=network_params.electra_fork_epoch,
port_publisher=args_with_right_defaults.port_publisher,
vc_index=current_vc_index,
)
all_vc_contexts.append(vc_context)
if vc_context and vc_context.metrics_info:
vc_context.metrics_info["config"] = participant.prometheus_config
current_vc_index += 1
all_participants = []
all_participants = []
for index, participant in enumerate(args_with_right_defaults.participants):
el_type = participant.el_type
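Note: the two hunks above are mostly a re-indentation: the inner `for sub_index in range(participant.vc_count)` loop is gone, so the metrics exporter, xatu sentry, snooper, remote signer, and VC launches all run once per participant. A heavily elided sketch of the resulting shape (order approximate, arguments replaced with `...`):

```python
for index, participant in enumerate(args_with_right_defaults.participants):
    if participant.use_separate_vc == None:  # MEV participant
        all_vc_contexts.append(None)
        continue
    if cl_type in _cls_that_need_separate_vc and not participant.use_separate_vc:
        fail("{0} needs a separate validator client!".format(cl_type))
    if not participant.use_separate_vc:
        all_vc_contexts.append(None)
        continue
    vc_context = vc.launch(...)  # at most one VC per participant now
    all_vc_contexts.append(vc_context)
    current_vc_index += 1
```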
@@ -406,14 +390,15 @@ def launch_participant_network(
snooper_engine_context = None
snooper_beacon_context = None
el_context = all_el_contexts[index]
cl_context = all_cl_contexts[index]
if participant.vc_count != 0:
vc_context = all_vc_contexts[index]
remote_signer_context = all_remote_signer_contexts[index]
else:
vc_context = None
remote_signer_context = None
el_context = all_el_contexts[index] if index < len(all_el_contexts) else None
cl_context = all_cl_contexts[index] if index < len(all_cl_contexts) else None
vc_context = all_vc_contexts[index] if index < len(all_vc_contexts) else None
remote_signer_context = (
all_remote_signer_contexts[index]
if index < len(all_remote_signer_contexts)
else None
)
if participant.snooper_enabled:
snooper_engine_context = all_snooper_engine_contexts[index]
@@ -427,7 +412,7 @@ def launch_participant_network(
]
xatu_sentry_context = None
if participant.xatu_sentry_enabled:
if participant.xatu_sentry_enabled and index < len(all_xatu_sentry_contexts):
xatu_sentry_context = all_xatu_sentry_contexts[index]
participant_entry = participant_module.new_participant(
@@ -96,41 +96,26 @@ def generate_validator_keystores(plan, mnemonic, participants, docker_cache_params):
all_output_dirpaths.append(output_dirpath)
continue
for i in range(participant.vc_count):
output_dirpath = (
NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR.format(idx, "-" + str(i))
if participant.vc_count != 1
else NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR.format(idx, "")
)
start_index = running_total_validator_count + i * (
participant.validator_count // participant.vc_count
)
stop_index = start_index + (
participant.validator_count // participant.vc_count
)
# Adjust stop_index for the last partition to include all remaining validators
if i == participant.vc_count - 1:
stop_index = running_total_validator_count + participant.validator_count
generate_keystores_cmd = '{0} keystores --insecure --prysm-pass {1} --out-loc {2} --source-mnemonic "{3}" --source-min {4} --source-max {5}'.format(
KEYSTORES_GENERATION_TOOL_NAME,
PRYSM_PASSWORD,
output_dirpath,
mnemonic,
start_index,
stop_index,
)
all_output_dirpaths.append(output_dirpath)
all_sub_command_strs.append(generate_keystores_cmd)
start_index = running_total_validator_count
stop_index = start_index + participant.validator_count
generate_keystores_cmd = '{0} keystores --insecure --prysm-pass {1} --out-loc {2} --source-mnemonic "{3}" --source-min {4} --source-max {5}'.format(
KEYSTORES_GENERATION_TOOL_NAME,
PRYSM_PASSWORD,
output_dirpath,
mnemonic,
start_index,
stop_index,
)
all_output_dirpaths.append(output_dirpath)
all_sub_command_strs.append(generate_keystores_cmd)
teku_permissions_cmd = "chmod 0777 -R " + output_dirpath + TEKU_KEYS_DIRNAME
raw_secret_permissions_cmd = (
"chmod 0600 -R " + output_dirpath + RAW_SECRETS_DIRNAME
)
all_sub_command_strs.append(teku_permissions_cmd)
all_sub_command_strs.append(raw_secret_permissions_cmd)
teku_permissions_cmd = "chmod 0777 -R " + output_dirpath + TEKU_KEYS_DIRNAME
raw_secret_permissions_cmd = (
"chmod 0600 -R " + output_dirpath + RAW_SECRETS_DIRNAME
)
all_sub_command_strs.append(teku_permissions_cmd)
all_sub_command_strs.append(raw_secret_permissions_cmd)
running_total_validator_count += participant.validator_count
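Note: keystore ranges are now contiguous per participant instead of being split into `vc_count` sub-ranges with a remainder-absorbing last partition. A worked example with hypothetical counts:

```python
running_total_validator_count = 0
for validator_count in [64, 32]:  # two participants
    start_index = running_total_validator_count
    stop_index = start_index + validator_count
    print(start_index, stop_index)  # 0 64, then 64 96
    running_total_validator_count += validator_count
```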
@@ -151,51 +136,37 @@ def generate_validator_keystores(plan, mnemonic, participants, docker_cache_params):
keystore_files.append(None)
continue
for i in range(participant.vc_count):
output_dirpath = (
NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR.format(idx, "-" + str(i))
if participant.vc_count != 1
else NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR.format(idx, "")
)
padded_idx = shared_utils.zfill_custom(idx + 1, len(str(len(participants))))
keystore_start_index = running_total_validator_count + i * (
participant.validator_count // participant.vc_count
)
keystore_stop_index = keystore_start_index + (
participant.validator_count // participant.vc_count
)
if i == participant.vc_count - 1:
keystore_stop_index = (
running_total_validator_count + participant.validator_count
)
artifact_name = "{0}-{1}-{2}-{3}-{4}-{5}".format(
padded_idx,
participant.cl_type,
participant.el_type,
keystore_start_index,
keystore_stop_index - 1,
i,
)
artifact_name = plan.store_service_files(
service_name, output_dirpath, name=artifact_name
)
base_dirname_in_artifact = shared_utils.path_base(output_dirpath)
to_add = keystore_files_module.new_keystore_files(
artifact_name,
shared_utils.path_join(base_dirname_in_artifact),
shared_utils.path_join(base_dirname_in_artifact, RAW_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, RAW_SECRETS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, NIMBUS_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, PRYSM_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_SECRETS_DIRNAME),
)
keystore_files.append(to_add)
output_dirpath = NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR.format(idx, "")
padded_idx = shared_utils.zfill_custom(idx + 1, len(str(len(participants))))
keystore_start_index = running_total_validator_count
keystore_stop_index = (
running_total_validator_count + participant.validator_count
)
artifact_name = "{0}-{1}-{2}-{3}-{4}".format(
padded_idx,
participant.cl_type,
participant.el_type,
keystore_start_index,
keystore_stop_index - 1,
)
artifact_name = plan.store_service_files(
service_name, output_dirpath, name=artifact_name
)
base_dirname_in_artifact = shared_utils.path_base(output_dirpath)
to_add = keystore_files_module.new_keystore_files(
artifact_name,
shared_utils.path_join(base_dirname_in_artifact),
shared_utils.path_join(base_dirname_in_artifact, RAW_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, RAW_SECRETS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, NIMBUS_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, PRYSM_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_KEYS_DIRNAME),
shared_utils.path_join(base_dirname_in_artifact, TEKU_SECRETS_DIRNAME),
)
keystore_files.append(to_add)
running_total_validator_count += participant.validator_count
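Note: keystore artifact names drop the trailing per-VC sub-index, going from six fields to five. An illustrative name under the new format (values hypothetical):

```python
artifact_name = "{0}-{1}-{2}-{3}-{4}".format("1", "lighthouse", "geth", 0, 63)
# -> "1-lighthouse-geth-0-63" (previously "...-0-63-0" with the sub-index appended)
```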
@@ -232,11 +203,14 @@ def generate_validator_keystores(plan, mnemonic, participants, docker_cache_params):
# this is like above but runs things in parallel - for large networks that run on k8s or gigantic dockers
def generate_valdiator_keystores_in_parallel(plan, mnemonic, participants):
def generate_valdiator_keystores_in_parallel(
plan, mnemonic, participants, docker_cache_params
):
service_names = launch_prelaunch_data_generator_parallel(
plan,
{},
["cl-validator-keystore-" + str(idx) for idx in range(0, len(participants))],
docker_cache_params,
)
all_output_dirpaths = []
all_generation_commands = []
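Note: the parallel variant now takes `docker_cache_params` and forwards it to the prelaunch data generator, matching the serial path. A hypothetical call site under the new signature (module and argument names assumed for illustration; the typo'd function name is verbatim from the repo):

```python
keystore_results = validator_keystores.generate_valdiator_keystores_in_parallel(
    plan,
    network_params.preregistered_validator_keys_mnemonic,
    args_with_right_defaults.participants,
    args_with_right_defaults.docker_cache_params,
)
```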