Commit 59f15cae authored by Barnabas Busa, committed by GitHub

feat: rework how keys are generated (#301)

Co-authored-by: Gyanendra Mishra <anomaly.the@gmail.com>
parent 63f7ff3c
@@ -8,6 +8,10 @@ genesis_constants = import_module(
"./src/prelaunch_data_generator/genesis_constants/genesis_constants.star"
)
validator_ranges = import_module(
"./src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star"
)
transaction_spammer = import_module(
"./src/transaction_spammer/transaction_spammer.star"
)
@@ -98,6 +102,17 @@ def run(plan, args={}):
all_el_client_contexts.append(participant.el_client_context)
all_cl_client_contexts.append(participant.cl_client_context)
# Generate validator ranges
validator_ranges_config_template = read_file(
static_files.VALIDATOR_RANGES_CONFIG_TEMPLATE_FILEPATH
)
ranges = validator_ranges.generate_validator_ranges(
plan,
validator_ranges_config_template,
all_cl_client_contexts,
args_with_right_defaults.participants,
)
if network_params.deneb_fork_epoch != 0:
plan.print("Launching 4788 contract deployer")
el_uri = "http://{0}:{1}".format(
@@ -277,15 +292,10 @@ def run(plan, args={}):
plan.print("Succesfully launched execution layer forkmon")
elif additional_service == "beacon_metrics_gazer":
plan.print("Launching beacon metrics gazer")
beacon_metrics_gazer_config_template = read_file(
static_files.BEACON_METRICS_GAZER_CONFIG_TEMPLATE_FILEPATH
)
beacon_metrics_gazer_prometheus_metrics_job = (
beacon_metrics_gazer.launch_beacon_metrics_gazer(
plan,
beacon_metrics_gazer_config_template,
all_cl_client_contexts,
args_with_right_defaults.participants,
network_params,
)
)
......
@@ -14,6 +14,8 @@ BEACON_METRICS_GAZER_CONFIG_FILENAME = "validator-ranges.yaml"
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE = "/config"
VALIDATOR_RANGES_ARTIFACT_NAME = "validator-ranges"
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
@@ -23,39 +25,8 @@ USED_PORTS = {
}
def launch_beacon_metrics_gazer(
plan, config_template, cl_client_contexts, participants, network_params
):
data = []
running_total_validator_count = 0
for index, client in enumerate(cl_client_contexts):
participant = participants[index]
if participant.validator_count == 0:
continue
start_index = running_total_validator_count
running_total_validator_count += participant.validator_count
end_index = start_index + participant.validator_count
service_name = client.beacon_service_name
data.append(
{
"ClientName": service_name,
"Range": "{0}-{1}".format(start_index, end_index),
}
)
template_data = {"Data": data}
template_and_data_by_rel_dest_filepath = {}
template_and_data_by_rel_dest_filepath[
BEACON_METRICS_GAZER_CONFIG_FILENAME
] = shared_utils.new_template_and_data(config_template, template_data)
config_files_artifact_name = plan.render_templates(
template_and_data_by_rel_dest_filepath, "validator-ranges"
)
def launch_beacon_metrics_gazer(plan, cl_client_contexts, network_params):
config = get_config(
config_files_artifact_name,
cl_client_contexts[0].ip_addr,
cl_client_contexts[0].http_port_num,
)
@@ -74,7 +45,7 @@ def launch_beacon_metrics_gazer(
)
def get_config(config_files_artifact_name, ip_addr, http_port_num):
def get_config(ip_addr, http_port_num):
config_file_path = shared_utils.path_join(
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
BEACON_METRICS_GAZER_CONFIG_FILENAME,
@@ -83,7 +54,7 @@ def get_config(config_files_artifact_name, ip_addr, http_port_num):
image=IMAGE_NAME,
ports=USED_PORTS,
files={
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name,
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE: VALIDATOR_RANGES_ARTIFACT_NAME,
},
cmd=[
"http://{0}:{1}".format(ip_addr, http_port_num),
......
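With this commit, the range computation that used to live inside the beacon metrics gazer launcher is rendered once, up front, as an enclave-wide artifact named "validator-ranges", and the launcher simply mounts that artifact by its constant name. A minimal sketch of the decoupled flow (not part of the diff; the template variable and service name are illustrative), assuming the Kurtosis Plan API:

# Render the ranges once; "validator-ranges" becomes the artifact's
# enclave-wide name (see VALIDATOR_RANGES_ARTIFACT_NAME).
ranges_artifact = plan.render_templates(
    {"validator-ranges.yaml": shared_utils.new_template_and_data(template, template_data)},
    "validator-ranges",
)
# Any later service can then mount the artifact by that name alone:
plan.add_service(
    "beacon-metrics-gazer",
    ServiceConfig(
        image=IMAGE_NAME,
        files={"/config": "validator-ranges"},
    ),
)

This works because Kurtosis artifact names are unique within an enclave, so the constant can stand in for an explicit artifact reference threaded through function parameters.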
cl_validator_keystores = import_module(
"./prelaunch_data_generator/cl_validator_keystores/cl_validator_keystore_generator.star"
validator_keystores = import_module(
"./prelaunch_data_generator/validator_keystores/validator_keystore_generator.star"
)
el_cl_genesis_data_generator = import_module(
"./prelaunch_data_generator/el_cl_genesis/el_cl_genesis_generator.star"
)
shared_utils = import_module("./shared_utils/shared_utils.star")
static_files = import_module("./static_files/static_files.star")
@@ -45,10 +46,6 @@ CL_NODE_STARTUP_TIME = 5
CL_CLIENT_CONTEXT_BOOTNODE = None
GLOBAL_INDEX_ZFILL = {
"zfill_values": [(1, 1), (2, 10), (3, 100), (4, 1000), (5, 10000)]
}
def launch_participant_network(
plan,
@@ -60,19 +57,17 @@ def launch_participant_network(
num_participants = len(participants)
plan.print("Generating cl validator key stores")
cl_validator_data = None
validator_data = None
if not parallel_keystore_generation:
cl_validator_data = cl_validator_keystores.generate_cl_validator_keystores(
validator_data = validator_keystores.generate_validator_keystores(
plan, network_params.preregistered_validator_keys_mnemonic, participants
)
else:
cl_validator_data = (
cl_validator_keystores.generate_cl_valdiator_keystores_in_parallel(
plan, network_params.preregistered_validator_keys_mnemonic, participants
)
validator_data = validator_keystores.generate_valdiator_keystores_in_parallel(
plan, network_params.preregistered_validator_keys_mnemonic, participants
)
plan.print(json.indent(json.encode(cl_validator_data)))
plan.print(json.indent(json.encode(validator_data)))
# We need to send the same genesis time to both the EL and the CL to ensure that timestamp-based forking works as expected
final_genesis_timestamp = get_final_genesis_timestamp(
@@ -184,7 +179,7 @@ def launch_participant_network(
)
# Zero-pad the index using the calculated zfill value
index_str = zfill_custom(index + 1, zfill_calculator(participants))
index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants))))
el_service_name = "el-{0}-{1}-{2}".format(
index_str, el_client_type, cl_client_type
@@ -228,8 +223,8 @@ def launch_participant_network(
package_io.CL_CLIENT_TYPE.prysm: {
"launcher": prysm.new_prysm_launcher(
el_cl_data,
cl_validator_data.prysm_password_relative_filepath,
cl_validator_data.prysm_password_artifact_uuid,
validator_data.prysm_password_relative_filepath,
validator_data.prysm_password_artifact_uuid,
),
"launch_method": prysm.launch,
},
@@ -241,7 +236,7 @@ def launch_participant_network(
all_snooper_engine_contexts = []
all_cl_client_contexts = []
preregistered_validator_keys_for_nodes = cl_validator_data.per_node_keystores
preregistered_validator_keys_for_nodes = validator_data.per_node_keystores
for index, participant in enumerate(participants):
cl_client_type = participant.cl_client_type
@@ -259,7 +254,7 @@ def launch_participant_network(
cl_launchers[cl_client_type]["launch_method"],
)
index_str = zfill_custom(index + 1, zfill_calculator(participants))
index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants))))
cl_service_name = "cl-{0}-{1}-{2}".format(
index_str, cl_client_type, el_client_type
@@ -376,14 +371,6 @@ def launch_participant_network(
)
def zfill_calculator(participants):
for zf, par in GLOBAL_INDEX_ZFILL["zfill_values"]:
if len(participants) < par:
zfill = zf - 1
return zfill
break
def zfill_custom(value, width):
return ("0" * (width - len(str(value)))) + str(value)
......
shared_utils = import_module("../../shared_utils/shared_utils.star")
prelaunch_data_generator_launcher = import_module(
"../../prelaunch_data_generator/prelaunch_data_generator_launcher/prelaunch_data_generator_launcher.star"
)
el_cl_genesis_data = import_module("./el_cl_genesis_data.star")
GENESIS_VALUES_PATH = "/opt"
......
SERVICE_NAME_PREFIX = "prelaunch-data-generator-"
# We use Docker exec commands to run the commands we need, so we override the default entrypoint with a long-running sleep
ENTRYPOINT_ARGS = [
"sleep",
"999999",
]
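The sleep entrypoint exists only to keep the generator container alive; the actual generation work is issued afterwards via exec. A hedged sketch of that pattern with the Kurtosis Plan API (the suffix and command below are illustrative, not from the diff):

# Start an idling generator container, then exec the real command in it.
generator = launch_prelaunch_data_generator(plan, {}, "example-suffix")
result = plan.exec(
    service_name=generator,
    recipe=ExecRecipe(command=["sh", "-c", "some-generation-command"]),
)
# result["code"] holds the exit code and can be compared against 0 on success.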
# Launches a prelaunch data generator image, for use in the various genesis generation steps
def launch_prelaunch_data_generator(
plan,
files_artifact_mountpoints,
service_name_suffix,
capella_fork_epoch,
electra_fork_epoch,
):
config = get_config(
files_artifact_mountpoints, capella_fork_epoch, electra_fork_epoch
)
service_name = "{0}{1}".format(
SERVICE_NAME_PREFIX,
service_name_suffix,
)
plan.add_service(service_name, config)
return service_name
def launch_prelaunch_data_generator_parallel(
plan,
files_artifact_mountpoints,
service_name_suffixes,
capella_fork_epoch,
electra_fork_epoch,
):
config = get_config(
files_artifact_mountpoints, capella_fork_epoch, electra_fork_epoch
)
service_names = [
"{0}{1}".format(
SERVICE_NAME_PREFIX,
service_name_suffix,
)
for service_name_suffix in service_name_suffixes
]
services_to_add = {service_name: config for service_name in service_names}
plan.add_services(services_to_add)
return service_names
def get_config(files_artifact_mountpoints, capella_fork_epoch, electra_fork_epoch):
if capella_fork_epoch > 0 and electra_fork_epoch == None: # we are running capella
img = "ethpandaops/ethereum-genesis-generator:1.3.12"
elif (
capella_fork_epoch == 0 and electra_fork_epoch == None
): # we are running dencun
img = "ethpandaops/ethereum-genesis-generator:2.0.0"
else: # we are running electra
img = "ethpandaops/ethereum-genesis-generator:3.0.0-rc.2"
return ServiceConfig(
image=img,
entrypoint=ENTRYPOINT_ARGS,
files=files_artifact_mountpoints,
)
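For clarity, the branches above map the fork schedule onto generator image versions. A worked sketch of the three cases, using the values from this diff (the epoch arguments are illustrative):

get_config({}, capella_fork_epoch=1, electra_fork_epoch=None)
# -> ethpandaops/ethereum-genesis-generator:1.3.12 (capella network)
get_config({}, capella_fork_epoch=0, electra_fork_epoch=None)
# -> ethpandaops/ethereum-genesis-generator:2.0.0 (dencun network)
get_config({}, capella_fork_epoch=0, electra_fork_epoch=5)
# -> ethpandaops/ethereum-genesis-generator:3.0.0-rc.2 (electra network)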
prelaunch_data_generator_launcher = import_module(
"../../prelaunch_data_generator/prelaunch_data_generator_launcher/prelaunch_data_generator_launcher.star"
)
shared_utils = import_module("../../shared_utils/shared_utils.star")
keystore_files_module = import_module(
"../../prelaunch_data_generator/cl_validator_keystores/keystore_files.star"
)
keystores_result = import_module(
"../../prelaunch_data_generator/cl_validator_keystores/generate_keystores_result.star"
)
keystore_files_module = import_module("./keystore_files.star")
keystores_result = import_module("./generate_keystores_result.star")
NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR = "/node-{0}-keystores"
@@ -17,7 +8,9 @@ NODE_KEYSTORES_OUTPUT_DIRPATH_FORMAT_STR = "/node-{0}-keystores"
PRYSM_PASSWORD = "password"
PRYSM_PASSWORD_FILEPATH_ON_GENERATOR = "/tmp/prysm-password.txt"
KEYSTORES_GENERATION_TOOL_NAME = "eth2-val-tools"
KEYSTORES_GENERATION_TOOL_NAME = "/app/eth2-val-tools"
ETH_VAL_TOOLS_IMAGE = "protolambda/eth2-val-tools:latest"
SUCCESSFUL_EXEC_CMD_EXIT_CODE = 0
@@ -32,18 +25,62 @@ TEKU_SECRETS_DIRNAME = "teku-secrets"
KEYSTORE_GENERATION_FINISHED_FILEPATH_FORMAT = "/tmp/keystores_generated-{0}-{1}"
SERVICE_NAME_PREFIX = "validator-key-generation-"
ENTRYPOINT_ARGS = [
"sleep",
"99999",
]
# Launches a prelaunch data generator image, for use in the various genesis generation steps
def launch_prelaunch_data_generator(
plan,
files_artifact_mountpoints,
service_name_suffix,
):
config = get_config(files_artifact_mountpoints)
service_name = "{0}{1}".format(
SERVICE_NAME_PREFIX,
service_name_suffix,
)
plan.add_service(service_name, config)
return service_name
def launch_prelaunch_data_generator_parallel(
plan, files_artifact_mountpoints, service_name_suffixes
):
config = get_config(
files_artifact_mountpoints,
)
service_names = [
"{0}{1}".format(
SERVICE_NAME_PREFIX,
service_name_suffix,
)
for service_name_suffix in service_name_suffixes
]
services_to_add = {service_name: config for service_name in service_names}
plan.add_services(services_to_add)
return service_names
def get_config(files_artifact_mountpoints):
return ServiceConfig(
image=ETH_VAL_TOOLS_IMAGE,
entrypoint=ENTRYPOINT_ARGS,
files=files_artifact_mountpoints,
)
# Generates keystores for the given number of nodes from the given mnemonic, where each keystore contains approximately
#
# num_keys / num_nodes keys
def generate_cl_validator_keystores(plan, mnemonic, participants):
service_name = prelaunch_data_generator_launcher.launch_prelaunch_data_generator(
plan,
{},
"cl-validator-keystore",
capella_fork_epoch=0, # It doesn't matter how the validator keys are generated
electra_fork_epoch=None, # It doesn't matter how the validator keys are generated
)
def generate_validator_keystores(plan, mnemonic, participants):
service_name = launch_prelaunch_data_generator(plan, {}, "cl-validator-keystore")
all_output_dirpaths = []
all_sub_command_strs = []
@@ -84,7 +121,7 @@ def generate_cl_validator_keystores(plan, mnemonic, participants):
if participant.validator_count == 0:
keystore_files.append(None)
continue
padded_idx = zfill_custom(idx + 1, len(str(len(participants))))
padded_idx = shared_utils.zfill_custom(idx + 1, len(str(len(participants))))
keystore_start_index = running_total_validator_count
running_total_validator_count += participant.validator_count
keystore_stop_index = (keystore_start_index + participant.validator_count) - 1
@@ -147,15 +184,12 @@ def generate_cl_validator_keystores(plan, mnemonic, participants):
# This is like the function above, but runs things in parallel - for large networks that run on Kubernetes or on very large Docker hosts
def generate_cl_valdiator_keystores_in_parallel(plan, mnemonic, participants):
service_names = prelaunch_data_generator_launcher.launch_prelaunch_data_generator_parallel(
def generate_valdiator_keystores_in_parallel(plan, mnemonic, participants):
service_names = launch_prelaunch_data_generator_parallel(
plan,
{},
["cl-validator-keystore-" + str(idx) for idx in range(0, len(participants))],
capella_fork_epoch=0, # It doesn't matter how the validator keys are generated
electra_fork_epoch=None,
) # It doesn't matter how the validator keys are generated
)
all_output_dirpaths = []
all_generation_commands = []
finished_files_to_verify = []
@@ -167,8 +201,9 @@ def generate_cl_valdiator_keystores_in_parallel(plan, mnemonic, participants):
all_output_dirpaths.append(None)
finished_files_to_verify.append(None)
continue
start_index = idx * participant.validator_count
stop_index = (idx + 1) * participant.validator_count
start_index = running_total_validator_count
running_total_validator_count += participant.validator_count
stop_index = start_index + participant.validator_count
generation_finished_filepath = (
KEYSTORE_GENERATION_FINISHED_FILEPATH_FORMAT.format(start_index, stop_index)
)
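This hunk is the substantive fix in the parallel path: the old idx-based arithmetic only produced contiguous, non-overlapping key ranges when every participant had the same validator count. A worked example with assumed counts of [64, 32, 64]:

# Old math: start = idx * count, stop = (idx + 1) * count
#   node 0: 0-64, node 1: 32-64 (overlaps node 0), node 2: 128-192 (gap at 64-128)
# New math: a running total, matching the serial code path
running_total_validator_count = 0
for idx, count in enumerate([64, 32, 64]):
    start_index = running_total_validator_count
    running_total_validator_count += count
    stop_index = start_index + count
    # node 0: 0-64, node 1: 64-96, node 2: 96-160 (contiguous, no overlap)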
@@ -223,13 +258,13 @@ def generate_cl_valdiator_keystores_in_parallel(plan, mnemonic, participants):
keystore_files = []
running_total_validator_count = 0
for idx, participant in enumerate(participants):
output_dirpath = all_output_dirpaths[idx]
if participant.validator_count == 0:
keystore_files.append(None)
continue
service_name = service_names[idx]
output_dirpath = all_output_dirpaths[idx]
padded_idx = zfill_custom(idx + 1, len(str(len(participants))))
padded_idx = shared_utils.zfill_custom(idx + 1, len(str(len(participants))))
keystore_start_index = running_total_validator_count
running_total_validator_count += participant.validator_count
keystore_stop_index = (keystore_start_index + participant.validator_count) - 1
@@ -288,7 +323,3 @@ def generate_cl_valdiator_keystores_in_parallel(plan, mnemonic, participants):
# we don't clean up the containers, as it's a costly operation
return result
def zfill_custom(value, width):
return ("0" * (width - len(str(value)))) + str(value)
VALIDATOR_RANGES_FILE_NAME = "validator-ranges.yaml"
shared_utils = import_module("../../shared_utils/shared_utils.star")
def generate_validator_ranges(
plan,
config_template,
cl_client_contexts,
participants,
):
data = []
running_total_validator_count = 0
for index, client in enumerate(cl_client_contexts):
participant = participants[index]
if participant.validator_count == 0:
continue
start_index = running_total_validator_count
running_total_validator_count += participant.validator_count
end_index = start_index + participant.validator_count
service_name = client.beacon_service_name
data.append(
{
"ClientName": service_name,
"Range": "{0}-{1}".format(start_index, end_index),
}
)
template_data = {"Data": data}
template_and_data_by_rel_dest_filepath = {}
template_and_data_by_rel_dest_filepath[
VALIDATOR_RANGES_FILE_NAME
] = shared_utils.new_template_and_data(config_template, template_data)
VALIDATOR_RANGES_ARTIFACT_NAME = plan.render_templates(
template_and_data_by_rel_dest_filepath, "validator-ranges"
)
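A worked example of what this produces, under assumed inputs: two participants with 64 validators each (the service names below are illustrative) yield the following template data, which is rendered into validator-ranges.yaml and published as the "validator-ranges" artifact:

# start/end follow the running total above: 0-64, then 64-128
template_data = {
    "Data": [
        {"ClientName": "cl-1-lighthouse-geth", "Range": "0-64"},
        {"ClientName": "cl-2-teku-besu", "Range": "64-128"},
    ]
}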
@@ -56,3 +56,7 @@ def read_file_from_service(plan, service_name, filename):
),
)
return output["output"]
def zfill_custom(value, width):
return ("0" * (width - len(str(value)))) + str(value)
@@ -16,9 +16,9 @@ PROMETHEUS_CONFIG_TEMPLATE_FILEPATH = (
STATIC_FILES_DIRPATH + "/prometheus-config/prometheus.yml.tmpl"
)
# Beacon Metrics Gazer config
BEACON_METRICS_GAZER_CONFIG_TEMPLATE_FILEPATH = (
STATIC_FILES_DIRPATH + "/beacon-metrics-gazer-config/config.yaml.tmpl"
# Validator Ranges config
VALIDATOR_RANGES_CONFIG_TEMPLATE_FILEPATH = (
STATIC_FILES_DIRPATH + "/validator-ranges/config.yaml.tmpl"
)
DORA_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + "/dora-config/config.yaml.tmpl"
......