Commit 8e5d1855 authored by Gyanendra Mishra, committed by GitHub

fix!: Use plan object (#65)

BREAKING-CHANGE: Uses the `plan` object. Users will have to update their
Kurtosis CLI to >= 0.63.0 and restart the engine.
parent 4f350ba2
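The migration pattern is visible throughout the diff below: every instruction that used to be a bare Starlark builtin (print, add_service, exec, wait, request, assert, render_templates, upload_files, store_service_files, remove_service) becomes a method on a plan object that the Kurtosis engine injects as the first argument of run(). A minimal sketch of the new entrypoint shape; the service name and config here are hypothetical, not from this package:

    def run(plan, args):
        plan.print("Starting the package")

        # All instructions now hang off the injected plan object.
        config = struct(image = "alpine:3.17")  # hypothetical service config
        service = plan.add_service("my-service", config)
        plan.print("Service is up at {0}".format(service.ip_address))

Helper modules that add services or run commands no longer see these builtins globally, which is why nearly every launcher function below gains plan as an explicit first parameter.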
@@ -40,7 +40,14 @@ workflows:
      # -- PR check jobs ------------------------------------------
      - kurtosis-docs-checker/check-docs:
          should-check-changelog: false
-         markdown-link-check-config-json: "{}"
+         markdown-link-check-config-json: |
+           {
+             "ignorePatterns": [
+               {
+                 "pattern": "https://github.com/kurtosis-tech/eth2-package"
+               }
+             ]
+           }
          filters:
            branches:
              ignore:
......
@@ -17,7 +17,7 @@ GRAFANA_DASHBOARD_PATH_URL = "/d/QdTOwy-nz/eth2-merge-kurtosis-module-dashboard?"
FIRST_NODE_FINALIZATION_FACT = "cl-boot-finalization-fact"
HTTP_PORT_ID_FOR_FACT = "http"

-def run(args):
+def run(plan, args):
    args_with_right_defaults = parse_input.parse_input(args)
    num_participants = len(args_with_right_defaults.participants)
@@ -27,10 +27,10 @@ def run(args):
    grafana_dashboards_config_template = read_file(static_files.GRAFANA_DASHBOARD_PROVIDERS_CONFIG_TEMPLATE_FILEPATH)
    prometheus_config_template = read_file(static_files.PROMETHEUS_CONFIG_TEMPLATE_FILEPATH)

-    print("Read the prometheus, grafana templates")
+    plan.print("Read the prometheus, grafana templates")

-    print("Launching participant network with {0} participants and the following network params {1}".format(num_participants, network_params))
-    all_participants, cl_gensis_timestamp = participant_network.launch_participant_network(args_with_right_defaults.participants, network_params, args_with_right_defaults.global_client_log_level)
+    plan.print("Launching participant network with {0} participants and the following network params {1}".format(num_participants, network_params))
+    all_participants, cl_gensis_timestamp = participant_network.launch_participant_network(plan, args_with_right_defaults.participants, network_params, args_with_right_defaults.global_client_log_level)

    all_el_client_contexts = []
    all_cl_client_contexts = []
@@ -42,39 +42,40 @@ def run(args):
    if not args_with_right_defaults.launch_additional_services:
        return

-    print("Launching transaction spammer")
-    transaction_spammer.launch_transaction_spammer(genesis_constants.PRE_FUNDED_ACCOUNTS, all_el_client_contexts[0])
-    print("Succesfully launched transaction spammer")
+    plan.print("Launching transaction spammer")
+    transaction_spammer.launch_transaction_spammer(plan, genesis_constants.PRE_FUNDED_ACCOUNTS, all_el_client_contexts[0])
+    plan.print("Succesfully launched transaction spammer")

    # We need a way to do time.sleep
    # TODO add code that waits for CL genesis
-    print("Launching forkmon")
+    plan.print("Launching forkmon")
    forkmon_config_template = read_file(static_files.FORKMON_CONFIG_TEMPLATE_FILEPATH)
-    forkmon.launch_forkmon(forkmon_config_template, all_cl_client_contexts, cl_gensis_timestamp, network_params.seconds_per_slot, network_params.slots_per_epoch)
-    print("Succesfully launched forkmon")
+    forkmon.launch_forkmon(plan, forkmon_config_template, all_cl_client_contexts, cl_gensis_timestamp, network_params.seconds_per_slot, network_params.slots_per_epoch)
+    plan.print("Succesfully launched forkmon")

-    print("Launching prometheus...")
+    plan.print("Launching prometheus...")
    prometheus_private_url = prometheus.launch_prometheus(
+        plan,
        prometheus_config_template,
        all_cl_client_contexts,
    )
-    print("Successfully launched Prometheus")
+    plan.print("Successfully launched Prometheus")

-    print("Launching grafana...")
-    grafana.launch_grafana(grafana_datasource_config_template, grafana_dashboards_config_template, prometheus_private_url)
-    print("Succesfully launched grafana")
+    plan.print("Launching grafana...")
+    grafana.launch_grafana(plan, grafana_datasource_config_template, grafana_dashboards_config_template, prometheus_private_url)
+    plan.print("Succesfully launched grafana")

    if args_with_right_defaults.wait_for_verifications:
-        print("Running synchrnous testnet verifier")
-        testnet_verifier.run_synchronous_testnet_verification(args_with_right_defaults, all_el_client_contexts, all_cl_client_contexts)
-        print("Verification succeeded")
+        plan.print("Running synchrnous testnet verifier")
+        testnet_verifier.run_synchronous_testnet_verification(plan, args_with_right_defaults, all_el_client_contexts, all_cl_client_contexts)
+        plan.print("Verification succeeded")
    else:
-        print("Running asynchronous verification")
-        testnet_verifier.launch_testnet_verifier(args_with_right_defaults, all_el_client_contexts, all_cl_client_contexts)
-        print("Succesfully launched asynchronous verifier")
+        plan.print("Running asynchronous verification")
+        testnet_verifier.launch_testnet_verifier(plan, args_with_right_defaults, all_el_client_contexts, all_cl_client_contexts)
+        plan.print("Succesfully launched asynchronous verifier")

    if args_with_right_defaults.wait_for_finalization:
-        print("Waiting for the first finalized epoch")
+        plan.print("Waiting for the first finalized epoch")
        first_cl_client = all_cl_client_contexts[0]
        first_cl_client_id = first_cl_client.beacon_service_id
        epoch_recipe = struct(
@@ -87,8 +88,8 @@ def run(args):
                "finalized_epoch": ".data.finalized.epoch"
            }
        )
-        wait(epoch_recipe, "extract.finalized_epoch", "!=", "0", timeout="40m")
-        print("First finalized epoch occurred successfully")
+        plan.wait(epoch_recipe, "extract.finalized_epoch", "!=", "0", timeout="40m")
+        plan.print("First finalized epoch occurred successfully")

    grafana_info = struct(
......
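For reference, plan.wait takes a recipe, a field to assert on, an assertion operator, a target value, and an optional timeout, and blocks until the assertion holds (failing the run on timeout). A sketch of the finalization wait above; the endpoint and port id are assumptions, since the diff elides the middle of the recipe:

    epoch_recipe = struct(
        service_id = first_cl_client_id,
        method = "GET",
        endpoint = "/eth/v1/beacon/states/head/finality_checkpoints",  # assumed beacon API path
        content_type = "application/json",
        port_id = HTTP_PORT_ID_FOR_FACT,  # assumed; defined as "http" above
        extract = {
            "finalized_epoch": ".data.finalized.epoch"  # jq-style extractor
        }
    )
    # Poll for up to 40 minutes until the extracted epoch moves off "0".
    plan.wait(epoch_recipe, "extract.finalized_epoch", "!=", "0", timeout="40m")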
@@ -17,6 +17,7 @@ USED_PORTS = {
def launch_forkmon(
+    plan,
    config_template,
    cl_client_contexts,
    genesis_unix_timestamp,
@@ -35,11 +36,11 @@ def launch_forkmon(
    template_and_data_by_rel_dest_filepath = {}
    template_and_data_by_rel_dest_filepath[FORKMON_CONFIG_FILENAME] = template_and_data

-    config_files_artifact_uuid = render_templates(template_and_data_by_rel_dest_filepath)
+    config_files_artifact_uuid = plan.render_templates(template_and_data_by_rel_dest_filepath)

    config = get_config(config_files_artifact_uuid)

-    add_service(SERVICE_ID, config)
+    plan.add_service(SERVICE_ID, config)

def get_config(config_files_artifact_uuid):
......
@@ -25,15 +25,15 @@ USED_PORTS = {
}

-def launch_grafana(datasource_config_template, dashboard_providers_config_template, prometheus_private_url):
-    grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid = get_grafana_config_dir_artifact_uuid(datasource_config_template, dashboard_providers_config_template, prometheus_private_url)
+def launch_grafana(plan, datasource_config_template, dashboard_providers_config_template, prometheus_private_url):
+    grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid = get_grafana_config_dir_artifact_uuid(plan, datasource_config_template, dashboard_providers_config_template, prometheus_private_url)

    config = get_config(grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid)

-    add_service(SERVICE_ID, config)
+    plan.add_service(SERVICE_ID, config)

-def get_grafana_config_dir_artifact_uuid(datasource_config_template, dashboard_providers_config_template, prometheus_private_url):
+def get_grafana_config_dir_artifact_uuid(plan, datasource_config_template, dashboard_providers_config_template, prometheus_private_url):
    datasource_data = new_datasource_config_template_data(prometheus_private_url)
    datasource_template_and_data = shared_utils.new_template_and_data(datasource_config_template, datasource_data)
@@ -44,9 +44,9 @@ def get_grafana_config_dir_artifact_uuid(datasource_config_template, dashboard_p
    template_and_data_by_rel_dest_filepath[DATASOURCE_CONFIG_REL_FILEPATH] = datasource_template_and_data
    template_and_data_by_rel_dest_filepath[DASHBOARD_PROVIDERS_CONFIG_REL_FILEPATH] = dashboard_providers_template_and_data

-    grafana_config_artifacts_uuid = render_templates(template_and_data_by_rel_dest_filepath)
+    grafana_config_artifacts_uuid = plan.render_templates(template_and_data_by_rel_dest_filepath)

-    grafana_dashboards_artifacts_uuid = upload_files(static_files.GRAFANA_DASHBOARDS_CONFIG_DIRPATH)
+    grafana_dashboards_artifacts_uuid = plan.upload_files(static_files.GRAFANA_DASHBOARDS_CONFIG_DIRPATH)

    return grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid
......
-def wait_for_healthy(service_id, port_id):
+def wait_for_healthy(plan, service_id, port_id):
    recipe = struct(
        service_id = service_id,
        method= "GET",
@@ -6,4 +6,4 @@ def wait_for_healthy(service_id, port_id):
        content_type = "application/json",
        port_id = port_id
    )
-    return wait(recipe, "code", "IN", [200, 206, 503])
+    return plan.wait(recipe, "code", "IN", [200, 206, 503])
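Every CL launcher below threads plan into this helper. A one-line usage sketch matching those hunks (the ids are illustrative, taken from the lighthouse launcher):

    # After adding a beacon node, block until its HTTP API answers with 200/206/503.
    beacon_service = plan.add_service(beacon_node_service_id, beacon_config)
    cl_node_health_checker.wait_for_healthy(plan, beacon_node_service_id, BEACON_HTTP_PORT_ID)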
@@ -66,6 +66,7 @@ LIGHTHOUSE_LOG_LEVELS = {
}

def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -94,9 +95,9 @@ def launch(
        extra_beacon_params,
    )

-    beacon_service = add_service(beacon_node_service_id, beacon_config)
+    beacon_service = plan.add_service(beacon_node_service_id, beacon_config)

-    cl_node_health_checker.wait_for_healthy(beacon_node_service_id, BEACON_HTTP_PORT_ID)
+    cl_node_health_checker.wait_for_healthy(plan, beacon_node_service_id, BEACON_HTTP_PORT_ID)

    beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
@@ -113,7 +114,7 @@ def launch(
        extra_validator_params,
    )

-    validator_service = add_service(validator_node_service_id, validator_config)
+    validator_service = plan.add_service(validator_node_service_id, validator_config)

    # TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
    beacon_node_identity_recipe = struct(
@@ -126,7 +127,7 @@ def launch(
            "enr": ".data.enr"
        }
    )
-    beacon_node_enr = request(beacon_node_identity_recipe)["extract.enr"]
+    beacon_node_enr = plan.request(beacon_node_identity_recipe)["extract.enr"]

    beacon_metrics_port = beacon_service.ports[BEACON_METRICS_PORT_ID]
    beacon_metrics_url = "{0}:{1}".format(beacon_service.ip_address, beacon_metrics_port.number)
......
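Unlike plan.wait, plan.request runs a recipe once and returns the extracted fields keyed as "extract.<name>". A sketch of the identity lookup above, assuming the standard beacon-API identity endpoint (the diff elides the middle of the recipe):

    beacon_node_identity_recipe = struct(
        service_id = beacon_node_service_id,
        method = "GET",
        endpoint = "/eth/v1/node/identity",  # assumed beacon API path
        content_type = "application/json",
        port_id = BEACON_HTTP_PORT_ID,
        extract = {
            "enr": ".data.enr"  # jq-style extractor into the JSON response
        }
    )
    beacon_node_enr = plan.request(beacon_node_identity_recipe)["extract.enr"]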
@@ -40,7 +40,6 @@ USED_PORTS = {
}

LODESTAR_LOG_LEVELS = {
    package_io.GLOBAL_CLIENT_LOG_LEVEL.error: "error",
    package_io.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn",
@@ -51,6 +50,7 @@ LODESTAR_LOG_LEVELS = {
def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -79,11 +79,11 @@ def launch(
        extra_beacon_params,
    )

-    beacon_service = add_service(beacon_node_service_id, beacon_config)
+    beacon_service = plan.add_service(beacon_node_service_id, beacon_config)

    beacon_http_port = beacon_service.ports[HTTP_PORT_ID]

-    cl_node_health_checker.wait_for_healthy(beacon_node_service_id, HTTP_PORT_ID)
+    cl_node_health_checker.wait_for_healthy(plan, beacon_node_service_id, HTTP_PORT_ID)

    # Launch validator node
@@ -100,7 +100,7 @@ def launch(
        extra_validator_params,
    )

-    validator_service = add_service(validator_node_service_id, validator_config)
+    validator_service = plan.add_service(validator_node_service_id, validator_config)

    # TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
@@ -114,7 +114,7 @@ def launch(
            "enr": ".data.enr"
        }
    )
-    beacon_node_enr = request(beacon_node_identity_recipe)["extract.enr"]
+    beacon_node_enr = plan.request(beacon_node_identity_recipe)["extract.enr"]

    beacon_metrics_port = beacon_service.ports[METRICS_PORT_ID]
    beacon_metrics_url = "{0}:{1}".format(beacon_service.ip_address, beacon_metrics_port.number)
......
@@ -60,6 +60,7 @@ NIMBUS_LOG_LEVELS = {
ENTRYPOINT_ARGS = ["sh", "-c"]

def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -78,9 +79,9 @@ def launch(
    config = get_config(launcher.cl_genesis_data, image, bootnode_context, el_client_context, mev_boost_context, log_level, node_keystore_files, extra_params)

-    nimbus_service = add_service(service_id, config)
+    nimbus_service = plan.add_service(service_id, config)

-    cl_node_health_checker.wait_for_healthy(service_id, HTTP_PORT_ID)
+    cl_node_health_checker.wait_for_healthy(plan, service_id, HTTP_PORT_ID)

    cl_node_identity_recipe = struct(
        service_id = service_id,
@@ -92,7 +93,7 @@ def launch(
            "enr": ".data.enr"
        }
    )
-    node_enr = request(cl_node_identity_recipe)["extract.enr"]
+    node_enr = plan.request(cl_node_identity_recipe)["extract.enr"]

    metrics_port = nimbus_service.ports[METRICS_PORT_ID]
    metrics_url = "{0}:{1}".format(nimbus_service.ip_address, metrics_port.number)
......
@@ -62,6 +62,7 @@ PRYSM_LOG_LEVELS = {
def launch(
+    plan,
    launcher,
    service_id,
    images,
@@ -101,9 +102,9 @@ def launch(
        extra_beacon_params,
    )

-    beacon_service = add_service(beacon_node_service_id, beacon_config)
+    beacon_service = plan.add_service(beacon_node_service_id, beacon_config)

-    cl_node_health_checker.wait_for_healthy(beacon_node_service_id, HTTP_PORT_ID)
+    cl_node_health_checker.wait_for_healthy(plan, beacon_node_service_id, HTTP_PORT_ID)

    beacon_http_port = beacon_service.ports[HTTP_PORT_ID]
@@ -125,7 +126,7 @@ def launch(
        launcher.prysm_password_artifact_uuid
    )

-    validator_service = add_service(validator_node_service_id, validator_config)
+    validator_service = plan.add_service(validator_node_service_id, validator_config)

    # TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
    beacon_node_identity_recipe = struct(
@@ -138,7 +139,7 @@ def launch(
            "enr": ".data.enr"
        }
    )
-    beacon_node_enr = request(beacon_node_identity_recipe)["extract.enr"]
+    beacon_node_enr = plan.request(beacon_node_identity_recipe)["extract.enr"]

    beacon_metrics_port = beacon_service.ports[BEACON_MONITORING_PORT_ID]
    beacon_metrics_url = "{0}:{1}".format(beacon_service.ip_address, beacon_metrics_port.number)
......
@@ -67,6 +67,7 @@ TEKU_LOG_LEVELS = {
}

def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -85,9 +86,9 @@ def launch(
    config = get_config(launcher.cl_genesis_data, image, bootnode_context, el_client_context, mev_boost_context, log_level, node_keystore_files, extra_params)

-    teku_service = add_service(service_id, config)
+    teku_service = plan.add_service(service_id, config)

-    cl_node_health_checker.wait_for_healthy(service_id, HTTP_PORT_ID)
+    cl_node_health_checker.wait_for_healthy(plan, service_id, HTTP_PORT_ID)

    node_identity_recipe = struct(
        service_id = service_id,
@@ -99,7 +100,7 @@ def launch(
            "enr": ".data.enr"
        }
    )
-    node_enr = request(node_identity_recipe)["extract.enr"]
+    node_enr = plan.request(node_identity_recipe)["extract.enr"]

    teku_metrics_port = teku_service.ports[METRICS_PORT_ID]
......
@@ -45,6 +45,7 @@ BESU_LOG_LEVELS = {
}

def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -58,9 +59,9 @@ def launch(
    config = get_config(launcher.network_id, launcher.el_genesis_data,
                        image, existing_el_clients, log_level, extra_params)

-    service = add_service(service_id, config)
+    service = plan.add_service(service_id, config)

-    enode = el_admin_node_info.get_enode_for_node(service_id, RPC_PORT_ID)
+    enode = el_admin_node_info.get_enode_for_node(plan, service_id, RPC_PORT_ID)

    return el_client_context.new_el_client_context(
        "besu",
......
-def get_enode_enr_for_node(service_id, port_id):
+def get_enode_enr_for_node(plan, service_id, port_id):
    recipe = struct(
        service_id = service_id,
        method= "POST",
@@ -12,10 +12,10 @@ def get_enode_enr_for_node(service_id, port_id):
            "enr": ".result.enr",
        }
    )
-    response = wait(recipe, "extract.enode", "!=", "")
+    response = plan.wait(recipe, "extract.enode", "!=", "")
    return (response["extract.enode"], response["extract.enr"])

-def get_enode_for_node(service_id, port_id):
+def get_enode_for_node(plan, service_id, port_id):
    recipe = struct(
        service_id = service_id,
        method= "POST",
@@ -27,5 +27,5 @@ def get_enode_for_node(service_id, port_id):
            "enode": ".result.enode",
        }
    )
-    response = wait(recipe, "extract.enode", "!=", "")
+    response = plan.wait(recipe, "extract.enode", "!=", "")
    return response["extract.enode"]
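The extractors .result.enode and .result.enr suggest these helpers POST the admin_nodeInfo JSON-RPC call to the EL client; a sketch under that assumption, with the endpoint and request body invented for illustration since the diff elides the middle of the recipe:

    def get_enode_for_node(plan, service_id, port_id):
        recipe = struct(
            service_id = service_id,
            method = "POST",
            endpoint = "",  # assumed: JSON-RPC servers answer on the root path
            body = '{"jsonrpc":"2.0","method":"admin_nodeInfo","params":[],"id":1}',  # assumed body
            content_type = "application/json",
            port_id = port_id,
            extract = {
                "enode": ".result.enode",
            }
        )
        # Re-run the request until the node reports a non-empty enode.
        response = plan.wait(recipe, "extract.enode", "!=", "")
        return response["extract.enode"]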
@@ -43,6 +43,7 @@ ERIGON_LOG_LEVELS = {
}

def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -56,9 +57,9 @@ def launch(
    config = get_config(launcher.network_id, launcher.el_genesis_data,
                        image, existing_el_clients, log_level, extra_params)

-    service = add_service(service_id, config)
+    service = plan.add_service(service_id, config)

-    enode, enr = el_admin_node_info.get_enode_enr_for_node(service_id, RPC_PORT_ID)
+    enode, enr = el_admin_node_info.get_enode_enr_for_node(plan, service_id, RPC_PORT_ID)

    return el_client_context.new_el_client_context(
        "erigon",
......
@@ -53,7 +53,9 @@ VERBOSITY_LEVELS = {
    package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "5",
}

def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -69,9 +71,9 @@ def launch(
    config = get_config(launcher.network_id, launcher.el_genesis_data, launcher.prefunded_geth_keys_artifact_uuid,
                        launcher.prefunded_account_info, image, existing_el_clients, log_level, extra_params)

-    service = add_service(service_id, config)
+    service = plan.add_service(service_id, config)

-    enode, enr = el_admin_node_info.get_enode_enr_for_node(service_id, RPC_PORT_ID)
+    enode, enr = el_admin_node_info.get_enode_enr_for_node(plan, service_id, RPC_PORT_ID)

    return el_client_context.new_el_client_context(
        "geth",
......
@@ -40,7 +40,9 @@ NETHERMIND_LOG_LEVELS = {
    package_io.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE",
}

def launch(
+    plan,
    launcher,
    service_id,
    image,
@@ -53,9 +55,9 @@ def launch(
    config = get_config(launcher.el_genesis_data, image, existing_el_clients, log_level, extra_params)

-    service = add_service(service_id, config)
+    service = plan.add_service(service_id, config)

-    enode = el_admin_node_info.get_enode_for_node(service_id, RPC_PORT_ID)
+    enode = el_admin_node_info.get_enode_for_node(plan, service_id, RPC_PORT_ID)

    return el_client_context.new_el_client_context(
        "nethermind",
......
@@ -15,10 +15,10 @@ NETWORK_ID_TO_NAME = {
    "3": "ropsten",
}

-def launch(mev_boost_launcher, service_id, network_id):
+def launch(plan, mev_boost_launcher, service_id, network_id):
    config = get_config(mev_boost_launcher, network_id)

-    mev_boost_service = add_service(service_id, config)
+    mev_boost_service = plan.add_service(service_id, config)

    return mev_boost_context.new_mev_boost_context(mev_boost_service.ip_address, FLASHBOTS_MEV_BOOST_PORT)
......
@@ -43,26 +43,26 @@ MEV_BOOST_SHOULD_CHECK_RELAY = True
CL_CLIENT_CONTEXT_BOOTNODE = None

-def launch_participant_network(participants, network_params, global_log_level):
-    num_participants = len(participants)
+def launch_participant_network(plan, participants, network_params, global_log_level):
+    num_participants = len(participants)

-    print("Generating cl validator key stores")
+    plan.print("Generating cl validator key stores")
    cl_validator_data = cl_validator_keystores.generate_cl_validator_keystores(
+        plan,
        network_params.preregistered_validator_keys_mnemonic,
        num_participants,
        network_params.num_validator_keys_per_node,
    )

-    print(json.indent(json.encode(cl_validator_data)))
+    plan.print(json.indent(json.encode(cl_validator_data)))

    # We need to send the same genesis time to both the EL and the CL to ensure that timestamp based forking works as expected
    final_genesis_timestamp = (time.now() + CL_GENESIS_DATA_GENERATION_TIME + num_participants*CL_NODE_STARTUP_TIME).unix

-    print("Generating EL data")
+    plan.print("Generating EL data")
    el_genesis_generation_config_template = read_file(static_files.EL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH)
    el_genesis_data = el_genesis_data_generator.generate_el_genesis_data(
+        plan,
        el_genesis_generation_config_template,
        final_genesis_timestamp,
        network_params.network_id,
@@ -72,13 +72,13 @@ def launch_participant_network(participants, network_params, global_log_level):
    )

-    print(json.indent(json.encode(el_genesis_data)))
+    plan.print(json.indent(json.encode(el_genesis_data)))

-    print("Uploading GETH prefunded keys")
+    plan.print("Uploading GETH prefunded keys")

-    geth_prefunded_keys_artifact_id = upload_files(static_files.GETH_PREFUNDED_KEYS_DIRPATH)
+    geth_prefunded_keys_artifact_id = plan.upload_files(static_files.GETH_PREFUNDED_KEYS_DIRPATH)

-    print("Uploaded GETH files succesfully, launching EL participants")
+    plan.print("Uploaded GETH files succesfully, launching EL participants")

    el_launchers = {
        package_io.EL_CLIENT_TYPE.geth : {"launcher": geth.new_geth_launcher(network_params.network_id, el_genesis_data, geth_prefunded_keys_artifact_id, genesis_constants.PRE_FUNDED_ACCOUNTS), "launch_method": geth.launch},
@@ -99,6 +99,7 @@ def launch_participant_network(participants, network_params, global_log_level):
        el_service_id = "{0}{1}".format(EL_CLIENT_SERVICE_ID_PREFIX, index)

        el_client_context = launch_method(
+            plan,
            el_launcher,
            el_service_id,
            participant.el_client_image,
@@ -110,15 +111,16 @@ def launch_participant_network(participants, network_params, global_log_level):
        all_el_client_contexts.append(el_client_context)

-    print("Succesfully added {0} EL participants".format(num_participants))
+    plan.print("Succesfully added {0} EL participants".format(num_participants))

-    print("Generating CL data")
+    plan.print("Generating CL data")
    genesis_generation_config_yml_template = read_file(static_files.CL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH)
    genesis_generation_mnemonics_yml_template = read_file(static_files.CL_GENESIS_GENERATION_MNEMONICS_TEMPLATE_FILEPATH)
    total_number_of_validator_keys = network_params.num_validator_keys_per_node * num_participants
    cl_genesis_data = cl_genesis_data_generator.generate_cl_genesis_data(
+        plan,
        genesis_generation_config_yml_template,
        genesis_generation_mnemonics_yml_template,
        el_genesis_data,
@@ -132,9 +134,9 @@ def launch_participant_network(participants, network_params, global_log_level):
        network_params.capella_fork_epoch
    )

-    print(json.indent(json.encode(cl_genesis_data)))
+    plan.print(json.indent(json.encode(cl_genesis_data)))

-    print("Launching CL network")
+    plan.print("Launching CL network")

    cl_launchers = {
        package_io.CL_CLIENT_TYPE.lighthouse : {"launcher": lighthouse.new_lighthouse_launcher(cl_genesis_data), "launch_method": lighthouse.launch},
@@ -166,7 +168,7 @@ def launch_participant_network(participants, network_params, global_log_level):
        if hasattr(participant, "builder_network_params") and participant.builder_network_params != None:
            mev_boost_launcher = mev_boost_launcher_module.new_mev_boost_launcher(MEV_BOOST_SHOULD_CHECK_RELAY, participant.builder_network_params.relay_endpoints)
            mev_boost_service_id = MEV_BOOST_SERVICE_ID_PREFIX.format(1)
-            mev_boost_context = mev_boost_launcher_module.launch_mevboost(mev_boost_launcher, mev_boost_service_id, network_params.network_id)
+            mev_boost_context = mev_boost_launcher_module.launch_mevboost(plan, mev_boost_launcher, mev_boost_service_id, network_params.network_id)
            all_mevboost_contexts.append(mev_boost_context)
@@ -174,6 +176,7 @@ def launch_participant_network(participants, network_params, global_log_level):
        if index == 0:
            cl_client_context = launch_method(
+                plan,
                cl_launcher,
                cl_service_id,
                participant.cl_client_image,
@@ -189,6 +192,7 @@ def launch_participant_network(participants, network_params, global_log_level):
        else:
            boot_cl_client_ctx = all_cl_client_contexts[0]
            cl_client_context = launch_method(
+                plan,
                cl_launcher,
                cl_service_id,
                participant.cl_client_image,
@@ -204,7 +208,7 @@ def launch_participant_network(participants, network_params, global_log_level):
        all_cl_client_contexts.append(cl_client_context)

-    print("Succesfully added {0} CL participants".format(num_participants))
+    plan.print("Succesfully added {0} CL participants".format(num_participants))

    all_participants = []
......
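The participant network keeps its table-driven dispatch: each client type maps to a launcher struct plus a launch function, and plan is now the first argument at every call site. A condensed sketch of the EL dispatch as it appears in the hunks above (the loop shape and remaining arguments follow the diff, which elides them):

    el_launcher = el_launchers[participant.el_client_type]["launcher"]
    launch_method = el_launchers[participant.el_client_type]["launch_method"]
    el_service_id = "{0}{1}".format(EL_CLIENT_SERVICE_ID_PREFIX, index)

    el_client_context = launch_method(
        plan,  # threaded through to add_service/exec/wait inside the launcher
        el_launcher,
        el_service_id,
        participant.el_client_image,
        # remaining per-client arguments elided in the diff
    )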
@@ -24,6 +24,7 @@ SUCCESSFUL_EXEC_CMD_EXIT_CODE = 0

def generate_cl_genesis_data(
+    plan,
    genesis_generation_config_yml_template,
    genesis_generation_mnemonics_yml_template,
    el_genesis_data,
@@ -33,7 +34,7 @@ def generate_cl_genesis_data(
    seconds_per_slot,
    preregistered_validator_keys_mnemonic,
    total_num_validator_keys_to_preregister,
-    genesis_delay,
+    genesis_delay,
    capella_fork_epoch
):
@@ -55,10 +56,11 @@ def generate_cl_genesis_data(
    template_and_data_by_rel_dest_filepath[MNEMONICS_YML_FILENAME] = genesis_generation_mnemonics_template_and_data
    template_and_data_by_rel_dest_filepath[GENESIS_CONFIG_YML_FILENAME] = genesis_generation_config_template_and_data

-    genesis_generation_config_artifact_uuid = render_templates(template_and_data_by_rel_dest_filepath)
+    genesis_generation_config_artifact_uuid = plan.render_templates(template_and_data_by_rel_dest_filepath)

    # TODO(old) Make this the actual data generator - comment copied from the original module
    launcher_service_id = prelaunch_data_generator_launcher.launch_prelaunch_data_generator(
+        plan,
        {
            CONFIG_DIRPATH_ON_GENERATOR: genesis_generation_config_artifact_uuid,
            EL_GENESIS_DIRPATH_ON_GENERATOR: el_genesis_data.files_artifact_uuid,
@@ -81,8 +83,8 @@ def generate_cl_genesis_data(
        (" && ").join(all_dirpath_creation_commands),
    ]

-    dir_creation_cmd_result = exec(struct(service_id=launcher_service_id, command=dir_creation_cmd))
-    assert(dir_creation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+    dir_creation_cmd_result = plan.exec(struct(service_id=launcher_service_id, command=dir_creation_cmd))
+    plan.assert(dir_creation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

    # Copy files to output
@@ -98,8 +100,8 @@ def generate_cl_genesis_data(
            filepath_on_generator,
            OUTPUT_DIRPATH_ON_GENERATOR,
        ]
-        cmd_result = exec(struct(service_id=launcher_service_id, command=cmd))
-        assert(cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+        cmd_result = plan.exec(struct(service_id=launcher_service_id, command=cmd))
+        plan.assert(cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

    # Generate files that need dynamic content
    content_to_write_to_output_filename = {
@@ -116,8 +118,8 @@ def generate_cl_genesis_data(
                destFilepath,
            )
        ]
-        cmd_result = exec(struct(service_id=launcher_service_id, command=cmd))
-        assert(cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+        cmd_result = plan.exec(struct(service_id=launcher_service_id, command=cmd))
+        plan.assert(cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

    cl_genesis_generation_cmd = [
@@ -130,10 +132,10 @@ def generate_cl_genesis_data(
        "--state-output", shared_utils.path_join(OUTPUT_DIRPATH_ON_GENERATOR, GENESIS_STATE_FILENAME)
    ]

-    genesis_generation_result = exec(struct(service_id=launcher_service_id, command=cl_genesis_generation_cmd))
-    assert(genesis_generation_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+    genesis_generation_result = plan.exec(struct(service_id=launcher_service_id, command=cl_genesis_generation_cmd))
+    plan.assert(genesis_generation_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

-    cl_genesis_data_artifact_uuid = store_service_files(launcher_service_id, OUTPUT_DIRPATH_ON_GENERATOR)
+    cl_genesis_data_artifact_uuid = plan.store_service_files(launcher_service_id, OUTPUT_DIRPATH_ON_GENERATOR)

    jwt_secret_rel_filepath = shared_utils.path_join(
        shared_utils.path_base(OUTPUT_DIRPATH_ON_GENERATOR),
@@ -155,7 +157,7 @@ def generate_cl_genesis_data(
    )

    # we cleanup as the data generation is done
-    remove_service(launcher_service_id)
+    plan.remove_service(launcher_service_id)

    return result
......
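The exec-then-assert pairing repeated across these generators is the plan-object form of "run a command, fail fast on a nonzero exit". A self-contained sketch; the service id and command are hypothetical:

    SUCCESSFUL_EXEC_CMD_EXIT_CODE = 0

    # Run a shell command inside an existing service...
    cmd_result = plan.exec(struct(
        service_id = "prelaunch-data-generator",  # hypothetical id
        command = ["sh", "-c", "mkdir -p /tmp/output"],
    ))
    # ...and abort the plan unless it exited 0.
    plan.assert(cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)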
@@ -29,11 +29,13 @@ TEKU_SECRETS_DIRNAME = "teku-secrets"
#
# num_keys / num_nodes keys
def generate_cl_validator_keystores(
+    plan,
    mnemonic,
    num_nodes,
    num_validators_per_node):

    service_id = prelaunch_data_generator_launcher.launch_prelaunch_data_generator(
+        plan,
        {},
    )
@@ -64,13 +66,13 @@ def generate_cl_validator_keystores(
    command_str = " && ".join(all_sub_command_strs)

-    command_result = exec(struct(service_id=service_id, command=["sh", "-c", command_str]))
-    assert(command_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+    command_result = plan.exec(struct(service_id=service_id, command=["sh", "-c", command_str]))
+    plan.assert(command_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

    # Store outputs into files artifacts
    keystore_files = []
    for idx, output_dirpath in enumerate(all_output_dirpaths):
-        artifact_uuid = store_service_files(service_id, output_dirpath)
+        artifact_uuid = plan.store_service_files(service_id, output_dirpath)

        # This is necessary because the way Kurtosis currently implements artifact-storing is
        base_dirname_in_artifact = shared_utils.path_base(output_dirpath)
@@ -95,10 +97,10 @@ def generate_cl_validator_keystores(
            PRYSM_PASSWORD_FILEPATH_ON_GENERATOR,
        ),
    ]
-    write_prysm_password_file_cmd_result = exec(struct(service_id=service_id, command=write_prysm_password_file_cmd))
-    assert(write_prysm_password_file_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+    write_prysm_password_file_cmd_result = plan.exec(struct(service_id=service_id, command=write_prysm_password_file_cmd))
+    plan.assert(write_prysm_password_file_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

-    prysm_password_artifact_uuid = store_service_files(service_id, PRYSM_PASSWORD_FILEPATH_ON_GENERATOR)
+    prysm_password_artifact_uuid = plan.store_service_files(service_id, PRYSM_PASSWORD_FILEPATH_ON_GENERATOR)

    result = keystores_result.new_generate_keystores_result(
        prysm_password_artifact_uuid,
@@ -107,5 +109,5 @@ def generate_cl_validator_keystores(
    )

    # we cleanup as the data generation is done
-    remove_service(service_id)
+    plan.remove_service(service_id)
    return result
@@ -26,6 +26,7 @@ all_genesis_generation_cmds = {
}

def generate_el_genesis_data(
+    plan,
    genesis_generation_config_template,
    genesis_unix_timestamp,
    network_id,
@@ -47,11 +48,12 @@ def generate_el_genesis_data(
    template_and_data_by_rel_dest_filepath = {}
    template_and_data_by_rel_dest_filepath[GENESIS_CONFIG_FILENAME] = genesis_config_file_template_and_data

-    genesis_generation_config_artifact_uuid = render_templates(template_and_data_by_rel_dest_filepath)
+    genesis_generation_config_artifact_uuid = plan.render_templates(template_and_data_by_rel_dest_filepath)

    # TODO(old) Make this the actual data generator - comment copied from the original module
    launcher_service_id = prelaunch_data_generator_launcher.launch_prelaunch_data_generator(
+        plan,
        {
            CONFIG_DIRPATH_ON_GENERATOR: genesis_generation_config_artifact_uuid,
        },
@@ -78,8 +80,8 @@ def generate_el_genesis_data(
    ]

-    dir_creation_cmd_result = exec(struct(service_id=launcher_service_id, command=dir_creation_cmd))
-    assert(dir_creation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+    dir_creation_cmd_result = plan.exec(struct(service_id=launcher_service_id, command=dir_creation_cmd))
+    plan.assert(dir_creation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

    genesis_config_filepath_on_generator = shared_utils.path_join(CONFIG_DIRPATH_ON_GENERATOR, GENESIS_CONFIG_FILENAME)
    genesis_filename_to_relative_filepath_in_artifact = {}
@@ -94,8 +96,8 @@ def generate_el_genesis_data(
            " ".join(cmd)
        ]

-        cmd_to_execute_result = exec(struct(service_id=launcher_service_id, command=cmd_to_execute))
-        assert(cmd_to_execute_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+        cmd_to_execute_result = plan.exec(struct(service_id=launcher_service_id, command=cmd_to_execute))
+        plan.assert(cmd_to_execute_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

        genesis_filename_to_relative_filepath_in_artifact[output_filename] = shared_utils.path_join(
@@ -113,10 +115,10 @@ def generate_el_genesis_data(
        )
    ]

-    jwt_secret_generation_cmd_result = exec(struct(service_id=launcher_service_id, command=jwt_secret_generation_cmd))
-    assert(jwt_secret_generation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)
+    jwt_secret_generation_cmd_result = plan.exec(struct(service_id=launcher_service_id, command=jwt_secret_generation_cmd))
+    plan.assert(jwt_secret_generation_cmd_result["code"], "==", SUCCESSFUL_EXEC_CMD_EXIT_CODE)

-    elGenesisDataArtifactUuid = store_service_files(launcher_service_id, OUTPUT_DIRPATH_ON_GENERATOR)
+    elGenesisDataArtifactUuid = plan.store_service_files(launcher_service_id, OUTPUT_DIRPATH_ON_GENERATOR)

    result = el_genesis.new_el_genesis_data(
        elGenesisDataArtifactUuid,
@@ -128,7 +130,7 @@ def generate_el_genesis_data(
    )

    # we cleanup as the data generation is done
-    remove_service(launcher_service_id)
+    plan.remove_service(launcher_service_id)

    return result
......
@@ -9,7 +9,7 @@ ENTRYPOINT_ARGS = [
]

# Launches a prelaunch data generator IMAGE, for use in various of the genesis generation
-def launch_prelaunch_data_generator(files_artifact_mountpoints):
+def launch_prelaunch_data_generator(plan, files_artifact_mountpoints):
    config = get_config(files_artifact_mountpoints)
@@ -18,7 +18,7 @@ def launch_prelaunch_data_generator(files_artifact_mountpoints):
        time.now().unix_nano,
    )

-    add_service(service_id, config)
+    plan.add_service(service_id, config)

    return service_id
......
@@ -15,7 +15,7 @@ USED_PORTS = {
    HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUMBER, shared_utils.TCP_PROTOCOL, shared_utils.HTTP_APPLICATION_PROTOCOL)
}

-def launch_prometheus(config_template, cl_client_contexts):
+def launch_prometheus(plan, config_template, cl_client_contexts):
    all_cl_nodes_metrics_info = []
    for client in cl_client_contexts:
        all_cl_nodes_metrics_info.extend(client.cl_nodes_metrics_info)
@@ -25,10 +25,10 @@ def launch_prometheus(config_template, cl_client_contexts):
    template_and_data_by_rel_dest_filepath = {}
    template_and_data_by_rel_dest_filepath[CONFIG_FILENAME] = template_and_data

-    config_files_artifact_uuid = render_templates(template_and_data_by_rel_dest_filepath)
+    config_files_artifact_uuid = plan.render_templates(template_and_data_by_rel_dest_filepath)

    config = get_config(config_files_artifact_uuid)
-    prometheus_service = add_service(SERVICE_ID, config)
+    prometheus_service = plan.add_service(SERVICE_ID, config)

    private_ip_address = prometheus_service.ip_address
    prometheus_service_http_port = prometheus_service.ports[HTTP_PORT_ID].number
......
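forkmon, grafana, and prometheus all share the same render-and-mount flow, now on plan: pair each template with its data, render the set into a files artifact, and hand the artifact to the service config. Condensed from the prometheus hunk above; the destination filename is an assumption, since CONFIG_FILENAME's value is elided:

    # Pair a template with the data that fills it (helper from shared_utils).
    template_and_data = shared_utils.new_template_and_data(config_template, template_data)

    template_and_data_by_rel_dest_filepath = {}
    template_and_data_by_rel_dest_filepath["prometheus.yml"] = template_and_data  # assumed filename

    # Render into a files artifact, then launch the service with it mounted.
    config_files_artifact_uuid = plan.render_templates(template_and_data_by_rel_dest_filepath)
    config = get_config(config_files_artifact_uuid)
    prometheus_service = plan.add_service(SERVICE_ID, config)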
@@ -9,18 +9,18 @@ SYNCHRONOUS_ENTRYPOINT_ARGS = [
]

# this is broken check - https://github.com/ethereum/merge-testnet-verifier/issues/4
-def launch_testnet_verifier(params, el_client_contexts, cl_client_contexts):
+def launch_testnet_verifier(plan, params, el_client_contexts, cl_client_contexts):
    config = get_asynchronous_verification_config(params, el_client_contexts, cl_client_contexts)
-    add_service(SERVICE_ID, config)
+    plan.add_service(SERVICE_ID, config)

-def run_synchronous_testnet_verification(params, el_client_contexts, cl_client_contexts):
+def run_synchronous_testnet_verification(plan, params, el_client_contexts, cl_client_contexts):
    config = get_synchronous_verification_config()
-    add_service(SERVICE_ID, config)
+    plan.add_service(SERVICE_ID, config)

    command = get_cmd(params, el_client_contexts, cl_client_contexts, True)
-    exec_result = exec(struct(service_id=SERVICE_ID, command=command))
-    assert(exec_result["code"], "==", 0)
+    exec_result = plan.exec(struct(service_id=SERVICE_ID, command=command))
+    plan.assert(exec_result["code"], "==", 0)

def get_cmd(params, el_client_contexts, cl_client_contexts, add_binary_name):
......
IMAGE_NAME = "kurtosistech/tx-fuzz:0.2.0"
SERVICE_ID = "transaction-spammer"

-def launch_transaction_spammer(prefunded_addresses, el_client_context):
+def launch_transaction_spammer(plan, prefunded_addresses, el_client_context):
    config = get_config(prefunded_addresses, el_client_context)
-    add_service(SERVICE_ID, config)
+    plan.add_service(SERVICE_ID, config)

def get_config(prefunded_addresses, el_client_context):
......