Commit 4a561806 authored by Gyanendra Mishra, committed by GitHub

Merge pull request #12 from kurtosis-tech/gyani/smaller-pr

pushed fixes into its own smaller PR
parents 526aaa47 d26144ad
@@ -10,7 +10,7 @@ This is the Startosis version of the popular [eth2-merge-kurtosis-module](https:
 - [x] forkmon (this is blocked on CL clients running)
 - [x] prometheus (this is blocked on CL clients running)
 - [x] grafana (this is blocked on prometheus running)
-- [ ] grafana needs an upload files endpoint in Startosis
+- [x] grafana needs an upload files endpoint in Startosis
 - [x] testnet_verifier (this is blocked on CL/EL clients running)
 - [x] transaction_spammer (this is blocked on EL clients running)
 - [ ] participant_network/participant_network
...
@@ -7,5 +7,8 @@
 - Added a lot of simple objects that just keep data
 - Added monitoring on top of the repo
+### Fixes
+- Fixes some bugs with the initial implementation of the monitors
 # 0.0.0
 * Initial commit
@@ -5,7 +5,7 @@ SERVICE_ID = "forkmon"
 IMAGE_NAME = "ralexstokes/ethereum_consensus_monitor:latest"
 HTTP_PORT_ID = "http"
-HTTP_PORT_NUMBER = uint16(80)
+HTTP_PORT_NUMBER = 80
 HTTP_PROTOCOL = "TCP"
 FORKMON_CONFIG_FILENAME = "forkmon-config.toml"
@@ -51,12 +51,12 @@ def get_service_config(config_files_artifact_uuid):
         used_ports = USED_PORTS,
         files_artifact_mount_dirpaths = {
             config_files_artifact_uuid: FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
-        }
+        },
         cmd_args = ["--config-path", config_file_path]
     )
-def new_config_template_data():
+def new_config_template_data(listen_port_num, cl_client_info, seconds_per_slot, slots_per_epoch, genesis_unix_timestamp):
     return {
         "ListenPortNum": listen_port_num,
         "CLClientInfo": cl_client_info,
...
load("github.com/kurtosis-tech/eth2-module/src/shared_utils/shared_utils.star", "new_port_spec", "new_template_and_data", "path_join") load("github.com/kurtosis-tech/eth2-module/src/shared_utils/shared_utils.star", "new_port_spec", "new_template_and_data", "path_join")
load("github.com/kurtosis-tech/eth2-module/src/static_files/static_files.star", "GRAFANA_DASHBOARDS_CONFIG_DIRPATH")
SERVICE_ID = "grafana" SERVICE_ID = "grafana"
...@@ -20,13 +21,14 @@ GRAFANA_CONFIG_DIRPATH_ON_SERVICE = "/config" ...@@ -20,13 +21,14 @@ GRAFANA_CONFIG_DIRPATH_ON_SERVICE = "/config"
GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE = "/dashboards" GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE = "/dashboards"
GRAFANA_DASHBOARDS_FILEPATH_ON_SERVICE = GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE + "/dashboard.json" GRAFANA_DASHBOARDS_FILEPATH_ON_SERVICE = GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE + "/dashboard.json"
USED_PORTS = { USED_PORTS = {
HTTP_PORT_ID: new_port_spec(HTTP_PORT_NUMBER_UINT16, HTTP_PORT_PROTOCOL) HTTP_PORT_ID: new_port_spec(HTTP_PORT_NUMBER_UINT16, HTTP_PORT_PROTOCOL)
} }
def launch_grafana(datasource_config_template, dashboard_providers_config_template, prometheus_private_url): def launch_grafana(datasource_config_template, dashboard_providers_config_template, prometheus_private_url):
grafana_config_artifacts_uuid, grafana_dashboards_uuid = get_grafana_config_dir_artifact_uuid(datasource_config_template, dashboard_providers_config_template, prometheus_private_url) grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid = get_grafana_config_dir_artifact_uuid(datasource_config_template, dashboard_providers_config_template, prometheus_private_url)
service_config = get_service_config(grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid) service_config = get_service_config(grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid)
...@@ -48,15 +50,26 @@ def get_grafana_config_dir_artifact_uuid(datasource_config_template, dashboard_p ...@@ -48,15 +50,26 @@ def get_grafana_config_dir_artifact_uuid(datasource_config_template, dashboard_p
grafana_config_artifacts_uuid = render_templates(template_and_data_by_rel_dest_filepath) grafana_config_artifacts_uuid = render_templates(template_and_data_by_rel_dest_filepath)
# TODO return actual UUID after upload_files is implemented grafana_dashboards_artifacts_uuid = upload_files(GRAFANA_DASHBOARDS_CONFIG_DIRPATH)
grafana_dashboards_artifacts_uuid = ""
return grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid return grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid
def get_service_config(grafana_config_artifacts_uuid, grafana_dashboards_artifacts_uuid):
return struct(
container_image_name = IMAGE_NAME,
used_ports = USED_PORTS,
env_vars = {CONFIG_DIRPATH_ENV_VAR: GRAFANA_CONFIG_DIRPATH_ON_SERVICE},
files_artifact_mount_dirpaths = {
grafana_config_artifacts_uuid : GRAFANA_CONFIG_DIRPATH_ON_SERVICE,
grafana_dashboards_artifacts_uuid: GRAFANA_DASHBOARDS_DIRPATH_ON_SERVICE
}
)
def new_datasource_config_template_data(prometheus_url): def new_datasource_config_template_data(prometheus_url):
return { return {
"PromtehusURL": prometheus_url "PrometheusURL": prometheus_url
} }
......
@@ -55,7 +55,7 @@ def parse_input(input_args):
                 participant_value[sub_attr] = get_value_or_name(sub_value)
             # if not we grab the value from the default value dictionary
             elif sub_attr not in (DESCRIPTOR_ATTR_NAME):
-                participant_value[attr] = default_input["participants"][0].get(sub_attr, "")
+                participant_value[sub_attr] = default_input["participants"][0].get(sub_attr, None)
         participants.append(participant_value)
     result["participants"] = participants
@@ -105,7 +105,7 @@ def parse_input(input_args):
     required_num_validtors = 2 * result["network_params"]["slots_per_epoch"]
     actual_num_validators = len(result["participants"]) * result["network_params"]["num_validators_per_keynode"]
-    if required_num_validtors < actual_num_validators:
+    if required_num_validtors > actual_num_validators:
         fail("required_num_validtors - {0} is greater than actual_num_validators - {1}".format(required_num_validtors, actual_num_validators))
     # Remove if nethermind doesn't break as second node we already test above if its the first node
@@ -115,6 +115,14 @@ def parse_input(input_args):
     return result
+def get_client_log_level_or_default(participant_log_level, global_log_level, client_log_levels):
+    log_level = participant_log_level
+    if log_level == "":
+        log_level = client_log_levels.get(global_log_level, "")
+        if log_level == "":
+            fail("No participant log level defined, and the client log level has no mapping for global log level '{0}'".format(global_log_level))
+    return log_level
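For reference, a minimal sketch of how this fallback is intended to resolve, using a hypothetical per-client mapping (the real tables live in the individual client launchers):

    GETH_LOG_LEVELS = {"info": "3", "warn": "2", "error": "1", "debug": "4"}    # illustrative only
    get_client_log_level_or_default("", "info", GETH_LOG_LEVELS)    # participant level unset -> "3" from the global level
    get_client_log_level_or_default("5", "info", GETH_LOG_LEVELS)   # participant override wins -> "5"
    get_client_log_level_or_default("", "trace", GETH_LOG_LEVELS)   # no mapping for the global level -> fail(...)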
 def get_value_or_name(value):
     if type(value) == ENUM_TYPE:
@@ -132,7 +140,7 @@ def default_module_input():
         "wait_for_finalization": False,
         "wait_for_verifications": False,
         "verifications_epoch_limit": 5,
-        "global_log_level": "info"
+        "global_client_log_level": "info"
     }
@@ -155,6 +163,10 @@ def default_partitcipants():
         "el_client_log_level": "",
         "cl_client_type": "lighthouse",
         "cl_client_image": "",
-        "cl_client_log_level": ""
+        "cl_client_log_level": "",
+        "beacon_extra_params": [],
+        "el_extra_params": [],
+        "validator_extra_params": [],
+        "builder_network_params": None
     }
     return [participant]
@@ -24,9 +24,9 @@ def launch(mev_boost_launcher, service_id, network_id):
 def get_service_config(mev_boost_launcher, network_id):
+    command = ["/app/mev-boost"]
     network_name = NETWORK_ID_TO_NAME.get(network_id, "network-{0}".format(network_id))
-    command = ["mev-boost"]
     command.append("-{0}".format(network_name))
     if mev_boost_launcher.should_check_relay:
...
@@ -52,7 +52,7 @@ def generate_cl_genesis_data(
     genesis_generation_config_artifact_uuid = render_templates(template_and_data_by_rel_dest_filepath)
-    # TODO Make this the actual data generator - comment copied from the original module
+    # TODO(old) Make this the actual data generator - comment copied from the original module
     launcher_service_id = launch_prelaunch_data_generator(
         {
             genesis_generation_config_artifact_uuid: CONFIG_DIRPATH_ON_GENERATOR,
...
@@ -3,11 +3,11 @@
 def new_generate_keystores_result(prysm_password_artifact_uuid, prysm_password_relative_filepath, per_node_keystores):
     return struct(
         #Files artifact UUID where the Prysm password is stored
-        PrysmPasswordArtifactUUid = prysm_password_artifact_uuid,
+        prysm_password_artifact_uuid = prysm_password_artifact_uuid,
         # Relative to root of files artifact
-        PrysmPasswordRelativeFilepath = prysm_password_relative_filepath,
+        prysm_password_relative_filepath = prysm_password_relative_filepath,
         # Contains keystores-per-client-type for each node in the network
-        PerNodeKeystores = per_node_keystores
+        per_node_keystores = per_node_keystores
     )
 # One of these will be created per node we're trying to start
 def new_keystore_files(files_artifact_uuid, raw_keys_relative_dirpath, raw_secrets_relative_dirpath, nimbus_keys_relative_dirpath, prysm_relative_dirpath, teku_keys_relative_dirpath, teku_secrets_relative_dirpath):
     return struct(
-        FilesArtifactUUID = files_artifact_uuid,
+        files_artifact_uuid = files_artifact_uuid,
         # ------------ All directories below are relative to the root of the files artifact ----------------
-        RawKeysRelativeDirpath = raw_keys_relative_dirpath,
-        RawSecretsRelativeDirpath = raw_secrets_relative_dirpath,
-        NimbusKeysRelativeDirpath = nimbus_keys_relative_dirpath,
-        PrysmRelativeDirpath = prysm_relative_dirpath,
-        TekuKeysRelativeDirpath = teku_keys_relative_dirpath,
-        TekuSecretsRelativeDirpath = teku_secrets_relative_dirpath
+        raw_keys_relative_dirpath = raw_keys_relative_dirpath,
+        raw_secrets_relative_dirpath = raw_secrets_relative_dirpath,
+        nimbus_keys_relative_dirpath = nimbus_keys_relative_dirpath,
+        prysm_relative_dirpath = prysm_relative_dirpath,
+        teku_keys_relative_dirpath = teku_keys_relative_dirpath,
+        teku_secrets_relative_dirpath = teku_secrets_relative_dirpath
     )
@@ -45,7 +45,7 @@ def generate_el_genesis_data(
     genesis_generation_config_artifact_uuid = render_templates(template_and_data_by_rel_dest_filepath)
-    # TODO Make this the actual data generator - comment copied from the original module
+    # TODO(old) Make this the actual data generator - comment copied from the original module
     launcher_service_id = launch_prelaunch_data_generator(
         {
             genesis_generation_config_artifact_uuid: CONFIG_DIRPATH_ON_GENERATOR,
...
@@ -26,7 +26,7 @@ def get_service_config(
     files_artifact_mountpoints,
 ):
     return struct(
-        # TODO used ports is supposed to be empty
+        # TODO remove this when used_ports is optional to pass
         used_ports = {},
         container_image_name = IMAGE,
         entry_point_args = ENTRYPOINT_ARGS,
...
@@ -2,7 +2,7 @@ load("github.com/kurtosis-tech/eth2-module/src/shared_utils/shared_utils.star",
 SERVICE_ID = "prometheus"
-# TODO I'm not sure if we should use latest version or ping an specific version instead
+# TODO(old) I'm not sure if we should use latest version or ping an specific version instead
 IMAGE_NAME = "prom/prometheus:latest"
 HTTP_PORT_ID = "http"
@@ -21,7 +21,7 @@ USED_PORTS = {
 def launch_prometheus(config_template, cl_client_contexts):
     all_cl_nodes_metrics_info = []
     for client in cl_client_contexts:
-        all_cl_nodes_metrics_info.append(client.cl_nodes_metrics_info)
+        all_cl_nodes_metrics_info.extend(client.cl_nodes_metrics_info)
     template_data = new_config_template_data(all_cl_nodes_metrics_info)
     template_data_json = json.encode(template_data)
...
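The append-to-extend change matters because each CL client context carries a list of metrics-info entries: append would nest that list, while extend flattens it into a single list, which is presumably what the Prometheus config template iterates over. A minimal sketch of the difference:

    all_info = []
    all_info.append(["node-0-info", "node-1-info"])   # -> [["node-0-info", "node-1-info"]]  (nested)
    all_info = []
    all_info.extend(["node-0-info", "node-1-info"])   # -> ["node-0-info", "node-1-info"]    (flat)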
TCP_PROTOCOL = "TCP"
UDP_PROTOCOL = "UDP"
def new_template_and_data(template, template_data_json): def new_template_and_data(template, template_data_json):
return {"template": template, "template_data_json": template_data_json} return {"template": template, "template_data_json": template_data_json}
...@@ -12,5 +16,13 @@ def path_base(path): ...@@ -12,5 +16,13 @@ def path_base(path):
return split_path[-1] return split_path[-1]
def path_dir(path):
split_path = path.split("/")
if len(split_path) <= 1:
return "."
split_path = split_path[:-1]
return "/".join(split_path) or "/"
 def new_port_spec(number, protocol):
     return struct(number = number, protocol = protocol)
+# The path on the module container where static files are housed
+STATIC_FILES_DIRPATH = "github.com/kurtosis-tech/eth2-module/static_files"
+# Geth + CL genesis generation
+GENESIS_GENERATION_CONFIG_DIRPATH = STATIC_FILES_DIRPATH + "/genesis-generation-config"
+EL_GENESIS_GENERATION_CONFIG_DIRPATH = GENESIS_GENERATION_CONFIG_DIRPATH + "/el"
+EL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH = EL_GENESIS_GENERATION_CONFIG_DIRPATH + \
+    "/genesis-config.yaml.tmpl"
+CL_GENESIS_GENERATION_CONFIG_DIRPATH = GENESIS_GENERATION_CONFIG_DIRPATH + "/cl"
+CL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH = CL_GENESIS_GENERATION_CONFIG_DIRPATH + \
+    "/config.yaml.tmpl"
+CL_GENESIS_GENERATION_MNEMONICS_TEMPLATE_FILEPATH = CL_GENESIS_GENERATION_CONFIG_DIRPATH + \
+    "/mnemonics.yaml.tmpl"
+# Prefunded keys
+PREFUNDED_KEYS_DIRPATH = STATIC_FILES_DIRPATH + "/genesis-prefunded-keys"
+GETH_PREFUNDED_KEYS_DIRPATH = PREFUNDED_KEYS_DIRPATH + "/geth"
+# Forkmon config
+FORKMON_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \
+    "/forkmon-config/config.toml.tmpl"
+# Prometheus config
+PROMETHEUS_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \
+    "/prometheus-config/prometheus.yml.tmpl"
+# Grafana config
+GRAFANA_CONFIG_DIRPATH = "/grafana-config"
+GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \
+    GRAFANA_CONFIG_DIRPATH + "/templates/datasource.yml.tmpl"
+GRAFANA_DASHBOARD_PROVIDERS_CONFIG_TEMPLATE_FILEPATH = STATIC_FILES_DIRPATH + \
+    GRAFANA_CONFIG_DIRPATH + "/templates/dashboard-providers.yml.tmpl"
+GRAFANA_DASHBOARDS_CONFIG_DIRPATH = STATIC_FILES_DIRPATH + \
+    GRAFANA_CONFIG_DIRPATH + "/dashboards/dashboard.json"
@@ -2,10 +2,10 @@ IMAGE_NAME = "marioevz/merge-testnet-verifier:latest"
 SERVICE_ID = "testnet-verifier"
 # We use Docker exec commands to run the commands we need, so we override the default
-SYNCHRONOUS_ENTRYPOINT_ARGS = {
+SYNCHRONOUS_ENTRYPOINT_ARGS = [
     "sleep",
     "999999",
-}
+]
 # this is broken check - https://github.com/ethereum/merge-testnet-verifier/issues/4
@@ -18,7 +18,7 @@ def run_synchronous_testnet_verification(params, el_client_contexts, cl_client_c
     service_config = get_synchronous_verification_service_config()
     add_service(SERVICE_ID, service_config)
-    command = get_cmd()
+    command = get_cmd(params, el_client_contexts, cl_client_contexts, True)
     exec(SERVICE_ID, command)
@@ -28,7 +28,8 @@ def get_cmd(params, el_client_contexts, cl_client_contexts, add_binary_name):
     if add_binary_name:
         command.append("./merge_testnet_verifier")
-    command.append("--ttd 0")
+    command.append("--ttd")
+    command.append("0")
     for el_client_context in el_client_contexts:
         command.append("--client")
@@ -38,16 +39,18 @@ def get_cmd(params, el_client_contexts, cl_client_contexts, add_binary_name):
         command.append("--client")
         command.append("{0},http://{1}:{2}".format(cl_client_context.client_name, cl_client_context.ip_addr, cl_client_context.http_port_num))
-    command.append("--ttd-epoch-limit 0")
+    command.append("--ttd-epoch-limit")
+    command.append("0")
     command.append("--verif-epoch-limit")
-    # TODO make this an actual param
-    command.append("{0}".fomrat(param.verifications_epoch_limit))
+    command.append("{0}".format(params.verifications_epoch_limit))
+    return command
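With these fixes, get_cmd emits each flag and its value as separate argv elements instead of a single "--flag value" string. An illustrative result for a small network (client entries, addresses, and the epoch limit below are made-up example values, not output from the module):

    ["./merge_testnet_verifier",
     "--ttd", "0",
     "--client", "geth,http://172.16.0.5:8545",
     "--client", "lighthouse,http://172.16.0.6:4000",
     "--ttd-epoch-limit", "0",
     "--verif-epoch-limit", "5"]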
 def get_asynchronous_verification_service_config(params, el_client_contexts, cl_client_contexts):
-    commands = get_cmd(params, el_client_contexts, cl_client_contexts)
+    commands = get_cmd(params, el_client_contexts, cl_client_contexts, False)
     return struct(
         container_image_name = IMAGE_NAME,
         cmd_args = commands,
@@ -59,7 +62,7 @@ def get_asynchronous_verification_service_config(params, el_client_contexts, cl_
 def get_synchronous_verification_service_config():
     return struct(
         container_image_name = IMAGE_NAME,
-        entry_point_args = ENTRYPOINT_ARGS,
+        entry_point_args = SYNCHRONOUS_ENTRYPOINT_ARGS,
         # TODO remove this when used_ports is optional in add_service
         used_ports = {},
     )
@@ -19,10 +19,11 @@ def get_service_config(prefunded_addresses, el_client_context):
     return struct(
         container_image_name = IMAGE_NAME,
         cmd_args = [
-            "http://{0}:{1}".fomrat(el_client_context.ip_addr, el_client_context.rpc_port_num),
+            "http://{0}:{1}".format(el_client_context.ip_addr, el_client_context.rpc_port_num),
             "spam",
             comma_separated_private_keys,
             comma_separated_addresses
-        ]
+        ],
+        used_ports = {}
     )
@@ -8,7 +8,7 @@ CONFIG_NAME: testnet # needs to exist because of Prysm. Otherwise it conflicts w
 # `2**14` (= 16,384)
 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: {{ .NumValidatorKeysToPreregister }}
 MIN_GENESIS_TIME: {{ .UnixTimestamp }}
-GENESIS_FORK_VERSION: 0x10000038
+GENESIS_FORK_VERSION: 0x30000038
 GENESIS_DELAY: 300
 # Forking
...
 apiVersion: 1
-# TODO we can improve this, adding an array that we can use to iterate
-# TODO an get different datasource configurations
+# TODO(old) we can improve this, adding an array that we can use to iterate
+# TODO(old) an get different datasource configurations
 datasources:
 - name: Prometheus
   type: prometheus
...
@@ -31,10 +31,10 @@ message ModuleInput {
   optional uint64 verifications_epoch_limit = 6;
   // The log level that the started clients should log at
-  optional GlobalLogLevel global_log_level = 7;
+  optional GlobalClientLogLevel global_client_log_level = 7;
 }
-enum GlobalLogLevel {
+enum GlobalClientLogLevel {
   info = 0;
   error = 1;
   warn = 2;
@@ -54,7 +54,7 @@ enum CLClientType {
   teku = 1;
   nimbus = 2;
   prysm = 3;
-  loadstar = 4;
+  lodestar = 4;
 }
 message BuilderNetworkParams {
...