Commit 4f054d05 authored by Tobias Wohland's avatar Tobias Wohland Committed by GitHub

feat: Add static ports (#677)

## Summary

This PR adds the possibility to define static ports for most of the
components included in the ethereum-package for kurtosis.

Furthermore, some parts of the code that are relevant for this PR have been refactored.

## Tasks

- [x] Add new settings to network_params
- [x] Update input_parser
- [x] Update shared_utils
- [x] Add static ports to el clients
- [x] Add static ports to cl clients
- [x] Add static ports to vc clients
- [x] Add static ports to additional services
- [x] Update documentation

---------
Co-authored-by: Barnabas Busa <busa.barnabas@gmail.com>
parent cc2949b6
......@@ -15,4 +15,15 @@ participants:
cl_type: grandine
additional_services: []
port_publisher:
public_port_start: 30000
el:
enabled: true
public_port_start: 32000
cl:
enabled: true
public_port_start: 33000
vc:
enabled: true
public_port_start: 34000
additional_services:
enabled: true
public_port_start: 35000
......@@ -41,8 +41,19 @@ jobs:
echo "Skipping ./.github/tests/mix-with-tools-mev.yaml"
fi
- name: Check if Discord Webhook is Set
id: check_discord_webhook
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
run: |
if [ -z "$DISCORD_WEBHOOK" ]; then
echo "discord_webhook_set=false" >> $GITHUB_ENV
else
echo "discord_webhook_set=true" >> $GITHUB_ENV
fi
- name: Notify
if: cancelled() || failure()
if: (cancelled() || failure()) && env.discord_webhook_set == 'true'
uses: Ilshidur/action-discord@master
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
......@@ -54,12 +65,11 @@ jobs:
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Extract branch name
shell: bash
run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
id: extract_branch
- name: Kurtosis Assertoor GitHub Action
uses: ethpandaops/kurtosis-assertoor-github-action@v1
with:
ethereum_package_branch: ${{ steps.extract_branch.outputs.branch }}
kurtosis_extra_args: "--image-download always --non-blocking-tasks --verbosity DETAILED"
ethereum_package_branch: ""
ethereum_package_args: .github/tests/mix-assert.yaml
ethereum_package_url: "$GITHUB_WORKSPACE"
persistent_logs: "true"
......@@ -4,7 +4,7 @@ on:
pull_request:
workflow_dispatch:
schedule:
- cron: '0 2 * * *'
- cron: '0 2 * * *'
concurrency:
group: "scheduled-test"
......@@ -21,15 +21,18 @@ jobs:
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Setup minikube
id: minikube
uses: medyagh/setup-minikube@latest
- name: Get kubeconfig
id: kubeconfig
shell: bash
run: |
cat ~/.kube/config > kubeconfig
echo "kubeconfig=$(cat kubeconfig | base64 -w 0)" >> $GITHUB_OUTPUT
# run kurtosis test and assertoor
- name: Run kurtosis testnet
id: testnet
......@@ -40,15 +43,27 @@ jobs:
kubernetes_config: "${{ steps.kubeconfig.outputs.kubeconfig }}"
kubernetes_cluster: "minikube"
kubernetes_storage_class: "standard"
ethereum_package_branch: "${{ github.head_ref || github.ref_name }}"
ethereum_package_branch: ""
ethereum_package_args: "${{ matrix.payload.file }}"
ethereum_package_url: "$GITHUB_WORKSPACE"
enclave_name: "ethereum-package-${{ matrix.payload.name }}-${{ github.run_id }}"
persistent_logs: "true"
- name: Check if Discord Webhook is Set
id: check_discord_webhook
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
run: |
if [ -z "$DISCORD_WEBHOOK" ]; then
echo "discord_webhook_set=false" >> $GITHUB_ENV
else
echo "discord_webhook_set=true" >> $GITHUB_ENV
fi
- name: Notify
if: cancelled() || failure()
if: (cancelled() || failure()) && env.discord_webhook_set == 'true'
uses: Ilshidur/action-discord@master
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
with:
args: "The k8s nightly/per PR test for ${{matrix.payload.name}} on ethereum-package has failed find it here ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
args: "The k8s nightly/per PR test for ${{matrix.payload.name}} on ethereum-package has failed. Find it here: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
......@@ -797,9 +797,34 @@ port_publisher:
# Defaults to constants.PRIVATE_IP_ADDRESS_PLACEHOLDER
# The default value just means its the IP address of the container in which the service is running
nat_exit_ip: KURTOSIS_IP_ADDR_PLACEHOLDER
# The start value gets used as a seed for TCP and UDP discovery ports for el/cl client
# Defaults to None - no public ports
public_port_start: null
# Execution Layer public port exposed to your local machine
# Disabled by default
# Public port start defaults to 32000
# You can't run multiple enclaves with the same port settings
el:
enabled: false
public_port_start: 32000
# Consensus Layer public port exposed to your local machine
# Disabled by default
# Public port start defaults to 33000
# You can't run multiple enclaves with the same port settings
cl:
enabled: false
public_port_start: 33000
# Validator client public port exposed to your local machine
# Disabled by default
# Public port start defaults to 34000
# You can't run multiple enclaves with the same port settings
vc:
enabled: false
public_port_start: 34000
# Additional services public port exposed to your local machine
# Disabled by default
# Public port start defaults to 35000
# You can't run multiple enclaves with the same port settings
additional_services:
enabled: false
public_port_start: 35000
```
#### Example configurations
......
......@@ -370,7 +370,9 @@ def run(plan, args={}):
return output
launch_prometheus_grafana = False
for additional_service in args_with_right_defaults.additional_services:
for index, additional_service in enumerate(
args_with_right_defaults.additional_services
):
if additional_service == "tx_spammer":
plan.print("Launching transaction spammer")
tx_spammer_params = args_with_right_defaults.tx_spammer_params
......@@ -421,6 +423,8 @@ def run(plan, args={}):
el_forkmon_config_template,
all_el_contexts,
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched execution layer forkmon")
elif additional_service == "beacon_metrics_gazer":
......@@ -431,6 +435,8 @@ def run(plan, args={}):
all_cl_contexts,
network_params,
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
)
)
launch_prometheus_grafana = True
......@@ -445,6 +451,8 @@ def run(plan, args={}):
all_el_contexts,
persistent,
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched blockscout")
elif additional_service == "dora":
......@@ -462,6 +470,8 @@ def run(plan, args={}):
global_node_selectors,
mev_endpoints,
mev_endpoint_names,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched dora")
elif additional_service == "dugtrio":
......@@ -476,6 +486,8 @@ def run(plan, args={}):
args_with_right_defaults.participants,
network_params,
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched dugtrio")
elif additional_service == "blutgang":
......@@ -490,6 +502,8 @@ def run(plan, args={}):
args_with_right_defaults.participants,
network_params,
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched blutgang")
elif additional_service == "blobscan":
......@@ -501,6 +515,8 @@ def run(plan, args={}):
network_params.network_id,
persistent,
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched blobscan")
elif additional_service == "forky":
......@@ -517,6 +533,8 @@ def run(plan, args={}):
network_params,
global_node_selectors,
final_genesis_timestamp,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched forky")
elif additional_service == "tracoor":
......@@ -533,6 +551,8 @@ def run(plan, args={}):
network_params,
global_node_selectors,
final_genesis_timestamp,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched tracoor")
elif additional_service == "apache":
......@@ -559,6 +579,8 @@ def run(plan, args={}):
all_el_contexts,
persistent,
global_node_selectors,
args_with_right_defaults.port_publisher,
index,
)
plan.print("Successfully launched full-beaconchain-explorer")
elif additional_service == "prometheus_grafana":
......
......@@ -153,5 +153,16 @@ global_tolerations: []
global_node_selectors: {}
keymanager_enabled: false
port_publisher:
public_port_start: null
nat_exit_ip: KURTOSIS_IP_ADDR_PLACEHOLDER
el:
enabled: false
public_port_start: 32000
cl:
enabled: false
public_port_start: 33000
vc:
enabled: false
public_port_start: 34000
additional_services:
enabled: false
public_port_start: 35000
shared_utils = import_module("../shared_utils/shared_utils.star")
prometheus = import_module("../prometheus/prometheus_launcher.star")
constants = import_module("../package_io/constants.star")
SERVICE_NAME = "beacon-metrics-gazer"
IMAGE_NAME = "ethpandaops/beacon-metrics-gazer:master"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 8080
METRICS_PATH = "/metrics"
......@@ -17,7 +16,7 @@ BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE = "/config"
VALIDATOR_RANGES_ARTIFACT_NAME = "validator-ranges"
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -36,10 +35,14 @@ def launch_beacon_metrics_gazer(
cl_contexts,
network_params,
global_node_selectors,
port_publisher,
additional_service_index,
):
config = get_config(
cl_contexts[0].beacon_http_url,
global_node_selectors,
port_publisher,
additional_service_index,
)
beacon_metrics_gazer_service = plan.add_service(SERVICE_NAME, config)
......@@ -56,14 +59,28 @@ def launch_beacon_metrics_gazer(
)
def get_config(beacon_http_url, node_selectors):
def get_config(
beacon_http_url,
node_selectors,
port_publisher,
additional_service_index,
):
config_file_path = shared_utils.path_join(
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
BEACON_METRICS_GAZER_CONFIG_FILENAME,
)
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
return ServiceConfig(
image=IMAGE_NAME,
ports=USED_PORTS,
public_ports=public_ports,
files={
BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE: VALIDATOR_RANGES_ARTIFACT_NAME,
},
......
shared_utils = import_module("../shared_utils/shared_utils.star")
postgres = import_module("github.com/kurtosis-tech/postgres-package/main.star")
constants = import_module("../package_io/constants.star")
WEB_SERVICE_NAME = "blobscan-web"
API_SERVICE_NAME = "blobscan-api"
INDEXER_SERVICE_NAME = "blobscan-indexer"
HTTP_PORT_ID = "http"
WEB_HTTP_PORT_NUMBER = 3000
API_HTTP_PORT_NUMBER = 3001
WEB_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
WEB_HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
)
}
API_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
API_HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -60,6 +59,8 @@ def launch_blobscan(
chain_id,
persistent,
global_node_selectors,
port_publisher,
additional_service_index,
):
node_selectors = global_node_selectors
beacon_node_rpc_uri = "{0}".format(cl_contexts[0].beacon_http_url)
......@@ -82,11 +83,13 @@ def launch_blobscan(
beacon_node_rpc_uri,
chain_id,
node_selectors,
port_publisher,
additional_service_index,
)
blobscan_config = plan.add_service(API_SERVICE_NAME, api_config)
blobscan_api_url = "http://{0}:{1}".format(
blobscan_config.ip_address, blobscan_config.ports[HTTP_PORT_ID].number
blobscan_config.ip_address, blobscan_config.ports[constants.HTTP_PORT_ID].number
)
web_config = get_web_config(
......@@ -94,6 +97,8 @@ def launch_blobscan(
beacon_node_rpc_uri,
chain_id,
node_selectors,
port_publisher,
additional_service_index,
)
plan.add_service(WEB_SERVICE_NAME, web_config)
......@@ -111,12 +116,22 @@ def get_api_config(
beacon_node_rpc,
chain_id,
node_selectors,
port_publisher,
additional_service_index,
):
IMAGE_NAME = "blossomlabs/blobscan:stable"
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
return ServiceConfig(
image=IMAGE_NAME,
ports=API_PORTS,
public_ports=public_ports,
env_vars={
"BEACON_NODE_ENDPOINT": beacon_node_rpc,
"CHAIN_ID": chain_id,
......@@ -144,15 +159,30 @@ def get_api_config(
)
def get_web_config(database_url, beacon_node_rpc, chain_id, node_selectors):
def get_web_config(
database_url,
beacon_node_rpc,
chain_id,
node_selectors,
port_publisher,
additional_service_index,
):
# TODO: https://github.com/kurtosis-tech/kurtosis/issues/1861
# Configure NEXT_PUBLIC_BEACON_BASE_URL and NEXT_PUBLIC_EXPLORER_BASE env vars
# once retrieving external URLs from services are supported in Kurtosis.
IMAGE_NAME = "blossomlabs/blobscan:stable"
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
1,
)
return ServiceConfig(
image=IMAGE_NAME,
ports=WEB_PORTS,
public_ports=public_ports,
env_vars={
"DATABASE_URL": database_url,
"SECRET_KEY": "supersecret",
......
......@@ -7,7 +7,6 @@ IMAGE_NAME_BLOCKSCOUT_VERIF = "ghcr.io/blockscout/smart-contract-verifier:v1.6.0
SERVICE_NAME_BLOCKSCOUT = "blockscout"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 4000
HTTP_PORT_NUMBER_VERIF = 8050
......@@ -22,7 +21,7 @@ BLOCKSCOUT_VERIF_MIN_MEMORY = 10
BLOCKSCOUT_VERIF_MAX_MEMORY = 1024
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -30,7 +29,7 @@ USED_PORTS = {
}
VERIF_USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER_VERIF,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -43,6 +42,8 @@ def launch_blockscout(
el_contexts,
persistent,
global_node_selectors,
port_publisher,
additional_service_index,
):
postgres_output = postgres.run(
plan,
......@@ -59,7 +60,11 @@ def launch_blockscout(
)
el_client_name = el_context.client_name
config_verif = get_config_verif(global_node_selectors)
config_verif = get_config_verif(
global_node_selectors,
port_publisher,
additional_service_index,
)
verif_service_name = "{}-verif".format(SERVICE_NAME_BLOCKSCOUT)
verif_service = plan.add_service(verif_service_name, config_verif)
verif_url = "http://{}:{}/api".format(
......@@ -72,6 +77,8 @@ def launch_blockscout(
verif_url,
el_client_name,
global_node_selectors,
port_publisher,
additional_service_index,
)
blockscout_service = plan.add_service(SERVICE_NAME_BLOCKSCOUT, config_backend)
plan.print(blockscout_service)
......@@ -83,10 +90,18 @@ def launch_blockscout(
return blockscout_url
def get_config_verif(node_selectors):
def get_config_verif(node_selectors, port_publisher, additional_service_index):
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
return ServiceConfig(
image=IMAGE_NAME_BLOCKSCOUT_VERIF,
ports=VERIF_USED_PORTS,
public_ports=public_ports,
env_vars={
"SMART_CONTRACT_VERIFIER__SERVER__HTTP__ADDR": "0.0.0.0:{}".format(
HTTP_PORT_NUMBER_VERIF
......@@ -101,7 +116,13 @@ def get_config_verif(node_selectors):
def get_config_backend(
postgres_output, el_client_rpc_url, verif_url, el_client_name, node_selectors
postgres_output,
el_client_rpc_url,
verif_url,
el_client_name,
node_selectors,
port_publisher,
additional_service_index,
):
database_url = "{protocol}://{user}:{password}@{hostname}:{port}/{database}".format(
protocol="postgresql",
......@@ -112,9 +133,17 @@ def get_config_backend(
database=postgres_output.database,
)
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
1,
)
return ServiceConfig(
image=IMAGE_NAME_BLOCKSCOUT,
ports=USED_PORTS,
public_ports=public_ports,
cmd=[
"/bin/sh",
"-c",
......
......@@ -2,10 +2,7 @@ shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
SERVICE_NAME = "blutgang"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 3000
ADMIN_PORT_ID = "admin"
ADMIN_PORT_NUMBER = 5715
BLUTGANG_CONFIG_FILENAME = "config.toml"
......@@ -22,12 +19,12 @@ MIN_MEMORY = 128
MAX_MEMORY = 2048
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
),
ADMIN_PORT_ID: shared_utils.new_port_spec(
constants.ADMIN_PORT_ID: shared_utils.new_port_spec(
ADMIN_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -42,6 +39,8 @@ def launch_blutgang(
participant_configs,
network_params,
global_node_selectors,
port_publisher,
additional_service_index,
):
all_el_client_info = []
for index, participant in enumerate(participant_contexts):
......@@ -75,6 +74,8 @@ def launch_blutgang(
config_files_artifact_name,
network_params,
global_node_selectors,
port_publisher,
additional_service_index,
)
plan.add_service(SERVICE_NAME, config)
......@@ -84,15 +85,29 @@ def get_config(
config_files_artifact_name,
network_params,
node_selectors,
port_publisher,
additional_service_index,
):
config_file_path = shared_utils.path_join(
BLUTGANG_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
BLUTGANG_CONFIG_FILENAME,
)
public_ports = {}
if port_publisher.additional_services_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"additional_services", port_publisher, additional_service_index
)
public_port_assignments = {
constants.HTTP_PORT_ID: public_ports_for_component[0],
constants.ADMIN_PORT_ID: public_ports_for_component[1],
}
public_ports = shared_utils.get_port_specs(public_port_assignments)
return ServiceConfig(
image=IMAGE_NAME,
ports=USED_PORTS,
public_ports=public_ports,
files={
BLUTGANG_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name,
},
......
......@@ -181,6 +181,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
index,
)
else:
boot_cl_client_ctx = all_cl_contexts
......@@ -217,6 +218,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
index,
)
# Add participant cl additional prometheus labels
......
shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
def get_general_cl_public_port_specs(public_ports_for_component):
    """Build the public port specs common to every CL client.

    Args:
        public_ports_for_component: list of public port numbers allocated to
            this CL node; index 0 is the discovery port (shared by TCP and
            UDP), index 1 the HTTP API port, index 2 the metrics port.

    Returns:
        A tuple of (port-spec dict produced by shared_utils.get_port_specs,
        discovery port number). The discovery port is returned separately so
        callers can advertise it in the node's used-port assignments as well.
    """
    disc_port = public_ports_for_component[0]

    # One discovery port number is published twice: once per transport.
    assignments = {}
    assignments[constants.TCP_DISCOVERY_PORT_ID] = disc_port
    assignments[constants.UDP_DISCOVERY_PORT_ID] = disc_port
    assignments[constants.HTTP_PORT_ID] = public_ports_for_component[1]
    assignments[constants.METRICS_PORT_ID] = public_ports_for_component[2]

    return shared_utils.get_port_specs(assignments), disc_port
shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
cl_context = import_module("../../cl/cl_context.star")
node_metrics = import_module("../../node_metrics_info.star")
cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star")
cl_shared = import_module("../cl_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
vc_shared = import_module("../../vc/shared.star")
# ---------------------------------- Beacon client -------------------------------------
# The Docker container runs as the "grandine" user so we can't write to root
BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/grandine/grandine-beacon-data"
# Port IDs
BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery"
BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery"
BEACON_HTTP_PORT_ID = "http"
BEACON_METRICS_PORT_ID = "metrics"
VALIDATOR_HTTP_PORT_ID = "http-validator"
# Port nums
BEACON_DISCOVERY_PORT_NUM = 9000
BEACON_HTTP_PORT_NUM = 4000
......@@ -29,25 +23,6 @@ BEACON_METRICS_PATH = "/metrics"
MIN_PEERS = 1
def get_used_ports(discovery_port):
beacon_used_ports = {
BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(
BEACON_HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL
),
BEACON_METRICS_PORT_ID: shared_utils.new_port_spec(
BEACON_METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL
),
}
return beacon_used_ports
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
......@@ -92,6 +67,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
beacon_service_name = "{0}".format(service_name)
log_level = input_parser.get_client_log_level_or_default(
......@@ -155,23 +131,24 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
)
beacon_service = plan.add_service(service_name, config)
beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
beacon_http_port = beacon_service.ports[constants.HTTP_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(
beacon_service.ip_address, beacon_http_port.number
)
beacon_metrics_port = beacon_service.ports[BEACON_METRICS_PORT_ID]
beacon_metrics_port = beacon_service.ports[constants.METRICS_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=BEACON_HTTP_PORT_ID,
port_id=constants.HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.p2p_addresses[0]",
......@@ -237,6 +214,7 @@ def get_beacon_config(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -262,20 +240,26 @@ def get_beacon_config(
)
public_ports = {}
validator_public_port_assignment = {}
discovery_port = BEACON_DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.cl_start
if bootnode_contexts and len(bootnode_contexts) > 0:
discovery_port = discovery_port + len(bootnode_contexts)
public_ports = {
BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.cl_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"cl", port_publisher, participant_index
)
validator_public_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[3]
}
used_ports = get_used_ports(discovery_port)
public_ports, discovery_port = cl_shared.get_general_cl_public_port_specs(
public_ports_for_component
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.HTTP_PORT_ID: BEACON_HTTP_PORT_NUM,
constants.METRICS_PORT_ID: BEACON_METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"--network={0}".format(
......@@ -383,8 +367,6 @@ def get_beacon_config(
constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file,
}
ports = {}
ports.update(used_ports)
if node_keystore_files != None and not use_separate_vc:
cmd.extend(validator_default_cmd)
files[
......@@ -393,7 +375,10 @@ def get_beacon_config(
if keymanager_enabled:
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
used_ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
public_ports.update(
shared_utils.get_port_specs(validator_public_port_assignment)
)
if persistent:
files[BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER] = Directory(
......@@ -403,14 +388,14 @@ def get_beacon_config(
return ServiceConfig(
image=image,
ports=ports,
ports=used_ports,
public_ports=public_ports,
cmd=cmd,
env_vars=extra_env_vars,
files=files,
private_ip_address_placeholder=constants.PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(
BEACON_HTTP_PORT_ID
constants.HTTP_PORT_ID
),
min_cpu=cl_min_cpu,
max_cpu=cl_max_cpu,
......
shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
cl_context = import_module("../../cl/cl_context.star")
node_metrics = import_module("../../node_metrics_info.star")
cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star")
cl_shared = import_module("../cl_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
blobber_launcher = import_module("../../blobber/blobber_launcher.star")
......@@ -15,12 +16,6 @@ RUST_FULL_BACKTRACE_KEYWORD = "full"
# ---------------------------------- Beacon client -------------------------------------
BEACON_DATA_DIRPATH_ON_BEACON_SERVICE_CONTAINER = "/data/lighthouse/beacon-data"
# Port IDs
BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery"
BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery"
BEACON_HTTP_PORT_ID = "http"
BEACON_METRICS_PORT_ID = "metrics"
# Port nums
BEACON_DISCOVERY_PORT_NUM = 9000
BEACON_HTTP_PORT_NUM = 4000
......@@ -32,29 +27,6 @@ BEACON_MIN_MEMORY = 256
METRICS_PATH = "/metrics"
def get_used_ports(discovery_port):
beacon_used_ports = {
BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(
BEACON_HTTP_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
),
BEACON_METRICS_PORT_ID: shared_utils.new_port_spec(
BEACON_METRICS_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
),
}
return beacon_used_ports
VERBOSITY_LEVELS = {
constants.GLOBAL_LOG_LEVEL.error: "error",
constants.GLOBAL_LOG_LEVEL.warn: "warn",
......@@ -97,6 +69,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
beacon_service_name = "{0}".format(service_name)
......@@ -156,10 +129,11 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
beacon_http_port = beacon_service.ports[constants.HTTP_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(
beacon_service.ip_address, beacon_http_port.number
)
......@@ -187,7 +161,7 @@ def launch(
# TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=BEACON_HTTP_PORT_ID,
port_id=constants.HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.p2p_addresses[0]",
......@@ -201,7 +175,7 @@ def launch(
beacon_multiaddr = response["extract.multiaddr"]
beacon_peer_id = response["extract.peer_id"]
beacon_metrics_port = beacon_service.ports[BEACON_METRICS_PORT_ID]
beacon_metrics_port = beacon_service.ports[constants.METRICS_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
......@@ -253,6 +227,7 @@ def get_beacon_config(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
......@@ -268,19 +243,21 @@ def get_beacon_config(
public_ports = {}
discovery_port = BEACON_DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.cl_start
if boot_cl_client_ctxs and len(boot_cl_client_ctxs) > 0:
discovery_port = discovery_port + len(boot_cl_client_ctxs)
public_ports = {
BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
}
used_ports = get_used_ports(discovery_port)
if port_publisher.cl_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"cl", port_publisher, participant_index
)
public_ports, discovery_port = cl_shared.get_general_cl_public_port_specs(
public_ports_for_component
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.HTTP_PORT_ID: BEACON_HTTP_PORT_NUM,
constants.METRICS_PORT_ID: BEACON_METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
# NOTE: If connecting to the merge devnet remotely we DON'T want the following flags; when they're not set, the node's external IP address is auto-detected
# from the peers it communicates with but when they're set they basically say "override the autodetection and
......@@ -384,7 +361,7 @@ def get_beacon_config(
cmd.extend([param for param in extra_params])
recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity", port_id=BEACON_HTTP_PORT_ID
endpoint="/eth/v1/node/identity", port_id=constants.HTTP_PORT_ID
)
files = {
constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid,
......@@ -407,7 +384,7 @@ def get_beacon_config(
env_vars=env,
private_ip_address_placeholder=constants.PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(
BEACON_HTTP_PORT_ID
constants.HTTP_PORT_ID
),
min_cpu=cl_min_cpu,
max_cpu=cl_max_cpu,
......
shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
cl_context = import_module("../../cl/cl_context.star")
node_metrics = import_module("../../node_metrics_info.star")
cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star")
cl_shared = import_module("../cl_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
blobber_launcher = import_module("../../blobber/blobber_launcher.star")
constants = import_module("../../package_io/constants.star")
# ---------------------------------- Beacon client -------------------------------------
BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/lodestar/beacon-data"
# Port IDs
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
BEACON_HTTP_PORT_ID = "http"
METRICS_PORT_ID = "metrics"
# Port nums
DISCOVERY_PORT_NUM = 9000
HTTP_PORT_NUM = 4000
METRICS_PORT_NUM = 8008
BEACON_DISCOVERY_PORT_NUM = 9000
BEACON_HTTP_PORT_NUM = 4000
BEACON_METRICS_PORT_NUM = 8008
# The min/max CPU/memory that the beacon node can use
BEACON_MIN_CPU = 50
......@@ -25,25 +21,6 @@ BEACON_MIN_MEMORY = 256
METRICS_PATH = "/metrics"
def get_used_ports(discovery_port):
    """Build the container port spec map for a Lodestar beacon node.

    Args:
        discovery_port: p2p discovery port number, exposed on both TCP and UDP.

    Returns:
        Dict mapping port IDs to port specs for p2p discovery, the beacon
        REST HTTP API, and the metrics endpoint.
    """
    tcp = shared_utils.TCP_PROTOCOL
    return {
        TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(discovery_port, tcp),
        UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, shared_utils.UDP_PROTOCOL
        ),
        BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUM, tcp),
        METRICS_PORT_ID: shared_utils.new_port_spec(METRICS_PORT_NUM, tcp),
    }
VERBOSITY_LEVELS = {
constants.GLOBAL_LOG_LEVEL.error: "error",
constants.GLOBAL_LOG_LEVEL.warn: "warn",
......@@ -86,6 +63,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
beacon_service_name = "{0}".format(service_name)
log_level = input_parser.get_client_log_level_or_default(
......@@ -145,11 +123,12 @@ def launch(
checkpoint_sync_url,
port_publisher,
launcher.preset,
participant_index,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
beacon_http_port = beacon_service.ports[constants.HTTP_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(
beacon_service.ip_address, beacon_http_port.number
......@@ -178,7 +157,7 @@ def launch(
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=BEACON_HTTP_PORT_ID,
port_id=constants.HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.p2p_addresses[-1]",
......@@ -192,7 +171,7 @@ def launch(
beacon_multiaddr = response["extract.multiaddr"]
beacon_peer_id = response["extract.peer_id"]
beacon_metrics_port = beacon_service.ports[METRICS_PORT_ID]
beacon_metrics_port = beacon_service.ports[constants.METRICS_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
......@@ -247,6 +226,7 @@ def get_beacon_config(
checkpoint_sync_url,
port_publisher,
preset,
participant_index,
):
el_client_rpc_url_str = "http://{0}:{1}".format(
el_context.ip_addr,
......@@ -266,20 +246,22 @@ def get_beacon_config(
)
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.cl_start
if bootnode_contexts and len(bootnode_contexts) > 0:
discovery_port = discovery_port + len(bootnode_contexts)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
}
used_ports = get_used_ports(discovery_port)
discovery_port = BEACON_DISCOVERY_PORT_NUM
if port_publisher.cl_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"cl", port_publisher, participant_index
)
public_ports, discovery_port = cl_shared.get_general_cl_public_port_specs(
public_ports_for_component
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.HTTP_PORT_ID: BEACON_HTTP_PORT_NUM,
constants.METRICS_PORT_ID: BEACON_METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"beacon",
......@@ -296,7 +278,7 @@ def get_beacon_config(
"--rest=true",
"--rest.address=0.0.0.0",
"--rest.namespace=*",
"--rest.port={0}".format(HTTP_PORT_NUM),
"--rest.port={0}".format(BEACON_HTTP_PORT_NUM),
"--nat=true",
"--enr.ip=" + port_publisher.nat_exit_ip,
"--enr.tcp={0}".format(discovery_port),
......@@ -307,7 +289,7 @@ def get_beacon_config(
# vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
"--metrics",
"--metrics.address=0.0.0.0",
"--metrics.port={0}".format(METRICS_PORT_NUM),
"--metrics.port={0}".format(BEACON_METRICS_PORT_NUM),
# ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
]
......@@ -395,7 +377,7 @@ def get_beacon_config(
files=files,
private_ip_address_placeholder=constants.PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(
BEACON_HTTP_PORT_ID
constants.HTTP_PORT_ID
),
min_cpu=cl_min_cpu,
max_cpu=cl_max_cpu,
......
......@@ -3,6 +3,7 @@ shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
cl_context = import_module("../../cl/cl_context.star")
cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star")
cl_shared = import_module("../cl_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
vc_shared = import_module("../../vc/shared.star")
......@@ -10,12 +11,6 @@ vc_shared = import_module("../../vc/shared.star")
# Nimbus requires that its data directory already exists (because it expects you to bind-mount it), so we
# have to to create it
BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/nimbus/beacon-data"
# Port IDs
BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery"
BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery"
BEACON_HTTP_PORT_ID = "http"
BEACON_METRICS_PORT_ID = "metrics"
VALIDATOR_HTTP_PORT_ID = "http-validator"
# Port nums
BEACON_DISCOVERY_PORT_NUM = 9000
......@@ -40,30 +35,6 @@ VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS = "/data/nimbus/validator-keys"
# ---------------------------------- Metrics ----------------------------------
# ---------------------------------- Used Ports ----------------------------------
def get_used_ports(discovery_port):
    """Build the container port spec map for a Nimbus beacon node.

    Args:
        discovery_port: p2p discovery port number, exposed on both TCP and UDP.

    Returns:
        Dict mapping port IDs to port specs for p2p discovery, the beacon
        HTTP API, and the metrics endpoint (both declared as HTTP-over-TCP).
    """
    tcp = shared_utils.TCP_PROTOCOL
    http = shared_utils.HTTP_APPLICATION_PROTOCOL
    return {
        BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, tcp
        ),
        BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, shared_utils.UDP_PROTOCOL
        ),
        BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(
            BEACON_HTTP_PORT_NUM, tcp, http
        ),
        BEACON_METRICS_PORT_ID: shared_utils.new_port_spec(
            BEACON_METRICS_PORT_NUM, tcp, http
        ),
    }
VERBOSITY_LEVELS = {
constants.GLOBAL_LOG_LEVEL.error: "ERROR",
constants.GLOBAL_LOG_LEVEL.warn: "WARN",
......@@ -108,6 +79,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
beacon_service_name = "{0}".format(service_name)
......@@ -171,11 +143,12 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
beacon_metrics_port = beacon_service.ports[BEACON_METRICS_PORT_ID]
beacon_http_port = beacon_service.ports[constants.HTTP_PORT_ID]
beacon_metrics_port = beacon_service.ports[constants.METRICS_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(
beacon_service.ip_address, beacon_http_port.number
)
......@@ -185,7 +158,7 @@ def launch(
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=BEACON_HTTP_PORT_ID,
port_id=constants.HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.p2p_addresses[0]",
......@@ -253,6 +226,7 @@ def get_beacon_config(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -279,19 +253,25 @@ def get_beacon_config(
public_ports = {}
discovery_port = BEACON_DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.cl_start
if bootnode_contexts and len(bootnode_contexts) > 0:
discovery_port = discovery_port + len(bootnode_contexts)
public_ports = {
BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
validator_public_port_assignment = {}
if port_publisher.cl_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"cl", port_publisher, participant_index
)
validator_public_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[3]
}
used_ports = get_used_ports(discovery_port)
public_ports, discovery_port = cl_shared.get_general_cl_public_port_specs(
public_ports_for_component
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.HTTP_PORT_ID: BEACON_HTTP_PORT_NUM,
constants.METRICS_PORT_ID: BEACON_METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"--non-interactive=true",
......@@ -366,8 +346,7 @@ def get_beacon_config(
constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid,
constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file,
}
ports = {}
ports.update(used_ports)
if node_keystore_files != None and not use_separate_vc:
cmd.extend(validator_default_cmd)
files[
......@@ -377,7 +356,10 @@ def get_beacon_config(
if keymanager_enabled:
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
used_ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
public_ports.update(
shared_utils.get_port_specs(validator_public_port_assignment)
)
if persistent:
files[BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER] = Directory(
......@@ -394,7 +376,7 @@ def get_beacon_config(
files=files,
private_ip_address_placeholder=constants.PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(
BEACON_HTTP_PORT_ID
constants.HTTP_PORT_ID
),
min_cpu=cl_min_cpu,
max_cpu=cl_max_cpu,
......
shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
cl_context = import_module("../../cl/cl_context.star")
node_metrics = import_module("../../node_metrics_info.star")
cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star")
cl_shared = import_module("../cl_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
# ---------------------------------- Beacon client -------------------------------------
BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/prysm/beacon-data/"
# Port IDs
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
RPC_PORT_ID = "rpc"
BEACON_HTTP_PORT_ID = "http"
BEACON_MONITORING_PORT_ID = "monitoring"
# Port nums
DISCOVERY_TCP_PORT_NUM = 13000
DISCOVERY_UDP_PORT_NUM = 12000
......@@ -28,31 +22,8 @@ BEACON_MIN_MEMORY = 256
METRICS_PATH = "/metrics"
MIN_PEERS = 1
def get_used_ports(discovery_port):
    """Build the container port spec map for a Prysm beacon node.

    Args:
        discovery_port: p2p discovery port number. Note Prysm uses distinct
            TCP/UDP discovery port defaults, but both IDs share the value
            passed in here.

    Returns:
        Dict mapping port IDs to port specs for p2p discovery, the gRPC
        endpoint, the beacon HTTP API, and the monitoring (metrics) endpoint.
    """
    tcp = shared_utils.TCP_PROTOCOL
    return {
        TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(discovery_port, tcp),
        UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, shared_utils.UDP_PROTOCOL
        ),
        RPC_PORT_ID: shared_utils.new_port_spec(RPC_PORT_NUM, tcp),
        BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(HTTP_PORT_NUM, tcp),
        BEACON_MONITORING_PORT_ID: shared_utils.new_port_spec(
            BEACON_MONITORING_PORT_NUM, tcp
        ),
    }
VERBOSITY_LEVELS = {
constants.GLOBAL_LOG_LEVEL.error: "error",
constants.GLOBAL_LOG_LEVEL.warn: "warn",
......@@ -95,6 +66,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
beacon_service_name = "{0}".format(service_name)
log_level = input_parser.get_client_log_level_or_default(
......@@ -153,11 +125,12 @@ def launch(
checkpoint_sync_url,
port_publisher,
launcher.preset,
participant_index,
)
beacon_service = plan.add_service(beacon_service_name, beacon_config)
beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
beacon_http_port = beacon_service.ports[constants.HTTP_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(beacon_service.ip_address, HTTP_PORT_NUM)
beacon_grpc_url = "{0}:{1}".format(beacon_service.ip_address, RPC_PORT_NUM)
......@@ -165,7 +138,7 @@ def launch(
# TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=BEACON_HTTP_PORT_ID,
port_id=constants.HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.p2p_addresses[0]",
......@@ -179,7 +152,7 @@ def launch(
beacon_multiaddr = response["extract.multiaddr"]
beacon_peer_id = response["extract.peer_id"]
beacon_metrics_port = beacon_service.ports[BEACON_MONITORING_PORT_ID]
beacon_metrics_port = beacon_service.ports[constants.METRICS_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
......@@ -234,6 +207,7 @@ def get_beacon_config(
checkpoint_sync_url,
port_publisher,
preset,
participant_index,
):
# If snooper is enabled use the snooper engine context, otherwise use the execution client context
if snooper_enabled:
......@@ -249,19 +223,27 @@ def get_beacon_config(
public_ports = {}
discovery_port = DISCOVERY_TCP_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.cl_start
if bootnode_contexts and len(bootnode_contexts) > 0:
discovery_port = discovery_port + len(bootnode_contexts)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
}
used_ports = get_used_ports(discovery_port)
if port_publisher.cl_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"cl", port_publisher, participant_index
)
public_ports, discovery_port = cl_shared.get_general_cl_public_port_specs(
public_ports_for_component
)
public_ports.update(
shared_utils.get_port_specs(
{constants.RPC_PORT_ID: public_ports_for_component[3]}
)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.HTTP_PORT_ID: HTTP_PORT_NUM,
constants.METRICS_PORT_ID: BEACON_MONITORING_PORT_NUM,
constants.RPC_PORT_ID: RPC_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"--accept-terms-of-use=true", # it's mandatory in order to run the node
......@@ -379,7 +361,7 @@ def get_beacon_config(
files=files,
private_ip_address_placeholder=constants.PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(
BEACON_HTTP_PORT_ID
constants.HTTP_PORT_ID
),
min_cpu=cl_min_cpu,
max_cpu=cl_max_cpu,
......
shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
cl_context = import_module("../../cl/cl_context.star")
node_metrics = import_module("../../node_metrics_info.star")
cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star")
cl_shared = import_module("../cl_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
vc_shared = import_module("../../vc/shared.star")
# ---------------------------------- Beacon client -------------------------------------
TEKU_BINARY_FILEPATH_IN_IMAGE = "/opt/teku/bin/teku"
# The Docker container runs as the "teku" user so we can't write to root
BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/teku/teku-beacon-data"
# Port IDs
BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery"
BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery"
BEACON_HTTP_PORT_ID = "http"
BEACON_METRICS_PORT_ID = "metrics"
VALIDATOR_HTTP_PORT_ID = "http-validator"
# Port nums
BEACON_DISCOVERY_PORT_NUM = 9000
BEACON_HTTP_PORT_NUM = 4000
......@@ -31,27 +26,6 @@ BEACON_METRICS_PATH = "/metrics"
MIN_PEERS = 1
def get_used_ports(discovery_port):
    """Build the container port spec map for a Teku beacon node.

    Args:
        discovery_port: p2p discovery port number, exposed on both TCP and UDP.

    Returns:
        Dict mapping port IDs to port specs for p2p discovery, the beacon
        HTTP API (declared as HTTP-over-TCP), and the metrics endpoint.
    """
    tcp = shared_utils.TCP_PROTOCOL
    return {
        BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, tcp
        ),
        BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, shared_utils.UDP_PROTOCOL
        ),
        BEACON_HTTP_PORT_ID: shared_utils.new_port_spec(
            BEACON_HTTP_PORT_NUM, tcp, shared_utils.HTTP_APPLICATION_PROTOCOL
        ),
        BEACON_METRICS_PORT_ID: shared_utils.new_port_spec(
            BEACON_METRICS_PORT_NUM, tcp
        ),
    }
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
......@@ -96,6 +70,7 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
beacon_service_name = "{0}".format(service_name)
log_level = input_parser.get_client_log_level_or_default(
......@@ -160,23 +135,24 @@ def launch(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
)
beacon_service = plan.add_service(service_name, config)
beacon_http_port = beacon_service.ports[BEACON_HTTP_PORT_ID]
beacon_http_port = beacon_service.ports[constants.HTTP_PORT_ID]
beacon_http_url = "http://{0}:{1}".format(
beacon_service.ip_address, beacon_http_port.number
)
beacon_metrics_port = beacon_service.ports[BEACON_METRICS_PORT_ID]
beacon_metrics_port = beacon_service.ports[constants.METRICS_PORT_ID]
beacon_metrics_url = "{0}:{1}".format(
beacon_service.ip_address, beacon_metrics_port.number
)
beacon_node_identity_recipe = GetHttpRequestRecipe(
endpoint="/eth/v1/node/identity",
port_id=BEACON_HTTP_PORT_ID,
port_id=constants.HTTP_PORT_ID,
extract={
"enr": ".data.enr",
"multiaddr": ".data.p2p_addresses[0]",
......@@ -244,6 +220,7 @@ def get_beacon_config(
checkpoint_sync_enabled,
checkpoint_sync_url,
port_publisher,
participant_index,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -270,19 +247,25 @@ def get_beacon_config(
public_ports = {}
discovery_port = BEACON_DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.cl_start
if bootnode_contexts and len(bootnode_contexts) > 0:
discovery_port = discovery_port + len(bootnode_contexts)
public_ports = {
BEACON_TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
BEACON_UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
validator_public_port_assignment = {}
if port_publisher.cl_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"cl", port_publisher, participant_index
)
validator_public_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[3]
}
used_ports = get_used_ports(discovery_port)
public_ports, discovery_port = cl_shared.get_general_cl_public_port_specs(
public_ports_for_component
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.HTTP_PORT_ID: BEACON_HTTP_PORT_NUM,
constants.METRICS_PORT_ID: BEACON_METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"--logging=" + log_level,
......@@ -407,8 +390,7 @@ def get_beacon_config(
constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid,
constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file,
}
ports = {}
ports.update(used_ports)
if node_keystore_files != None and not use_separate_vc:
cmd.extend(validator_default_cmd)
files[
......@@ -418,7 +400,10 @@ def get_beacon_config(
if keymanager_enabled:
files[constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS] = keymanager_file
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
used_ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
public_ports.update(
shared_utils.get_port_specs(validator_public_port_assignment)
)
if persistent:
files[BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER] = Directory(
......@@ -428,14 +413,14 @@ def get_beacon_config(
return ServiceConfig(
image=image,
ports=ports,
ports=used_ports,
public_ports=public_ports,
cmd=cmd,
env_vars=extra_env_vars,
files=files,
private_ip_address_placeholder=constants.PRIVATE_IP_ADDRESS_PLACEHOLDER,
ready_conditions=cl_node_ready_conditions.get_ready_conditions(
BEACON_HTTP_PORT_ID
constants.HTTP_PORT_ID
),
min_cpu=cl_min_cpu,
max_cpu=cl_max_cpu,
......
......@@ -2,7 +2,6 @@ shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
SERVICE_NAME = "dora"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 8080
DORA_CONFIG_FILENAME = "dora-config.yaml"
......@@ -19,7 +18,7 @@ MIN_MEMORY = 128
MAX_MEMORY = 2048
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -38,6 +37,8 @@ def launch_dora(
global_node_selectors,
mev_endpoints,
mev_endpoint_names,
port_publisher,
additional_service_index,
):
all_cl_client_info = []
all_el_client_info = []
......@@ -95,6 +96,8 @@ def launch_dora(
network_params,
dora_params,
global_node_selectors,
port_publisher,
additional_service_index,
)
plan.add_service(SERVICE_NAME, config)
......@@ -106,12 +109,21 @@ def get_config(
network_params,
dora_params,
node_selectors,
port_publisher,
additional_service_index,
):
config_file_path = shared_utils.path_join(
DORA_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
DORA_CONFIG_FILENAME,
)
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
if dora_params.image != "":
IMAGE_NAME = dora_params.image
elif network_params.eip7594_fork_epoch < 100000000:
......@@ -124,6 +136,7 @@ def get_config(
return ServiceConfig(
image=IMAGE_NAME,
ports=USED_PORTS,
public_ports=public_ports,
files={
DORA_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name,
VALIDATOR_RANGES_MOUNT_DIRPATH_ON_SERVICE: VALIDATOR_RANGES_ARTIFACT_NAME,
......
......@@ -2,7 +2,6 @@ shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
SERVICE_NAME = "dugtrio"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 8080
DUGTRIO_CONFIG_FILENAME = "dugtrio-config.yaml"
......@@ -18,7 +17,7 @@ MIN_MEMORY = 128
MAX_MEMORY = 2048
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -33,6 +32,8 @@ def launch_dugtrio(
participant_configs,
network_params,
global_node_selectors,
port_publisher,
additional_service_index,
):
all_cl_client_info = []
for index, participant in enumerate(participant_contexts):
......@@ -63,6 +64,8 @@ def launch_dugtrio(
config_files_artifact_name,
network_params,
global_node_selectors,
port_publisher,
additional_service_index,
)
plan.add_service(SERVICE_NAME, config)
......@@ -72,15 +75,25 @@ def get_config(
config_files_artifact_name,
network_params,
node_selectors,
port_publisher,
additional_service_index,
):
config_file_path = shared_utils.path_join(
DUGTRIO_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
DUGTRIO_CONFIG_FILENAME,
)
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
return ServiceConfig(
image=IMAGE_NAME,
ports=USED_PORTS,
public_ports=public_ports,
files={
DUGTRIO_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name,
},
......
......@@ -2,8 +2,10 @@ shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
el_context = import_module("../../el/el_context.star")
el_admin_node_info = import_module("../../el/el_admin_node_info.star")
el_shared = import_module("../el_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/data/besu/execution-data"
......@@ -21,40 +23,8 @@ EXECUTION_MAX_CPU = 1000
EXECUTION_MIN_MEMORY = 512
EXECUTION_MAX_MEMORY = 2048
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_HTTP_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
JAVA_OPTS = {"JAVA_OPTS": "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n"}
def get_used_ports(discovery_port=DISCOVERY_PORT_NUM):
    """Build the container port spec map for a Besu execution client.

    Args:
        discovery_port: devp2p discovery port number, exposed on both TCP
            and UDP. Defaults to the module-level DISCOVERY_PORT_NUM.

    Returns:
        Dict mapping port IDs to port specs for the JSON-RPC endpoint
        (declared as HTTP-over-TCP), websocket RPC, p2p discovery, the
        Engine API, and the metrics endpoint.
    """
    tcp = shared_utils.TCP_PROTOCOL
    return {
        RPC_PORT_ID: shared_utils.new_port_spec(
            RPC_PORT_NUM, tcp, shared_utils.HTTP_APPLICATION_PROTOCOL
        ),
        WS_PORT_ID: shared_utils.new_port_spec(WS_PORT_NUM, tcp),
        TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(discovery_port, tcp),
        UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, shared_utils.UDP_PROTOCOL
        ),
        ENGINE_HTTP_RPC_PORT_ID: shared_utils.new_port_spec(
            ENGINE_HTTP_RPC_PORT_NUM, tcp
        ),
        METRICS_PORT_ID: shared_utils.new_port_spec(METRICS_PORT_NUM, tcp),
    }
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
......@@ -86,6 +56,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -136,11 +107,14 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
enode = el_admin_node_info.get_enode_for_node(
plan, service_name, constants.RPC_PORT_ID
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
besu_metrics_info = node_metrics.new_node_metrics_info(
......@@ -186,20 +160,35 @@ def get_config(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.el_start + len(existing_el_clients)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.el_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"el", port_publisher, participant_index
)
public_ports, discovery_port = el_shared.get_general_el_public_port_specs(
public_ports_for_component
)
additional_public_port_assignments = {
constants.RPC_PORT_ID: public_ports_for_component[2],
constants.WS_PORT_ID: public_ports_for_component[3],
constants.METRICS_PORT_ID: public_ports_for_component[4],
}
used_ports = get_used_ports(discovery_port)
public_ports.update(
shared_utils.get_port_specs(additional_public_port_assignments)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.ENGINE_RPC_PORT_ID: ENGINE_HTTP_RPC_PORT_NUM,
constants.RPC_PORT_ID: RPC_PORT_NUM,
constants.WS_PORT_ID: WS_PORT_NUM,
constants.METRICS_PORT_ID: METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"besu",
......
......@@ -156,6 +156,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
index,
)
# Add participant el additional prometheus metrics
for metrics_info in el_context.el_metrics_info:
......
shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
def get_general_el_public_port_specs(public_ports_for_component):
    """Build the public port specs common to every execution client.

    Args:
        public_ports_for_component: list of statically assigned public port
            numbers; index 0 is the discovery port (TCP and UDP), index 1 is
            the Engine API RPC port.

    Returns:
        Tuple of (public port spec dict, discovery port number) so callers
        can reuse the discovery port for client command-line flags.
    """
    discovery_port = public_ports_for_component[0]
    engine_rpc_port = public_ports_for_component[1]
    public_ports = shared_utils.get_port_specs(
        {
            constants.TCP_DISCOVERY_PORT_ID: discovery_port,
            constants.UDP_DISCOVERY_PORT_ID: discovery_port,
            constants.ENGINE_RPC_PORT_ID: engine_rpc_port,
        }
    )
    return public_ports, discovery_port
......@@ -2,7 +2,7 @@ shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
el_admin_node_info = import_module("../../el/el_admin_node_info.star")
el_context = import_module("../../el/el_context.star")
el_shared = import_module("../el_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
......@@ -20,37 +20,6 @@ METRICS_PORT_NUM = 9001
EXECUTION_MIN_CPU = 100
EXECUTION_MIN_MEMORY = 512
# Port IDs
WS_RPC_PORT_ID = "ws-rpc"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
def get_used_ports(discovery_port=DISCOVERY_PORT_NUM):
    """Build the container port spec map for an Erigon execution client.

    Args:
        discovery_port: devp2p discovery port number, exposed on both TCP
            and UDP. Defaults to the module-level DISCOVERY_PORT_NUM.

    Returns:
        Dict mapping port IDs to port specs for the websocket RPC endpoint
        (declared as HTTP-over-TCP), p2p discovery, the Engine API, and the
        metrics endpoint.
    """
    tcp = shared_utils.TCP_PROTOCOL
    return {
        WS_RPC_PORT_ID: shared_utils.new_port_spec(
            WS_RPC_PORT_NUM, tcp, shared_utils.HTTP_APPLICATION_PROTOCOL
        ),
        TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(discovery_port, tcp),
        UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
            discovery_port, shared_utils.UDP_PROTOCOL
        ),
        ENGINE_RPC_PORT_ID: shared_utils.new_port_spec(ENGINE_RPC_PORT_NUM, tcp),
        METRICS_PORT_ID: shared_utils.new_port_spec(METRICS_PORT_NUM, tcp),
    }
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
......@@ -82,6 +51,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -134,12 +104,13 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
)
service = plan.add_service(service_name, config)
enode, enr = el_admin_node_info.get_enode_enr_for_node(
plan, service_name, WS_RPC_PORT_ID
plan, service_name, constants.WS_RPC_PORT_ID
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
......@@ -189,6 +160,7 @@ def get_config(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
init_datadir_cmd_str = "erigon init --datadir={0} {1}".format(
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
......@@ -197,17 +169,29 @@ def get_config(
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.el_start + len(existing_el_clients)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.el_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"el", port_publisher, participant_index
)
public_ports, discovery_port = el_shared.get_general_el_public_port_specs(
public_ports_for_component
)
additional_public_port_assignments = {
constants.WS_RPC_PORT_ID: public_ports_for_component[2],
constants.METRICS_PORT_ID: public_ports_for_component[3],
}
used_ports = get_used_ports(discovery_port)
public_ports.update(
shared_utils.get_port_specs(additional_public_port_assignments)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.ENGINE_RPC_PORT_ID: ENGINE_RPC_PORT_NUM,
constants.WS_RPC_PORT_ID: WS_RPC_PORT_NUM,
constants.METRICS_PORT_ID: METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"erigon",
......
......@@ -2,7 +2,7 @@ shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../..//package_io/input_parser.star")
el_context = import_module("../../el/el_context.star")
el_admin_node_info = import_module("../../el/el_admin_node_info.star")
el_shared = import_module("../el_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
......@@ -17,45 +17,11 @@ METRICS_PORT_NUM = 9001
EXECUTION_MIN_CPU = 100
EXECUTION_MIN_MEMORY = 256
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
WS_PORT_ENGINE_ID = "ws-engine"
METRICS_PORT_ID = "metrics"
METRICS_PATH = "/metrics"
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/data/ethereumjs/execution-data"
def get_used_ports(discovery_port=DISCOVERY_PORT_NUM):
    """Return the port-id -> PortSpec mapping for the ethereumjs container.

    discovery_port can be overridden so the container's discovery port
    matches an externally published public port.
    """
    tcp = shared_utils.TCP_PROTOCOL
    ports = {}
    ports[RPC_PORT_ID] = shared_utils.new_port_spec(
        RPC_PORT_NUM,
        tcp,
        shared_utils.HTTP_APPLICATION_PROTOCOL,
    )
    ports[WS_PORT_ID] = shared_utils.new_port_spec(WS_PORT_NUM, tcp)
    ports[WS_PORT_ENGINE_ID] = shared_utils.new_port_spec(WS_PORT_ENGINE_NUM, tcp)
    # Discovery uses the same port number over both transports.
    ports[TCP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(discovery_port, tcp)
    ports[UDP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(
        discovery_port, shared_utils.UDP_PROTOCOL
    )
    ports[ENGINE_RPC_PORT_ID] = shared_utils.new_port_spec(ENGINE_RPC_PORT_NUM, tcp)
    # Metrics port left unexposed, as in the original mapping:
    # METRICS_PORT_ID: shared_utils.new_port_spec(METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL)
    return ports
ENTRYPOINT_ARGS = []
VERBOSITY_LEVELS = {
......@@ -87,6 +53,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -137,11 +104,14 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
enode = el_admin_node_info.get_enode_for_node(
plan, service_name, constants.RPC_PORT_ID
)
# TODO: Passing empty string for metrics_url for now https://github.com/ethpandaops/ethereum-package/issues/127
# metrics_url = "http://{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
......@@ -187,20 +157,35 @@ def get_config(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.el_start + len(existing_el_clients)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.el_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"el", port_publisher, participant_index
)
public_ports, discovery_port = el_shared.get_general_el_public_port_specs(
public_ports_for_component
)
additional_public_port_assignments = {
constants.RPC_PORT_ID: public_ports_for_component[2],
constants.WS_PORT_ID: public_ports_for_component[3],
constants.ENGINE_WS_PORT_ID: public_ports_for_component[4],
}
used_ports = get_used_ports(discovery_port)
public_ports.update(
shared_utils.get_port_specs(additional_public_port_assignments)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.ENGINE_RPC_PORT_ID: ENGINE_RPC_PORT_NUM,
constants.RPC_PORT_ID: RPC_PORT_NUM,
constants.WS_PORT_ID: WS_PORT_NUM,
constants.ENGINE_WS_PORT_ID: WS_PORT_ENGINE_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"--dataDir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER,
......
......@@ -5,7 +5,7 @@ el_admin_node_info = import_module("../../el/el_admin_node_info.star")
genesis_constants = import_module(
"../../prelaunch_data_generator/genesis_constants/genesis_constants.star"
)
el_shared = import_module("../el_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
......@@ -19,15 +19,6 @@ METRICS_PORT_NUM = 9001
EXECUTION_MIN_CPU = 300
EXECUTION_MIN_MEMORY = 512
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
ENGINE_WS_PORT_ID = "engineWs"
METRICS_PORT_ID = "metrics"
# TODO(old) Scale this dynamically based on CPUs available and Geth nodes mining
NUM_MINING_THREADS = 1
......@@ -36,32 +27,6 @@ METRICS_PATH = "/debug/metrics/prometheus"
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/data/geth/execution-data"
def get_used_ports(discovery_port=DISCOVERY_PORT_NUM):
    """Return the port-id -> PortSpec mapping for the geth container.

    discovery_port can be overridden so the container's discovery port
    matches an externally published public port.
    """
    tcp = shared_utils.TCP_PROTOCOL
    port_specs = {}
    port_specs[RPC_PORT_ID] = shared_utils.new_port_spec(
        RPC_PORT_NUM,
        tcp,
        shared_utils.HTTP_APPLICATION_PROTOCOL,
    )
    port_specs[WS_PORT_ID] = shared_utils.new_port_spec(WS_PORT_NUM, tcp)
    # Discovery listens on the same port number over both transports.
    port_specs[TCP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(
        discovery_port, tcp
    )
    port_specs[UDP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(
        discovery_port, shared_utils.UDP_PROTOCOL
    )
    port_specs[ENGINE_RPC_PORT_ID] = shared_utils.new_port_spec(
        ENGINE_RPC_PORT_NUM, tcp
    )
    port_specs[METRICS_PORT_ID] = shared_utils.new_port_spec(METRICS_PORT_NUM, tcp)
    return port_specs
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
......@@ -97,6 +62,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -149,12 +115,13 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
)
service = plan.add_service(service_name, config)
enode, enr = el_admin_node_info.get_enode_enr_for_node(
plan, service_name, RPC_PORT_ID
plan, service_name, constants.RPC_PORT_ID
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
......@@ -204,6 +171,7 @@ def get_config(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
if "--gcmode=archive" in extra_params or "--gcmode archive" in extra_params:
gcmode_archive = True
......@@ -242,17 +210,31 @@ def get_config(
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.el_start + len(existing_el_clients)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.el_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"el", port_publisher, participant_index
)
public_ports, discovery_port = el_shared.get_general_el_public_port_specs(
public_ports_for_component
)
additional_public_port_assignments = {
constants.RPC_PORT_ID: public_ports_for_component[2],
constants.WS_PORT_ID: public_ports_for_component[3],
constants.METRICS_PORT_ID: public_ports_for_component[4],
}
used_ports = get_used_ports(discovery_port)
public_ports.update(
shared_utils.get_port_specs(additional_public_port_assignments)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.ENGINE_RPC_PORT_ID: ENGINE_RPC_PORT_NUM,
constants.RPC_PORT_ID: RPC_PORT_NUM,
constants.WS_PORT_ID: WS_PORT_NUM,
constants.METRICS_PORT_ID: METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"geth",
......
......@@ -2,7 +2,7 @@ shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
el_context = import_module("../../el/el_context.star")
el_admin_node_info = import_module("../../el/el_admin_node_info.star")
el_shared = import_module("../el_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
......@@ -21,39 +21,6 @@ METRICS_PORT_NUM = 9001
EXECUTION_MIN_CPU = 100
EXECUTION_MIN_MEMORY = 512
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
def get_used_ports(discovery_port=DISCOVERY_PORT_NUM):
    """Return the port-id -> PortSpec mapping for the nethermind container.

    discovery_port can be overridden so the container's discovery port
    matches an externally published public port.
    """
    tcp = shared_utils.TCP_PROTOCOL
    udp = shared_utils.UDP_PROTOCOL
    used = {}
    # JSON-RPC is the only port tagged with an HTTP application protocol.
    used[RPC_PORT_ID] = shared_utils.new_port_spec(
        RPC_PORT_NUM,
        tcp,
        shared_utils.HTTP_APPLICATION_PROTOCOL,
    )
    used[WS_PORT_ID] = shared_utils.new_port_spec(WS_PORT_NUM, tcp)
    used[TCP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(discovery_port, tcp)
    used[UDP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(discovery_port, udp)
    used[ENGINE_RPC_PORT_ID] = shared_utils.new_port_spec(ENGINE_RPC_PORT_NUM, tcp)
    used[METRICS_PORT_ID] = shared_utils.new_port_spec(METRICS_PORT_NUM, tcp)
    return used
VERBOSITY_LEVELS = {
constants.GLOBAL_LOG_LEVEL.error: "ERROR",
constants.GLOBAL_LOG_LEVEL.warn: "WARN",
......@@ -83,6 +50,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -133,11 +101,14 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
enode = el_admin_node_info.get_enode_for_node(
plan, service_name, constants.RPC_PORT_ID
)
metrics_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
nethermind_metrics_info = node_metrics.new_node_metrics_info(
......@@ -185,20 +156,35 @@ def get_config(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.el_start + len(existing_el_clients)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.el_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"el", port_publisher, participant_index
)
public_ports, discovery_port = el_shared.get_general_el_public_port_specs(
public_ports_for_component
)
additional_public_port_assignments = {
constants.RPC_PORT_ID: public_ports_for_component[2],
constants.WS_PORT_ID: public_ports_for_component[3],
constants.METRICS_PORT_ID: public_ports_for_component[4],
}
used_ports = get_used_ports(discovery_port)
public_ports.update(
shared_utils.get_port_specs(additional_public_port_assignments)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.ENGINE_RPC_PORT_ID: ENGINE_RPC_PORT_NUM,
constants.RPC_PORT_ID: RPC_PORT_NUM,
constants.WS_PORT_ID: WS_PORT_NUM,
constants.METRICS_PORT_ID: METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"--log=" + log_level,
......
......@@ -2,6 +2,7 @@ shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
el_context = import_module("../../el/el_context.star")
el_admin_node_info = import_module("../../el/el_admin_node_info.star")
el_shared = import_module("../el_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
......@@ -14,47 +15,12 @@ METRICS_PORT_NUM = 9001
EXECUTION_MIN_CPU = 100
EXECUTION_MIN_MEMORY = 256
# Port IDs
WS_RPC_PORT_ID = "ws-rpc"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
# Paths
METRICS_PATH = "/metrics"
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/data/nimbus/execution-data"
def get_used_ports(discovery_port=DISCOVERY_PORT_NUM):
    """Return the port-id -> PortSpec mapping for the nimbus execution container.

    discovery_port can be overridden so the container's discovery port
    matches an externally published public port.
    """
    tcp = shared_utils.TCP_PROTOCOL
    udp = shared_utils.UDP_PROTOCOL
    specs = {}
    # The WS-RPC endpoint carries the HTTP application protocol tag.
    specs[WS_RPC_PORT_ID] = shared_utils.new_port_spec(
        WS_RPC_PORT_NUM,
        tcp,
        shared_utils.HTTP_APPLICATION_PROTOCOL,
    )
    specs[TCP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(discovery_port, tcp)
    specs[UDP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(discovery_port, udp)
    specs[ENGINE_RPC_PORT_ID] = shared_utils.new_port_spec(ENGINE_RPC_PORT_NUM, tcp)
    specs[METRICS_PORT_ID] = shared_utils.new_port_spec(METRICS_PORT_NUM, tcp)
    return specs
VERBOSITY_LEVELS = {
constants.GLOBAL_LOG_LEVEL.error: "ERROR",
constants.GLOBAL_LOG_LEVEL.warn: "WARN",
......@@ -85,6 +51,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -135,11 +102,14 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, WS_RPC_PORT_ID)
enode = el_admin_node_info.get_enode_for_node(
plan, service_name, constants.WS_RPC_PORT_ID
)
metric_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
nimbus_metrics_info = node_metrics.new_node_metrics_info(
......@@ -186,20 +156,33 @@ def get_config(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.el_start + len(existing_el_clients)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.el_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"el", port_publisher, participant_index
)
public_ports, discovery_port = el_shared.get_general_el_public_port_specs(
public_ports_for_component
)
additional_public_port_assignments = {
constants.WS_RPC_PORT_ID: public_ports_for_component[2],
constants.METRICS_PORT_ID: public_ports_for_component[3],
}
used_ports = get_used_ports(discovery_port)
public_ports.update(
shared_utils.get_port_specs(additional_public_port_assignments)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.ENGINE_RPC_PORT_ID: ENGINE_RPC_PORT_NUM,
constants.WS_RPC_PORT_ID: WS_RPC_PORT_NUM,
constants.METRICS_PORT_ID: METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"--log-level={0}".format(verbosity_level),
......
......@@ -2,6 +2,7 @@ shared_utils = import_module("../../shared_utils/shared_utils.star")
input_parser = import_module("../../package_io/input_parser.star")
el_context = import_module("../el_context.star")
el_admin_node_info = import_module("../el_admin_node_info.star")
el_shared = import_module("../el_shared.star")
node_metrics = import_module("../../node_metrics_info.star")
constants = import_module("../../package_io/constants.star")
mev_rs_builder = import_module("../../mev/mev-rs/mev_builder/mev_builder_launcher.star")
......@@ -16,45 +17,12 @@ METRICS_PORT_NUM = 9001
EXECUTION_MIN_CPU = 100
EXECUTION_MIN_MEMORY = 256
# Port IDs
RPC_PORT_ID = "rpc"
WS_PORT_ID = "ws"
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
ENGINE_RPC_PORT_ID = "engine-rpc"
METRICS_PORT_ID = "metrics"
# Paths
METRICS_PATH = "/metrics"
# The dirpath of the execution data directory on the client container
EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/data/reth/execution-data"
def get_used_ports(discovery_port=DISCOVERY_PORT_NUM):
    """Return the port-id -> PortSpec mapping for the reth container.

    discovery_port can be overridden so the container's discovery port
    matches an externally published public port.
    """
    tcp = shared_utils.TCP_PROTOCOL
    reth_ports = {}
    reth_ports[RPC_PORT_ID] = shared_utils.new_port_spec(
        RPC_PORT_NUM,
        tcp,
        shared_utils.HTTP_APPLICATION_PROTOCOL,
    )
    reth_ports[WS_PORT_ID] = shared_utils.new_port_spec(WS_PORT_NUM, tcp)
    # Discovery shares one port number between TCP and UDP.
    reth_ports[TCP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(
        discovery_port, tcp
    )
    reth_ports[UDP_DISCOVERY_PORT_ID] = shared_utils.new_port_spec(
        discovery_port, shared_utils.UDP_PROTOCOL
    )
    reth_ports[ENGINE_RPC_PORT_ID] = shared_utils.new_port_spec(
        ENGINE_RPC_PORT_NUM, tcp
    )
    reth_ports[METRICS_PORT_ID] = shared_utils.new_port_spec(METRICS_PORT_NUM, tcp)
    return reth_ports
ENTRYPOINT_ARGS = ["sh", "-c"]
VERBOSITY_LEVELS = {
......@@ -87,6 +55,7 @@ def launch(
tolerations,
node_selectors,
port_publisher,
participant_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -138,11 +107,14 @@ def launch(
node_selectors,
launcher.builder,
port_publisher,
participant_index,
)
service = plan.add_service(service_name, config)
enode = el_admin_node_info.get_enode_for_node(plan, service_name, RPC_PORT_ID)
enode = el_admin_node_info.get_enode_for_node(
plan, service_name, constants.RPC_PORT_ID
)
metric_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM)
reth_metrics_info = node_metrics.new_node_metrics_info(
......@@ -190,20 +162,35 @@ def get_config(
node_selectors,
builder,
port_publisher,
participant_index,
):
public_ports = {}
discovery_port = DISCOVERY_PORT_NUM
if port_publisher.public_port_start:
discovery_port = port_publisher.el_start + len(existing_el_clients)
public_ports = {
TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.TCP_PROTOCOL
),
UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec(
discovery_port, shared_utils.UDP_PROTOCOL
),
if port_publisher.el_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"el", port_publisher, participant_index
)
public_ports, discovery_port = el_shared.get_general_el_public_port_specs(
public_ports_for_component
)
additional_public_port_assignments = {
constants.RPC_PORT_ID: public_ports_for_component[2],
constants.WS_PORT_ID: public_ports_for_component[3],
constants.METRICS_PORT_ID: public_ports_for_component[4],
}
used_ports = get_used_ports(discovery_port)
public_ports.update(
shared_utils.get_port_specs(additional_public_port_assignments)
)
used_port_assignments = {
constants.TCP_DISCOVERY_PORT_ID: discovery_port,
constants.UDP_DISCOVERY_PORT_ID: discovery_port,
constants.ENGINE_RPC_PORT_ID: ENGINE_RPC_PORT_NUM,
constants.RPC_PORT_ID: RPC_PORT_NUM,
constants.WS_PORT_ID: WS_PORT_NUM,
constants.METRICS_PORT_ID: METRICS_PORT_NUM,
}
used_ports = shared_utils.get_port_specs(used_port_assignments)
cmd = [
"/usr/local/bin/mev build" if builder else "reth",
......
shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
SERVICE_NAME = "el-forkmon"
IMAGE_NAME = "ethpandaops/execution-monitor:master"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 8080
EL_FORKMON_CONFIG_FILENAME = "el-forkmon-config.toml"
......@@ -12,7 +11,7 @@ EL_FORKMON_CONFIG_FILENAME = "el-forkmon-config.toml"
EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE = "/config"
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -31,6 +30,8 @@ def launch_el_forkmon(
config_template,
el_contexts,
global_node_selectors,
port_publisher,
additional_service_index,
):
all_el_client_info = []
for client in el_contexts:
......@@ -56,18 +57,34 @@ def launch_el_forkmon(
config = get_config(
config_files_artifact_name,
global_node_selectors,
port_publisher,
additional_service_index,
)
plan.add_service(SERVICE_NAME, config)
def get_config(config_files_artifact_name, node_selectors):
def get_config(
config_files_artifact_name,
node_selectors,
port_publisher,
additional_service_index,
):
config_file_path = shared_utils.path_join(
EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, EL_FORKMON_CONFIG_FILENAME
)
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
return ServiceConfig(
image=IMAGE_NAME,
ports=USED_PORTS,
public_ports=public_ports,
files={
EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name,
},
......
shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
SERVICE_NAME = "forky"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 8080
FORKY_CONFIG_FILENAME = "forky-config.yaml"
......@@ -19,7 +19,7 @@ MIN_MEMORY = 128
MAX_MEMORY = 2048
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -36,6 +36,8 @@ def launch_forky(
network_params,
global_node_selectors,
final_genesis_timestamp,
port_publisher,
additional_service_index,
):
all_cl_client_info = []
all_el_client_info = []
......@@ -84,6 +86,8 @@ def launch_forky(
el_cl_data_files_artifact_uuid,
network_params,
global_node_selectors,
port_publisher,
additional_service_index,
)
plan.add_service(SERVICE_NAME, config)
......@@ -94,6 +98,8 @@ def get_config(
el_cl_data_files_artifact_uuid,
network_params,
node_selectors,
port_publisher,
additional_service_index,
):
config_file_path = shared_utils.path_join(
FORKY_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
......@@ -102,9 +108,17 @@ def get_config(
IMAGE_NAME = "ethpandaops/forky:latest"
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
return ServiceConfig(
image=IMAGE_NAME,
ports=USED_PORTS,
public_ports=public_ports,
files={
FORKY_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name,
VALIDATOR_RANGES_MOUNT_DIRPATH_ON_SERVICE: VALIDATOR_RANGES_ARTIFACT_NAME,
......
......@@ -13,22 +13,11 @@ POSTGRES_PASSWORD = "pass"
REDIS_PORT_ID = "redis"
REDIS_PORT_NUMBER = 6379
FRONTEND_PORT_ID = "http"
FRONTEND_PORT_NUMBER = 8080
LITTLE_BIGTABLE_PORT_ID = "littlebigtable"
LITTLE_BIGTABLE_PORT_NUMBER = 9000
FULL_BEACONCHAIN_CONFIG_FILENAME = "beaconchain-config.yml"
USED_PORTS = {
FRONTEND_PORT_ID: shared_utils.new_port_spec(
FRONTEND_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
)
}
# The min/max CPU/memory that postgres can use
POSTGRES_MIN_CPU = 10
POSTGRES_MAX_CPU = 1000
......@@ -98,6 +87,8 @@ def launch_full_beacon(
el_contexts,
persistent,
global_node_selectors,
port_publisher,
additional_service_index,
):
node_selectors = global_node_selectors
postgres_output = postgres.run(
......@@ -127,18 +118,8 @@ def launch_full_beacon(
# TODO perhaps create a new service for the littlebigtable
little_bigtable = plan.add_service(
name="beaconchain-littlebigtable",
config=ServiceConfig(
image="gobitfly/little_bigtable:latest",
ports={
LITTLE_BIGTABLE_PORT_ID: PortSpec(
LITTLE_BIGTABLE_PORT_NUMBER, application_protocol="tcp"
)
},
min_cpu=LITTLE_BIGTABLE_MIN_CPU,
max_cpu=LITTLE_BIGTABLE_MAX_CPU,
min_memory=LITTLE_BIGTABLE_MIN_MEMORY,
max_memory=LITTLE_BIGTABLE_MAX_MEMORY,
node_selectors=node_selectors,
config=get_little_bigtable_config(
node_selectors, port_publisher, additional_service_index
),
)
......@@ -329,31 +310,71 @@ def launch_full_beacon(
frontend = plan.add_service(
name="beaconchain-frontend",
config=ServiceConfig(
image=IMAGE_NAME,
files=files,
entrypoint=["./explorer"],
cmd=[
"-config",
"/app/config/beaconchain-config.yml",
],
env_vars={
"FRONTEND_ENABLED": "TRUE",
},
ports={
FRONTEND_PORT_ID: PortSpec(
FRONTEND_PORT_NUMBER, application_protocol="http"
),
},
min_cpu=FRONTEND_MIN_CPU,
max_cpu=FRONTEND_MAX_CPU,
min_memory=FRONTEND_MIN_MEMORY,
max_memory=FRONTEND_MAX_MEMORY,
node_selectors=node_selectors,
config=get_frontend_config(
files, node_selectors, port_publisher, additional_service_index
),
)
def get_little_bigtable_config(
    node_selectors, port_publisher, additional_service_index
):
    """Build the ServiceConfig for the little_bigtable emulator.

    Maps the bigtable port as the first (port_index 0) public port of this
    additional service when additional-service port publishing is enabled.
    """
    published_ports = shared_utils.get_additional_service_standard_public_port(
        port_publisher,
        constants.LITTLE_BIGTABLE_PORT_ID,
        additional_service_index,
        0,
    )
    bigtable_ports = {
        constants.LITTLE_BIGTABLE_PORT_ID: PortSpec(
            LITTLE_BIGTABLE_PORT_NUMBER, application_protocol="tcp"
        )
    }
    return ServiceConfig(
        image="gobitfly/little_bigtable:latest",
        ports=bigtable_ports,
        public_ports=published_ports,
        min_cpu=LITTLE_BIGTABLE_MIN_CPU,
        max_cpu=LITTLE_BIGTABLE_MAX_CPU,
        min_memory=LITTLE_BIGTABLE_MIN_MEMORY,
        max_memory=LITTLE_BIGTABLE_MAX_MEMORY,
        node_selectors=node_selectors,
    )
def get_frontend_config(
    files, node_selectors, port_publisher, additional_service_index
):
    """Build the ServiceConfig for the beaconchain explorer frontend.

    Maps the HTTP port as the second (port_index 1) public port of this
    additional service when additional-service port publishing is enabled.
    """
    published_ports = shared_utils.get_additional_service_standard_public_port(
        port_publisher,
        constants.HTTP_PORT_ID,
        additional_service_index,
        1,
    )
    frontend_ports = {
        constants.HTTP_PORT_ID: PortSpec(
            FRONTEND_PORT_NUMBER, application_protocol="http"
        ),
    }
    return ServiceConfig(
        image=IMAGE_NAME,
        files=files,
        entrypoint=["./explorer"],
        cmd=[
            "-config",
            "/app/config/beaconchain-config.yml",
        ],
        env_vars={
            "FRONTEND_ENABLED": "TRUE",
        },
        ports=frontend_ports,
        public_ports=published_ports,
        min_cpu=FRONTEND_MIN_CPU,
        max_cpu=FRONTEND_MAX_CPU,
        min_memory=FRONTEND_MIN_MEMORY,
        max_memory=FRONTEND_MAX_MEMORY,
        node_selectors=node_selectors,
    )
def new_config_template_data(
cl_url,
cl_port,
......
......@@ -41,6 +41,20 @@ CLIENT_TYPES = struct(
validator="validator",
)
TCP_DISCOVERY_PORT_ID = "tcp-discovery"
UDP_DISCOVERY_PORT_ID = "udp-discovery"
RPC_PORT_ID = "rpc"
WS_RPC_PORT_ID = "ws-rpc"
WS_PORT_ID = "ws"
HTTP_PORT_ID = "http"
VALIDATOR_HTTP_PORT_ID = "http-validator"
METRICS_PORT_ID = "metrics"
ENGINE_RPC_PORT_ID = "engine-rpc"
ENGINE_WS_PORT_ID = "engine-ws"
ADMIN_PORT_ID = "admin"
LITTLE_BIGTABLE_PORT_ID = "littlebigtable"
VALDIATOR_GRPC_PORT_ID = "grpc"
VALIDATING_REWARDS_ACCOUNT = "0x8943545177806ED17B9F23F0a21ee5948eCaa776"
MAX_ENR_ENTRIES = 20
MAX_ENODE_ENTRIES = 20
......
......@@ -110,6 +110,7 @@ def input_parser(plan, input_args):
result["parallel_keystore_generation"] = False
result["global_tolerations"] = []
result["global_node_selectors"] = {}
result["port_publisher"] = get_port_publisher_params("default")
if constants.NETWORK_NAME.shadowfork in result["network_params"]["network"]:
shadow_base = result["network_params"]["network"].split("-shadowfork")[0]
......@@ -158,9 +159,7 @@ def input_parser(plan, input_args):
sub_value = input_args["xatu_sentry_params"][sub_attr]
result["xatu_sentry_params"][sub_attr] = sub_value
elif attr == "port_publisher":
for sub_attr in input_args["port_publisher"]:
sub_value = input_args["port_publisher"][sub_attr]
result["port_publisher"][sub_attr] = sub_value
result["port_publisher"] = get_port_publisher_params("user", input_args)
if result.get("disable_peer_scoring"):
result = enrich_disable_peer_scoring(result)
......@@ -188,11 +187,6 @@ def input_parser(plan, input_args):
if result["port_publisher"]["nat_exit_ip"] == "auto":
result["port_publisher"]["nat_exit_ip"] = get_public_ip(plan)
if result["port_publisher"]["public_port_start"] != None:
start = result["port_publisher"]["public_port_start"]
result["port_publisher"]["el_start"] = start
result["port_publisher"]["cl_start"] = start + len(result["participants"])
return struct(
participants=[
struct(
......@@ -377,10 +371,19 @@ def input_parser(plan, input_args):
checkpoint_sync_enabled=result["checkpoint_sync_enabled"],
checkpoint_sync_url=result["checkpoint_sync_url"],
port_publisher=struct(
public_port_start=result["port_publisher"]["public_port_start"],
nat_exit_ip=result["port_publisher"]["nat_exit_ip"],
el_start=result["port_publisher"].get("el_start"),
cl_start=result["port_publisher"].get("cl_start"),
cl_enabled=result["port_publisher"]["cl"]["enabled"],
cl_public_port_start=result["port_publisher"]["cl"]["public_port_start"],
el_enabled=result["port_publisher"]["el"]["enabled"],
el_public_port_start=result["port_publisher"]["el"]["public_port_start"],
vc_enabled=result["port_publisher"]["vc"]["enabled"],
vc_public_port_start=result["port_publisher"]["vc"]["public_port_start"],
additional_services_enabled=result["port_publisher"]["additional_services"][
"enabled"
],
additional_services_public_port_start=result["port_publisher"][
"additional_services"
]["public_port_start"],
),
)
......@@ -1001,6 +1004,30 @@ def get_default_custom_flood_params():
return {"interval_between_transactions": 1}
def get_port_publisher_params(parameter_type, input_args=None):
    """Build the port_publisher settings dict.

    parameter_type: "default" returns the package defaults; anything else
    merges the user-supplied input_args["port_publisher"] on top of them.
    Unknown top-level keys in the user input fail the merge, surfacing typos.
    """
    params = {
        "nat_exit_ip": "KURTOSIS_IP_ADDR_PLACEHOLDER",
        "el": {"enabled": False, "public_port_start": 32000},
        "cl": {"enabled": False, "public_port_start": 33000},
        "vc": {"enabled": False, "public_port_start": 34000},
        "additional_services": {"enabled": False, "public_port_start": 35000},
    }
    if parameter_type == "default":
        return params
    for setting, value in input_args["port_publisher"].items():
        if setting == "nat_exit_ip":
            # nat_exit_ip is a flat string; every other setting is a nested
            # dict that is merged key-by-key over the defaults.
            params[setting] = value
        else:
            for sub_setting, sub_value in value.items():
                params[setting][sub_setting] = sub_value
    return params
def enrich_disable_peer_scoring(parsed_arguments_dict):
for index, participant in enumerate(parsed_arguments_dict["participants"]):
if participant["cl_type"] == "lighthouse":
......
......@@ -193,6 +193,7 @@ def launch_participant_network(
constants.CL_TYPE.lighthouse,
]
current_vc_index = 0
for index, participant in enumerate(participants):
el_type = participant.el_type
cl_type = participant.cl_type
......@@ -365,11 +366,14 @@ def launch_participant_network(
preset=network_params.preset,
network=network_params.network,
electra_fork_epoch=network_params.electra_fork_epoch,
port_publisher=port_publisher,
vc_index=current_vc_index,
)
all_vc_contexts.append(vc_context)
if vc_context and vc_context.metrics_info:
vc_context.metrics_info["config"] = participant.prometheus_config
current_vc_index += 1
all_participants = []
......
......@@ -6,6 +6,11 @@ HTTP_APPLICATION_PROTOCOL = "http"
NOT_PROVIDED_APPLICATION_PROTOCOL = ""
NOT_PROVIDED_WAIT = "not-provided-wait"
MAX_PORTS_PER_CL_NODE = 4
MAX_PORTS_PER_EL_NODE = 5
MAX_PORTS_PER_VC_NODE = 3
MAX_PORTS_PER_ADDITIONAL_SERVICE = 2
def new_template_and_data(template, template_data_json):
return struct(template=template, data=template_data_json)
......@@ -231,3 +236,84 @@ def get_client_names(participant, index, participant_contexts, participant_confi
)
)
return full_name, cl_client, el_client, participant_config
def get_public_ports_for_component(
    component, port_publisher_params, participant_index=None
):
    """Return the list of public host ports reserved for one instance of a
    component ("cl", "el", "vc" or "additional_services").

    Each component type has its own configured start port and fixed per-node
    window size; participant_index selects which window belongs to this
    instance. An unknown component fails when the empty window is indexed.
    """
    port_window = ()
    if component == "cl":
        port_window = __get_port_range(
            port_publisher_params.cl_public_port_start,
            MAX_PORTS_PER_CL_NODE,
            participant_index,
        )
    elif component == "el":
        port_window = __get_port_range(
            port_publisher_params.el_public_port_start,
            MAX_PORTS_PER_EL_NODE,
            participant_index,
        )
    elif component == "vc":
        port_window = __get_port_range(
            port_publisher_params.vc_public_port_start,
            MAX_PORTS_PER_VC_NODE,
            participant_index,
        )
    elif component == "additional_services":
        port_window = __get_port_range(
            port_publisher_params.additional_services_public_port_start,
            MAX_PORTS_PER_ADDITIONAL_SERVICE,
            participant_index,
        )
    # Expand the half-open (start, end) window into individual port numbers.
    return [port for port in range(port_window[0], port_window[1])]
def __get_port_range(port_start, max_ports_per_component, participant_index):
    """Return the half-open (start, end) public port window for one instance.

    port_start: first public port configured for the component type.
    max_ports_per_component: number of ports reserved per instance.
    participant_index: zero-based instance index; None is treated as the
    first instance (index 0) instead of failing on None arithmetic, since
    the caller's participant_index parameter defaults to None.
    """
    # The previous version special-cased index 0, but the general formula
    # already yields port_start at index 0, so one expression covers both.
    index = participant_index if participant_index else 0
    public_port_start = port_start + (max_ports_per_component * index)
    return (public_port_start, public_port_start + max_ports_per_component)
def get_port_specs(port_assignments):
    """Turn a {port_id: port_number} mapping into {port_id: PortSpec}.

    The transport and application protocol are chosen from the well-known
    port id. Ids outside the known sets are silently omitted from the result.
    """
    plain_tcp_ids = (
        constants.TCP_DISCOVERY_PORT_ID,
        constants.RPC_PORT_ID,
        constants.ENGINE_RPC_PORT_ID,
        constants.ENGINE_WS_PORT_ID,
        constants.WS_RPC_PORT_ID,
        constants.LITTLE_BIGTABLE_PORT_ID,
        constants.WS_PORT_ID,
    )
    http_tcp_ids = (
        constants.HTTP_PORT_ID,
        constants.METRICS_PORT_ID,
        constants.VALIDATOR_HTTP_PORT_ID,
        constants.ADMIN_PORT_ID,
        constants.VALDIATOR_GRPC_PORT_ID,
    )
    specs = {}
    for port_id, port_number in port_assignments.items():
        if port_id in plain_tcp_ids:
            specs[port_id] = new_port_spec(port_number, TCP_PROTOCOL)
        elif port_id == constants.UDP_DISCOVERY_PORT_ID:
            specs[port_id] = new_port_spec(port_number, UDP_PROTOCOL)
        elif port_id in http_tcp_ids:
            specs[port_id] = new_port_spec(
                port_number, TCP_PROTOCOL, HTTP_APPLICATION_PROTOCOL
            )
    return specs
def get_additional_service_standard_public_port(
    port_publisher, port_id, additional_service_index, port_index
):
    """Build the public PortSpec mapping for one additional-service port.

    Args:
        port_publisher: Parsed port_publisher settings.
        port_id: Port id constant to expose (e.g. constants.HTTP_PORT_ID).
        additional_service_index: Index of this additional service, used to
            pick its slice of the public port range.
        port_index: Offset of the desired port within that slice.

    Returns:
        Dict of {port_id: PortSpec}, or an empty dict when static public
        ports are not enabled for additional services.
    """
    if not port_publisher.additional_services_enabled:
        # Static public ports are opt-in; without them Kurtosis assigns
        # ephemeral ports and no public mapping is declared here.
        return {}
    component_ports = get_public_ports_for_component(
        "additional_services", port_publisher, additional_service_index
    )
    return get_port_specs({port_id: component_ports[port_index]})
......@@ -4,7 +4,6 @@ constants = import_module("../package_io/constants.star")
IMAGE_NAME = "ethpandaops/tracoor:0.0.18"
SERVICE_NAME = "tracoor"
HTTP_PORT_ID = "http"
HTTP_PORT_NUMBER = 7007
TRACOOR_CONFIG_FILENAME = "tracoor-config.yaml"
......@@ -18,7 +17,7 @@ MIN_MEMORY = 128
MAX_MEMORY = 2048
USED_PORTS = {
HTTP_PORT_ID: shared_utils.new_port_spec(
constants.HTTP_PORT_ID: shared_utils.new_port_spec(
HTTP_PORT_NUMBER,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -35,6 +34,8 @@ def launch_tracoor(
network_params,
global_node_selectors,
final_genesis_timestamp,
port_publisher,
additional_service_index,
):
all_client_info = []
for index, participant in enumerate(participant_contexts):
......@@ -78,6 +79,8 @@ def launch_tracoor(
el_cl_data_files_artifact_uuid,
network_params,
global_node_selectors,
port_publisher,
additional_service_index,
)
plan.add_service(SERVICE_NAME, config)
......@@ -88,15 +91,25 @@ def get_config(
el_cl_data_files_artifact_uuid,
network_params,
node_selectors,
port_publisher,
additional_service_index,
):
config_file_path = shared_utils.path_join(
TRACOOR_CONFIG_MOUNT_DIRPATH_ON_SERVICE,
TRACOOR_CONFIG_FILENAME,
)
public_ports = shared_utils.get_additional_service_standard_public_port(
port_publisher,
constants.HTTP_PORT_ID,
additional_service_index,
0,
)
return ServiceConfig(
image=IMAGE_NAME,
ports=USED_PORTS,
public_ports=public_ports,
files={
TRACOOR_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name,
},
......
......@@ -37,6 +37,8 @@ def get_config(
keymanager_enabled,
network,
electra_fork_epoch,
port_publisher,
vc_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -92,16 +94,34 @@ def get_config(
env = {RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD}
env.update(extra_env_vars)
public_ports = {}
public_keymanager_port_assignment = {}
if port_publisher.vc_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"vc", port_publisher, vc_index
)
public_port_assignments = {
constants.METRICS_PORT_ID: public_ports_for_component[0]
}
public_keymanager_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[1]
}
public_ports = shared_utils.get_port_specs(public_port_assignments)
ports = {}
ports.update(vc_shared.VALIDATOR_CLIENT_USED_PORTS)
if keymanager_enabled:
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
public_ports.update(
shared_utils.get_port_specs(public_keymanager_port_assignment)
)
return ServiceConfig(
image=image,
ports=ports,
public_ports=public_ports,
cmd=cmd,
env_vars=env,
files=files,
......
......@@ -34,6 +34,8 @@ def get_config(
node_selectors,
keymanager_enabled,
preset,
port_publisher,
vc_index,
):
log_level = input_parser.get_client_log_level_or_default(
participant_log_level, global_log_level, VERBOSITY_LEVELS
......@@ -86,6 +88,20 @@ def get_config(
constants.VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER: node_keystore_files.files_artifact_uuid,
}
public_ports = {}
public_keymanager_port_assignment = {}
if port_publisher.vc_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"vc", port_publisher, vc_index
)
public_port_assignments = {
constants.METRICS_PORT_ID: public_ports_for_component[0]
}
public_keymanager_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[1]
}
public_ports = shared_utils.get_port_specs(public_port_assignments)
ports = {}
ports.update(vc_shared.VALIDATOR_CLIENT_USED_PORTS)
......@@ -93,6 +109,9 @@ def get_config(
files[constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS] = keymanager_file
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
public_ports.update(
shared_utils.get_port_specs(public_keymanager_port_assignment)
)
if preset == "minimal":
extra_env_vars["LODESTAR_PRESET"] = "minimal"
......@@ -100,6 +119,7 @@ def get_config(
return ServiceConfig(
image=image,
ports=ports,
public_ports=public_ports,
cmd=cmd,
env_vars=extra_env_vars,
files=files,
......
......@@ -22,6 +22,8 @@ def get_config(
tolerations,
node_selectors,
keymanager_enabled,
port_publisher,
vc_index,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -64,16 +66,34 @@ def get_config(
constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS: keymanager_file,
}
public_ports = {}
public_keymanager_port_assignment = {}
if port_publisher.vc_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"vc", port_publisher, vc_index
)
public_port_assignments = {
constants.METRICS_PORT_ID: public_ports_for_component[0]
}
public_keymanager_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[1]
}
public_ports = shared_utils.get_port_specs(public_port_assignments)
ports = {}
ports.update(vc_shared.VALIDATOR_CLIENT_USED_PORTS)
if keymanager_enabled:
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
public_ports.update(
shared_utils.get_port_specs(public_keymanager_port_assignment)
)
return ServiceConfig(
image=image,
ports=ports,
public_ports=public_ports,
cmd=cmd,
env_vars=extra_env_vars,
files=files,
......
......@@ -5,10 +5,10 @@ vc_shared = import_module("./shared.star")
PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/prysm-password"
PRYSM_BEACON_RPC_PORT = 4000
VALIDATOR_GRPC_PORT_NUM = 7500
VALDIATOR_GRPC_PORT_ID = "grpc"
EXTRA_PORTS = {
VALDIATOR_GRPC_PORT_ID: shared_utils.new_port_spec(
constants.VALDIATOR_GRPC_PORT_ID: shared_utils.new_port_spec(
VALIDATOR_GRPC_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -37,6 +37,8 @@ def get_config(
tolerations,
node_selectors,
keymanager_enabled,
port_publisher,
vc_index,
):
validator_keys_dirpath = shared_utils.path_join(
constants.VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER,
......@@ -90,6 +92,24 @@ def get_config(
PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: prysm_password_artifact_uuid,
}
public_ports = {}
public_keymanager_port_assignment = {}
public_gprc_port_assignment = {}
if port_publisher.vc_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"vc", port_publisher, vc_index
)
public_port_assignments = {
constants.METRICS_PORT_ID: public_ports_for_component[0]
}
public_keymanager_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[1]
}
public_gprc_port_assignment = {
constants.VALDIATOR_GRPC_PORT_ID: public_ports_for_component[2]
}
public_ports = shared_utils.get_port_specs(public_port_assignments)
ports = {}
ports.update(vc_shared.VALIDATOR_CLIENT_USED_PORTS)
......@@ -98,10 +118,15 @@ def get_config(
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
ports.update(EXTRA_PORTS)
public_ports.update(
shared_utils.get_port_specs(public_keymanager_port_assignment)
)
public_ports.update(shared_utils.get_port_specs(public_gprc_port_assignment))
return ServiceConfig(
image=image,
ports=ports,
public_ports=public_ports,
cmd=cmd,
env_vars=extra_env_vars,
files=files,
......
shared_utils = import_module("../shared_utils/shared_utils.star")
constants = import_module("../package_io/constants.star")
VALIDATOR_HTTP_PORT_NUM = 5056
VALIDATOR_HTTP_PORT_ID = "vc-http"
VALIDATOR_CLIENT_METRICS_PORT_NUM = 8080
VALIDATOR_CLIENT_METRICS_PORT_ID = "metrics"
METRICS_PATH = "/metrics"
VALIDATOR_CLIENT_USED_PORTS = {
VALIDATOR_CLIENT_METRICS_PORT_ID: shared_utils.new_port_spec(
constants.METRICS_PORT_ID: shared_utils.new_port_spec(
VALIDATOR_CLIENT_METRICS_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......@@ -16,7 +14,7 @@ VALIDATOR_CLIENT_USED_PORTS = {
}
VALIDATOR_KEYMANAGER_USED_PORTS = {
VALIDATOR_HTTP_PORT_ID: shared_utils.new_port_spec(
constants.VALIDATOR_HTTP_PORT_ID: shared_utils.new_port_spec(
VALIDATOR_HTTP_PORT_NUM,
shared_utils.TCP_PROTOCOL,
shared_utils.HTTP_APPLICATION_PROTOCOL,
......
......@@ -22,6 +22,8 @@ def get_config(
tolerations,
node_selectors,
keymanager_enabled,
port_publisher,
vc_index,
):
validator_keys_dirpath = ""
validator_secrets_dirpath = ""
......@@ -74,6 +76,20 @@ def get_config(
constants.VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER: node_keystore_files.files_artifact_uuid,
}
public_ports = {}
public_keymanager_port_assignment = {}
if port_publisher.vc_enabled:
public_ports_for_component = shared_utils.get_public_ports_for_component(
"vc", port_publisher, vc_index
)
public_port_assignments = {
constants.METRICS_PORT_ID: public_ports_for_component[0]
}
public_keymanager_port_assignment = {
constants.VALIDATOR_HTTP_PORT_ID: public_ports_for_component[1]
}
public_ports = shared_utils.get_port_specs(public_port_assignments)
ports = {}
ports.update(vc_shared.VALIDATOR_CLIENT_USED_PORTS)
......@@ -81,10 +97,14 @@ def get_config(
files[constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS] = keymanager_file
cmd.extend(keymanager_api_cmd)
ports.update(vc_shared.VALIDATOR_KEYMANAGER_USED_PORTS)
public_ports.update(
shared_utils.get_port_specs(public_keymanager_port_assignment)
)
return ServiceConfig(
image=image,
ports=ports,
public_ports=public_ports,
cmd=cmd,
env_vars=extra_env_vars,
files=files,
......
......@@ -49,6 +49,8 @@ def launch(
preset,
network, # TODO: remove when deneb rebase is done
electra_fork_epoch, # TODO: remove when deneb rebase is done
port_publisher,
vc_index,
):
if node_keystore_files == None:
return None
......@@ -94,6 +96,8 @@ def launch(
keymanager_enabled=keymanager_enabled,
network=network, # TODO: remove when deneb rebase is done
electra_fork_epoch=electra_fork_epoch, # TODO: remove when deneb rebase is done
port_publisher=port_publisher,
vc_index=vc_index,
)
elif vc_type == constants.VC_TYPE.lodestar:
config = lodestar.get_config(
......@@ -118,6 +122,8 @@ def launch(
node_selectors=node_selectors,
keymanager_enabled=keymanager_enabled,
preset=preset,
port_publisher=port_publisher,
vc_index=vc_index,
)
elif vc_type == constants.VC_TYPE.teku:
config = teku.get_config(
......@@ -139,6 +145,8 @@ def launch(
tolerations=tolerations,
node_selectors=node_selectors,
keymanager_enabled=keymanager_enabled,
port_publisher=port_publisher,
vc_index=vc_index,
)
elif vc_type == constants.VC_TYPE.nimbus:
config = nimbus.get_config(
......@@ -160,6 +168,8 @@ def launch(
tolerations=tolerations,
node_selectors=node_selectors,
keymanager_enabled=keymanager_enabled,
port_publisher=port_publisher,
vc_index=vc_index,
)
elif vc_type == constants.VC_TYPE.prysm:
config = prysm.get_config(
......@@ -183,6 +193,8 @@ def launch(
tolerations=tolerations,
node_selectors=node_selectors,
keymanager_enabled=keymanager_enabled,
port_publisher=port_publisher,
vc_index=vc_index,
)
elif vc_type == constants.VC_TYPE.grandine:
fail("Grandine VC is not yet supported")
......@@ -191,9 +203,7 @@ def launch(
validator_service = plan.add_service(service_name, config)
validator_metrics_port = validator_service.ports[
vc_shared.VALIDATOR_CLIENT_METRICS_PORT_ID
]
validator_metrics_port = validator_service.ports[constants.METRICS_PORT_ID]
validator_metrics_url = "{0}:{1}".format(
validator_service.ip_address, validator_metrics_port.number
)
......@@ -201,12 +211,6 @@ def launch(
service_name, vc_shared.METRICS_PATH, validator_metrics_url
)
validator_http_port = (
validator_service.ports[vc_shared.VALIDATOR_HTTP_PORT_ID]
if keymanager_enabled
else None
)
return vc_context.new_vc_context(
client_name=vc_type,
service_name=service_name,
......
......@@ -25,7 +25,7 @@ forky:
type: "beacon_node"
config:
address: "{{ $clClient.Beacon_HTTP_URL }}"
polling_interval: "{{ .SecondsPerSlot }}s"
polling_interval: "{{ $.SecondsPerSlot }}s"
{{- end }}
ethereum:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment