From 20ab3fbd48bf9aeedc70b0883a9a8ad709012ef3 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Thu, 21 Nov 2024 15:44:00 +0200 Subject: [PATCH 01/39] Add profiles and container names; compose file consistency --- docker-compose.yml | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 83460498..8228d9fc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,6 +9,8 @@ services: # | | | | __/ |_| | | | __/ | | | | | | | | | | | (_| | # |_| |_|\___|\__|_| |_|\___|_| |_| |_| |_|_|_| |_|\__,_| nethermind: + container_name: nethermind-${CLUSTER_NAME:-cdvn} + profiles: [""] image: nethermind/nethermind:${NETHERMIND_VERSION:-1.28.0} restart: unless-stopped ports: @@ -44,6 +46,8 @@ services: # |___/ lighthouse: + container_name: lighthouse-${CLUSTER_NAME:-cdvn} + profiles: [""] image: sigp/lighthouse:${LIGHTHOUSE_VERSION:-v5.3.0} ports: - ${LIGHTHOUSE_PORT_P2P:-9000}:9000/tcp # P2P TCP @@ -77,20 +81,22 @@ services: # \___|_| |_|\__,_|_| \___/|_| |_| charon: + container_name: charon-${CLUSTER_NAME:-cdvn} + profiles: ["cluster", ""] image: obolnetwork/charon:${CHARON_VERSION:-v1.1.1} environment: - - CHARON_BEACON_NODE_ENDPOINTS=${CHARON_BEACON_NODE_ENDPOINTS:-http://lighthouse:5052} - - CHARON_LOG_LEVEL=${CHARON_LOG_LEVEL:-info} - - CHARON_LOG_FORMAT=${CHARON_LOG_FORMAT:-console} - - CHARON_P2P_RELAYS=${CHARON_P2P_RELAYS:-https://0.relay.obol.tech,https://1.relay.obol.tech/} - - CHARON_P2P_EXTERNAL_HOSTNAME=${CHARON_P2P_EXTERNAL_HOSTNAME:-} # Empty default required to avoid warnings. 
- - CHARON_P2P_TCP_ADDRESS=0.0.0.0:${CHARON_PORT_P2P_TCP:-3610} - - CHARON_VALIDATOR_API_ADDRESS=0.0.0.0:3600 - - CHARON_MONITORING_ADDRESS=0.0.0.0:3620 - - CHARON_BUILDER_API=${BUILDER_API_ENABLED:-false} - - CHARON_FEATURE_SET_ENABLE=${CHARON_FEATURE_SET_ENABLE:-} - - CHARON_LOKI_ADDRESSES=${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push} - - CHARON_LOKI_SERVICE=charon + CHARON_BEACON_NODE_ENDPOINTS: ${CHARON_BEACON_NODE_ENDPOINTS:-http://lighthouse:5052} + CHARON_LOG_LEVEL: ${CHARON_LOG_LEVEL:-info} + CHARON_LOG_FORMAT: ${CHARON_LOG_FORMAT:-console} + CHARON_P2P_RELAYS: ${CHARON_P2P_RELAYS:-https://0.relay.obol.tech,https://1.relay.obol.tech/} + CHARON_P2P_EXTERNAL_HOSTNAME: ${CHARON_P2P_EXTERNAL_HOSTNAME:-} # Empty default required to avoid warnings. + CHARON_P2P_TCP_ADDRESS: 0.0.0.0:${CHARON_PORT_P2P_TCP:-3610} + CHARON_VALIDATOR_API_ADDRESS: 0.0.0.0:3600 + CHARON_MONITORING_ADDRESS: 0.0.0.0:3620 + CHARON_BUILDER_API: ${BUILDER_API_ENABLED:-false} + CHARON_FEATURE_SET_ENABLE: ${CHARON_FEATURE_SET_ENABLE:-} + CHARON_LOKI_ADDRESSES: ${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push} + CHARON_LOKI_SERVICE: charon ports: - ${CHARON_PORT_P2P_TCP:-3610}:${CHARON_PORT_P2P_TCP:-3610}/tcp # P2P TCP libp2p networks: [dvnode] @@ -107,6 +113,8 @@ services: # |_|\___/ \__,_|\___||___/\__\__,_|_| lodestar: + container_name: lodestar-${CLUSTER_NAME:-cdvn} + profiles: ["cluster", ""] image: chainsafe/lodestar:${LODESTAR_VERSION:-v1.20.2} depends_on: [charon] entrypoint: /opt/lodestar/run.sh @@ -128,6 +136,8 @@ services: # | | | | | | __/\ V /_____| |_) | (_) | (_) \__ \ |_ # |_| |_| |_|\___| \_/ |_.__/ \___/ \___/|___/\__| mev-boost: + container_name: prometheus-${CLUSTER_NAME:-cdvn} + profiles: [""] image: ${MEVBOOST_IMAGE:-flashbots/mev-boost}:${MEVBOOST_VERSION:-1.7.0} command: | -${NETWORK} @@ -145,6 +155,8 @@ services: # |_| |_| |_|\___/|_| |_|_|\__\___/|_| |_|_| |_|\__, | # |___/ prometheus: + container_name: prometheus-${CLUSTER_NAME:-cdvn} + 
profiles: ["monitoring", ""] image: prom/prometheus:${PROMETHEUS_VERSION:-v2.50.1} user: ":" networks: [dvnode] @@ -158,6 +170,8 @@ services: restart: unless-stopped grafana: + container_name: grafana-${CLUSTER_NAME:-cdvn} + profiles: ["monitoring", ""] image: grafana/grafana:${GRAFANA_VERSION:-10.4.2} user: ":" ports: From 1ed42356f4dc8fc8b2a94e98422ab94a08dda04e Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Thu, 21 Nov 2024 16:12:33 +0200 Subject: [PATCH 02/39] Enable by default MEVBOOST_RELAYS in holesky --- .env.sample.holesky | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.env.sample.holesky b/.env.sample.holesky index 4a2ce1a7..fad6b4a3 100644 --- a/.env.sample.holesky +++ b/.env.sample.holesky @@ -88,7 +88,7 @@ LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://checkpoint-sync.holesky.ethpandaops.io/ # Comma separated list of MEV-Boost relays. # You can choose public relays from https://enchanted-direction-844.notion.site/6d369eb33f664487800b0dedfe32171e?v=d255247c822c409f99c498aeb6a4e51d. 
-#MEVBOOST_RELAYS=https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live,https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz +MEVBOOST_RELAYS=https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live,https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz ######### Monitoring Config ######### From 9b901f6a476aec96f766f36765b0abe481455805 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Thu, 21 Nov 2024 16:59:49 +0200 Subject: [PATCH 03/39] Add customisable validator API port and monitoring port --- .env.sample.holesky | 9 ++++++--- .env.sample.mainnet | 9 ++++++--- docker-compose.yml | 8 ++++---- prometheus/prometheus.yml.example | 2 +- 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/.env.sample.holesky b/.env.sample.holesky index fad6b4a3..3450e960 100644 --- a/.env.sample.holesky +++ b/.env.sample.holesky @@ -72,12 +72,15 @@ LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://checkpoint-sync.holesky.ethpandaops.io/ # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address. #CHARON_LOKI_ADDRESSES= -# Docker network of running charon node. See `docker network ls`. -#CHARON_DOCKER_NETWORK= - # Charon host exposed ports #CHARON_PORT_P2P_TCP= +# Charon validator API port +#CHARON_PORT_VALIDATOR_API= + +# Charon monitoring port +#CHARON_PORT_MONITORING= + ######### MEV-Boost Config ######### # MEV-Boost docker container image version, e.g. `latest` or `1.7.0`. 
diff --git a/.env.sample.mainnet b/.env.sample.mainnet index 74f86d2a..efcb8a30 100644 --- a/.env.sample.mainnet +++ b/.env.sample.mainnet @@ -72,12 +72,15 @@ LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://mainnet.checkpoint.sigp.io/ # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address. #CHARON_LOKI_ADDRESSES= -# Docker network of running charon node. See `docker network ls`. -#CHARON_DOCKER_NETWORK= - # Charon host exposed ports #CHARON_PORT_P2P_TCP= +# Charon validator API port +#CHARON_PORT_VALIDATOR_API= + +# Charon monitoring port +#CHARON_PORT_MONITORING= + ######### MEV-Boost Config ######### # MEV-Boost docker container image version, e.g. `latest` or `1.7.0`. diff --git a/docker-compose.yml b/docker-compose.yml index 8228d9fc..8d05e165 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -91,8 +91,8 @@ services: CHARON_P2P_RELAYS: ${CHARON_P2P_RELAYS:-https://0.relay.obol.tech,https://1.relay.obol.tech/} CHARON_P2P_EXTERNAL_HOSTNAME: ${CHARON_P2P_EXTERNAL_HOSTNAME:-} # Empty default required to avoid warnings. 
CHARON_P2P_TCP_ADDRESS: 0.0.0.0:${CHARON_PORT_P2P_TCP:-3610} - CHARON_VALIDATOR_API_ADDRESS: 0.0.0.0:3600 - CHARON_MONITORING_ADDRESS: 0.0.0.0:3620 + CHARON_VALIDATOR_API_ADDRESS: 0.0.0.0:${CHARON_PORT_VALIDATOR_API:-3600} + CHARON_MONITORING_ADDRESS: 0.0.0.0:${CHARON_PORT_MONITORING:-3620} CHARON_BUILDER_API: ${BUILDER_API_ENABLED:-false} CHARON_FEATURE_SET_ENABLE: ${CHARON_FEATURE_SET_ENABLE:-} CHARON_LOKI_ADDRESSES: ${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push} @@ -104,7 +104,7 @@ services: - .charon:/opt/charon/.charon restart: unless-stopped healthcheck: - test: wget -qO- http://localhost:3620/readyz + test: wget -qO- http://localhost:${CHARON_PORT_MONITORING:-3620}/readyz # _ _ _ # | | ___ __| | ___ ___| |_ __ _ _ __ @@ -120,7 +120,7 @@ services: entrypoint: /opt/lodestar/run.sh networks: [dvnode] environment: - BEACON_NODE_ADDRESS: http://charon:3600 + BEACON_NODE_ADDRESS: http://charon:${CHARON_PORT_VALIDATOR_API:-3600} NETWORK: ${NETWORK} BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-false} BUILDER_SELECTION: ${BUILDER_SELECTION:-builderalways} diff --git a/prometheus/prometheus.yml.example b/prometheus/prometheus.yml.example index 5dee97f5..108878b0 100644 --- a/prometheus/prometheus.yml.example +++ b/prometheus/prometheus.yml.example @@ -22,7 +22,7 @@ scrape_configs: - targets: ["lighthouse:5054"] - job_name: "charon" static_configs: - - targets: ["charon:3620"] + - targets: ["charon:${CHARON_PORT_MONITORING:-3620}"] - job_name: "lodestar" static_configs: - targets: ["lodestar:5064"] From 1e089b185c169d1d9470d8f8016167bf6b50d65f Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 22 Nov 2024 15:29:59 +0200 Subject: [PATCH 04/39] Remove custom container names --- docker-compose.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 8d05e165..ec85d5f7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,7 +9,6 @@ services: # | | | | __/ |_| | | | __/ | | | | | | | | | | | (_| | # 
|_| |_|\___|\__|_| |_|\___|_| |_| |_| |_|_|_| |_|\__,_| nethermind: - container_name: nethermind-${CLUSTER_NAME:-cdvn} profiles: [""] image: nethermind/nethermind:${NETHERMIND_VERSION:-1.28.0} restart: unless-stopped @@ -46,7 +45,6 @@ services: # |___/ lighthouse: - container_name: lighthouse-${CLUSTER_NAME:-cdvn} profiles: [""] image: sigp/lighthouse:${LIGHTHOUSE_VERSION:-v5.3.0} ports: @@ -81,7 +79,6 @@ services: # \___|_| |_|\__,_|_| \___/|_| |_| charon: - container_name: charon-${CLUSTER_NAME:-cdvn} profiles: ["cluster", ""] image: obolnetwork/charon:${CHARON_VERSION:-v1.1.1} environment: @@ -113,7 +110,6 @@ services: # |_|\___/ \__,_|\___||___/\__\__,_|_| lodestar: - container_name: lodestar-${CLUSTER_NAME:-cdvn} profiles: ["cluster", ""] image: chainsafe/lodestar:${LODESTAR_VERSION:-v1.20.2} depends_on: [charon] @@ -136,7 +132,6 @@ services: # | | | | | | __/\ V /_____| |_) | (_) | (_) \__ \ |_ # |_| |_| |_|\___| \_/ |_.__/ \___/ \___/|___/\__| mev-boost: - container_name: prometheus-${CLUSTER_NAME:-cdvn} profiles: [""] image: ${MEVBOOST_IMAGE:-flashbots/mev-boost}:${MEVBOOST_VERSION:-1.7.0} command: | @@ -155,7 +150,6 @@ services: # |_| |_| |_|\___/|_| |_|_|\__\___/|_| |_|_| |_|\__, | # |___/ prometheus: - container_name: prometheus-${CLUSTER_NAME:-cdvn} profiles: ["monitoring", ""] image: prom/prometheus:${PROMETHEUS_VERSION:-v2.50.1} user: ":" @@ -170,7 +164,6 @@ services: restart: unless-stopped grafana: - container_name: grafana-${CLUSTER_NAME:-cdvn} profiles: ["monitoring", ""] image: grafana/grafana:${GRAFANA_VERSION:-10.4.2} user: ":" From 508f64b143b7c8c47402d9bfbce2c1fde25b584a Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 22 Nov 2024 19:09:27 +0200 Subject: [PATCH 05/39] Add base profile to docker compose --- docker-compose.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index ec85d5f7..7d9b4922 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,7 +9,7 @@ 
services: # | | | | __/ |_| | | | __/ | | | | | | | | | | | (_| | # |_| |_|\___|\__|_| |_|\___|_| |_| |_| |_|_|_| |_|\__,_| nethermind: - profiles: [""] + profiles: ["base", ""] image: nethermind/nethermind:${NETHERMIND_VERSION:-1.28.0} restart: unless-stopped ports: @@ -45,7 +45,7 @@ services: # |___/ lighthouse: - profiles: [""] + profiles: ["base", ""] image: sigp/lighthouse:${LIGHTHOUSE_VERSION:-v5.3.0} ports: - ${LIGHTHOUSE_PORT_P2P:-9000}:9000/tcp # P2P TCP @@ -132,7 +132,7 @@ services: # | | | | | | __/\ V /_____| |_) | (_) | (_) \__ \ |_ # |_| |_| |_|\___| \_/ |_.__/ \___/ \___/|___/\__| mev-boost: - profiles: [""] + profiles: ["base", ""] image: ${MEVBOOST_IMAGE:-flashbots/mev-boost}:${MEVBOOST_VERSION:-1.7.0} command: | -${NETWORK} @@ -150,7 +150,7 @@ services: # |_| |_| |_|\___/|_| |_|_|\__\___/|_| |_|_| |_|\__, | # |___/ prometheus: - profiles: ["monitoring", ""] + profiles: ["cluster", ""] image: prom/prometheus:${PROMETHEUS_VERSION:-v2.50.1} user: ":" networks: [dvnode] @@ -164,7 +164,7 @@ services: restart: unless-stopped grafana: - profiles: ["monitoring", ""] + profiles: ["base", ""] image: grafana/grafana:${GRAFANA_VERSION:-10.4.2} user: ":" ports: From 95cd398d82638bc4d3db26994c17169021d564a7 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 22 Nov 2024 19:09:36 +0200 Subject: [PATCH 06/39] gitignore clusters dir --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ca609158..13c11c94 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ data/ .idea .charon prometheus/prometheus.yml +clusters/ \ No newline at end of file From 43436bcef908786f269f3fffb8371b4d57679d10 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 22 Nov 2024 19:09:58 +0200 Subject: [PATCH 07/39] Add setup.sh script for setting up multi cluster directory --- multi_cluster/setup.sh | 114 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100755 multi_cluster/setup.sh diff --git 
a/multi_cluster/setup.sh b/multi_cluster/setup.sh new file mode 100755 index 00000000..a4996af4 --- /dev/null +++ b/multi_cluster/setup.sh @@ -0,0 +1,114 @@ +#!/bin/bash + +current_cluster_name=default + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Create a multi-cluster setup from a traditional single cluster setup." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -c string Name of the current cluster. (default: \"default\")" +} + +while getopts "hc:" opt; do + case $opt in + h) + usage + exit 0 + ;; + c) + current_cluster_name=${OPTARG} + ;; + \?) + usage + exit 1 + ;; + esac +done + +cluster_dir=./clusters/${current_cluster_name} + +# Check if clusters dir already exists. +if test -d ./clusters; then + echo "./clsuters directory already exists. Cannot setup already set multi cluster CDVN." + exit 1 +fi + +# Create cluster's dir. +mkdir -p ${cluster_dir} + +cleanupClusterDir() { + if [ "$1" != "0" ]; then + rm -rf ./clusters + fi +} +trap 'cleanupClusterDir $?' EXIT + +# Copy .charon folder to cluster's dir. +if test -d ./.charon; then + cp -r .charon ${cluster_dir}/ +fi + +# Copy .env file to cluster's dir. +if test ./.env; then + cp .env ${cluster_dir}/ +fi + +# Copy docker-compose.yml to cluster's dir. +if test ./docker-compose.yml; then + cp ./docker-compose.yml ${cluster_dir}/ +fi + +# Write default charon ports in .env file if they are not set. 
+if grep -xq "CHARON_PORT_VALIDATOR_API=.*" ./.env; then + echo "CHARON_PORT_VALIDATOR_API already set, using the set port instead of the default 3600" +else + sed 's|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=3600|' ${cluster_dir}/.env > ${cluster_dir}/.env~ + mv ${cluster_dir}/.env~ ${cluster_dir}/.env +fi + +if grep -xq "CHARON_PORT_MONITORING=.*" ./.env; then + echo "CHARON_PORT_MONITORING already set, using the set port instead of the default 3620" +else + sed 's|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=3620|' ${cluster_dir}/.env > ${cluster_dir}/.env~ + mv ${cluster_dir}/.env~ ${cluster_dir}/.env +fi + +if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then + echo "CHARON_PORT_P2P_TCP already set, using the set port instead of the default 3610" +else + sed 's|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=3610|' ${cluster_dir}/.env > ${cluster_dir}/.env~ + mv ${cluster_dir}/.env~ ${cluster_dir}/.env +fi + +# Create data dir +mkdir ${cluster_dir}/data + +# Copy lodestar files and data +cp -r ./lodestar ${cluster_dir}/ +if test -d ./data/lodestar; then + cp -r ./data/lodestar ${cluster_dir}/data/ +fi + +# Copy prometheus files and data +cp -r ./prometheus ${cluster_dir}/ +if test -d ./data/prometheus; then + cp -r ./data/prometheus ${cluster_dir}/data/ +fi + +# Add the EL + CL + MEV-boost network +sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ +mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml + +# Include the other services in the EL + CL + MEV-boost network +sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ +mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml + +# Stop the cluster-related containers that are running in root directory (i.e.: charon, VC). 
+docker compose --profile cluster down +# Start the base containers in root directory (i.e.: EL, CL). +docker compose --profile base up -d +# Start the cluster-related containers in cluster-specific directory (i.e.: charon, VC). +docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml up -d From 9dc22ea7429252d557e537206b7d3751fd8e47b1 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 22 Nov 2024 19:10:28 +0200 Subject: [PATCH 08/39] Add and delete cluster scripts --- multi_cluster/add_cluster.sh | 153 ++++++++++++++++++++++++++++++++ multi_cluster/delete_cluster.sh | 53 +++++++++++ 2 files changed, 206 insertions(+) create mode 100755 multi_cluster/add_cluster.sh create mode 100755 multi_cluster/delete_cluster.sh diff --git a/multi_cluster/add_cluster.sh b/multi_cluster/add_cluster.sh new file mode 100755 index 00000000..383dda93 --- /dev/null +++ b/multi_cluster/add_cluster.sh @@ -0,0 +1,153 @@ +#!/bin/bash + +unset -v cluster_name + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Add a validator cluster (Charon + VC + Prometheus), to the ./clusters directory." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -c string [REQUIRED] Name of the cluster to be created." +} + +while getopts "hc:" opt; do + case $opt in + h) + usage + exit 0 + ;; + c) + cluster_name=${OPTARG} + ;; + \?) + usage + exit 1 + ;; + esac +done + +if [ -z "$cluster_name" ]; then + echo 'Missing flag -c, cluster name is mandatory.' >&2 + exit 1 +fi + +# Check if clusters dir already exists. +if test ! -d ./clusters; then + echo "./clsuters directory does not exist. Run setup.sh first." + exit 1 +fi + +# Check if clusters dir already exists. +if test -d ./clusters/$cluster_name; then + echo "./clsuters/$cluster_name directory already exists." 
+ exit 1 +fi + +find_port() { + port=$1 # Port number to start search from + cluster_var=$2 # Env variable found in the cluster .env in ./clusters that is to be excluded + exclude=$3 # Comma separated list of strings with ports already allocated from this script + + is_occupied=1 + while [[ -n "$is_occupied" ]]; do + # Check if TCP port is free, if occupied increment with 1 and continue the loop + if is_occupied=$(netstat -taln | grep $port); then + port=$(($port+1)) + continue + fi + # Check if TCP port is used by another cluster + for cluster in ./clusters/*; do + p2p_cluster_port=$(. ./$cluster/.env; printf '%s' "${!cluster_var}") + if [ $port -eq $p2p_cluster_port ]; then + is_occupied=1 + break + fi + done + # If occupied by cluster, increment with 1 and continue the loop + if [ ! -z "$is_occupied" ]; then + port=$(($port+1)) + continue + fi + + for i in ${exclude//,/ } + do + if [ $port -eq $i ]; then + is_occupied=1 + port=$(($port+1)) + break + fi + done + done + + echo $port +} + +# Try to find free and unallocated to another cluster p2p port +p2p_port="$(find_port "3610" "CHARON_PORT_P2P_TCP" "")" +validator_port="$(find_port "3600" "CHARON_PORT_VALIDATOR_API" "$p2p_port")" +monitoring_port="$(find_port "3620" "CHARON_PORT_MONITORING" "$p2p_port,$validator_port")" + +mkdir -p ./clusters/$cluster_name +cluster_dir=./clusters/$cluster_name + +# Copy .env file to cluster's dir. +if test ./.env; then + cp .env ${cluster_dir}/ +fi + +# Copy docker-compose.yml to cluster's dir. +if test ./docker-compose.yml; then + cp ./docker-compose.yml ${cluster_dir}/ +fi + +# Write default charon ports in .env file if they are not set. 
+if grep -xq "CHARON_PORT_VALIDATOR_API=.*" ./.env; then + echo "CHARON_PORT_VALIDATOR_API already set, overwriting it with port $validator_port" + sed "s|CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ +else + sed "s|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ +fi + mv ${cluster_dir}/.env~ ${cluster_dir}/.env + +if grep -xq "CHARON_PORT_MONITORING=.*" ./.env; then + echo "CHARON_PORT_MONITORING already set, overwriting it with port $monitoring_port" + sed "s|CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ +else + sed "s|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ +fi +mv ${cluster_dir}/.env~ ${cluster_dir}/.env + +if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then + echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $p2p_port" + sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ +else + sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ +fi +mv ${cluster_dir}/.env~ ${cluster_dir}/.env + +# Create data dir +mkdir ${cluster_dir}/data + +# Copy prometheus files and data +cp -r ./prometheus ${cluster_dir}/ +if test -d ./data/prometheus; then + cp -r ./data/prometheus ${cluster_dir}/data/ +fi + +# Copy lodestar files +cp -r ./lodestar ${cluster_dir}/ + +# Add the EL + CL + MEV-boost network +sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ +mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml + +# Include the other services in the EL + CL + MEV-boost network +sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml 
> ${cluster_dir}/docker-compose.yml~ +mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml + +echo "Added new cluster $cluster_name with the following cluster-specific config:" +echo "CHARON_PORT_P2P_TCP: $p2p_port" +echo "CHARON_PORT_VALIDATOR_API: $validator_port" +echo "CHARON_PORT_MONITORING: $monitoring_port" diff --git a/multi_cluster/delete_cluster.sh b/multi_cluster/delete_cluster.sh new file mode 100755 index 00000000..c6ab6226 --- /dev/null +++ b/multi_cluster/delete_cluster.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +unset -v cluster_name + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Delete a validator cluster (Charon + VC + Prometheus), from the ./clusters directory." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -c string [REQUIRED] Name of the cluster to be deleted." +} + +while getopts "hc:" opt; do + case $opt in + h) + usage + exit 0 + ;; + c) + cluster_name=${OPTARG} + ;; + \?) + usage + exit 1 + ;; + esac +done + +if [ -z "$cluster_name" ]; then + echo 'Missing flag -c, cluster name is mandatory.' >&2 + exit 1 +fi + +# Check if clusters dir already exists. +if test ! -d ./clusters; then + echo "./clsuters directory does not exist. Run setup.sh first." + exit 1 +fi + +# Check if clusters dir already exists. +if test ! -d ./clusters/$cluster_name; then + echo "./clsuters/$cluster_name directory does not exist. Make sure cluster $cluster_name is created." + exit 1 +fi + +read -r -p "Are you sure you want to delete the cluster? This will delete your private keys, which will be unrecoverable if you do not have backup! [y/N] " response +if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]] +then + rm -rf ./clusters/$cluster_name + echo "Cluster $cluster_name deleted." 
+fi From 7cfce2ac4f26b2a4cc2bc53e9721c5c8b0e1f308 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 22 Nov 2024 19:10:44 +0200 Subject: [PATCH 09/39] Start and stop cluster scripts --- multi_cluster/start_cluster.sh | 49 ++++++++++++++++++++++++++++++++++ multi_cluster/stop_cluster.sh | 49 ++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) create mode 100755 multi_cluster/start_cluster.sh create mode 100755 multi_cluster/stop_cluster.sh diff --git a/multi_cluster/start_cluster.sh b/multi_cluster/start_cluster.sh new file mode 100755 index 00000000..e850eda4 --- /dev/null +++ b/multi_cluster/start_cluster.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +unset -v cluster_name + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Start a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -c string [REQUIRED] Name of the cluster to be started." +} + +while getopts "hc:" opt; do + case $opt in + h) + usage + exit 0 + ;; + c) + cluster_name=${OPTARG} + ;; + \?) + usage + exit 1 + ;; + esac +done + +if [ -z "$cluster_name" ]; then + echo 'Missing flag -c, cluster name is mandatory.' >&2 + exit 1 +fi + +# Check if clusters dir already exists. +if test ! -d ./clusters; then + echo "./clsuters directory does not exist. Run setup.sh first." + exit 1 +fi + +if test ! -d ./clusters/$cluster_name; then + echo "./clsuters/$cluster_name directory does not exist. Run add-cluster.sh first." 
+ exit 1 +fi + +cluster_dir=./clusters/${cluster_name} + +docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml up -d diff --git a/multi_cluster/stop_cluster.sh b/multi_cluster/stop_cluster.sh new file mode 100755 index 00000000..c760e273 --- /dev/null +++ b/multi_cluster/stop_cluster.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +unset -v cluster_name + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Stop a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -c string [REQUIRED] Name of the cluster to be stopped." +} + +while getopts "hc:" opt; do + case $opt in + h) + usage + exit 0 + ;; + c) + cluster_name=${OPTARG} + ;; + \?) + usage + exit 1 + ;; + esac +done + +if [ -z "$cluster_name" ]; then + echo 'Missing flag -c, cluster name is mandatory.' >&2 + exit 1 +fi + +# Check if clusters dir already exists. +if test ! -d ./clusters; then + echo "./clsuters directory does not exist. Run setup.sh first." + exit 1 +fi + +if test ! -d ./clusters/$cluster_name; then + echo "./clsuters/$cluster_name directory does not exist. Run add-cluster.sh first." 
+ exit 1 +fi + +cluster_dir=./clusters/${cluster_name} + +docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml down From 35af5ed31489186d9fd92b7e1f87f91a9c61bb76 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 22 Nov 2024 19:11:28 +0200 Subject: [PATCH 10/39] Start and stop base containers scripts --- multi_cluster/start_base.sh | 25 +++++++++++++++++++++++++ multi_cluster/stop_base.sh | 25 +++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100755 multi_cluster/start_base.sh create mode 100755 multi_cluster/stop_base.sh diff --git a/multi_cluster/start_base.sh b/multi_cluster/start_base.sh new file mode 100755 index 00000000..f9eb7229 --- /dev/null +++ b/multi_cluster/start_base.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Start the base docker containers (EL, CL, MEV boost, Grafana), without any validator." + echo "" + echo "Options:" + echo " -h Display this help message." +} + +while getopts "hc:" opt; do + case $opt in + h) + usage + exit 0 + ;; + \?) + usage + exit 1 + ;; + esac +done + +docker compose --profile base up -d diff --git a/multi_cluster/stop_base.sh b/multi_cluster/stop_base.sh new file mode 100755 index 00000000..4470cb4c --- /dev/null +++ b/multi_cluster/stop_base.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Stop the base docker containers (EL, CL, MEV boost, Grafana)." + echo "" + echo "Options:" + echo " -h Display this help message." +} + +while getopts "hc:" opt; do + case $opt in + h) + usage + exit 0 + ;; + \?) 
+ usage + exit 1 + ;; + esac +done + +docker compose --profile base stop From 1c50f8b084d25e2f79fd028622400d18fc16e21f Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Sat, 23 Nov 2024 20:41:02 +0200 Subject: [PATCH 11/39] Sum up scripts into 3, instead of 7 --- multi_cluster/add_cluster.sh | 153 ------------------- multi_cluster/base.sh | 57 ++++++++ multi_cluster/cluster.sh | 252 ++++++++++++++++++++++++++++++++ multi_cluster/delete_cluster.sh | 53 ------- multi_cluster/start_base.sh | 25 ---- multi_cluster/start_cluster.sh | 49 ------- multi_cluster/stop_base.sh | 25 ---- multi_cluster/stop_cluster.sh | 49 ------- 8 files changed, 309 insertions(+), 354 deletions(-) delete mode 100755 multi_cluster/add_cluster.sh create mode 100755 multi_cluster/base.sh create mode 100755 multi_cluster/cluster.sh delete mode 100755 multi_cluster/delete_cluster.sh delete mode 100755 multi_cluster/start_base.sh delete mode 100755 multi_cluster/start_cluster.sh delete mode 100755 multi_cluster/stop_base.sh delete mode 100755 multi_cluster/stop_cluster.sh diff --git a/multi_cluster/add_cluster.sh b/multi_cluster/add_cluster.sh deleted file mode 100755 index 383dda93..00000000 --- a/multi_cluster/add_cluster.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash - -unset -v cluster_name - -usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Add a validator cluster (Charon + VC + Prometheus), to the ./clusters directory." - echo "" - echo "Options:" - echo " -h Display this help message." - echo " -c string [REQUIRED] Name of the cluster to be created." -} - -while getopts "hc:" opt; do - case $opt in - h) - usage - exit 0 - ;; - c) - cluster_name=${OPTARG} - ;; - \?) - usage - exit 1 - ;; - esac -done - -if [ -z "$cluster_name" ]; then - echo 'Missing flag -c, cluster name is mandatory.' >&2 - exit 1 -fi - -# Check if clusters dir already exists. -if test ! -d ./clusters; then - echo "./clsuters directory does not exist. Run setup.sh first." 
- exit 1 -fi - -# Check if clusters dir already exists. -if test -d ./clusters/$cluster_name; then - echo "./clsuters/$cluster_name directory already exists." - exit 1 -fi - -find_port() { - port=$1 # Port number to start search from - cluster_var=$2 # Env variable found in the cluster .env in ./clusters that is to be excluded - exclude=$3 # Comma separated list of strings with ports already allocated from this script - - is_occupied=1 - while [[ -n "$is_occupied" ]]; do - # Check if TCP port is free, if occupied increment with 1 and continue the loop - if is_occupied=$(netstat -taln | grep $port); then - port=$(($port+1)) - continue - fi - # Check if TCP port is used by another cluster - for cluster in ./clusters/*; do - p2p_cluster_port=$(. ./$cluster/.env; printf '%s' "${!cluster_var}") - if [ $port -eq $p2p_cluster_port ]; then - is_occupied=1 - break - fi - done - # If occupied by cluster, increment with 1 and continue the loop - if [ ! -z "$is_occupied" ]; then - port=$(($port+1)) - continue - fi - - for i in ${exclude//,/ } - do - if [ $port -eq $i ]; then - is_occupied=1 - port=$(($port+1)) - break - fi - done - done - - echo $port -} - -# Try to find free and unallocated to another cluster p2p port -p2p_port="$(find_port "3610" "CHARON_PORT_P2P_TCP" "")" -validator_port="$(find_port "3600" "CHARON_PORT_VALIDATOR_API" "$p2p_port")" -monitoring_port="$(find_port "3620" "CHARON_PORT_MONITORING" "$p2p_port,$validator_port")" - -mkdir -p ./clusters/$cluster_name -cluster_dir=./clusters/$cluster_name - -# Copy .env file to cluster's dir. -if test ./.env; then - cp .env ${cluster_dir}/ -fi - -# Copy docker-compose.yml to cluster's dir. -if test ./docker-compose.yml; then - cp ./docker-compose.yml ${cluster_dir}/ -fi - -# Write default charon ports in .env file if they are not set. 
-if grep -xq "CHARON_PORT_VALIDATOR_API=.*" ./.env; then - echo "CHARON_PORT_VALIDATOR_API already set, overwriting it with port $validator_port" - sed "s|CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ -else - sed "s|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ -fi - mv ${cluster_dir}/.env~ ${cluster_dir}/.env - -if grep -xq "CHARON_PORT_MONITORING=.*" ./.env; then - echo "CHARON_PORT_MONITORING already set, overwriting it with port $monitoring_port" - sed "s|CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ -else - sed "s|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ -fi -mv ${cluster_dir}/.env~ ${cluster_dir}/.env - -if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then - echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $p2p_port" - sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ -else - sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env~ -fi -mv ${cluster_dir}/.env~ ${cluster_dir}/.env - -# Create data dir -mkdir ${cluster_dir}/data - -# Copy prometheus files and data -cp -r ./prometheus ${cluster_dir}/ -if test -d ./data/prometheus; then - cp -r ./data/prometheus ${cluster_dir}/data/ -fi - -# Copy lodestar files -cp -r ./lodestar ${cluster_dir}/ - -# Add the EL + CL + MEV-boost network -sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ -mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml - -# Include the other services in the EL + CL + MEV-boost network -sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml 
> ${cluster_dir}/docker-compose.yml~ -mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml - -echo "Added new cluster $cluster_name with the following cluster-specific config:" -echo "CHARON_PORT_P2P_TCP: $p2p_port" -echo "CHARON_PORT_VALIDATOR_API: $validator_port" -echo "CHARON_PORT_MONITORING: $monitoring_port" diff --git a/multi_cluster/base.sh b/multi_cluster/base.sh new file mode 100755 index 00000000..d2ddebd1 --- /dev/null +++ b/multi_cluster/base.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +usage() { + echo "Usage: $0 [OPTIONS] COMMAND" + echo "" + echo " Manage the base ethereum node docker containers (EL, CL, MEV boost, Grafana), without interfering with any validator." + echo "" + echo "Commands:" + echo " start Start an ethereum node, MEV-boost and Grafana." + echo " stop Stop an ethereum node, MEV-boost and Grafana." + echo "" + echo "Options:" + echo " -h Display this help message." +} + +start() { + docker compose --profile base up -d +} + +stop() { + docker compose --profile base stop +} + +while getopts ":h" opt; do + case $opt in + h) + usage + exit 0 + ;; + \?) + usage + exit 1 + ;; + : ) + usage + exit 1 + ;; + esac +done + +shift $((OPTIND -1)) + +subcommand=$1; shift +case "$subcommand" in + # Parse options to the install sub command + start) + start + ;; + stop) + stop + ;; + * ) + usage + exit 1 + ;; + +esac diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh new file mode 100755 index 00000000..d4b77afa --- /dev/null +++ b/multi_cluster/cluster.sh @@ -0,0 +1,252 @@ +#!/bin/bash + +unset -v cluster_name + +usage() { + echo "Usage: $0 [OPTIONS] COMMAND" + echo "" + echo " Manage a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." + echo "" + echo "Commands:" + echo " add string Add a validator cluster to the ./clusters directory." + echo " delete string Delete a validator cluster from the ./clusters directory." 
+ echo " start string Start a validator cluster, found in the ./clusters directory." + echo " stop string Stop a validator cluster, found in the ./clusters directory." + echo "" + echo "Options:" + echo " -h Display this help message." +} + +# Check if cluster_name variable is set. +check_missing_cluster_name() { + if [ -z "$cluster_name" ]; then + echo 'Missing cluster name argument.' >&2 + exit 1 + fi +} + +# Check if ./clusters directory exists. +check_clusters_dir_does_not_exist() { + if test ! -d ./clusters; then + echo "./clsuters directory does not exist. Run setup.sh first." + exit 1 + fi +} + +# Check if cluster with the specified cluster_name already exists. +ckeck_cluster_already_exists() { + if test -d ./clusters/$cluster_name; then + echo "./clsuters/$cluster_name directory already exists." + exit 1 + fi +} + +# Check if cluster with the specified cluster_name does not exist. +ckeck_cluster_does_not_exist() { + if test ! -d ./clusters/$cluster_name; then + echo "./clsuters/$cluster_name directory does not exist." + exit 1 + fi +} + +# Add cluster to the ./clusters/{cluster_name} directory. +add() { + find_port() { + # Port number from which to start the search of free port. + port=$1 + # Env variable of a port, found in other clusters' .env, which value should be excluded (i.e.: CHARON_PORT_P2P_TCP). + # This is to prevent reusing same ports between clusters. + cluster_var=$2 + # Comma separated list of strings with ports to be explicitly excluded. + # This is to prevent reusing the same port from inside the script (i.e.: if find_port is called multiple times, to avoid resulting in the same port) + exclude=$3 + + is_occupied=1 + # run loop until is_occupied is empty + while [[ -n "$is_occupied" ]]; do + # Check if TCP port is free, if it is, is_occupied is set to empty, otherwise increment the port by 1 and continue the loop. 
+ if is_occupied=$(netstat -taln | grep $port); then + port=$(($port+1)) + continue + fi + # Check if TCP port is used by another cluster from the ./clusters directory. + for cluster in ./clusters/*; do + # Check if it is used by evaluating the variable we look for, passed by `cluster_var`. + p2p_cluster_port=$(. ./$cluster/.env; printf '%s' "${!cluster_var}") + # If the free port is the same as the port in the cluster, mark as occupied and break the loop. + if [ $port -eq $p2p_cluster_port ]; then + is_occupied=1 + break + fi + done + # If the port was occupied by any cluster, increment the port by 1 and continue the loop. + if [ ! -z "$is_occupied" ]; then + port=$(($port+1)) + continue + fi + + # Check if the port is not from the ports to be excluded. + for i in ${exclude//,/ } + do + # If the port matches the potentially excluded port, mark as occupied, increment by 1 and break the loop. + if [ $port -eq $i ]; then + is_occupied=1 + port=$(($port+1)) + break + fi + done + done + + # Echo the free port. + echo $port + } + + # Try to find free and unallocated to another cluster ports. + p2p_port="$(find_port "3610" "CHARON_PORT_P2P_TCP" "")" + validator_port="$(find_port "3600" "CHARON_PORT_VALIDATOR_API" "$p2p_port")" + monitoring_port="$(find_port "3620" "CHARON_PORT_MONITORING" "$p2p_port,$validator_port")" + + # Create dir for the cluster. + mkdir -p ./clusters/$cluster_name + cluster_dir=./clusters/$cluster_name + + # Copy .env from root dir to cluster's dir (if it exists). + if test ./.env; then + cp .env ${cluster_dir}/ + fi + + # Copy docker-compose.yml from root dir to cluster's dir (if it exists). + if test ./docker-compose.yml; then + cp ./docker-compose.yml ${cluster_dir}/ + fi + + # Write the found free ports in the .env file. 
+ if grep -xq "CHARON_PORT_VALIDATOR_API=.*" ./.env; then + echo "CHARON_PORT_VALIDATOR_API already set, overwriting it with port $validator_port" + sed "s|CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + else + sed "s|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + fi + mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env + + if grep -xq "CHARON_PORT_MONITORING=.*" ./.env; then + echo "CHARON_PORT_MONITORING already set, overwriting it with port $monitoring_port" + sed "s|CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + else + sed "s|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + fi + mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env + + if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then + echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $p2p_port" + sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + else + sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + fi + mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env + + # Create data dir. + mkdir ${cluster_dir}/data + + # Copy prometheus files and data. + cp -r ./prometheus ${cluster_dir}/ + if test -d ./data/prometheus; then + cp -r ./data/prometheus ${cluster_dir}/data/ + fi + + # Copy lodestar files. + cp -r ./lodestar ${cluster_dir}/ + + # Add the base network on which EL + CL + MEV-boost + Grafana run. 
+ sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml.tmp + mv ${cluster_dir}/docker-compose.yml.tmp ${cluster_dir}/docker-compose.yml + + # Include the base network in the cluster-specific services' network config. + sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml.tmp + mv ${cluster_dir}/docker-compose.yml.tmp ${cluster_dir}/docker-compose.yml + + echo "Added new cluster $cluster_name with the following cluster-specific config:" + echo "CHARON_PORT_P2P_TCP: $p2p_port" + echo "CHARON_PORT_VALIDATOR_API: $validator_port" + echo "CHARON_PORT_MONITORING: $monitoring_port" + echo "" + echo "You can start it by running $0 start $cluster_name" +} + +delete() { + read -r -p "Are you sure you want to delete the cluster? This will delete your private keys, which will be unrecoverable if you do not have backup! [y/N] " response + if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]] + then + rm -rf ./clusters/$cluster_name + echo "Delete cluster $cluster_name." + fi +} + +start() { + docker compose --profile cluster -f ./clusters/${cluster_name}/docker-compose.yml up -d + echo "Started cluster $cluster_name" + echo "You can stop it by running $0 stop $cluster_name" +} + +stop() { + docker compose --profile cluster -f ./clusters/${cluster_name}/docker-compose.yml down + echo "Stopped cluster $cluster_name" + echo "You can start it again by running $0 start $cluster_name" +} + +# Match global flags +while getopts ":h" opt; do + case $opt in + h) + usage + exit 0 + ;; + \?) # unknown flag + usage + exit 1 + ;; + esac +done + +# Capture the subcommand passed. +shift "$((OPTIND -1))" +subcommand=$1; shift +# Execute subcommand. 
+case "$subcommand" in + add) + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + ckeck_cluster_already_exists + add + exit 0 + ;; + delete) + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + ckeck_cluster_does_not_exist + delete + exit 0 + ;; + start) + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + ckeck_cluster_does_not_exist + start + exit 0 + ;; + stop) + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + ckeck_cluster_does_not_exist + stop + exit 0 + ;; + * ) + usage + exit 1 + ;; +esac diff --git a/multi_cluster/delete_cluster.sh b/multi_cluster/delete_cluster.sh deleted file mode 100755 index c6ab6226..00000000 --- a/multi_cluster/delete_cluster.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -unset -v cluster_name - -usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Delete a validator cluster (Charon + VC + Prometheus), from the ./clusters directory." - echo "" - echo "Options:" - echo " -h Display this help message." - echo " -c string [REQUIRED] Name of the cluster to be deleted." -} - -while getopts "hc:" opt; do - case $opt in - h) - usage - exit 0 - ;; - c) - cluster_name=${OPTARG} - ;; - \?) - usage - exit 1 - ;; - esac -done - -if [ -z "$cluster_name" ]; then - echo 'Missing flag -c, cluster name is mandatory.' >&2 - exit 1 -fi - -# Check if clusters dir already exists. -if test ! -d ./clusters; then - echo "./clsuters directory does not exist. Run setup.sh first." - exit 1 -fi - -# Check if clusters dir already exists. -if test ! -d ./clusters/$cluster_name; then - echo "./clsuters/$cluster_name directory does not exist. Make sure cluster $cluster_name is created." - exit 1 -fi - -read -r -p "Are you sure you want to delete the cluster? This will delete your private keys, which will be unrecoverable if you do not have backup! 
[y/N] " response -if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]] -then - rm -rf ./clusters/$cluster_name - echo "Cluster $cluster_name deleted." -fi diff --git a/multi_cluster/start_base.sh b/multi_cluster/start_base.sh deleted file mode 100755 index f9eb7229..00000000 --- a/multi_cluster/start_base.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Start the base docker containers (EL, CL, MEV boost, Grafana), without any validator." - echo "" - echo "Options:" - echo " -h Display this help message." -} - -while getopts "hc:" opt; do - case $opt in - h) - usage - exit 0 - ;; - \?) - usage - exit 1 - ;; - esac -done - -docker compose --profile base up -d diff --git a/multi_cluster/start_cluster.sh b/multi_cluster/start_cluster.sh deleted file mode 100755 index e850eda4..00000000 --- a/multi_cluster/start_cluster.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -unset -v cluster_name - -usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Start a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." - echo "" - echo "Options:" - echo " -h Display this help message." - echo " -c string [REQUIRED] Name of the cluster to be started." -} - -while getopts "hc:" opt; do - case $opt in - h) - usage - exit 0 - ;; - c) - cluster_name=${OPTARG} - ;; - \?) - usage - exit 1 - ;; - esac -done - -if [ -z "$cluster_name" ]; then - echo 'Missing flag -c, cluster name is mandatory.' >&2 - exit 1 -fi - -# Check if clusters dir already exists. -if test ! -d ./clusters; then - echo "./clsuters directory does not exist. Run setup.sh first." - exit 1 -fi - -if test ! -d ./clusters/$cluster_name; then - echo "./clsuters/$cluster_name directory does not exist. Run add-cluster.sh first." 
- exit 1 -fi - -cluster_dir=./clusters/${cluster_name} - -docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml up -d diff --git a/multi_cluster/stop_base.sh b/multi_cluster/stop_base.sh deleted file mode 100755 index 4470cb4c..00000000 --- a/multi_cluster/stop_base.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Stop the base docker containers (EL, CL, MEV boost, Grafana)." - echo "" - echo "Options:" - echo " -h Display this help message." -} - -while getopts "hc:" opt; do - case $opt in - h) - usage - exit 0 - ;; - \?) - usage - exit 1 - ;; - esac -done - -docker compose --profile base stop diff --git a/multi_cluster/stop_cluster.sh b/multi_cluster/stop_cluster.sh deleted file mode 100755 index c760e273..00000000 --- a/multi_cluster/stop_cluster.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -unset -v cluster_name - -usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Stop a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." - echo "" - echo "Options:" - echo " -h Display this help message." - echo " -c string [REQUIRED] Name of the cluster to be stopped." -} - -while getopts "hc:" opt; do - case $opt in - h) - usage - exit 0 - ;; - c) - cluster_name=${OPTARG} - ;; - \?) - usage - exit 1 - ;; - esac -done - -if [ -z "$cluster_name" ]; then - echo 'Missing flag -c, cluster name is mandatory.' >&2 - exit 1 -fi - -# Check if clusters dir already exists. -if test ! -d ./clusters; then - echo "./clsuters directory does not exist. Run setup.sh first." - exit 1 -fi - -if test ! -d ./clusters/$cluster_name; then - echo "./clsuters/$cluster_name directory does not exist. Run add-cluster.sh first." 
- exit 1 -fi - -cluster_dir=./clusters/${cluster_name} - -docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml down From 37c90c60a610e1c284d826259f3a11ef2eb10c64 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Sat, 23 Nov 2024 20:41:37 +0200 Subject: [PATCH 12/39] Improve setup.sh comments and give more feedback to user --- multi_cluster/setup.sh | 55 +++++++++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 19 deletions(-) diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index a4996af4..74d9b7cf 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -5,11 +5,11 @@ current_cluster_name=default usage() { echo "Usage: $0 [OPTIONS]" echo "" - echo "Create a multi-cluster setup from a traditional single cluster setup." + echo " Create a multi cluster setup from a traditional single cluster setup." echo "" echo "Options:" - echo " -h Display this help message." - echo " -c string Name of the current cluster. (default: \"default\")" + echo " -h Display this help message." + echo " -c string Name of the current cluster. (default: \"default\")" } while getopts "hc:" opt; do @@ -28,17 +28,22 @@ while getopts "hc:" opt; do esac done +if [ "$current_cluster_name" = "default" ]; then + echo "WARN: -c flag not specified. Using default cluster name 'default'." +fi + cluster_dir=./clusters/${current_cluster_name} -# Check if clusters dir already exists. +# Check if clusters directory already exists. if test -d ./clusters; then echo "./clsuters directory already exists. Cannot setup already set multi cluster CDVN." exit 1 fi -# Create cluster's dir. +# Create clusters directory. mkdir -p ${cluster_dir} +# Delete ./clusters dir if the script exits with non-zero code. cleanupClusterDir() { if [ "$1" != "0" ]; then rm -rf ./clusters @@ -46,17 +51,17 @@ cleanupClusterDir() { } trap 'cleanupClusterDir $?' EXIT -# Copy .charon folder to cluster's dir. +# Copy .charon folder to clusters directory (if it exists). 
if test -d ./.charon; then cp -r .charon ${cluster_dir}/ fi -# Copy .env file to cluster's dir. +# Copy .env file to clusters directory (if it exists). if test ./.env; then cp .env ${cluster_dir}/ fi -# Copy docker-compose.yml to cluster's dir. +# Copy docker-compose.yml to clusters directory (if it exists). if test ./docker-compose.yml; then cp ./docker-compose.yml ${cluster_dir}/ fi @@ -83,32 +88,44 @@ else mv ${cluster_dir}/.env~ ${cluster_dir}/.env fi -# Create data dir + # Create data dir. mkdir ${cluster_dir}/data -# Copy lodestar files and data +# Copy lodestar files and data. cp -r ./lodestar ${cluster_dir}/ if test -d ./data/lodestar; then cp -r ./data/lodestar ${cluster_dir}/data/ fi -# Copy prometheus files and data +# Copy prometheus files and data. cp -r ./prometheus ${cluster_dir}/ if test -d ./data/prometheus; then cp -r ./data/prometheus ${cluster_dir}/data/ fi -# Add the EL + CL + MEV-boost network +# Add the base network on which EL + CL + MEV-boost + Grafana run. sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml -# Include the other services in the EL + CL + MEV-boost network +# Include the base network in the cluster-specific services' network config. sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml -# Stop the cluster-related containers that are running in root directory (i.e.: charon, VC). -docker compose --profile cluster down -# Start the base containers in root directory (i.e.: EL, CL). -docker compose --profile base up -d -# Start the cluster-related containers in cluster-specific directory (i.e.: charon, VC). 
-docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml up -d +# If containers were already started, restart the cluster with the new setup. +if [[ $(docker compose ps -aq) ]]; then + echo "Restarting the cluster-specific containers from the new multi cluster directory ${cluster_dir}" + # Stop the cluster-specific containers that are running in root directory - Charon, Lodestar, Prometheus. + docker compose --profile cluster down + # Start the base containers in the root directory. + docker compose --profile base up -d + # Start the cluster-specific containers in cluster-specific directory (i.e.: charon, VC). + docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml up -d +fi + +echo "Multi cluster setup is complete." +echo "CDVN is divided in two:" +echo " 1. Ethereum node (EL + CL) and Grafana." +echo " 2. Multiple clusters, each consisting of Charon + Validator client + Prometheus." +echo "All existing cluster-specific files from the CDVN directory are copied to the first cluster in the multi cluster setup at ${cluster_dir}." +echo "Separate clusters can be managed using the cluster.sh script." +echo "Ethereum node (EL + CL) and Grafana can be managed using the base.sh script." From 6db85b519374569de3c2d8789775533039a836ad Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Sat, 23 Nov 2024 21:30:38 +0200 Subject: [PATCH 13/39] Add multi cluster support doc to readme --- README.md | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/README.md b/README.md index 75ea3bae..249bcc5a 100644 --- a/README.md +++ b/README.md @@ -43,3 +43,83 @@ docker compose -f examples/nethermind_teku_lighthouse.yml up # FAQs Check the Obol docs for frequent [errors and resolutions](https://docs.obol.tech/docs/faq/errors) + + +# Multi cluster setup + +There is an option to run multiple Charon clusters using the same Execution Client, Consensus Client and Grafana. 
This way you can operate multiple clusters for different purposes, without putting much more pressure on your system. + +## Setup + +If you already have running validator node in Docker, the Docker containers will be moved to the new multi cluster setup. + +```bash +./multi_cluster/setup.sh -c {YOUR_CLUSTER_NAME} +``` + +You can inspect what you have in the `./clusters/` directory. Each subfolder is a cluster with the following structure: + +```directory +clusters +└───{YOUR_CLUSTER_NAME} # cluster name +│ │ .charon # folder including secret material used by charon +│ │ data # data from the validator client and prometheus +│ │ lodestar # scripts used by lodestar +│ │ prometheus # scripts and configs used by prometheus +│ │ .env # environment variables used by the cluster +│ │ docker-compose.yml # docker compose used by the cluster +│ # N.B.: only services with profile "cluster" are ran +└───{YOUR_CLUSTER_NAME_2} +└───{YOUR_CLUSTER_NAME_...} +└───{YOUR_CLUSTER_NAME_N} +``` + +Note that those folders and files are copied from the root directory. Meaning all configurations and setup you have already done, will be copied to this first cluster of the multi cluster setup. + +## Manage cluster + +Manage the Charon + Validator Client + Prometheus containers of each cluster found in `./clusters/`. + +### Add cluster + +```bash +./multi_cluster/cluster.sh add {YOUR_CLUSTER_NAME} +``` + +Note that only the `.env`, `lodestar/`, `prometheus/` and `docker-compose.yml` files and directories are coiped from the root directory to the new cluster. `.charon/` and `data/` folders are expected to be from a brand new cluster that you will setup in the `./clusters/{YOUR_CLUSTER_NAME}` directory. + +### Start cluster + +It is expected that you have already done the regular procedure from cluster setup and you have `./clusters/{YOUR_CLUSTER_NAME}/.charon/` folder. 
+ +```bash +./multi_cluster/cluster.sh start {YOUR_CLUSTER_NAME} +``` + +### Stop cluster + +```bash +./multi_cluster/cluster.sh stop {YOUR_CLUSTER_NAME} +``` + +### Delete cluster + +```bash +./multi_cluster/cluster.sh delete {YOUR_CLUSTER_NAME} +``` + +## Manage base node + +Manage the EL + CL + Grafana containers. + +### Start base node + +```bash +./multi_cluster/base.sh start +``` + +### Stop base node + +```bash +./multi_cluster/base.sh stop +``` From d866aa8edd25581376f3c551c138c508f86816c1 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 18:57:31 +0200 Subject: [PATCH 14/39] Fix function name typo --- multi_cluster/cluster.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index d4b77afa..35563436 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -34,7 +34,7 @@ check_clusters_dir_does_not_exist() { } # Check if cluster with the specified cluster_name already exists. -ckeck_cluster_already_exists() { +check_cluster_already_exists() { if test -d ./clusters/$cluster_name; then echo "./clsuters/$cluster_name directory already exists." exit 1 @@ -42,7 +42,7 @@ ckeck_cluster_already_exists() { } # Check if cluster with the specified cluster_name does not exist. -ckeck_cluster_does_not_exist() { +check_cluster_does_not_exist() { if test ! -d ./clusters/$cluster_name; then echo "./clsuters/$cluster_name directory does not exist." 
exit 1 @@ -217,7 +217,7 @@ case "$subcommand" in cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist - ckeck_cluster_already_exists + check_cluster_already_exists add exit 0 ;; @@ -225,7 +225,7 @@ case "$subcommand" in cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist - ckeck_cluster_does_not_exist + check_cluster_does_not_exist delete exit 0 ;; @@ -233,7 +233,7 @@ case "$subcommand" in cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist - ckeck_cluster_does_not_exist + check_cluster_does_not_exist start exit 0 ;; @@ -241,7 +241,7 @@ case "$subcommand" in cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist - ckeck_cluster_does_not_exist + check_cluster_does_not_exist stop exit 0 ;; From bf1133a6cc841f1d563a6d7b9b638c43e7d5447a Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 18:58:36 +0200 Subject: [PATCH 15/39] Check if cluster was already existing and base feedback to user based on that --- multi_cluster/setup.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 74d9b7cf..59c6704b 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -1,6 +1,7 @@ #!/bin/bash current_cluster_name=default +cluster_already_set= usage() { echo "Usage: $0 [OPTIONS]" @@ -54,6 +55,7 @@ trap 'cleanupClusterDir $?' EXIT # Copy .charon folder to clusters directory (if it exists). if test -d ./.charon; then cp -r .charon ${cluster_dir}/ + cluster_already_set=1 fi # Copy .env file to clusters directory (if it exists). @@ -126,6 +128,12 @@ echo "Multi cluster setup is complete." echo "CDVN is divided in two:" echo " 1. Ethereum node (EL + CL) and Grafana." echo " 2. Multiple clusters, each consisting of Charon + Validator client + Prometheus." 
-echo "All existing cluster-specific files from the CDVN directory are copied to the first cluster in the multi cluster setup at ${cluster_dir}." +if [ -z ${cluster_already_set+x} ] ; then + echo "Existing cluster was not found. You can create your new cluster from ${cluster_dir}." +else + echo "All existing cluster-specific files from the CDVN directory are copied to the first cluster in the multi cluster setup at ${cluster_dir}." + echo "Those are the .charon folder, data/lodestar and data/prometheus." + echo "If you are using the multi cluster setup, you should refer to the configurations and data found in ${cluster_dir} from now on." +fi echo "Separate clusters can be managed using the cluster.sh script." echo "Ethereum node (EL + CL) and Grafana can be managed using the base.sh script." From ad9241eff0788d188187067aba91d6310fa43797 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 18:59:10 +0200 Subject: [PATCH 16/39] Decomission old cluster files --- multi_cluster/setup.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 59c6704b..9f781488 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -124,6 +124,29 @@ if [[ $(docker compose ps -aq) ]]; then docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml up -d fi +migrated_readme() { + cat > $1 << EOL +THIS DIRECTORY HAS BEEN MIGRATED TO $2. +YOU SHOULD REFER TO CONFIGURATIONS AND DATA IN $2. 
+EOL +} + +# Decomission cluster-specific directories and files +if test -d ./.charon; then + mv ./.charon ./.charon-migrated-to-multi + migrated_readme "./.charon-migrated-to-multi/README.md" "${cluster_dir}/.charon" +fi + +if test -d ./data/lodestar; then + mv ./data/lodestar ./data/lodestar-migrated-to-multi + migrated_readme "./data/lodestar-migrated-to-multi/README.md" "${cluster_dir}/data/lodestar" +fi + +if test -d ./data/prometheus; then + mv ./data/prometheus ./data/prometheus-migrated-to-multi + migrated_readme "./data/prometheus-migrated-to-multi/README.md" "${cluster_dir}/data/prometheus" +fi + echo "Multi cluster setup is complete." echo "CDVN is divided in two:" echo " 1. Ethereum node (EL + CL) and Grafana." From 5fc39aa3821be4969ab59c7c25f416dd1585223d Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 18:59:24 +0200 Subject: [PATCH 17/39] Check if Docker daemon is runing --- multi_cluster/setup.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 9f781488..379d8cb3 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -113,6 +113,11 @@ mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml +if ! docker info > /dev/null 2>&1; then + echo "Docker daemon is not running, please start Docker first." + exit 1 +fi + # If containers were already started, restart the cluster with the new setup. 
if [[ $(docker compose ps -aq) ]]; then echo "Restarting the cluster-specific containers from the new multi cluster directory ${cluster_dir}" From a53e0700211efabc24272fee193111b41309ce2d Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 18:59:41 +0200 Subject: [PATCH 18/39] Remove whitespace --- multi_cluster/setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 379d8cb3..4a94c196 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -90,7 +90,7 @@ else mv ${cluster_dir}/.env~ ${cluster_dir}/.env fi - # Create data dir. +# Create data dir. mkdir ${cluster_dir}/data # Copy lodestar files and data. From a20a01afdae7c1f53065d432083e6f7a289801d7 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 19:00:06 +0200 Subject: [PATCH 19/39] gitignore decomissioned .charon --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 13c11c94..ed6baffe 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,6 @@ cluster-lock.json .DS_Store data/ .idea -.charon +.charon* prometheus/prometheus.yml clusters/ \ No newline at end of file From 2619b06b5115057d02c928cc859fe3fc6e2d1345 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 19:03:00 +0200 Subject: [PATCH 20/39] Revert "Add customisable validator API port and monitoring port" This reverts commit 9b901f6a476aec96f766f36765b0abe481455805. --- .env.sample.holesky | 9 +++------ .env.sample.mainnet | 9 +++------ docker-compose.yml | 8 ++++---- prometheus/prometheus.yml.example | 2 +- 4 files changed, 11 insertions(+), 17 deletions(-) diff --git a/.env.sample.holesky b/.env.sample.holesky index 3450e960..fad6b4a3 100644 --- a/.env.sample.holesky +++ b/.env.sample.holesky @@ -72,15 +72,12 @@ LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://checkpoint-sync.holesky.ethpandaops.io/ # Loki log aggregation server addresses. 
Disable loki log aggregation by setting an empty address. #CHARON_LOKI_ADDRESSES= +# Docker network of running charon node. See `docker network ls`. +#CHARON_DOCKER_NETWORK= + # Charon host exposed ports #CHARON_PORT_P2P_TCP= -# Charon validator API port -#CHARON_PORT_VALIDATOR_API= - -# Charon monitoring port -#CHARON_PORT_MONITORING= - ######### MEV-Boost Config ######### # MEV-Boost docker container image version, e.g. `latest` or `1.7.0`. diff --git a/.env.sample.mainnet b/.env.sample.mainnet index efcb8a30..74f86d2a 100644 --- a/.env.sample.mainnet +++ b/.env.sample.mainnet @@ -72,15 +72,12 @@ LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://mainnet.checkpoint.sigp.io/ # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address. #CHARON_LOKI_ADDRESSES= +# Docker network of running charon node. See `docker network ls`. +#CHARON_DOCKER_NETWORK= + # Charon host exposed ports #CHARON_PORT_P2P_TCP= -# Charon validator API port -#CHARON_PORT_VALIDATOR_API= - -# Charon monitoring port -#CHARON_PORT_MONITORING= - ######### MEV-Boost Config ######### # MEV-Boost docker container image version, e.g. `latest` or `1.7.0`. diff --git a/docker-compose.yml b/docker-compose.yml index 7d9b4922..09c0f012 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -88,8 +88,8 @@ services: CHARON_P2P_RELAYS: ${CHARON_P2P_RELAYS:-https://0.relay.obol.tech,https://1.relay.obol.tech/} CHARON_P2P_EXTERNAL_HOSTNAME: ${CHARON_P2P_EXTERNAL_HOSTNAME:-} # Empty default required to avoid warnings. 
CHARON_P2P_TCP_ADDRESS: 0.0.0.0:${CHARON_PORT_P2P_TCP:-3610} - CHARON_VALIDATOR_API_ADDRESS: 0.0.0.0:${CHARON_PORT_VALIDATOR_API:-3600} - CHARON_MONITORING_ADDRESS: 0.0.0.0:${CHARON_PORT_MONITORING:-3620} + CHARON_VALIDATOR_API_ADDRESS: 0.0.0.0:3600 + CHARON_MONITORING_ADDRESS: 0.0.0.0:3620 CHARON_BUILDER_API: ${BUILDER_API_ENABLED:-false} CHARON_FEATURE_SET_ENABLE: ${CHARON_FEATURE_SET_ENABLE:-} CHARON_LOKI_ADDRESSES: ${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push} @@ -101,7 +101,7 @@ services: - .charon:/opt/charon/.charon restart: unless-stopped healthcheck: - test: wget -qO- http://localhost:${CHARON_PORT_MONITORING:-3620}/readyz + test: wget -qO- http://localhost:3620/readyz # _ _ _ # | | ___ __| | ___ ___| |_ __ _ _ __ @@ -116,7 +116,7 @@ services: entrypoint: /opt/lodestar/run.sh networks: [dvnode] environment: - BEACON_NODE_ADDRESS: http://charon:${CHARON_PORT_VALIDATOR_API:-3600} + BEACON_NODE_ADDRESS: http://charon:3600 NETWORK: ${NETWORK} BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-false} BUILDER_SELECTION: ${BUILDER_SELECTION:-builderalways} diff --git a/prometheus/prometheus.yml.example b/prometheus/prometheus.yml.example index 108878b0..5dee97f5 100644 --- a/prometheus/prometheus.yml.example +++ b/prometheus/prometheus.yml.example @@ -22,7 +22,7 @@ scrape_configs: - targets: ["lighthouse:5054"] - job_name: "charon" static_configs: - - targets: ["charon:${CHARON_PORT_MONITORING:-3620}"] + - targets: ["charon:3620"] - job_name: "lodestar" static_configs: - targets: ["lodestar:5064"] From d79c38d994d3b0865df5dff534be31234659365e Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 19:06:26 +0200 Subject: [PATCH 21/39] Remove validator and monitoring ports from add cluster script --- multi_cluster/cluster.sh | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 35563436..e6be75b5 100755 --- a/multi_cluster/cluster.sh +++ 
b/multi_cluster/cluster.sh @@ -54,12 +54,9 @@ add() { find_port() { # Port number from which to start the search of free port. port=$1 - # Env variable of a port, found in other clusters' .env, which value should be excluded (i.e.: CHARON_PORT_P2P_TCP). - # This is to prevent reusing same ports between clusters. - cluster_var=$2 # Comma separated list of strings with ports to be explicitly excluded. # This is to prevent reusing the same port from inside the script (i.e.: if find_port is called multiple times, to avoid resulting in the same port) - exclude=$3 + exclude=$2 is_occupied=1 # run loop until is_occupied is empty @@ -71,8 +68,8 @@ add() { fi # Check if TCP port is used by another cluster from the ./clusters directory. for cluster in ./clusters/*; do - # Check if it is used by evaluating the variable we look for, passed by `cluster_var`. - p2p_cluster_port=$(. ./$cluster/.env; printf '%s' "${!cluster_var}") + # Check if it is used by the p2p TCP port of this cluster. + p2p_cluster_port=$(. ./$cluster/.env; printf '%s' "${CHARON_PORT_P2P_TCP}") # If the free port is the same as the port in the cluster, mark as occupied and break the loop. if [ $port -eq $p2p_cluster_port ]; then is_occupied=1 @@ -103,8 +100,6 @@ add() { # Try to find free and unallocated to another cluster ports. p2p_port="$(find_port "3610" "CHARON_PORT_P2P_TCP" "")" - validator_port="$(find_port "3600" "CHARON_PORT_VALIDATOR_API" "$p2p_port")" - monitoring_port="$(find_port "3620" "CHARON_PORT_MONITORING" "$p2p_port,$validator_port")" # Create dir for the cluster. mkdir -p ./clusters/$cluster_name @@ -120,23 +115,7 @@ add() { cp ./docker-compose.yml ${cluster_dir}/ fi - # Write the found free ports in the .env file. 
- if grep -xq "CHARON_PORT_VALIDATOR_API=.*" ./.env; then - echo "CHARON_PORT_VALIDATOR_API already set, overwriting it with port $validator_port" - sed "s|CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp - else - sed "s|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=$validator_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp - fi - mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env - - if grep -xq "CHARON_PORT_MONITORING=.*" ./.env; then - echo "CHARON_PORT_MONITORING already set, overwriting it with port $monitoring_port" - sed "s|CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp - else - sed "s|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=$monitoring_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp - fi - mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env - + # Write the found free port in the .env file. if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $p2p_port" sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp @@ -167,8 +146,6 @@ add() { echo "Added new cluster $cluster_name with the following cluster-specific config:" echo "CHARON_PORT_P2P_TCP: $p2p_port" - echo "CHARON_PORT_VALIDATOR_API: $validator_port" - echo "CHARON_PORT_MONITORING: $monitoring_port" echo "" echo "You can start it by running $0 start $cluster_name" } From 3aa69758bb61c4e60da9f806cece499f73009ba7 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 19:14:35 +0200 Subject: [PATCH 22/39] Simplify port find function --- multi_cluster/cluster.sh | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index e6be75b5..67bbaa5c 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -54,9 +54,6 @@ add() { find_port() { # Port number 
from which to start the search of free port. port=$1 - # Comma separated list of strings with ports to be explicitly excluded. - # This is to prevent reusing the same port from inside the script (i.e.: if find_port is called multiple times, to avoid resulting in the same port) - exclude=$2 is_occupied=1 # run loop until is_occupied is empty @@ -82,16 +79,6 @@ add() { continue fi - # Check if the port is not from the ports to be excluded. - for i in ${exclude//,/ } - do - # If the port matches the potentially excluded port, mark as occupied, increment by 1 and break the loop. - if [ $port -eq $i ]; then - is_occupied=1 - port=$(($port+1)) - break - fi - done done # Echo the free port. @@ -99,7 +86,7 @@ add() { } # Try to find free and unallocated to another cluster ports. - p2p_port="$(find_port "3610" "CHARON_PORT_P2P_TCP" "")" + p2p_port="$(find_port "3610")" # Create dir for the cluster. mkdir -p ./clusters/$cluster_name From 0755f8053a7c5e8578030d15f6ed5e0ee014f761 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 25 Nov 2024 21:23:20 +0200 Subject: [PATCH 23/39] Check ports from base network --- multi_cluster/cluster.sh | 57 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 67bbaa5c..9ced73a8 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -79,6 +79,63 @@ add() { continue fi + # Check if TCP port is used by the base. + + # Fetch the NETHERMIND_PORT_P2P from the base .env file. + nethermind_p2p_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_P2P}") + # If the NETHERMIND_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$nethermind_p2p_port" ]; then + if [ "$port" -eq "30303" ]; then + port=$(($port+1)) + continue + fi + # If the NETHERMIND_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. 
+ elif [ $port -eq $nethermind_p2p_port ]; then + port=$(($port+1)) + continue + fi + + # Fetch the NETHERMIND_PORT_HTTP from the base .env file. + nethermind_http_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_HTTP}") + # If the NETHERMIND_PORT_HTTP is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$nethermind_http_port" ]; then + if [ "$port" -eq "8545" ]; then + port=$(($port+1)) + continue + fi + # If the NETHERMIND_PORT_HTTP is set and the free port is the same, increment the port by 1 and continue the loop. + elif [ $port -eq $nethermind_http_port ]; then + port=$(($port+1)) + continue + fi + + # Fetch the NETHERMIND_PORT_ENGINE from the base .env file. + nethermind_engine_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_ENGINE}") + # If the NETHERMIND_PORT_ENGINE is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$nethermind_engine_port" ]; then + if [ "$port" -eq "8551" ]; then + port=$(($port+1)) + continue + fi + # If the NETHERMIND_PORT_ENGINE is set and the free port is the same, increment the port by 1 and continue the loop. + elif [ $port -eq $nethermind_engine_port ]; then + port=$(($port+1)) + continue + fi + + # Fetch the LIGHTHOUSE_PORT_P2P from the base .env file. + lighthouse_p2p_port=$(. ./.env; printf '%s' "${LIGHTHOUSE_PORT_P2P}") + # If the LIGHTHOUSE_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$lighthouse_p2p_port" ]; then + if [ "$port" -eq "9000" ]; then + port=$(($port+1)) + continue + fi + # If the LIGHTHOUSE_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. + elif [ $port -eq $lighthouse_p2p_port ]; then + port=$(($port+1)) + continue + fi done # Echo the free port. 
From 8c9b56c868105d6a8353a6bb6478c40c716f365c Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Tue, 3 Dec 2024 13:30:45 +0200 Subject: [PATCH 24/39] Revert "Enable by default MEVBOOST_RELAYS in holesky" This reverts commit 1ed42356f4dc8fc8b2a94e98422ab94a08dda04e. --- .env.sample.holesky | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.env.sample.holesky b/.env.sample.holesky index fad6b4a3..4a2ce1a7 100644 --- a/.env.sample.holesky +++ b/.env.sample.holesky @@ -88,7 +88,7 @@ LIGHTHOUSE_CHECKPOINT_SYNC_URL=https://checkpoint-sync.holesky.ethpandaops.io/ # Comma separated list of MEV-Boost relays. # You can choose public relays from https://enchanted-direction-844.notion.site/6d369eb33f664487800b0dedfe32171e?v=d255247c822c409f99c498aeb6a4e51d. -MEVBOOST_RELAYS=https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live,https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz +#MEVBOOST_RELAYS=https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live,https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz ######### Monitoring Config ######### From 2212cf48cddce59c9c75727f3e99762fb5722283 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Tue, 3 Dec 2024 13:31:45 +0200 Subject: [PATCH 25/39] Update readme with longer intro --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 249bcc5a..1f40355c 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,9 @@ 
Check the Obol docs for frequent [errors and resolutions](https://docs.obol.tech # Multi cluster setup -There is an option to run multiple Charon clusters using the same Execution Client, Consensus Client and Grafana. This way you can operate multiple clusters for different purposes, without putting much more pressure on your system. +There is an option to run multiple Charon clusters using the same Execution Layer Client (EL), Consensus Layer Client (CL) and Grafana. This way you can operate multiple clusters for different purposes, without putting much more pressure on your system. + +The way this is achieved is by separating the EL, CL and Grafana from the Charon node, Validator Client (VC) and Prometheus. Instead of having `.charon/` folder in the root directory it is moved to `clusters/{CLUSTER_NAME}/.charon`. Moreover, the VC and Prometheus data is now per cluster as well, moved from `data/lodestar` and `data/prometheus` to `clusters/{CLUSTER_NAME}/data/lodestar` and `clusters/{CLUSTER_NAME}/data/prometheus`, respectively. `docker-compose.yml` and `.env` are also used per cluster. There are also supporting scripts for the Charon node and the VC. ## Setup From 2678c0717240287cbe6b8083bcf538af761cee9e Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Thu, 5 Dec 2024 18:19:37 +0200 Subject: [PATCH 26/39] Check for file ownership --- multi_cluster/setup.sh | 66 +++++++++++++++++++++++++++++++++++------- 1 file changed, 56 insertions(+), 10 deletions(-) diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 4a94c196..01856edd 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -54,18 +54,36 @@ trap 'cleanupClusterDir $?' EXIT # Copy .charon folder to clusters directory (if it exists). 
if test -d ./.charon; then - cp -r .charon ${cluster_dir}/ - cluster_already_set=1 + owner="$(ls -ld ".charon" | awk '{print $3}')" + if [ "x${owner}" = "x${USER}" ]; then + cp -r .charon ${cluster_dir}/ + cluster_already_set=1 + else + echo "current user ${USER} is not owner of .charon/" + exit 1 + fi fi # Copy .env file to clusters directory (if it exists). if test ./.env; then - cp .env ${cluster_dir}/ + owner="$(ls -ld ".env" | awk '{print $3}')" + if [ "x${owner}" = "x${USER}" ]; then + cp .env ${cluster_dir}/ + else + echo "current user ${USER} is not owner of .env" + exit 1 + fi fi # Copy docker-compose.yml to clusters directory (if it exists). if test ./docker-compose.yml; then - cp ./docker-compose.yml ${cluster_dir}/ + owner="$(ls -ld "docker-compose.yml" | awk '{print $3}')" + if [ "x${owner}" = "x${USER}" ]; then + cp ./docker-compose.yml ${cluster_dir}/ + else + echo "current user ${USER} is not owner of docker-compose.yml" + exit 1 + fi fi # Write default charon ports in .env file if they are not set. @@ -93,16 +111,44 @@ fi # Create data dir. mkdir ${cluster_dir}/data -# Copy lodestar files and data. -cp -r ./lodestar ${cluster_dir}/ +# Copy lodestar files. +owner="$(ls -ld "lodestar" | awk '{print $3}')" +if [ "x${owner}" = "x${USER}" ]; then + cp -r ./lodestar ${cluster_dir}/ +else + echo "current user ${USER} is not owner of lodestar/" + exit 1 +fi + +# Copy lodestar data, if it exists. if test -d ./data/lodestar; then - cp -r ./data/lodestar ${cluster_dir}/data/ + owner="$(ls -ld "data/lodestar" | awk '{print $3}')" + if [ "x${owner}" = "x${USER}" ]; then + cp -r ./data/lodestar ${cluster_dir}/data/ + else + echo "current user ${USER} is not owner of data/lodestar/" + exit 1 + fi +fi + +# Copy prometheus files. +owner="$(ls -ld "prometheus" | awk '{print $3}')" +if [ "x${owner}" = "x${USER}" ]; then + cp -r ./prometheus ${cluster_dir}/ +else + echo "current user ${USER} is not owner of prometheus/" + exit 1 fi -# Copy prometheus files and data. 
-cp -r ./prometheus ${cluster_dir}/ +# Copy prometheus data, if it exists. if test -d ./data/prometheus; then - cp -r ./data/prometheus ${cluster_dir}/data/ + owner="$(ls -ld "data/prometheus" | awk '{print $3}')" + if [ "x${owner}" = "x${USER}" ]; then + cp -r ./data/prometheus ${cluster_dir}/data/ + else + echo "current user ${USER} is not owner of data/prometheus/" + exit 1 + fi fi # Add the base network on which EL + CL + MEV-boost + Grafana run. From 95558dd947935a42563278eeddd8c88853036888 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 6 Dec 2024 12:13:20 +0200 Subject: [PATCH 27/39] Add ss alongside netstat and option for skipping port check --- multi_cluster/cluster.sh | 150 +++++++++++++++++++++------------------ 1 file changed, 79 insertions(+), 71 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 9ced73a8..7805581d 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -1,6 +1,7 @@ #!/bin/bash unset -v cluster_name +skip_port_free_check= usage() { echo "Usage: $0 [OPTIONS] COMMAND" @@ -51,99 +52,106 @@ check_cluster_does_not_exist() { # Add cluster to the ./clusters/{cluster_name} directory. add() { - find_port() { - # Port number from which to start the search of free port. - port=$1 + # Port number from which to start the search of free port. + port=3610 - is_occupied=1 - # run loop until is_occupied is empty - while [[ -n "$is_occupied" ]]; do - # Check if TCP port is free, if it is, is_occupied is set to empty, otherwise increment the port by 1 and continue the loop. - if is_occupied=$(netstat -taln | grep $port); then - port=$(($port+1)) - continue - fi - # Check if TCP port is used by another cluster from the ./clusters directory. - for cluster in ./clusters/*; do - # Check if it is used by the p2p TCP port of this cluster. - p2p_cluster_port=$(. 
./$cluster/.env; printf '%s' "${CHARON_PORT_P2P_TCP}") - # If the free port is the same as the port in the cluster, mark as occupied and break the loop. - if [ $port -eq $p2p_cluster_port ]; then - is_occupied=1 - break + is_occupied=1 + # run loop until is_occupied is empty + while [[ -n "$is_occupied" ]]; do + # Check if TCP port is free, if it is, is_occupied is set to empty, otherwise increment the port by 1 and continue the loop. + if [ ! -z ${skip_port_free_check+x} ] ; then + if [ -x "$(command -v netstat)" ]; then + if is_occupied=$(netstat -taln | grep $port); then + port=$(($port+1)) + continue fi - done - # If the port was occupied by any cluster, increment the port by 1 and continue the loop. - if [ ! -z "$is_occupied" ]; then - port=$(($port+1)) - continue - fi - - # Check if TCP port is used by the base. - - # Fetch the NETHERMIND_PORT_P2P from the base .env file. - nethermind_p2p_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_P2P}") - # If the NETHERMIND_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. - if [ -z "$nethermind_p2p_port" ]; then - if [ "$port" -eq "30303" ]; then + elif [ -x "$(command -v ss)" ]; then + if is_occupied=$(ss -taln | grep $port); then port=$(($port+1)) continue fi - # If the NETHERMIND_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $nethermind_p2p_port ]; then + else + echo "Neither netstat or ss commands found. Please install either of those to check for free ports or add the -p flag to skip port check." + exit 1 + fi + fi + # Check if TCP port is used by another cluster from the ./clusters directory. + for cluster in ./clusters/*; do + # Check if it is used by the p2p TCP port of this cluster. + p2p_cluster_port=$(. ./$cluster/.env; printf '%s' "${CHARON_PORT_P2P_TCP}") + # If the free port is the same as the port in the cluster, mark as occupied and break the loop. 
+ if [ $port -eq $p2p_cluster_port ]; then + is_occupied=1 + break + fi + done + # If the port was occupied by any cluster, increment the port by 1 and continue the loop. + if [ ! -z "$is_occupied" ]; then + port=$(($port+1)) + continue + fi + + # Check if TCP port is used by the base. + + # Fetch the NETHERMIND_PORT_P2P from the base .env file. + nethermind_p2p_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_P2P}") + # If the NETHERMIND_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$nethermind_p2p_port" ]; then + if [ "$port" -eq "30303" ]; then port=$(($port+1)) continue fi + # If the NETHERMIND_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. + elif [ $port -eq $nethermind_p2p_port ]; then + port=$(($port+1)) + continue + fi - # Fetch the NETHERMIND_PORT_HTTP from the base .env file. - nethermind_http_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_HTTP}") - # If the NETHERMIND_PORT_HTTP is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. - if [ -z "$nethermind_http_port" ]; then - if [ "$port" -eq "8545" ]; then - port=$(($port+1)) - continue - fi - # If the NETHERMIND_PORT_HTTP is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $nethermind_http_port ]; then + # Fetch the NETHERMIND_PORT_HTTP from the base .env file. + nethermind_http_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_HTTP}") + # If the NETHERMIND_PORT_HTTP is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$nethermind_http_port" ]; then + if [ "$port" -eq "8545" ]; then port=$(($port+1)) continue fi + # If the NETHERMIND_PORT_HTTP is set and the free port is the same, increment the port by 1 and continue the loop. 
+ elif [ $port -eq $nethermind_http_port ]; then + port=$(($port+1)) + continue + fi - # Fetch the NETHERMIND_PORT_ENGINE from the base .env file. - nethermind_engine_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_ENGINE}") - # If the NETHERMIND_PORT_ENGINE is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. - if [ -z "$nethermind_engine_port" ]; then - if [ "$port" -eq "8551" ]; then - port=$(($port+1)) - continue - fi - # If the NETHERMIND_PORT_ENGINE is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $nethermind_engine_port ]; then + # Fetch the NETHERMIND_PORT_ENGINE from the base .env file. + nethermind_engine_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_ENGINE}") + # If the NETHERMIND_PORT_ENGINE is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$nethermind_engine_port" ]; then + if [ "$port" -eq "8551" ]; then port=$(($port+1)) continue fi + # If the NETHERMIND_PORT_ENGINE is set and the free port is the same, increment the port by 1 and continue the loop. + elif [ $port -eq $nethermind_engine_port ]; then + port=$(($port+1)) + continue + fi - # Fetch the LIGHTHOUSE_PORT_P2P from the base .env file. - lighthouse_p2p_port=$(. ./.env; printf '%s' "${LIGHTHOUSE_PORT_P2P}") - # If the LIGHTHOUSE_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. - if [ -z "$lighthouse_p2p_port" ]; then - if [ "$port" -eq "9000" ]; then - port=$(($port+1)) - continue - fi - # If the LIGHTHOUSE_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $lighthouse_p2p_port ]; then + # Fetch the LIGHTHOUSE_PORT_P2P from the base .env file. + lighthouse_p2p_port=$(. 
./.env; printf '%s' "${LIGHTHOUSE_PORT_P2P}") + # If the LIGHTHOUSE_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. + if [ -z "$lighthouse_p2p_port" ]; then + if [ "$port" -eq "9000" ]; then port=$(($port+1)) continue fi - done - - # Echo the free port. - echo $port - } + # If the LIGHTHOUSE_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. + elif [ $port -eq $lighthouse_p2p_port ]; then + port=$(($port+1)) + continue + fi + done # Try to find free and unallocated to another cluster ports. - p2p_port="$(find_port "3610")" + p2p_port=$port # Create dir for the cluster. mkdir -p ./clusters/$cluster_name From ac53693d0931a661e1a0d3fb90dff9c63bff3789 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 6 Dec 2024 14:45:08 +0200 Subject: [PATCH 28/39] Add options to each command in cluster.sh; add skip port check and default port overriding --- multi_cluster/cluster.sh | 127 ++++++++++++++++++++++++++++++++++----- 1 file changed, 113 insertions(+), 14 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 7805581d..1a32eb7d 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -2,8 +2,9 @@ unset -v cluster_name skip_port_free_check= +p2p_default_port=3610 -usage() { +usage_base() { echo "Usage: $0 [OPTIONS] COMMAND" echo "" echo " Manage a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." @@ -18,6 +19,44 @@ usage() { echo " -h Display this help message." } +usage_add() { + echo "Usage: $0 add [OPTIONS] NAME" + echo "" + echo " Add a new cluster with specified name." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -s Skip free port checking with netstat/ss." + echo " -p integer Override the default port (3610) from which to start the search of a free port." 
+} + +usage_delete() { + echo "Usage: $0 delete [OPTIONS] NAME" + echo "" + echo " Delete an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." + echo "" + echo "Options:" + echo " -h Display this help message." +} + +usage_start() { + echo "Usage: $0 start [OPTIONS] NAME" + echo "" + echo " Start an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." + echo "" + echo "Options:" + echo " -h Display this help message." +} + +usage_stop() { + echo "Usage: $0 stop [OPTIONS] NAME" + echo "" + echo " Stop an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." + echo "" + echo "Options:" + echo " -h Display this help message." +} + # Check if cluster_name variable is set. check_missing_cluster_name() { if [ -z "$cluster_name" ]; then @@ -52,14 +91,15 @@ check_cluster_does_not_exist() { # Add cluster to the ./clusters/{cluster_name} directory. add() { - # Port number from which to start the search of free port. - port=3610 + # Try to find free and unallocated to another cluster ports. + # Port number from which to start the search of free port, default is 3610. + port=$p2p_default_port is_occupied=1 - # run loop until is_occupied is empty + # Run loop until is_occupied is empty. while [[ -n "$is_occupied" ]]; do # Check if TCP port is free, if it is, is_occupied is set to empty, otherwise increment the port by 1 and continue the loop. - if [ ! -z ${skip_port_free_check+x} ] ; then + if [ -z ${skip_port_free_check} ] ; then if [ -x "$(command -v netstat)" ]; then if is_occupied=$(netstat -taln | grep $port); then port=$(($port+1)) @@ -74,6 +114,9 @@ add() { echo "Neither netstat or ss commands found. Please install either of those to check for free ports or add the -p flag to skip port check." exit 1 fi + else + # Assume port is not occupied if no netstat/ss check. + is_occupied= fi # Check if TCP port is used by another cluster from the ./clusters directory. 
for cluster in ./clusters/*; do @@ -150,8 +193,6 @@ add() { fi done - # Try to find free and unallocated to another cluster ports. - p2p_port=$port # Create dir for the cluster. mkdir -p ./clusters/$cluster_name @@ -169,10 +210,10 @@ add() { # Write the found free port in the .env file. if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then - echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $p2p_port" - sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $port" + sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp else - sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$p2p_port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp fi mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env @@ -197,7 +238,7 @@ add() { mv ${cluster_dir}/docker-compose.yml.tmp ${cluster_dir}/docker-compose.yml echo "Added new cluster $cluster_name with the following cluster-specific config:" - echo "CHARON_PORT_P2P_TCP: $p2p_port" + echo "CHARON_PORT_P2P_TCP: $port" echo "" echo "You can start it by running $0 start $cluster_name" } @@ -227,11 +268,11 @@ stop() { while getopts ":h" opt; do case $opt in h) - usage + usage_base exit 0 ;; \?) # unknown flag - usage + usage_base exit 1 ;; esac @@ -243,6 +284,25 @@ subcommand=$1; shift # Execute subcommand. case "$subcommand" in add) + while getopts ":hsp:" opt; do + case $opt in + h) + usage_add + exit 0 + ;; + s ) + skip_port_free_check=true + ;; + p ) + p2p_default_port=${OPTARG}; + ;; + ? 
) # Invalid option + usage_add + exit 1 + ;; + esac + done + shift "$((OPTIND -1))" cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist @@ -251,6 +311,19 @@ case "$subcommand" in exit 0 ;; delete) + while getopts ":h" opt; do + case $opt in + h) + usage_delete + exit 0 + ;; + ? ) # Invalid option + usage_delete + exit 1 + ;; + esac + done + shift $((OPTIND-1)) cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist @@ -259,6 +332,19 @@ case "$subcommand" in exit 0 ;; start) + while getopts ":h" opt; do + case $opt in + h) + usage_start + exit 0 + ;; + ? ) # Invalid option + usage_start + exit 1 + ;; + esac + done + shift $((OPTIND-1)) cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist @@ -267,6 +353,19 @@ case "$subcommand" in exit 0 ;; stop) + while getopts ":h" opt; do + case $opt in + h) + usage_stop + exit 0 + ;; + ? ) # Invalid option + usage_stop + exit 1 + ;; + esac + done + shift $((OPTIND-1)) cluster_name=$1 check_missing_cluster_name check_clusters_dir_does_not_exist @@ -275,7 +374,7 @@ case "$subcommand" in exit 0 ;; * ) - usage + usage_base exit 1 ;; esac From 9dafe289a4a21deb053081541856a58f7bf3d64b Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 6 Dec 2024 15:17:24 +0200 Subject: [PATCH 29/39] Add examples in the usage --- multi_cluster/cluster.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 1a32eb7d..8adb3900 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -28,6 +28,11 @@ usage_add() { echo " -h Display this help message." echo " -s Skip free port checking with netstat/ss." echo " -p integer Override the default port (3610) from which to start the search of a free port." 
+ echo "" + echo "Example:" + echo " $0 add second-cluster" + echo " $0 add -s third-cluster-without-free-port-check" + echo " $0 add -p 3615 fourth-cluster-with-custom-port" } usage_delete() { @@ -37,6 +42,9 @@ usage_delete() { echo "" echo "Options:" echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 delete my-second-cluster" } usage_start() { @@ -46,6 +54,9 @@ usage_start() { echo "" echo "Options:" echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 start my-second-cluster" } usage_stop() { @@ -55,6 +66,9 @@ usage_stop() { echo "" echo "Options:" echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 stop my-second-cluster" } # Check if cluster_name variable is set. From 623158a757b8c05608ca1c3c6bf1eba3b124d3e2 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 6 Dec 2024 15:23:58 +0200 Subject: [PATCH 30/39] Add more help messages in base.sh --- multi_cluster/base.sh | 52 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/multi_cluster/base.sh b/multi_cluster/base.sh index d2ddebd1..0dc0efb8 100755 --- a/multi_cluster/base.sh +++ b/multi_cluster/base.sh @@ -3,7 +3,7 @@ usage() { echo "Usage: $0 [OPTIONS] COMMAND" echo "" - echo " Manage the base ethereum node docker containers (EL, CL, MEV boost, Grafana), without interfering with any validator." + echo " Manage the base ethereum node (EL, CL, MEV boost, Grafana), without interfering with any validator." echo "" echo "Commands:" echo " start Start an ethereum node, MEV-boost and Grafana." @@ -13,6 +13,30 @@ usage() { echo " -h Display this help message." } +usage_start() { + echo "Usage: $0 start [OPTIONS]" + echo "" + echo " Start the base ethereum node." + echo "" + echo "Options:" + echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 start" +} + +usage_stop() { + echo "Usage: $0 stop [OPTIONS]" + echo "" + echo " Stop the base ethereum node." 
+ echo "" + echo "Options:" + echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 stop" +} + start() { docker compose --profile base up -d } @@ -44,9 +68,35 @@ subcommand=$1; shift case "$subcommand" in # Parse options to the install sub command start) + while getopts ":h" opt; do + case $opt in + h) + usage_start + exit 0 + ;; + ? ) # Invalid option + usage_start + exit 1 + ;; + esac + done + shift $((OPTIND-1)) start ;; stop) + while getopts ":h" opt; do + case $opt in + h) + usage_stop + exit 0 + ;; + ? ) # Invalid option + usage_stop + exit 1 + ;; + esac + done + shift $((OPTIND-1)) stop ;; * ) From 0edb5793ed84e42804d681fa0af3c0da9a64fcf7 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 6 Dec 2024 15:27:54 +0200 Subject: [PATCH 31/39] Formatting --- multi_cluster/base.sh | 141 +++++++-------- multi_cluster/cluster.sh | 366 ++++++++++++++++++++------------------- multi_cluster/setup.sh | 46 ++--- 3 files changed, 284 insertions(+), 269 deletions(-) diff --git a/multi_cluster/base.sh b/multi_cluster/base.sh index 0dc0efb8..bec085c5 100755 --- a/multi_cluster/base.sh +++ b/multi_cluster/base.sh @@ -1,40 +1,40 @@ #!/bin/bash usage() { - echo "Usage: $0 [OPTIONS] COMMAND" - echo "" - echo " Manage the base ethereum node (EL, CL, MEV boost, Grafana), without interfering with any validator." - echo "" - echo "Commands:" - echo " start Start an ethereum node, MEV-boost and Grafana." - echo " stop Stop an ethereum node, MEV-boost and Grafana." - echo "" - echo "Options:" - echo " -h Display this help message." + echo "Usage: $0 [OPTIONS] COMMAND" + echo "" + echo " Manage the base ethereum node (EL, CL, MEV boost, Grafana), without interfering with any validator." + echo "" + echo "Commands:" + echo " start Start an ethereum node, MEV-boost and Grafana." + echo " stop Stop an ethereum node, MEV-boost and Grafana." + echo "" + echo "Options:" + echo " -h Display this help message." 
} usage_start() { - echo "Usage: $0 start [OPTIONS]" - echo "" - echo " Start the base ethereum node." - echo "" - echo "Options:" - echo " -h Display this help message." - echo "" - echo "Example:" - echo " $0 start" + echo "Usage: $0 start [OPTIONS]" + echo "" + echo " Start the base ethereum node." + echo "" + echo "Options:" + echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 start" } usage_stop() { - echo "Usage: $0 stop [OPTIONS]" - echo "" - echo " Stop the base ethereum node." - echo "" - echo "Options:" - echo " -h Display this help message." - echo "" - echo "Example:" - echo " $0 stop" + echo "Usage: $0 stop [OPTIONS]" + echo "" + echo " Stop the base ethereum node." + echo "" + echo "Options:" + echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 stop" } start() { @@ -46,7 +46,7 @@ stop() { } while getopts ":h" opt; do - case $opt in + case $opt in h) usage exit 0 @@ -55,53 +55,54 @@ while getopts ":h" opt; do usage exit 1 ;; - : ) + :) usage exit 1 ;; - esac + esac done -shift $((OPTIND -1)) +shift $((OPTIND - 1)) -subcommand=$1; shift +subcommand=$1 +shift case "$subcommand" in - # Parse options to the install sub command - start) - while getopts ":h" opt; do - case $opt in - h) - usage_start - exit 0 - ;; - ? ) # Invalid option - usage_start - exit 1 - ;; - esac - done - shift $((OPTIND-1)) - start - ;; - stop) - while getopts ":h" opt; do - case $opt in - h) - usage_stop - exit 0 - ;; - ? ) # Invalid option - usage_stop - exit 1 - ;; - esac - done - shift $((OPTIND-1)) - stop - ;; - * ) - usage - exit 1 - ;; +# Parse options to the install sub command +start) + while getopts ":h" opt; do + case $opt in + h) + usage_start + exit 0 + ;; + ?) # Invalid option + usage_start + exit 1 + ;; + esac + done + shift $((OPTIND - 1)) + start + ;; +stop) + while getopts ":h" opt; do + case $opt in + h) + usage_stop + exit 0 + ;; + ?) 
# Invalid option + usage_stop + exit 1 + ;; + esac + done + shift $((OPTIND - 1)) + stop + ;; +*) + usage + exit 1 + ;; esac diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 8adb3900..d871d929 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -5,70 +5,70 @@ skip_port_free_check= p2p_default_port=3610 usage_base() { - echo "Usage: $0 [OPTIONS] COMMAND" - echo "" - echo " Manage a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." - echo "" - echo "Commands:" - echo " add string Add a validator cluster to the ./clusters directory." - echo " delete string Delete a validator cluster from the ./clusters directory." - echo " start string Start a validator cluster, found in the ./clusters directory." - echo " stop string Stop a validator cluster, found in the ./clusters directory." - echo "" - echo "Options:" - echo " -h Display this help message." + echo "Usage: $0 [OPTIONS] COMMAND" + echo "" + echo " Manage a validator cluster (Charon + VC + Prometheus), found in ./clusters directory." + echo "" + echo "Commands:" + echo " add string Add a validator cluster to the ./clusters directory." + echo " delete string Delete a validator cluster from the ./clusters directory." + echo " start string Start a validator cluster, found in the ./clusters directory." + echo " stop string Stop a validator cluster, found in the ./clusters directory." + echo "" + echo "Options:" + echo " -h Display this help message." } usage_add() { - echo "Usage: $0 add [OPTIONS] NAME" - echo "" - echo " Add a new cluster with specified name." - echo "" - echo "Options:" - echo " -h Display this help message." - echo " -s Skip free port checking with netstat/ss." - echo " -p integer Override the default port (3610) from which to start the search of a free port." 
- echo "" - echo "Example:" - echo " $0 add second-cluster" - echo " $0 add -s third-cluster-without-free-port-check" - echo " $0 add -p 3615 fourth-cluster-with-custom-port" + echo "Usage: $0 add [OPTIONS] NAME" + echo "" + echo " Add a new cluster with specified name." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -s Skip free port checking with netstat/ss." + echo " -p integer Override the default port (3610) from which to start the search of a free port." + echo "" + echo "Example:" + echo " $0 add second-cluster" + echo " $0 add -s third-cluster-without-free-port-check" + echo " $0 add -p 3615 fourth-cluster-with-custom-port" } usage_delete() { - echo "Usage: $0 delete [OPTIONS] NAME" - echo "" - echo " Delete an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." - echo "" - echo "Options:" - echo " -h Display this help message." - echo "" - echo "Example:" - echo " $0 delete my-second-cluster" + echo "Usage: $0 delete [OPTIONS] NAME" + echo "" + echo " Delete an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." + echo "" + echo "Options:" + echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 delete my-second-cluster" } usage_start() { - echo "Usage: $0 start [OPTIONS] NAME" - echo "" - echo " Start an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." - echo "" - echo "Options:" - echo " -h Display this help message." - echo "" - echo "Example:" - echo " $0 start my-second-cluster" + echo "Usage: $0 start [OPTIONS] NAME" + echo "" + echo " Start an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." + echo "" + echo "Options:" + echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 start my-second-cluster" } usage_stop() { - echo "Usage: $0 stop [OPTIONS] NAME" - echo "" - echo " Stop an existing cluster with the specified name. 
A cluster name is a folder in ./clusters dir." - echo "" - echo "Options:" - echo " -h Display this help message." - echo "" - echo "Example:" - echo " $0 stop my-second-cluster" + echo "Usage: $0 stop [OPTIONS] NAME" + echo "" + echo " Stop an existing cluster with the specified name. A cluster name is a folder in ./clusters dir." + echo "" + echo "Options:" + echo " -h Display this help message." + echo "" + echo "Example:" + echo " $0 stop my-second-cluster" } # Check if cluster_name variable is set. @@ -113,15 +113,15 @@ add() { # Run loop until is_occupied is empty. while [[ -n "$is_occupied" ]]; do # Check if TCP port is free, if it is, is_occupied is set to empty, otherwise increment the port by 1 and continue the loop. - if [ -z ${skip_port_free_check} ] ; then + if [ -z ${skip_port_free_check} ]; then if [ -x "$(command -v netstat)" ]; then if is_occupied=$(netstat -taln | grep $port); then - port=$(($port+1)) + port=$(($port + 1)) continue fi elif [ -x "$(command -v ss)" ]; then if is_occupied=$(ss -taln | grep $port); then - port=$(($port+1)) + port=$(($port + 1)) continue fi else @@ -135,7 +135,10 @@ add() { # Check if TCP port is used by another cluster from the ./clusters directory. for cluster in ./clusters/*; do # Check if it is used by the p2p TCP port of this cluster. - p2p_cluster_port=$(. ./$cluster/.env; printf '%s' "${CHARON_PORT_P2P_TCP}") + p2p_cluster_port=$( + . ./$cluster/.env + printf '%s' "${CHARON_PORT_P2P_TCP}" + ) # If the free port is the same as the port in the cluster, mark as occupied and break the loop. if [ $port -eq $p2p_cluster_port ]; then is_occupied=1 @@ -144,70 +147,81 @@ add() { done # If the port was occupied by any cluster, increment the port by 1 and continue the loop. if [ ! -z "$is_occupied" ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # Check if TCP port is used by the base. # Fetch the NETHERMIND_PORT_P2P from the base .env file. - nethermind_p2p_port=$(. 
./.env; printf '%s' "${NETHERMIND_PORT_P2P}") + nethermind_p2p_port=$( + . ./.env + printf '%s' "${NETHERMIND_PORT_P2P}" + ) # If the NETHERMIND_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. if [ -z "$nethermind_p2p_port" ]; then if [ "$port" -eq "30303" ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # If the NETHERMIND_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. elif [ $port -eq $nethermind_p2p_port ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # Fetch the NETHERMIND_PORT_HTTP from the base .env file. - nethermind_http_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_HTTP}") + nethermind_http_port=$( + . ./.env + printf '%s' "${NETHERMIND_PORT_HTTP}" + ) # If the NETHERMIND_PORT_HTTP is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. if [ -z "$nethermind_http_port" ]; then if [ "$port" -eq "8545" ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # If the NETHERMIND_PORT_HTTP is set and the free port is the same, increment the port by 1 and continue the loop. elif [ $port -eq $nethermind_http_port ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # Fetch the NETHERMIND_PORT_ENGINE from the base .env file. - nethermind_engine_port=$(. ./.env; printf '%s' "${NETHERMIND_PORT_ENGINE}") + nethermind_engine_port=$( + . ./.env + printf '%s' "${NETHERMIND_PORT_ENGINE}" + ) # If the NETHERMIND_PORT_ENGINE is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. if [ -z "$nethermind_engine_port" ]; then if [ "$port" -eq "8551" ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # If the NETHERMIND_PORT_ENGINE is set and the free port is the same, increment the port by 1 and continue the loop. 
elif [ $port -eq $nethermind_engine_port ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # Fetch the LIGHTHOUSE_PORT_P2P from the base .env file. - lighthouse_p2p_port=$(. ./.env; printf '%s' "${LIGHTHOUSE_PORT_P2P}") + lighthouse_p2p_port=$( + . ./.env + printf '%s' "${LIGHTHOUSE_PORT_P2P}" + ) # If the LIGHTHOUSE_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. if [ -z "$lighthouse_p2p_port" ]; then if [ "$port" -eq "9000" ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi # If the LIGHTHOUSE_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. elif [ $port -eq $lighthouse_p2p_port ]; then - port=$(($port+1)) + port=$(($port + 1)) continue fi done - # Create dir for the cluster. mkdir -p ./clusters/$cluster_name cluster_dir=./clusters/$cluster_name @@ -225,9 +239,9 @@ add() { # Write the found free port in the .env file. if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $port" - sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env >${cluster_dir}/.env.tmp else - sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env > ${cluster_dir}/.env.tmp + sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env >${cluster_dir}/.env.tmp fi mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env @@ -244,11 +258,11 @@ add() { cp -r ./lodestar ${cluster_dir}/ # Add the base network on which EL + CL + MEV-boost + Grafana run. 
- sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml.tmp + sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml.tmp mv ${cluster_dir}/docker-compose.yml.tmp ${cluster_dir}/docker-compose.yml # Include the base network in the cluster-specific services' network config. - sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml.tmp + sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml.tmp mv ${cluster_dir}/docker-compose.yml.tmp ${cluster_dir}/docker-compose.yml echo "Added new cluster $cluster_name with the following cluster-specific config:" @@ -259,10 +273,9 @@ add() { delete() { read -r -p "Are you sure you want to delete the cluster? This will delete your private keys, which will be unrecoverable if you do not have backup! [y/N] " response - if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]] - then - rm -rf ./clusters/$cluster_name - echo "Delete cluster $cluster_name." + if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then + rm -rf ./clusters/$cluster_name + echo "Delete cluster $cluster_name." fi } @@ -280,7 +293,7 @@ stop() { # Match global flags while getopts ":h" opt; do - case $opt in + case $opt in h) usage_base exit 0 @@ -289,106 +302,107 @@ while getopts ":h" opt; do usage_base exit 1 ;; - esac + esac done # Capture the subcommand passed. -shift "$((OPTIND -1))" -subcommand=$1; shift +shift "$((OPTIND - 1))" +subcommand=$1 +shift # Execute subcommand. case "$subcommand" in - add) - while getopts ":hsp:" opt; do - case $opt in - h) - usage_add - exit 0 - ;; - s ) - skip_port_free_check=true - ;; - p ) - p2p_default_port=${OPTARG}; - ;; - ? 
) # Invalid option - usage_add - exit 1 - ;; - esac - done - shift "$((OPTIND -1))" - cluster_name=$1 - check_missing_cluster_name - check_clusters_dir_does_not_exist - check_cluster_already_exists - add - exit 0 - ;; - delete) - while getopts ":h" opt; do - case $opt in - h) - usage_delete - exit 0 - ;; - ? ) # Invalid option - usage_delete - exit 1 - ;; - esac - done - shift $((OPTIND-1)) - cluster_name=$1 - check_missing_cluster_name - check_clusters_dir_does_not_exist - check_cluster_does_not_exist - delete - exit 0 - ;; - start) - while getopts ":h" opt; do - case $opt in - h) - usage_start - exit 0 - ;; - ? ) # Invalid option - usage_start - exit 1 - ;; - esac - done - shift $((OPTIND-1)) - cluster_name=$1 - check_missing_cluster_name - check_clusters_dir_does_not_exist - check_cluster_does_not_exist - start - exit 0 - ;; - stop) - while getopts ":h" opt; do - case $opt in - h) - usage_stop - exit 0 - ;; - ? ) # Invalid option - usage_stop - exit 1 - ;; - esac - done - shift $((OPTIND-1)) - cluster_name=$1 - check_missing_cluster_name - check_clusters_dir_does_not_exist - check_cluster_does_not_exist - stop - exit 0 - ;; - * ) - usage_base - exit 1 - ;; +add) + while getopts ":hsp:" opt; do + case $opt in + h) + usage_add + exit 0 + ;; + s) + skip_port_free_check=true + ;; + p) + p2p_default_port=${OPTARG} + ;; + ?) # Invalid option + usage_add + exit 1 + ;; + esac + done + shift "$((OPTIND - 1))" + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + check_cluster_already_exists + add + exit 0 + ;; +delete) + while getopts ":h" opt; do + case $opt in + h) + usage_delete + exit 0 + ;; + ?) # Invalid option + usage_delete + exit 1 + ;; + esac + done + shift $((OPTIND - 1)) + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + check_cluster_does_not_exist + delete + exit 0 + ;; +start) + while getopts ":h" opt; do + case $opt in + h) + usage_start + exit 0 + ;; + ?) 
# Invalid option + usage_start + exit 1 + ;; + esac + done + shift $((OPTIND - 1)) + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + check_cluster_does_not_exist + start + exit 0 + ;; +stop) + while getopts ":h" opt; do + case $opt in + h) + usage_stop + exit 0 + ;; + ?) # Invalid option + usage_stop + exit 1 + ;; + esac + done + shift $((OPTIND - 1)) + cluster_name=$1 + check_missing_cluster_name + check_clusters_dir_does_not_exist + check_cluster_does_not_exist + stop + exit 0 + ;; +*) + usage_base + exit 1 + ;; esac diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 01856edd..ede78244 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -4,29 +4,29 @@ current_cluster_name=default cluster_already_set= usage() { - echo "Usage: $0 [OPTIONS]" - echo "" - echo " Create a multi cluster setup from a traditional single cluster setup." - echo "" - echo "Options:" - echo " -h Display this help message." - echo " -c string Name of the current cluster. (default: \"default\")" + echo "Usage: $0 [OPTIONS]" + echo "" + echo " Create a multi cluster setup from a traditional single cluster setup." + echo "" + echo "Options:" + echo " -h Display this help message." + echo " -c string Name of the current cluster. (default: \"default\")" } while getopts "hc:" opt; do - case $opt in - h) + case $opt in + h) usage exit 0 ;; - c) + c) current_cluster_name=${OPTARG} ;; - \?) + \?) usage exit 1 ;; - esac + esac done if [ "$current_cluster_name" = "default" ]; then @@ -46,9 +46,9 @@ mkdir -p ${cluster_dir} # Delete ./clusters dir if the script exits with non-zero code. cleanupClusterDir() { - if [ "$1" != "0" ]; then - rm -rf ./clusters - fi + if [ "$1" != "0" ]; then + rm -rf ./clusters + fi } trap 'cleanupClusterDir $?' 
EXIT @@ -90,21 +90,21 @@ fi if grep -xq "CHARON_PORT_VALIDATOR_API=.*" ./.env; then echo "CHARON_PORT_VALIDATOR_API already set, using the set port instead of the default 3600" else - sed 's|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=3600|' ${cluster_dir}/.env > ${cluster_dir}/.env~ + sed 's|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=3600|' ${cluster_dir}/.env >${cluster_dir}/.env~ mv ${cluster_dir}/.env~ ${cluster_dir}/.env fi if grep -xq "CHARON_PORT_MONITORING=.*" ./.env; then echo "CHARON_PORT_MONITORING already set, using the set port instead of the default 3620" else - sed 's|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=3620|' ${cluster_dir}/.env > ${cluster_dir}/.env~ + sed 's|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=3620|' ${cluster_dir}/.env >${cluster_dir}/.env~ mv ${cluster_dir}/.env~ ${cluster_dir}/.env fi if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then echo "CHARON_PORT_P2P_TCP already set, using the set port instead of the default 3610" else - sed 's|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=3610|' ${cluster_dir}/.env > ${cluster_dir}/.env~ + sed 's|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=3610|' ${cluster_dir}/.env >${cluster_dir}/.env~ mv ${cluster_dir}/.env~ ${cluster_dir}/.env fi @@ -152,14 +152,14 @@ if test -d ./data/prometheus; then fi # Add the base network on which EL + CL + MEV-boost + Grafana run. -sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ +sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml~ mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml # Include the base network in the cluster-specific services' network config. 
-sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml > ${cluster_dir}/docker-compose.yml~ +sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml~ mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml -if ! docker info > /dev/null 2>&1; then +if ! docker info >/dev/null 2>&1; then echo "Docker daemon is not running, please start Docker first." exit 1 fi @@ -176,7 +176,7 @@ if [[ $(docker compose ps -aq) ]]; then fi migrated_readme() { - cat > $1 << EOL + cat >$1 < Date: Fri, 6 Dec 2024 16:11:03 +0200 Subject: [PATCH 32/39] Linting fixes --- multi_cluster/cluster.sh | 91 +++++++++++++++++++++------------------- multi_cluster/setup.sh | 60 +++++++++++++------------- 2 files changed, 79 insertions(+), 72 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index d871d929..55cbf265 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -1,5 +1,8 @@ #!/bin/bash +# shellcheck disable=SC1090 +# shellcheck disable=SC1091 + unset -v cluster_name skip_port_free_check= p2p_default_port=3610 @@ -89,15 +92,15 @@ check_clusters_dir_does_not_exist() { # Check if cluster with the specified cluster_name already exists. check_cluster_already_exists() { - if test -d ./clusters/$cluster_name; then - echo "./clsuters/$cluster_name directory already exists." + if test -d "./clusters/${cluster_name}"; then + echo "./clsuters/${cluster_name} directory already exists." exit 1 fi } # Check if cluster with the specified cluster_name does not exist. check_cluster_does_not_exist() { - if test ! -d ./clusters/$cluster_name; then + if test ! -d "./clusters/${cluster_name}"; then echo "./clsuters/$cluster_name directory does not exist." exit 1 fi @@ -115,13 +118,13 @@ add() { # Check if TCP port is free, if it is, is_occupied is set to empty, otherwise increment the port by 1 and continue the loop. 
if [ -z ${skip_port_free_check} ]; then if [ -x "$(command -v netstat)" ]; then - if is_occupied=$(netstat -taln | grep $port); then - port=$(($port + 1)) + if is_occupied=$(netstat -taln | grep "$port"); then + port=$((port + 1)) continue fi elif [ -x "$(command -v ss)" ]; then - if is_occupied=$(ss -taln | grep $port); then - port=$(($port + 1)) + if is_occupied=$(ss -taln | grep "$port"); then + port=$((port + 1)) continue fi else @@ -136,18 +139,18 @@ add() { for cluster in ./clusters/*; do # Check if it is used by the p2p TCP port of this cluster. p2p_cluster_port=$( - . ./$cluster/.env - printf '%s' "${CHARON_PORT_P2P_TCP}" + . "./${cluster}/.env" + printf '%s' "$CHARON_PORT_P2P_TCP" ) # If the free port is the same as the port in the cluster, mark as occupied and break the loop. - if [ $port -eq $p2p_cluster_port ]; then + if [ "$port" -eq "$p2p_cluster_port" ]; then is_occupied=1 break fi done # If the port was occupied by any cluster, increment the port by 1 and continue the loop. - if [ ! -z "$is_occupied" ]; then - port=$(($port + 1)) + if [ -n "$is_occupied" ]; then + port=$((port + 1)) continue fi @@ -161,12 +164,12 @@ add() { # If the NETHERMIND_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. if [ -z "$nethermind_p2p_port" ]; then if [ "$port" -eq "30303" ]; then - port=$(($port + 1)) + port=$((port + 1)) continue fi # If the NETHERMIND_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $nethermind_p2p_port ]; then - port=$(($port + 1)) + elif [ "$port" -eq "$nethermind_p2p_port" ]; then + port=$((port + 1)) continue fi @@ -178,92 +181,92 @@ add() { # If the NETHERMIND_PORT_HTTP is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. 
if [ -z "$nethermind_http_port" ]; then if [ "$port" -eq "8545" ]; then - port=$(($port + 1)) + port=$((port + 1)) continue fi # If the NETHERMIND_PORT_HTTP is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $nethermind_http_port ]; then - port=$(($port + 1)) + elif [ "$port" -eq "$nethermind_http_port" ]; then + port=$((port + 1)) continue fi # Fetch the NETHERMIND_PORT_ENGINE from the base .env file. nethermind_engine_port=$( . ./.env - printf '%s' "${NETHERMIND_PORT_ENGINE}" + printf '%s' "$NETHERMIND_PORT_ENGINE" ) # If the NETHERMIND_PORT_ENGINE is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. if [ -z "$nethermind_engine_port" ]; then if [ "$port" -eq "8551" ]; then - port=$(($port + 1)) + port=$((port + 1)) continue fi # If the NETHERMIND_PORT_ENGINE is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $nethermind_engine_port ]; then - port=$(($port + 1)) + elif [ "$port" -eq "$nethermind_engine_port" ]; then + port=$((port + 1)) continue fi # Fetch the LIGHTHOUSE_PORT_P2P from the base .env file. lighthouse_p2p_port=$( . ./.env - printf '%s' "${LIGHTHOUSE_PORT_P2P}" + printf '%s' "$LIGHTHOUSE_PORT_P2P" ) # If the LIGHTHOUSE_PORT_P2P is not set and the free port is the same as the default one, increment the port by 1 and continue the loop. if [ -z "$lighthouse_p2p_port" ]; then if [ "$port" -eq "9000" ]; then - port=$(($port + 1)) + port=$((port + 1)) continue fi # If the LIGHTHOUSE_PORT_P2P is set and the free port is the same, increment the port by 1 and continue the loop. - elif [ $port -eq $lighthouse_p2p_port ]; then - port=$(($port + 1)) + elif [ "$port" -eq "$lighthouse_p2p_port" ]; then + port=$((port + 1)) continue fi done # Create dir for the cluster. 
- mkdir -p ./clusters/$cluster_name - cluster_dir=./clusters/$cluster_name + mkdir -p "./clusters/${cluster_name}" + cluster_dir="./clusters/${cluster_name}" # Copy .env from root dir to cluster's dir (if it exists). if test ./.env; then - cp .env ${cluster_dir}/ + cp .env "${cluster_dir}/" fi # Copy docker-compose.yml from root dir to cluster's dir (if it exists). if test ./docker-compose.yml; then - cp ./docker-compose.yml ${cluster_dir}/ + cp ./docker-compose.yml "$cluster_dir"/ fi # Write the found free port in the .env file. if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then echo "CHARON_PORT_P2P_TCP already set, overwriting it with port $port" - sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env >${cluster_dir}/.env.tmp + sed "s|CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" "${cluster_dir}/.env" >"${cluster_dir}/.env.tmp" else - sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" ${cluster_dir}/.env >${cluster_dir}/.env.tmp + sed "s|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=$port|" "${cluster_dir}/.env" >"${cluster_dir}/.env.tmp" fi - mv ${cluster_dir}/.env.tmp ${cluster_dir}/.env + mv "${cluster_dir}/.env.tmp" "${cluster_dir}/.env" # Create data dir. - mkdir ${cluster_dir}/data + mkdir "${cluster_dir}/data" # Copy prometheus files and data. - cp -r ./prometheus ${cluster_dir}/ + cp -r ./prometheus "${cluster_dir}/" if test -d ./data/prometheus; then - cp -r ./data/prometheus ${cluster_dir}/data/ + cp -r ./data/prometheus "${cluster_dir}/data/" fi # Copy lodestar files. - cp -r ./lodestar ${cluster_dir}/ + cp -r ./lodestar "${cluster_dir}/" # Add the base network on which EL + CL + MEV-boost + Grafana run. 
- sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml.tmp - mv ${cluster_dir}/docker-compose.yml.tmp ${cluster_dir}/docker-compose.yml + sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" "${cluster_dir}/docker-compose.yml" >"${cluster_dir}/docker-compose.yml.tmp" + mv "${cluster_dir}/docker-compose.yml.tmp" "${cluster_dir}/docker-compose.yml" # Include the base network in the cluster-specific services' network config. - sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml.tmp - mv ${cluster_dir}/docker-compose.yml.tmp ${cluster_dir}/docker-compose.yml + sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" "${cluster_dir}/docker-compose.yml" >"${cluster_dir}/docker-compose.yml.tmp" + mv "${cluster_dir}/docker-compose.yml.tmp" "${cluster_dir}/docker-compose.yml" echo "Added new cluster $cluster_name with the following cluster-specific config:" echo "CHARON_PORT_P2P_TCP: $port" @@ -274,19 +277,19 @@ add() { delete() { read -r -p "Are you sure you want to delete the cluster? This will delete your private keys, which will be unrecoverable if you do not have backup! [y/N] " response if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then - rm -rf ./clusters/$cluster_name + rm -rf "./clusters/$cluster_name" echo "Delete cluster $cluster_name." 
fi } start() { - docker compose --profile cluster -f ./clusters/${cluster_name}/docker-compose.yml up -d + docker compose --profile cluster -f "./clusters/${cluster_name}/docker-compose.yml" up -d echo "Started cluster $cluster_name" echo "You can stop it by running $0 stop $cluster_name" } stop() { - docker compose --profile cluster -f ./clusters/${cluster_name}/docker-compose.yml down + docker compose --profile cluster -f "./clusters/${cluster_name}/docker-compose.yml" down echo "Stopped cluster $cluster_name" echo "You can start it again by running $0 start $cluster_name" } diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index ede78244..e5c44578 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -1,5 +1,9 @@ #!/bin/bash +# shellcheck disable=SC1090 +# shellcheck disable=SC1091 +# shellcheck disable=SC2012 + current_cluster_name=default cluster_already_set= @@ -42,7 +46,7 @@ if test -d ./clusters; then fi # Create clusters directory. -mkdir -p ${cluster_dir} +mkdir -p "$cluster_dir" # Delete ./clusters dir if the script exits with non-zero code. cleanupClusterDir() { @@ -55,8 +59,8 @@ trap 'cleanupClusterDir $?' EXIT # Copy .charon folder to clusters directory (if it exists). if test -d ./.charon; then owner="$(ls -ld ".charon" | awk '{print $3}')" - if [ "x${owner}" = "x${USER}" ]; then - cp -r .charon ${cluster_dir}/ + if [ "$owner" = "$USER" ]; then + cp -r .charon "$cluster_dir"/ cluster_already_set=1 else echo "current user ${USER} is not owner of .charon/" @@ -67,8 +71,8 @@ fi # Copy .env file to clusters directory (if it exists). if test ./.env; then owner="$(ls -ld ".env" | awk '{print $3}')" - if [ "x${owner}" = "x${USER}" ]; then - cp .env ${cluster_dir}/ + if [ "${owner}" = "${USER}" ]; then + cp .env "$cluster_dir"/ else echo "current user ${USER} is not owner of .env" exit 1 @@ -78,8 +82,8 @@ fi # Copy docker-compose.yml to clusters directory (if it exists). 
if test ./docker-compose.yml; then owner="$(ls -ld "docker-compose.yml" | awk '{print $3}')" - if [ "x${owner}" = "x${USER}" ]; then - cp ./docker-compose.yml ${cluster_dir}/ + if [ "${owner}" = "${USER}" ]; then + cp ./docker-compose.yml "$cluster_dir"/ else echo "current user ${USER} is not owner of docker-compose.yml" exit 1 @@ -90,31 +94,31 @@ fi if grep -xq "CHARON_PORT_VALIDATOR_API=.*" ./.env; then echo "CHARON_PORT_VALIDATOR_API already set, using the set port instead of the default 3600" else - sed 's|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=3600|' ${cluster_dir}/.env >${cluster_dir}/.env~ - mv ${cluster_dir}/.env~ ${cluster_dir}/.env + sed 's|#CHARON_PORT_VALIDATOR_API=|CHARON_PORT_VALIDATOR_API=3600|' "${cluster_dir}/.env" >"${cluster_dir}/.env~" + mv "${cluster_dir}/.env~" "${cluster_dir}/.env" fi if grep -xq "CHARON_PORT_MONITORING=.*" ./.env; then echo "CHARON_PORT_MONITORING already set, using the set port instead of the default 3620" else - sed 's|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=3620|' ${cluster_dir}/.env >${cluster_dir}/.env~ - mv ${cluster_dir}/.env~ ${cluster_dir}/.env + sed 's|#CHARON_PORT_MONITORING=|CHARON_PORT_MONITORING=3620|' "${cluster_dir}/.env" >"${cluster_dir}/.env~" + mv "${cluster_dir}/.env~" "${cluster_dir}/.env" fi if grep -xq "CHARON_PORT_P2P_TCP=.*" ./.env; then echo "CHARON_PORT_P2P_TCP already set, using the set port instead of the default 3610" else - sed 's|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=3610|' ${cluster_dir}/.env >${cluster_dir}/.env~ - mv ${cluster_dir}/.env~ ${cluster_dir}/.env + sed 's|#CHARON_PORT_P2P_TCP=|CHARON_PORT_P2P_TCP=3610|' "${cluster_dir}/.env" >"${cluster_dir}/.env~" + mv "${cluster_dir}/.env~" "${cluster_dir}/.env" fi # Create data dir. -mkdir ${cluster_dir}/data +mkdir "${cluster_dir}/data" # Copy lodestar files. 
owner="$(ls -ld "lodestar" | awk '{print $3}')" -if [ "x${owner}" = "x${USER}" ]; then - cp -r ./lodestar ${cluster_dir}/ +if [ "${owner}" = "${USER}" ]; then + cp -r ./lodestar "${cluster_dir}/" else echo "current user ${USER} is not owner of lodestar/" exit 1 @@ -123,8 +127,8 @@ fi # Copy lodestar data, if it exists. if test -d ./data/lodestar; then owner="$(ls -ld "data/lodestar" | awk '{print $3}')" - if [ "x${owner}" = "x${USER}" ]; then - cp -r ./data/lodestar ${cluster_dir}/data/ + if [ "${owner}" = "${USER}" ]; then + cp -r ./data/lodestar "${cluster_dir}/data/" else echo "current user ${USER} is not owner of data/lodestar/" exit 1 @@ -133,8 +137,8 @@ fi # Copy prometheus files. owner="$(ls -ld "prometheus" | awk '{print $3}')" -if [ "x${owner}" = "x${USER}" ]; then - cp -r ./prometheus ${cluster_dir}/ +if [ "${owner}" = "${USER}" ]; then + cp -r ./prometheus "${cluster_dir}/" else echo "current user ${USER} is not owner of prometheus/" exit 1 @@ -143,8 +147,8 @@ fi # Copy prometheus data, if it exists. if test -d ./data/prometheus; then owner="$(ls -ld "data/prometheus" | awk '{print $3}')" - if [ "x${owner}" = "x${USER}" ]; then - cp -r ./data/prometheus ${cluster_dir}/data/ + if [ "${owner}" = "${USER}" ]; then + cp -r ./data/prometheus "${cluster_dir}/data/" else echo "current user ${USER} is not owner of data/prometheus/" exit 1 @@ -152,12 +156,12 @@ if test -d ./data/prometheus; then fi # Add the base network on which EL + CL + MEV-boost + Grafana run. 
-sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml~ -mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml +sed "s| dvnode:| dvnode:\n shared-node:\n external:\n name: charon-distributed-validator-node_dvnode|" "${cluster_dir}/docker-compose.yml" >"${cluster_dir}/docker-compose.yml~" +mv "${cluster_dir}/docker-compose.yml~" "${cluster_dir}/docker-compose.yml" # Include the base network in the cluster-specific services' network config. -sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" ${cluster_dir}/docker-compose.yml >${cluster_dir}/docker-compose.yml~ -mv ${cluster_dir}/docker-compose.yml~ ${cluster_dir}/docker-compose.yml +sed "s| networks: \[dvnode\]| networks: [dvnode,shared-node]|" "${cluster_dir}/docker-compose.yml" >"${cluster_dir}/docker-compose.yml~" +mv "${cluster_dir}/docker-compose.yml~" "${cluster_dir}/docker-compose.yml" if ! docker info >/dev/null 2>&1; then echo "Docker daemon is not running, please start Docker first." @@ -172,11 +176,11 @@ if [[ $(docker compose ps -aq) ]]; then # Start the base containers in the root directory. docker compose --profile base up -d # Start the cluster-specific containers in cluster-specific directory (i.e.: charon, VC). 
- docker compose --profile cluster -f ${cluster_dir}/docker-compose.yml up -d + docker compose --profile cluster -f "${cluster_dir}/docker-compose.yml" up -d fi migrated_readme() { - cat >$1 <"$1" < Date: Fri, 6 Dec 2024 16:21:58 +0200 Subject: [PATCH 33/39] Shellcheck disable one liner --- multi_cluster/cluster.sh | 3 +-- multi_cluster/setup.sh | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 55cbf265..02611185 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -1,7 +1,6 @@ #!/bin/bash -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 +# shellcheck disable=SC1090,SC1091 unset -v cluster_name skip_port_free_check= diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index e5c44578..2f7b0665 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -1,8 +1,6 @@ #!/bin/bash -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -# shellcheck disable=SC2012 +# shellcheck disable=SC1090,SC1091,SC2012 current_cluster_name=default cluster_already_set= From 45f3730439987ade324b7d2c31cbc6b12012a0f2 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 6 Dec 2024 18:37:46 +0200 Subject: [PATCH 34/39] Remove 'my' from examples --- multi_cluster/cluster.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index 02611185..d6c96abc 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -46,7 +46,7 @@ usage_delete() { echo " -h Display this help message." echo "" echo "Example:" - echo " $0 delete my-second-cluster" + echo " $0 delete second-cluster" } usage_start() { @@ -58,7 +58,7 @@ usage_start() { echo " -h Display this help message." echo "" echo "Example:" - echo " $0 start my-second-cluster" + echo " $0 start second-cluster" } usage_stop() { @@ -70,7 +70,7 @@ usage_stop() { echo " -h Display this help message." 
echo "" echo "Example:" - echo " $0 stop my-second-cluster" + echo " $0 stop second-cluster" } # Check if cluster_name variable is set. From 19fadd40d7b450159d84979aef233920da2cce57 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Fri, 6 Dec 2024 18:39:13 +0200 Subject: [PATCH 35/39] Remove 'your' from docs; make cluster name mandatory for setup --- README.md | 22 +++++++++++----------- multi_cluster/setup.sh | 24 +++++++++++++----------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 1f40355c..43b2bbb8 100644 --- a/README.md +++ b/README.md @@ -56,14 +56,14 @@ The way this is achieved is by separating the EL, CL and Grafana from the Charon If you already have running validator node in Docker, the Docker containers will be moved to the new multi cluster setup. ```bash -./multi_cluster/setup.sh -c {YOUR_CLUSTER_NAME} +./multi_cluster/setup.sh {CLUSTER_NAME} ``` You can inspect what you have in the `./clusters/` directory. Each subfolder is a cluster with the following structure: ```directory clusters -└───{YOUR_CLUSTER_NAME} # cluster name +└───{CLUSTER_NAME} # cluster name │ │ .charon # folder including secret material used by charon │ │ data # data from the validator client and prometheus │ │ lodestar # scripts used by lodestar @@ -71,9 +71,9 @@ clusters │ │ .env # environment variables used by the cluster │ │ docker-compose.yml # docker compose used by the cluster │ # N.B.: only services with profile "cluster" are ran -└───{YOUR_CLUSTER_NAME_2} -└───{YOUR_CLUSTER_NAME_...} -└───{YOUR_CLUSTER_NAME_N} +└───{CLUSTER_NAME_2} +└───{CLUSTER_NAME_...} +└───{CLUSTER_NAME_N} ``` Note that those folders and files are copied from the root directory. Meaning all configurations and setup you have already done, will be copied to this first cluster of the multi cluster setup. 
@@ -85,29 +85,29 @@ Manage the Charon + Validator Client + Prometheus containers of each cluster fou ### Add cluster ```bash -./multi_cluster/cluster.sh add {YOUR_CLUSTER_NAME} +./multi_cluster/cluster.sh add {CLUSTER_NAME} ``` -Note that only the `.env`, `lodestar/`, `prometheus/` and `docker-compose.yml` files and directories are coiped from the root directory to the new cluster. `.charon/` and `data/` folders are expected to be from a brand new cluster that you will setup in the `./clusters/{YOUR_CLUSTER_NAME}` directory. +Note that only the `.env`, `lodestar/`, `prometheus/` and `docker-compose.yml` files and directories are copied from the root directory to the new cluster. `.charon/` and `data/` folders are expected to be from a brand new cluster that you will setup in the `./clusters/{CLUSTER_NAME}` directory. ### Start cluster -It is expected that you have already done the regular procedure from cluster setup and you have `./clusters/{YOUR_CLUSTER_NAME}/.charon/` folder. +It is expected that you have already done the regular procedure from cluster setup and you have `./clusters/{CLUSTER_NAME}/.charon/` folder. ```bash -./multi_cluster/cluster.sh start {YOUR_CLUSTER_NAME} +./multi_cluster/cluster.sh start {CLUSTER_NAME} ``` ### Stop cluster ```bash -./multi_cluster/cluster.sh stop {YOUR_CLUSTER_NAME} +./multi_cluster/cluster.sh stop {CLUSTER_NAME} ``` ### Delete cluster ```bash -./multi_cluster/cluster.sh delete {YOUR_CLUSTER_NAME} +./multi_cluster/cluster.sh delete {CLUSTER_NAME} ``` ## Manage base node diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 2f7b0665..97371134 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -2,40 +2,42 @@ # shellcheck disable=SC1090,SC1091,SC2012 -current_cluster_name=default cluster_already_set= usage() { - echo "Usage: $0 [OPTIONS]" + echo "Usage: $0 [OPTIONS] NAME" echo "" - echo " Create a multi cluster setup from a traditional single cluster setup." 
+ echo " Create a multi cluster setup from a traditional single cluster setup. Name of the first cluster should be specified." echo "" echo "Options:" echo " -h Display this help message." - echo " -c string Name of the current cluster. (default: \"default\")" + echo "" + echo "Example:" + echo " $0 initial-cluster" } -while getopts "hc:" opt; do +while getopts "h" opt; do case $opt in h) usage exit 0 ;; - c) - current_cluster_name=${OPTARG} - ;; \?) usage exit 1 ;; esac done +shift "$((OPTIND - 1))" +cluster_name=$1 -if [ "$current_cluster_name" = "default" ]; then - echo "WARN: -c flag not specified. Using default cluster name 'default'." +if [ -z "$cluster_name" ]; then + echo 'Missing cluster name argument.' >&2 + usage + exit 1 fi -cluster_dir=./clusters/${current_cluster_name} +cluster_dir=./clusters/${cluster_name} # Check if clusters directory already exists. if test -d ./clusters; then From 975380f1d44777057ea5f85d72922bc688a0e711 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Tue, 10 Dec 2024 19:47:55 +0200 Subject: [PATCH 36/39] Fix typos --- multi_cluster/cluster.sh | 6 +++--- multi_cluster/setup.sh | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index d6c96abc..e1c75c86 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -84,7 +84,7 @@ check_missing_cluster_name() { # Check if ./clusters directory exists. check_clusters_dir_does_not_exist() { if test ! -d ./clusters; then - echo "./clsuters directory does not exist. Run setup.sh first." + echo "./clusters directory does not exist. Run setup.sh first." exit 1 fi } @@ -92,7 +92,7 @@ check_clusters_dir_does_not_exist() { # Check if cluster with the specified cluster_name already exists. check_cluster_already_exists() { if test -d "./clusters/${cluster_name}"; then - echo "./clsuters/${cluster_name} directory already exists." + echo "./clusters/${cluster_name} directory already exists." 
exit 1 fi } @@ -100,7 +100,7 @@ check_cluster_already_exists() { # Check if cluster with the specified cluster_name does not exist. check_cluster_does_not_exist() { if test ! -d "./clusters/${cluster_name}"; then - echo "./clsuters/$cluster_name directory does not exist." + echo "./clusters/$cluster_name directory does not exist." exit 1 fi } diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 97371134..8e2bd752 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -41,7 +41,7 @@ cluster_dir=./clusters/${cluster_name} # Check if clusters directory already exists. if test -d ./clusters; then - echo "./clsuters directory already exists. Cannot setup already set multi cluster CDVN." + echo "./clusters directory already exists. Cannot setup already set multi cluster CDVN." exit 1 fi From 51308c6b68bee7a6c111d860298de5a6bddb2ad7 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Tue, 10 Dec 2024 19:50:57 +0200 Subject: [PATCH 37/39] Add -f flag to testing file existence --- multi_cluster/cluster.sh | 4 ++-- multi_cluster/setup.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index e1c75c86..e3392987 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -229,12 +229,12 @@ add() { cluster_dir="./clusters/${cluster_name}" # Copy .env from root dir to cluster's dir (if it exists). - if test ./.env; then + if test -f ./.env; then cp .env "${cluster_dir}/" fi # Copy docker-compose.yml from root dir to cluster's dir (if it exists). - if test ./docker-compose.yml; then + if test -f ./docker-compose.yml; then cp ./docker-compose.yml "$cluster_dir"/ fi diff --git a/multi_cluster/setup.sh b/multi_cluster/setup.sh index 8e2bd752..b7d88311 100755 --- a/multi_cluster/setup.sh +++ b/multi_cluster/setup.sh @@ -69,7 +69,7 @@ if test -d ./.charon; then fi # Copy .env file to clusters directory (if it exists). 
-if test ./.env; then +if test -f ./.env; then owner="$(ls -ld ".env" | awk '{print $3}')" if [ "${owner}" = "${USER}" ]; then cp .env "$cluster_dir"/ @@ -80,7 +80,7 @@ if test ./.env; then fi # Copy docker-compose.yml to clusters directory (if it exists). -if test ./docker-compose.yml; then +if test -f ./docker-compose.yml; then owner="$(ls -ld "docker-compose.yml" | awk '{print $3}')" if [ "${owner}" = "${USER}" ]; then cp ./docker-compose.yml "$cluster_dir"/ From b8676b782ca726388bf4b3170f0bce40115c20b9 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Tue, 10 Dec 2024 19:52:43 +0200 Subject: [PATCH 38/39] grep port as a word --- multi_cluster/cluster.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/multi_cluster/cluster.sh b/multi_cluster/cluster.sh index e3392987..2bfa76c4 100755 --- a/multi_cluster/cluster.sh +++ b/multi_cluster/cluster.sh @@ -117,12 +117,12 @@ add() { # Check if TCP port is free, if it is, is_occupied is set to empty, otherwise increment the port by 1 and continue the loop. 
if [ -z ${skip_port_free_check} ]; then if [ -x "$(command -v netstat)" ]; then - if is_occupied=$(netstat -taln | grep "$port"); then + if is_occupied=$(netstat -taln | grep -w "$port"); then port=$((port + 1)) continue fi elif [ -x "$(command -v ss)" ]; then - if is_occupied=$(ss -taln | grep "$port"); then + if is_occupied=$(ss -taln | grep -w "$port"); then port=$((port + 1)) continue fi From 08408e640d29c1aeb6f7003132b41f43a5f03f4e Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Tue, 10 Dec 2024 20:17:22 +0200 Subject: [PATCH 39/39] Add makefile --- Makefile | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..71a3322b --- /dev/null +++ b/Makefile @@ -0,0 +1,34 @@ +.PHONY: multi-cluster-setup multi-cluster-add-cluster multi-cluster-delete-cluster multi-cluster-start-cluster multi-cluster-stop-cluster multi-cluster-start-base multi-cluster-stop-base + +check_defined = \ + $(strip $(foreach 1,$1, \ + $(call __check_defined,$1,$(strip $(value 2))))) +__check_defined = \ + $(if $(value $1),, \ + $(error $1$(if $2, ($2)) is not set. Set it by running `make $1$(if $2, ($2))=`)) + +multi-cluster-setup: + $(call check_defined, name) + ./multi_cluster/setup.sh $(name) + +multi-cluster-add-cluster: + $(call check_defined, name) + ./multi_cluster/cluster.sh add $(name) + +multi-cluster-delete-cluster: + $(call check_defined, name) + ./multi_cluster/cluster.sh delete $(name) + +multi-cluster-start-cluster: + $(call check_defined, name) + ./multi_cluster/cluster.sh start $(name) + +multi-cluster-stop-cluster: + $(call check_defined, name) + ./multi_cluster/cluster.sh stop $(name) + +multi-cluster-start-base: + ./multi_cluster/base.sh start + +multi-cluster-stop-base: + ./multi_cluster/base.sh stop