diff --git a/.ansible_lint b/.ansible-lint similarity index 51% rename from .ansible_lint rename to .ansible-lint index 688759be0..d739e8211 100644 --- a/.ansible_lint +++ b/.ansible-lint @@ -9,9 +9,13 @@ exclude_paths: - .github/ - molecule/ - docs/ - - .ansible-lint - - .yamllint - scripts/ - + - test/ + - playbooks/tasks # Dir for testing locally + skip_list: - yaml[line-length] + - var-naming[no-role-prefix] + - command-instead-of-module + - var-naming[pattern] + - risky-file-permissions diff --git a/.github/actions/ansible/action.yml b/.github/actions/ansible/action.yml new file mode 100644 index 000000000..b08049835 --- /dev/null +++ b/.github/actions/ansible/action.yml @@ -0,0 +1,53 @@ +--- +name: ansible +description: execute an ansible playbook + +runs: + using: composite + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.7.3" + + - name: Get key + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Terraform Init + id: init + run: | + terraform init + + - name: Terraform plan + id: plan + run: | + terraform plan -out=terraform.tfplan \ + -var "GITHUB_RUN_ID=$GITHUB_RUN_ID" \ + -var "do_token=${DO_PAT}" \ + -var "terraform_backend_bucket_name=github-action-${{ github.run_id }}" + + - name: Terraform Apply + run: | + terraform apply terraform.tfplan + + - name: Terraform plan delete stack + id: plan + run: | + terraform plan -destroy -out=terraform.tfplan \ + -var "GITHUB_RUN_ID=$GITHUB_RUN_ID" \ + -var "do_token=${DO_PAT}" \ + -var "terraform_backend_bucket_name=github-action-${{ github.run_id }}" + continue-on-error: true + + - name: Terraform Apply + run: | + terraform apply terraform.tfplan + continue-on-error: true diff --git a/.github/actions/update-readme/action.yml b/.github/actions/update-version/action.yml similarity index 84% rename from 
.github/actions/update-readme/action.yml rename to .github/actions/update-version/action.yml index af92ab847..df81caa29 100644 --- a/.github/actions/update-readme/action.yml +++ b/.github/actions/update-version/action.yml @@ -5,16 +5,19 @@ description: Update Readme and Changelog runs: using: composite steps: - # Update Readme + # Update Readme - shell: bash run: | + rkub_collection=$(yq -r .version galaxy.yml) rke2_version=$(yq -r .global_rke2_version playbooks/vars/main.yml) cert_version=$(yq -r .global_CERT_VERSION playbooks/vars/main.yml) rancher_version=$(yq -r .global_RANCHER_VERSION playbooks/vars/main.yml) longhorn_version=$(yq -r .global_LONGHORN_VERSION playbooks/vars/main.yml) neuvector_version=$(yq -r .global_NEU_VERSION playbooks/vars/main.yml) cat << EOF > version.txt + **Ansible Collection Rkub ${rkub_collection} include:** - [RKE2 ${rke2_version}](https://docs.rke2.io) - Security focused Kubernetes + - [Cert-manager ${cert_version}](https://cert-manager.io/docs/) - Certificate manager - [Rancher ${rancher_version}](https://www.suse.com/products/suse-rancher/) - Multi-Cluster Kubernetes Management - [Longhorn ${longhorn_version}](https://longhorn.io) - Unified storage layer - [Neuvector ${neuvector_version}](https://neuvector.com/) - Kubernetes Security Platform @@ -36,21 +39,21 @@ runs: cat << EOF > version.txt ## ${rkub_collection} (${date}) - Versions: + ### Versions: - rke2 version: ${rke2_version} - - cert-manager version: ${cert_version} - - rancher version: ${rancher_version} + - cert-manager version: ${cert_version} + - rancher version: ${rancher_version} - longhorn version: ${longhorn_version} - neuvector version: ${neuvector_version} EOF sed -i -n -e "1,/<\!-- Release -->/ p" -e"/<\!-- End Release -->/,$ p" -e "/<\!-- Release -->/ r version.txt" CHANGELOG.md - # Git push + # Git push - shell: bash run: | git config user.name github-actions git config user.email github-actions@github.com git add README.md CHANGELOG.md [[ -z $(git status -uno 
--porcelain) ]] && echo "No need to commit..." || git commit -m "⚡ Update README & CHANGELOG" - git push origin \ No newline at end of file + git push origin HEAD:${{ github.head_ref || github.ref_name }} diff --git a/.github/renovate.json b/.github/renovate.json new file mode 100644 index 000000000..530d4f899 --- /dev/null +++ b/.github/renovate.json @@ -0,0 +1,188 @@ +{ + "labels": ["dependencies"], + "schedule": ["after 7am on saturday"], + "ansible-galaxy": { "fileMatch": ["(^|/)(galaxy|requirements|ee-requirements)(\\.ansible)?\\.ya?ml$"] }, + "pre-commit": { + "enabled": true, + "addLabels": ["pre-commit"] + }, + "packageRules": [ + { + "matchManagers": ["dockerfile"], + "matchPackagePatterns": ["ubi8"], + "matchUpdateTypes": ["minor"], + "automerge": true + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["minor"], + "automerge": true + }, + { + "matchPackagePatterns": ["rke2"], + "addLabels": ["rke2"], + "extractVersion": "^v(?\\d+\\.\\d+.\\d+)" + }, + { + "matchPackagePatterns": ["helm"], + "addLabels": ["helm"], + "extractVersion": "^v(?\\d+\\.\\d+.\\d+)" + }, + { + "matchPackagePatterns": ["cert-manager"], + "addLabels": ["cert-manager"], + "extractVersion": "^v(?\\d+\\.\\d+.\\d+)" + }, + { + "matchPackagePatterns": ["rancher"], + "addLabels": ["rancher"], + "extractVersion": "^v(?\\d+\\.\\d+.\\d+)" + }, + { + "matchPackagePatterns": ["longhorn"], + "addLabels": ["longhorn"], + "extractVersion": "^longhorn-(?\\d+\\.\\d+.\\d+)" + }, + { + "matchPackagePatterns": ["neuvector"], + "addLabels": ["neuvector"], + "extractVersion": "(?\\d+\\.\\d+.\\d+)" + } + ], + "customManagers": [ + { + "description": "Update RKE2 helm charts version with customDatasource", + "customType": "regex", + "fileMatch": [ + "playbooks/vars/main\\.y[a]?ml$", + "CHANGELOG.md", + "README.md" + ], + "matchStrings": [ + "global_rke2_version: \"(?.*)\"\\n", + "- rke2 version: (?.*)\\n", + "- \\[RKE2 (?[^\\]]+)\\]\\(https://docs.rke2.io\\) - Security focused Kubernetes 
(channel stable)\\n" + ], + "depNameTemplate": "rke2", + "versioningTemplate": "semver-coerced", + "datasourceTemplate": "custom.rke2" + }, + { + "description": "Update helm binary version based on github repo", + "customType": "regex", + "fileMatch": [ + "playbooks/vars/main\\.y[a]?ml$" + ], + "matchStrings": [ + "global_helm_version: \"(?.*?)\"\\n" + ], + "datasourceTemplate": "git-refs", + "depNameTemplate": "helm", + "packageNameTemplate": "https://github.com/helm/helm", + "versioningTemplate": "semver-coerced" + }, + { + "description": "Update cert-manager helm charts version based on github repo", + "customType": "regex", + "fileMatch": [ + "playbooks/vars/main\\.y[a]?ml$", + "CHANGELOG.md", + "README.md" + ], + "matchStrings": [ + "global_CERT_VERSION: \"(?.*?)\"\\n", + "- cert-manager version: (?.*)\\n", + "- \\[Cert-manager (?[^\\]]+)\\]\\(https://cert-manager.io/docs/\\) - Certificate manager\\n" + ], + "datasourceTemplate": "git-tags", + "depNameTemplate": "cert-manager", + "packageNameTemplate": "https://github.com/cert-manager/cert-manager", + "versioningTemplate": "semver" + }, + { + "description": "Update Rancher helm charts version with customDatasource", + "customType": "regex", + "fileMatch": [ + "playbooks/vars/main\\.y[a]?ml$", + "CHANGELOG.md", + "README.md" + ], + "matchStrings": [ + "global_RANCHER_VERSION: \"(?.*?)\"\\n", + "- rancher version: (?.*)\\n", + "- \\[Rancher (?[^\\]]+)\\]\\(https://www.suse.com/products/suse-rancher/\\) - Multi-Cluster Kubernetes Management\\n" + ], + "depNameTemplate": "rancher", + "versioningTemplate": "semver-coerced", + "datasourceTemplate": "custom.rancher" + }, + { + "description": "Update Longhorn helm charts version based on github repo", + "customType": "regex", + "fileMatch": [ + "playbooks/vars/main\\.y[a]?ml$", + "CHANGELOG.md", + "README.md" + ], + "matchStrings": [ + "global_LONGHORN_VERSION: \"(?.*?)\"\\n", + "- longhorn version: (?.*)\\n", + "- \\[Longhorn (?[^\\]]+)\\]\\(https://longhorn.io\\) - 
Unified storage layer\\n" + ], + "datasourceTemplate": "git-refs", + "depNameTemplate": "longhorn", + "packageNameTemplate": "https://github.com/longhorn/charts", + "versioningTemplate": "semver" + }, + { + "description": "Update Neuvector helm charts version based on github repo", + "customType": "regex", + "fileMatch": [ + "playbooks/vars/main\\.y[a]?ml$", + "CHANGELOG.md", + "README.md" + ], + "matchStrings": [ + "global_NEU_VERSION: \"(?.*?)\"\\n", + "- neuvector version: (?.*)\\n", + "- \\[Neuvector (?[^\\]]+)\\]\\(https://neuvector.com/\\) - Kubernetes Security Platform\\n" + ], + "datasourceTemplate": "git-refs", + "depNameTemplate": "neuvector", + "packageNameTemplate": "https://github.com/neuvector/neuvector-helm", + "versioningTemplate": "semver" + }, + { + "description": "Update Kubevip version based on github repo", + "customType": "regex", + "fileMatch": [ + "playbooks/vars/main\\.y[a]?ml$", + "CHANGELOG.md", + "README.md" + ], + "matchStrings": [ + "global_kubevip_version: \"(?.*?)\"\\n", + "- kube-vip version: (?.*)\\n", + "- \\[Kube-vip (?[^\\]]+)\\]\\(https://kube-vip.io/\\) - Virtual IP and load balancer\\n" + ], + "datasourceTemplate": "git-refs", + "depNameTemplate": "kubevip", + "packageNameTemplate": "https://github.com/kube-vip/kube-vip", + "versioningTemplate": "semver" + } + ], + "customDatasources": { + "rke2": { + "defaultRegistryUrlTemplate": "https://update.rke2.io/v1-release/channels", + "transformTemplates": [ + "{\"releases\":[{\"version\": $$.(data[id = 'stable'].latest),\"sourceUrl\":\"https://github.com/rancher/rke2\",\"changelogUrl\":$join([\"https://github.com/rancher/rke2/releases/\",data[id = 'stable'].latest])}],\"sourceUrl\": \"https://github.com/rancher/rke2\",\"homepage\": \"https://docs.rke2.io\"}" + ] + }, + "rancher": { + "defaultRegistryUrlTemplate": "https://update.rancher.io/v1-release/channels", + "transformTemplates": [ + "{\"releases\":[{\"version\": $$.(data[id = 
'latest'].latest),\"sourceUrl\":\"https://github.com/rancher/rancher\",\"changelogUrl\":$join([\"https://github.com/rancher/rancher/releases/\",data[id = 'latest'].latest])}],\"sourceUrl\": \"https://github.com/rancher/rancher\",\"homepage\": \"https://ranchermanager.docs.rancher.com\"}" + ] + } + } +} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f5634d3a1..84d1aa4f9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,17 +1,69 @@ -name: Release on Ansible Galaxy +--- +name: Release to Ansible Galaxy on: workflow_dispatch: # Not needed anymore, since branch protection #push: # branches: ["main"] - pull_request: - # The branches below must be a subset of the branches above - branches: [ "develop" ] + + # The branches below must be a subset of the branches above + #pull_request: + # branches: [ "main" ] jobs: - release: + lint: + name: Ansible Lint + runs-on: ubuntu-latest + steps: + - name: Which branch? + shell: bash + run: | + echo "${{ github.head_ref || github.ref_name }}" + + - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref || github.ref_name }} + + # step to install prerequis + - name: Install prerequis + shell: bash + run: | + make prerequis + + - name: Run ansible-lint + uses: ansible/ansible-lint@main + + update-version: + name: Update Versions + needs: lint runs-on: ubuntu-latest + + if: github.repository == 'mozebaltyk/rkub' + permissions: + actions: write + checks: write + contents: write + + steps: + # Checkout on branch where pull request + - name: Which branch? + shell: bash + run: | + echo "${{ github.head_ref || github.ref_name }}" + + - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref || github.ref_name }} + + # This one is redondant with renovate but make sure + # that all versions are updated before to release! 
+ - name: Update version in readme and changelog + uses: ./.github/actions/update-version + + release: name: Release to Galaxy + needs: update-version + runs-on: ubuntu-latest if: github.repository == 'mozebaltyk/rkub' permissions: actions: write @@ -29,7 +81,7 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} - + - run: git pull - name: Set up Python @@ -52,7 +104,7 @@ jobs: tag_version: ${{ steps.get_version.outputs.TAG_VERSION }} tag_exists: ${{ steps.check_tag.outputs.TAG_EXISTS }} if: ${{ steps.get_version.outputs.TAG_VERSION == steps.check_tag.outputs.TAG_EXISTS }} - uses: actions/github-script@v3 + uses: actions/github-script@v7 with: script: | core.setFailed('Release tag already exists in git (tag_exists var: ${tag_exists} and tag_version: ${tag_version})') @@ -75,7 +127,7 @@ jobs: run: | ansible-galaxy collection publish *.tar.gz --api-key $ANSIBLE_GALAXY_API_KEY - # GH tag and release + # GH tag and release - name: Create release tag run: | git config user.name github-actions diff --git a/.github/workflows/stage_airgap.yml b/.github/workflows/stage_airgap.yml new file mode 100644 index 000000000..a180bb5a2 --- /dev/null +++ b/.github/workflows/stage_airgap.yml @@ -0,0 +1,368 @@ +--- +name: Stage airgap install + +on: + workflow_dispatch: + +env: + DO_PAT: ${{secrets.DIGITALOCEAN_ACCESS_TOKEN}} + AWS_ACCESS_KEY_ID: ${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}} + AWS_SECRET_ACCESS_KEY: ${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}} + REGION: ${{secrets.DIGITALOCEAN_REGION}} + MOUNT_POINT: "/opt/rkub" + BUCKET: "rkub-github-action-${{ github.run_id }}" + #BUCKET: "terraform-backend-github" + CONTROLLER_COUNT: "1" + WORKER_COUNT: "1" + SIZE: "s-2vcpu-4gb" + +jobs: + bucket: + name: Bucket + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Set up S3cmd cli tool + uses: s3-actions/s3cmd@main + with: + provider: digitalocean + region: ${{secrets.DIGITALOCEAN_REGION}} + access_key: ${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}} + 
secret_key: ${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}} + + - name: Create Space Bucket + run: | + sed -i -e 's/signature_v2.*$/signature_v2 = True/' ~/.s3cfg + if [[ $BUCKET != "terraform-backend-github" ]]; then s3cmd mb s3://${BUCKET}; fi + sleep 10 + + package: + name: Package + runs-on: ubuntu-latest + needs: Bucket + timeout-minutes: 60 + + steps: + - name: Install s3fs-fuse on Ubuntu + run: | + sudo apt-get install automake autotools-dev fuse g++ git libcurl4-gnutls-dev libfuse-dev libssl-dev libxml2-dev make pkg-config + git clone https://github.com/s3fs-fuse/s3fs-fuse.git + cd s3fs-fuse + ./autogen.sh + ./configure + make + sudo make install + + - name: Mount Space Bucket + run: | + echo "${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}}:${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}}" > ./passwd-s3fs + chmod 600 ./passwd-s3fs + mkdir -p ${MOUNT_POINT} + s3fs ${BUCKET} ${MOUNT_POINT} -o url=https://${REGION}.digitaloceanspaces.com -o passwd_file=./passwd-s3fs + df -Th ${MOUNT_POINT} + + - name: Checkout files + uses: actions/checkout@v4 + + - name: Build + run: | + cd ./test + if [[ $BUCKET != "terraform-backend-github" ]]; then \ + ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook playbooks/hauler_build.yml -e dir_build="${MOUNT_POINT}" -e longhorn="true" -e archive="false"; \ + fi + + deploy: + name: Deploy + runs-on: ubuntu-latest + needs: Bucket + timeout-minutes: 20 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.7.3" + + - name: Terraform Init + id: init + run: | + cd ./DO/infra + terraform init -backend-config="bucket=${BUCKET}" + + - name: Terraform Validate + id: validate + run: | + cd ./DO/infra + terraform validate -no-color + + - name: Terraform Plan + id: plan + run: | + cd ./DO/infra + terraform plan -out=terraform.tfplan \ + -var "GITHUB_RUN_ID=$GITHUB_RUN_ID" \ + -var 
"token=${DO_PAT}" \ + -var "worker_count=${WORKER_COUNT}" \ + -var "controller_count=${CONTROLLER_COUNT}" \ + -var "instance_size=${SIZE}" \ + -var "spaces_access_key_id=${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}}" \ + -var "spaces_access_key_secret=${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}}" \ + -var "mount_point=${MOUNT_POINT}" \ + -var "terraform_backend_bucket_name=${BUCKET}" + continue-on-error: true + + - name: Terraform Plan Status + if: steps.plan.outcome == 'failure' + run: exit 1 + + - name: Terraform Apply + run: | + cd ./DO/infra + terraform apply terraform.tfplan + + # No relative path allowed + - name: Inventory artifacts + uses: actions/upload-artifact@v4 + with: + name: inventory + path: | + ${{ github.workspace }}/test/inventory/hosts.ini + if-no-files-found: error + + reachable: + name: Reachable + runs-on: ubuntu-latest + needs: deploy + timeout-minutes: 10 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Download inventory + uses: actions/download-artifact@v4 + with: + name: inventory + + - name: Check if inventory present + run: | + cat ${{ github.workspace }}/hosts.ini + + - name: Set up Python + id: setup_python + uses: actions/setup-python@v5 + with: + python-version: 3.9 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip3 install ansible + ansible --version + + - name: Get key and hosts.ini + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + cp ${{ github.workspace }}/hosts.ini inventory/hosts.ini + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Test if reachable + run: | + ANSIBLE_HOST_KEY_CHECKING=False ansible RKE2_CLUSTER -m ping -u root + + - name: Wait for cloud-init to finish + run: | + ANSIBLE_HOST_KEY_CHECKING=False ansible RKE2_CLUSTER -m shell -a "cloud-init status --wait" -u root -v + + install: + name: Install + runs-on: ubuntu-latest + needs: [ Reachable, Package ] + 
timeout-minutes: 60 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Install requirements + run: | + cd .. + make prerequis + + - name: Download inventory + uses: actions/download-artifact@v4 + with: + name: inventory + + - name: Get key and hosts.ini + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + cp ${{ github.workspace }}/hosts.ini inventory/hosts.ini + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Run playbook hauler_server.yml + run: | + ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root playbooks/hauler_server.yml -e dir_target=${MOUNT_POINT} + + - name: Run playbook install.yml + run: | + ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root playbooks/install.yml + + #- name: Run playbook rancher.yml + # run: | + # ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root --private-key .key playbooks/rancher.yml + + #- name: Run playbook longhorn.yml + # run: | + # ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root --private-key .key playbooks/longhorn.yml + + #- name: Run playbook neuvector.yml + # run: | + # ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root --private-key .key playbooks/neuvector.yml + + test: + name: Test + runs-on: ubuntu-latest + needs: Install + timeout-minutes: 10 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Download inventory + uses: actions/download-artifact@v4 + with: + name: inventory + + - name: Get key and hosts.ini + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + cp ${{ github.workspace }}/hosts.ini inventory/hosts.ini + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Install dependencies + run: | + python3 -m pip install --upgrade pip + python3 -m pip install -U pytest-testinfra ansible pytest-sugar pytest + ansible --version + + - name: Run Python Tests + run: | + 
export DEFAULT_PRIVATE_KEY_FILE=.key + python3 -m pytest --hosts=RKE2_CONTROLLERS --ansible-inventory=inventory/hosts.ini --force-ansible --connection=ansible basic_server_tests.py + python3 -m pytest --hosts=RKE2_WORKERS --ansible-inventory=inventory/hosts.ini --force-ansible --connection=ansible basic_agent_tests.py + + delay: + name: Delay + runs-on: ubuntu-latest + needs: Test + if: always() + + steps: + - name: Delay half an hour + uses: whatnick/wait-action@master + with: + time: '1800s' + + cleanup: + name: Cleanup + runs-on: ubuntu-latest + needs: Delay + if: always() + timeout-minutes: 30 + + defaults: + run: + shell: bash + working-directory: ./test/DO/infra + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.7.3" + + - name: Get key + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Terraform Init + id: init + run: | + terraform init -backend-config="bucket=${BUCKET}" + continue-on-error: true + + - name: Terraform plan delete stack + id: plan + run: | + terraform plan -destroy -out=terraform.tfplan \ + -var "GITHUB_RUN_ID=$GITHUB_RUN_ID" \ + -var "token=${DO_PAT}" \ + -var "worker_count=${WORKER_COUNT}" \ + -var "controller_count=${CONTROLLER_COUNT}" \ + -var "instance_size=${SIZE}" \ + -var "spaces_access_key_id=${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}}" \ + -var "spaces_access_key_secret=${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}}" \ + -var "mount_point=${MOUNT_POINT}" \ + -var "terraform_backend_bucket_name=${BUCKET}" + continue-on-error: true + + - name: Terraform Apply + run: | + terraform apply terraform.tfplan + continue-on-error: true + + - name: Set up S3cmd cli tool + uses: s3-actions/s3cmd@main + with: + provider: digitalocean + region: ${{secrets.DIGITALOCEAN_REGION}} + access_key: ${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}} + secret_key: 
${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}} + + - name: Remove Space bucket + run: | + sed -i -e 's/signature_v2.*$/signature_v2 = True/' ~/.s3cfg + if [[ $BUCKET != "terraform-backend-github" ]]; then s3cmd rb s3://${BUCKET} --recursive; fi + sleep 10 diff --git a/.github/workflows/stage_online.yml b/.github/workflows/stage_online.yml new file mode 100644 index 000000000..ece93b144 --- /dev/null +++ b/.github/workflows/stage_online.yml @@ -0,0 +1,336 @@ +--- +name: Stage online install + +on: + workflow_dispatch: + +env: + DO_PAT: ${{secrets.DIGITALOCEAN_ACCESS_TOKEN}} + AWS_ACCESS_KEY_ID: ${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}} + AWS_SECRET_ACCESS_KEY: ${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}} + REGION: ${{secrets.DIGITALOCEAN_REGION}} + MOUNT_POINT: "/opt/rkub" + BUCKET: "rkub-github-action-${{ github.run_id }}" + #BUCKET: "terraform-backend-github" + CONTROLLER_COUNT: "1" + WORKER_COUNT: "1" + SIZE: "s-2vcpu-4gb" + +jobs: + bucket: + name: Bucket + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Set up S3cmd cli tool + uses: s3-actions/s3cmd@main + with: + provider: digitalocean + region: ${{secrets.DIGITALOCEAN_REGION}} + access_key: ${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}} + secret_key: ${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}} + + - name: Create Space Bucket + run: | + sed -i -e 's/signature_v2.*$/signature_v2 = True/' ~/.s3cfg + if [[ $BUCKET != "terraform-backend-github" ]]; then s3cmd mb s3://${BUCKET}; fi + sleep 10 + + deploy: + name: Deploy + runs-on: ubuntu-latest + needs: [ Bucket ] + timeout-minutes: 20 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.7.3" + + - name: Terraform Init + id: init + run: | + cd ./DO/infra + terraform init -backend-config="bucket=${BUCKET}" + + - name: Terraform Validate + id: validate + run: | + cd ./DO/infra + 
terraform validate -no-color + + - name: Terraform Plan + id: plan + run: | + cd ./DO/infra + terraform plan -out=terraform.tfplan \ + -var "GITHUB_RUN_ID=$GITHUB_RUN_ID" \ + -var "token=${DO_PAT}" \ + -var "worker_count=${WORKER_COUNT}" \ + -var "controller_count=${CONTROLLER_COUNT}" \ + -var "instance_size=${SIZE}" \ + -var "spaces_access_key_id=${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}}" \ + -var "spaces_access_key_secret=${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}}" \ + -var "mount_point=${MOUNT_POINT}" \ + -var "airgap=false" \ + -var "terraform_backend_bucket_name=${BUCKET}" + continue-on-error: true + + - name: Terraform Plan Status + if: steps.plan.outcome == 'failure' + run: exit 1 + + - name: Terraform Apply + run: | + cd ./DO/infra + terraform apply terraform.tfplan + + - name: Display inventory + run: | + ls -l ${{ github.workspace }}/test/inventory/hosts.ini + cat inventory/hosts.ini + + # No relative path allowed + - name: Inventory artifacts + uses: actions/upload-artifact@v4 + with: + name: inventory + path: | + ${{ github.workspace }}/test/inventory/hosts.ini + if-no-files-found: error + + reachable: + name: Reachable + runs-on: ubuntu-latest + needs: [ Deploy ] + timeout-minutes: 10 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Download inventory + uses: actions/download-artifact@v4 + with: + name: inventory + + - name: Check if inventory present + run: | + cat ${{ github.workspace }}/hosts.ini + + - name: Set up Python + id: setup_python + uses: actions/setup-python@v5 + with: + python-version: 3.9 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip3 install ansible pytest-testinfra + ansible --version + + - name: Get key and hosts.ini + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + cp ${{ github.workspace }}/hosts.ini inventory/hosts.ini + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Test 
if reachable + run: | + ANSIBLE_HOST_KEY_CHECKING=False ansible RKE2_CLUSTER -m ping -u root + + - name: Wait for cloud-init to finish + run: | + ANSIBLE_HOST_KEY_CHECKING=False ansible RKE2_CLUSTER -m shell -a "cloud-init status --wait" -u root -v + + install: + name: Install + runs-on: ubuntu-latest + needs: [ Reachable ] + timeout-minutes: 60 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Install requirements + run: | + cd .. + make prerequis + + - name: Download inventory + uses: actions/download-artifact@v4 + with: + name: inventory + + - name: Get key and hosts.ini + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + cp ${{ github.workspace }}/hosts.ini inventory/hosts.ini + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Run playbook install.yml + run: | + ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root playbooks/install.yml -e "airgap=false" -e "method=tarball" + + #- name: Run playbook rancher.yml + # run: | + # ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root playbooks/rancher.yml + + #- name: Run playbook longhorn.yml + # run: | + # ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root playbooks/longhorn.yml + + #- name: Run playbook neuvector.yml + # run: | + # ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root playbooks/neuvector.yml + + test: + name: Test + runs-on: ubuntu-latest + needs: [ Install ] + timeout-minutes: 10 + + defaults: + run: + shell: bash + working-directory: ./test + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Download inventory + uses: actions/download-artifact@v4 + with: + name: inventory + + - name: Get key and hosts.ini + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + cp ${{ github.workspace }}/hosts.ini inventory/hosts.ini + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Install dependencies + run: | + python3 -m pip install 
--upgrade pip + python3 -m pip install -U pytest-testinfra ansible pytest-sugar pytest + ansible --version + + - name: Run Python Tests + run: | + export DEFAULT_PRIVATE_KEY_FILE=.key + python3 -m pytest --hosts=RKE2_CONTROLLERS --ansible-inventory=inventory/hosts.ini --force-ansible --connection=ansible basic_server_tests.py + python3 -m pytest --hosts=RKE2_WORKERS --ansible-inventory=inventory/hosts.ini --force-ansible --connection=ansible basic_agent_tests.py + + delay: + name: Delay + runs-on: ubuntu-latest + needs: [ Test ] + if: always() + + steps: + - name: Delay half an hour + uses: whatnick/wait-action@master + with: + time: '1800s' + + cleanup: + name: Cleanup + runs-on: ubuntu-latest + needs: [ Delay ] + if: always() + timeout-minutes: 30 + + defaults: + run: + shell: bash + working-directory: ./test/DO/infra + + steps: + - name: Checkout files + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.7.3" + + - name: Get key + run: | + echo "$SSH_KEY" > .key + chmod 400 .key + shell: bash + env: + SSH_KEY: ${{secrets.SSH_PRIVATE_KEY}} + + - name: Terraform Init + id: init + run: | + terraform init -backend-config="bucket=${BUCKET}" + continue-on-error: true + + - name: Terraform plan delete stack + id: plan + run: | + terraform plan -destroy -out=terraform.tfplan \ + -var "GITHUB_RUN_ID=$GITHUB_RUN_ID" \ + -var "token=${DO_PAT}" \ + -var "worker_count=${WORKER_COUNT}" \ + -var "controller_count=${CONTROLLER_COUNT}" \ + -var "instance_size=${SIZE}" \ + -var "spaces_access_key_id=${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}}" \ + -var "spaces_access_key_secret=${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}}" \ + -var "mount_point=${MOUNT_POINT}" \ + -var "airgap=false" \ + -var "terraform_backend_bucket_name=${BUCKET}" + continue-on-error: true + + - name: Terraform Apply + run: | + terraform apply terraform.tfplan + continue-on-error: true + + - name: Set up S3cmd cli tool + uses: 
s3-actions/s3cmd@main + with: + provider: digitalocean + region: ${{secrets.DIGITALOCEAN_REGION}} + access_key: ${{secrets.DIGITALOCEAN_SPACES_ACCESS_TOKEN}} + secret_key: ${{secrets.DIGITALOCEAN_SPACES_SECRET_KEY}} + + - name: Remove Space bucket + run: | + sed -i -e 's/signature_v2.*$/signature_v2 = True/' ~/.s3cfg + if [[ $BUCKET != "terraform-backend-github" ]]; then s3cmd rb s3://${BUCKET} --recursive; fi + sleep 10 diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml deleted file mode 100644 index 219442b04..000000000 --- a/.github/workflows/version.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Release on Ansible Galaxy -on: - workflow_dispatch: - push: - branches: ["develop"] - -jobs: - readme: - runs-on: ubuntu-latest - name: Update Versions - - if: github.repository == 'mozebaltyk/rkub' - permissions: - actions: write - checks: write - contents: write - - steps: - - uses: actions/checkout@v4 - - - name: Update Readme and changelog - uses: ./.github/actions/update-readme diff --git a/.gitignore b/.gitignore index 83e8a222e..d6acbf124 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,30 @@ +# tmp .*.swp .govmomi -.terraform* -terraform.tfstate* -# Workstation +# Workstation .DS_Store .vscode # loaded images **/roles/**/files/images/* images +context # Galaxy artifacts. 
*.tar.gz -*.zip \ No newline at end of file +*.zip +*.iso +*.log +*.png +*.tgz +# Staging +*kubeconfig.yaml +*terraform* +node_modules +package*.json +*.tfstate +hosts.ini +.key* +Chart.lock +.venv/ +__pycache__ +.pytest_cache +AWS diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 98e49c799..2a8053191 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,26 +3,29 @@ # See https://pre-commit.com/hooks.html for more hooks repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + # Latest release from https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-added-large-files + - id: check-executables-have-shebangs + - id: check-merge-conflict + - id: check-shebang-scripts-are-executable + - id: detect-private-key + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace + - id: check-yaml + - repo: https://github.com/adrienverge/yamllint.git - rev: v1.17.0 + rev: v1.33.0 hooks: - id: yamllint args: [-c=.yamllint] - repo: https://github.com/ansible-community/ansible-lint.git # Latest release from https://github.com/ansible-community/ansible-lint - rev: v6.8.0 + rev: v6.22.2 hooks: - id: ansible-lint files: \.(yaml|yml)$ - - - repo: https://github.com/pre-commit/pre-commit-hooks - # Latest release from https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 - hooks: - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files - - id: check-merge-conflict - - id: detect-private-key - - id: check-symlinks diff --git a/.yamllint b/.yamllint index bc0e4f2b4..759cb289d 100644 --- a/.yamllint +++ b/.yamllint @@ -1,3 +1,4 @@ +--- # Based on ansible-lint config extends: default diff --git a/CHANGELOG.md b/CHANGELOG.md index 2639086eb..12eb9a68c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,44 +1,64 @@ # CHANGELOG.md -## 1.0.2 (2024-01-14) - -Versions: - - rke2 version: 1.26.11 - - cert-manager version: 1.13.3 - - rancher version: 2.8.0 - - longhorn version: 
1.5.3 - - neuvector version: 2.6.6 +## 1.0.3 (2024-05-23) + +### Versions + +- rke2 version: 1.27.12 + +- kube-vip: 0.8.0 + +- cert-manager version: 1.14.1 + +- rancher version: 2.8.1 + +- longhorn version: 1.6.0 + +- neuvector version: 2.7.2 -Features ✨ - - Install RKE2 one controler and several workers (currently no HA): - - Add nerdctl. - - Setup an admin on master node (kuberoot). - - Deploy local registry and images loaded inside. - - Setup firewalld rules if needed. - - Make "master_ip" and "domain" parametrable. - - Script to uninstall everything. - - Deploy longhorn with custom datapath. - - Deploy Rancher with custom password. - - Deploy Neuvector. - - Script to containerize in an Execution-Env. +### Features ✨ + - [x] Install RKE2 one controler and several workers (currently no HA): + - [x] Add nerdctl. + - [x] Setup an admin on master node (kuberoot). + - [x] Deploy local registry and images loaded inside. + - [x] Setup firewalld rules if needed. + - [x] Make "master_ip" and "domain" parametrable. + - [x] Deploy longhorn with custom datapath. + - [x] Deploy Rancher with custom password. + - [x] Deploy Neuvector. + - [x] Script to containerize in an Execution-Env. + - [x] Script to uninstall everything + - [x] More install customization and options + - [x] Improve collection to run as true collection + - [x] CI workflows + - [x] Quickstart script + +Use case: + - [x] airgap + - [x] non-airgap + - [x] standalone + - [x] one-master-and-x-workers + - [ ] masters-HA 🚧 + - [ ] update/upgrade 🚧 + - [ ] change-config 🚧 -Fix 🩹 +### Fix 🩹 - Firewalld conditions to apply only when running. - Correct names and tasks order. -Bugfix 🐞 +### Bugfix 🐞 - Correct scripts for prerequisites. -Security 🔒️ +### Security 🔒️ - Branch protect - - Github Workflows to release. + - Github Workflows to release and lint. 
diff --git a/Makefile b/Makefile index 3820dfc15..a5d205e16 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,7 @@ -# Rkub Makefile - -export INVENTORY ?= ./plugins/inventory -export ANSIBLE_USER ?= admin -export EXTRA_VARS := $(shell for n in $$INSTALL_VARS; do echo "-e $$n "; done ) -export OPT ?= -export ANSIBLE_ARGS = -i $(INVENTORY) -u $(ANSIBLE_USER) $(EXTRA_VARS) $(OPT) +# Rkub Makefile +export WORKERS ?= 0 +export MASTERS ?= 1 +export SIZE_MATTERS ?= "s-2vcpu-4gb" export REGISTRY ?= localhost:5000 export EE_IMAGE ?= ee-rkub @@ -18,30 +15,63 @@ export EE_PACKAGE_PATH ?= $$HOME/$(EE_PACKAGE_NAME) prerequis: $(MAKE) -C ./scripts/prerequis all +.PHONY: quickstart +## Create a RKE2 cluster on Digital Ocean +quickstart: + # Checks vars settings + @for v in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY DO_PAT; do \ + eval test -n \"\$$$$v\" || { echo "You must set environment variable $$v"; exit 1; } && echo $$v; \ + done + # S3 bucket for Backend + @cd ./test/DO/backend && terraform init + @cd ./test/DO/backend && terraform plan -out=terraform.tfplan \ + -var "token=$(DO_PAT)" \ + -var "spaces_access_key_id=$(AWS_ACCESS_KEY_ID)" \ + -var "spaces_access_key_secret=$(AWS_SECRET_ACCESS_KEY)" + @cd ./test/DO/backend && terraform apply "terraform.tfplan" + # Create infra with Terrafrom + @cd ./test/DO/infra && terraform init + @cd ./test/DO/infra && terraform plan -out=terraform.tfplan \ + -var "token=$(DO_PAT)" \ + -var "worker_count=$(WORKERS)" \ + -var "controller_count=$(MASTERS)" \ + -var "instance_size=$(SIZE_MATTERS)" \ + -var "spaces_access_key_id=$(AWS_ACCESS_KEY_ID)" \ + -var "spaces_access_key_secret=$(AWS_SECRET_ACCESS_KEY)" + @cd ./test/DO/infra && terraform apply "terraform.tfplan" + # Run playbooks + @sleep 10 + @cd ./test && ansible-playbook playbooks/install.yml -e "stable=true" -e "airgap=false" -e "method=rpm" -u root + +.PHONY: quickstart-cleanup +## Remove RKE2 cluster from quickstart on Digital Ocean +quickstart-cleanup: + # Checks vars settings + @for 
v in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY DO_PAT; do \ + eval test -n \"\$$$$v\" || { echo "You must set environment variable $$v"; exit 1; } && echo $$v; \ + done + # Delete infra with Terrafrom + @cd ./test/DO/infra && terraform init + @cd ./test/DO/infra && terraform plan -destroy -out=terraform.tfplan \ + -var "token=$(DO_PAT)" \ + -var "worker_count=$(WORKERS)" \ + -var "controller_count=$(MASTERS)" \ + -var "instance_size=$(SIZE_MATTERS)" \ + -var "spaces_access_key_id=$(AWS_ACCESS_KEY_ID)" \ + -var "spaces_access_key_secret=$(AWS_SECRET_ACCESS_KEY)" + @cd ./test/DO/infra && terraform apply "terraform.tfplan" + # Remove S3 bucket for Backend + @cd ./test/DO/backend && terraform init + @cd ./test/DO/backend && terraform plan -destroy -out=terraform.tfplan \ + -var "token=$(DO_PAT)" \ + -var "spaces_access_key_id=$(AWS_ACCESS_KEY_ID)" \ + -var "spaces_access_key_secret=$(AWS_SECRET_ACCESS_KEY)" + @cd ./test/DO/backend && terraform apply "terraform.tfplan" + .PHONY: build ## Run playbook to build rkub zst package on localhost. build: - ansible-playbook ./playbooks/tasks/build.yml $(ANSIBLE_ARGS) - -.PHONY: upload -## Run playbook to upload rkub zst package. -upload: - ansible-playbook ./playbooks/tasks/upload.yml $(ANSIBLE_ARGS) - -.PHONY: install -## Run playbook to install rkub. -install: - ansible-playbook ./playbooks/tasks/install.yml $(ANSIBLE_ARGS) - -.PHONY: uninstall -## Run playbook to uninstall rkub. -uninstall: - ansible-playbook ./playbooks/tasks/uninstall.yml $(ANSIBLE_ARGS) - - -################## -## EE Container ## -################## + ansible-playbook ./playbooks/build.yml .PHONY: ee-container ## Create an execution-env container with all dependencies inside @@ -103,4 +133,4 @@ show-help: } \ printf "\n"; \ }' \ - | cat + | cat diff --git a/README.md b/README.md index e4a039c18..20ac05b21 100644 --- a/README.md +++ b/README.md @@ -1,181 +1,286 @@

Ansible Collection - Rkub

-Ansible Collection to deploy a rancher RKE2 cluster in airgap mode. +Ansible Collection to deploy and test Rancher stacks (RKE2, Rancher, Longhorn and Neuvector). [![Releases](https://img.shields.io/github/release/MozeBaltyk/rkub)](https://github.com/MozeBaltyk/rkub/releases) [![License: Apache-2.0](https://img.shields.io/badge/License-Apache%202.0-green.svg)](https://opensource.org/licenses/Apache-2.0/) - +[![Stage airgap](https://github.com/MozeBaltyk/Rkub/actions/workflows/stage_airgap.yml/badge.svg)](https://github.com/MozeBaltyk/Rkub/actions/workflows/stage_airgap.yml) +[![Stage online](https://github.com/MozeBaltyk/Rkub/actions/workflows/stage_online.yml/badge.svg)](https://github.com/MozeBaltyk/Rkub/actions/workflows/stage_online.yml) ## Description -Ansible collection met to install in airgap environnement RKE2 (one controler and several workers, currently no HA): +This Ansible collection will install in airgap environnement RKE2 (one controler and several workers, currently no HA): - - [RKE2 1.26.11](https://docs.rke2.io) - Security focused Kubernetes - - [Rancher 2.8.0](https://www.suse.com/products/suse-rancher/) - Multi-Cluster Kubernetes Management - - [Longhorn 1.5.3](https://longhorn.io) - Unified storage layer - - [Neuvector 2.6.6](https://neuvector.com/) - Kubernetes Security Platform +**Current develop - Ansible Collection Rkub 1.0.3 include by default:** + +- [RKE2 1.27.12](https://docs.rke2.io) - Security focused Kubernetes (channel stable) + +- [Kube-vip 0.8.0](https://kube-vip.io/) - Virtual IP and load balancer + +- [Cert-manager 1.14.1](https://cert-manager.io/docs/) - Certificate manager + +- [Rancher 2.8.1](https://www.suse.com/products/suse-rancher/) - Multi-Cluster Kubernetes Management + +- [Longhorn 1.6.0](https://longhorn.io) - Unified storage layer + +- [Neuvector 2.7.2](https://neuvector.com/) - Kubernetes Security Platform + -This Project is mainly inspired from 
[Clemenko/rke_airgap_install](https://github.com/clemenko/rke_airgap_install/blob/main/air_gap_all_the_things.sh). -I tried it and like the idea but I was frustrated with Shell scripting limitations. So I decided to rewrite it in Ansible. +This Project is mainly inspired from [Clemenko/rke_airgap_install](https://github.com/clemenko/rke_airgap_install/) but Shell scripting brings limitations. So Let's rewrite it in Ansible which comes with below benefices: + +- Idempotency: can be relaunch multiple time. + +- User agnostic: can be launch by any user (with sudo rights). + +- OS agnositc: can be launch on any Linux systems (at least for the package build, for the install part, it depends on your participation 😸) + +Add-on from this Ansible collection: + +- Some flexibility about path with the possibility to build and install on a choosen path. + +- Admin user (by default 'kuberoot') on first controller node with some admin tools (k9s, helm and kubectl). + +- Import kubeconfig on Ansible controller host and add it to kubecm if present (to be able to admin rke2 cluster from localhost). -With Ansible: +- Nerdctl as complement of containerd to handle oci-archive. -* Idempotency: can be relaunch multiple time. +- Uninstall playbook to cleanup (and maybe reinstall if needed). -* User agnostic: can be launch by any user (with sudo rights). +- Ansible Collection Released, so possibilty to get back to older versions. -* OS agnositc: can be launch on any Linux systems (at least for the package build, for the install depend on your participation to this project 😸) +- Quickstart script to triggers an RKE2 cluster in Digital Ocean and delete it once required. 
-Add-on from my part: -* Some part which were manual in Clemenko procedure are automated with Ansible (like the upload or NFS mount) +## Use Case -* Some flexibility about path (possible to export or mount NFS in choosen place) +Currently only install: -* Arkade to install utilities binaries +- on Rocky8 -* Admin user (by default kuberoot) on first controler node with all necessary tools +- airgap or online install -* Nerdctl (as complement of containerd) +- tarball or rpm method -* Firewalld settings if firewalld running +- Defined versions or versions from Stable channels -* Uninstall playbook to cleanup (and maybe reinstall if needed) +- Canal CNI -* Collection Released, so possibilty to get back to older versions +- Digital Ocean +But the target would be to handle all the usecase below: + +| OS | Versions | Method | CNI | Cloud providers | Cluster Arch | Extra Install | +|--------|-----------------------------|----------------|--------|-----------------|-----------------------|-----------------| +| Rocky8 | Defined in this collection | airgap tarball | Canal | Digital Ocean | Standalone | Kubevip | +| Ubuntu | Stable channels | airgap rpm | | AWS | One Master, x Workers | Longhorn | +| | Custom | online tarball | | Azure | 3 Masters, x Workers | Rancher | +| | | online rpm | | | | Neuvector | ## Prerequisites -* Linux Host as a package builder (can be a VM or your WSL). Count 30G of free space in the build directory of your package builder (17G for download + 7G for the zst package). +- Linux Host as a package builder (can be a VM or your WSL). Count 10G of free space in the build directory of your package builder. + +- An Ansible Controler, can be the same host for ansible and for building package, at your convenience... + +- A minimum of 2 hosts RHEL-like (2 vCPU and 8G of RAM) for the cluster RKE2 with 80G at least on target directory. 
+ +## Quickstart + +As prerequisities, you will need a Digital Ocean accompte and set your `Token` and a `Spaces key` inside API tabs. + +- Clone the main branch of this project to a machine with an internet access: + `git clone -b main https://github.com/MozeBaltyk/Rkub.git` + +- Execute `make prerequis` to install all prerequisites defined in meta directory. -* An Ansible Controler, can be the same host for ansible and for building package, at your convenience... +- Export vars and Execute as below: -* A minimum of 2 hosts RHEL-like for the cluster RKE2 with 80G at least on target directory. +```bash +export DO_PAT="xxxxxxxxxx" +export AWS_ACCESS_KEY_ID="xxxxxxxxxxxx" +export AWS_SECRET_ACCESS_KEY="xxxxxxxxxxx" +export WORKERS=2 + +# Create RKE2 cluster +make quickstart + +# Delete RKE2 cluster +make quickstart-cleanup +``` +## Global Usage -## Getting started +1. Preparation steps for classic ansible controller: -1. Preparation steps: +- Create some SSH keys and deploy it on target hosts. - * Clone this project on local machine which have an internet access. +- Define an ansible.cfg - * Execute `make prerequis` to install all prerequisites defined in meta directory. - - * Complete directory inside `./plugins/inventory/hosts.yml`. +- Define an inventory (example in `./plugins/inventory/hosts.yml`). -NB: `make` alone display options and descriptions. +then use it... -2. Build your package by running (works on Debian-like and Redhat-like): -```sh -ansible-playbook playbooks/tasks/build.yml # All arguments below are not mandatory --e dir_build="$HOME/rkub" # Directory where to upload everything (count 30G) --e package_name="rke2_rancher_longhorn.zst" # Name of the package, by default rke2_rancher_longhorn.zst +2. Build your package by running (works on Debian-like or Redhat-like and targets localhost). +This step concern only an airgap install. 
If targeted servers have an internet access then skip and go to step 5: + +```sh +ansible-playbook mozebaltyk.rkub.build.yml # All arguments below are not mandatory +-e "dir_build=$HOME/rkub" # Directory where to upload everything (count 10G) +-e "package_name=rkub.zst" # Name of the package, by default rkub.zst +-e "archive=true" # Archive tar.zst true or false (default value "true") +-e "stable=false" # Stable channels or take version as defined in Rkub collection (default value "false") +-e "method=tarball" # Method for install, value possible "tarball" or "rpm" (default value "tarball") +-e "el=9" # RHEL version (take default value from localhost if OS is different from RedHat-like take value "8") +-e "all=false" # Add all components kubevip,longhorn,rancher,neuvector (default value "false") +-e "kubevip=true longhorn=true rancher=true neuvector=true" # Add extras components to package (default value from var 'all') +-u admin -Kk # Other Ansible Arguments (like -vvv) +``` + +3. Push your package to first controler: + +```sh +ansible-playbook mozebaltyk.rkub.upload.yml # All arguments below are not mandatory +-e "package_path=/home/me/rkub.zst" # Will be prompt if not given in the command +-e "dir_target=/opt/rkub" # Directory where to sync and unarchive (by default /opt/rkub, count 30G available) -u admin -Kk # Other Ansible Arguments (like -vvv) ``` -3. Push your package to first controler: +4. 
Deploy Hauler services: + ```sh -ansible-playbook playbooks/tasks/upload.yml # All arguments below are not mandatory --e package_path=/home/me/rke2_rancher_longhorn.zst # Will be prompt if not given in the command --e dir_target=/opt # Directory where to sync and unarchive (by default /opt, count 50G available) --u admin -Kk # Other Ansible Arguments (like -vvv) +ansible-playbook mozebaltyk.rkub.hauler.yml # All arguments below are not mandatory +-e "dir_target=/opt/rkub" # Directory where to find package untar with previous playbook +-u admin -Kk # Other Ansible Arguments (like -vvv) ``` -4. Start installation: +5. Start installation: + ```sh -ansible-playbook playbooks/tasks/install.yml # All arguments below are not mandatory --e dir_target=/opt # Dir on first master where to find package unarchive by previous task (by default /opt, count 50G available) --e dir_mount=/mnt/rkub # NFS mount point (on first master, it will be a symlink to "dir_target") --e domain="example.com" # By default take the host domain from master server +ansible-playbook mozebaltyk.rkub.install.yml # All arguments below are not mandatory +-e domain="example.com" # By default take the host domain from master server +-e "method=tarball" # Method for install, value possible "tarball" or "rpm" (default value "tarball") +-e "airgap=true" # if servers have internet access then set airgap to false (default value "true") + -e "stable=false" # if airgap false then choose btw Stable channels or version from this collection. (default value "false") -u admin -Kk # Other Ansible Arguments (like -vvv) ``` -5. Deploy Rancher: +6. 
Deploy Rancher: + ```sh -ansible-playbook playbooks/tasks/rancher.yml # All arguments below are not mandatory --e dir_mount=/mnt/rkub # NFS mount point, by default value is /mnt/rkub --e domain="example.com" # Domain use for ingress, by default take the host domain from master server +ansible-playbook mozebaltyk.rkub.rancher.yml # All arguments below are not mandatory +-e domain="example.com" # Domain use for ingress, by default take the host domain from master server -e password="BootStrapAllTheThings" # Default password is "BootStrapAllTheThings" -u admin -Kk # Other Ansible Arguments (like -vvv) ``` -6. Deploy Longhorn: +7. Deploy Longhorn: + ```sh -ansible-playbook playbooks/tasks/longhorn.yml # All arguments below are not mandatory --e dir_mount=/mnt/rkub # NFS mount point, by default value is /mnt/rkub +ansible-playbook mozebaltyk.rkub.longhorn.yml # All arguments below are not mandatory -e domain="example.com" # Domain use for ingress, by default take the host domain from master server --e datapath="/opt/longhorn" # Longhorn Path for PVC, by default equal "{{ dir_target }}/longhorn". - # The best is to have a dedicated LVM filesystem for this one. +-e datapath="/data/longhorn" # Longhorn Path for PVC (default "/data/longhorn"). + # The best is to have a dedicated LVM filesystem for this one. -u admin -Kk # Other Ansible Arguments (like -vvv) ``` -7. Deploy Neuvector +8. Deploy Neuvector + ```sh -ansible-playbook playbooks/tasks/neuvector.yml # All arguments below are not mandatory --e dir_mount=/mnt/rkub # NFS mount point, by default value is /mnt/rkub +ansible-playbook mozebaltyk.rkub.neuvector.yml # All arguments below are not mandatory -e domain="example.com" # Domain use for ingress, by default take the host domain from master server -u admin -Kk # Other Ansible Arguments (like -vvv) ``` -8. Bonus: +## Container methode + +1. 
This is a custom script which imitate Execution-Environement: + +- `make ee-container` will load an UBI-8 image and execute inside `make prerequis` -With make command, all playbooks above are in the makefile. `make` alone display options and small descriptions. +- `make ee-exec` Run image with collection and package zst mounted inside. Launch playbook or make command as described above. + +All prerequisites are set in folder `meta` and `meta/execution-environment.yml`. So it's possible to use ansible-builder (though not tested yet). + +## Some details + +I favored the tarball installation since it's the most compact and install rely on a archive tar.zst which stay on all nodes. +The rpm install is much straight forward but match only system with RPM (so mainly Redhat-like) and require a registry. +But the rpm method with the stable channel is used for the quickstart install. + +**build** have for purpose to create a tar.zst with following content using hauler tool: ```bash -# Example with make -make install # All arguments below are not mandatory -ANSIBLE_USER=admin # equal to '-u admin' -"OPT=-e domain=example.com -Kk" # redefine vars or add options to ansible-playbook command +rkub +├── airgap_hauler.yaml # yaml listing all resources +├── hauler # hauler binary +└── store # hauler store made from above yaml and hauler command + ├── blobs + │   └── sha256 + │   ├── 024f2ae6c3625583f0e10ab4d68e4b8947b55d085c88e34c0bd916944ed05add + └── index.json ``` -## Container methode +It will store and build package regarding: -1. This is a custom script which imitate Execution-Environement: +- Chosen install method for rke2 (tarbal or rpm) +- Chosen components (kube-vip, longhorn, rancher, neuvector) +- Chosen channels stable or versions defined in this collection - * `make ee-container` will load an UBI-8 image and execute inside `make prerequis` - - * `make ee-exec` Run image with collection and package zst mounted inside. Launch playbook or make command as described above. 
+**upload** push the big monster packages (around 7G) and unarchive on first node on chosen targeted path. +**hauler** (by default on first controller but could be on dedicated server) -## Roadmap +- deploy a registry as systemd service and make it available on port 5000 using hauler. +- deploy a fileserver as systemd service and make it available on port 8080 using hauler. -Milestones: +**install** RKE2 (currently only one master) with: -* More install customization and options +- Install rke2 with tarball method by default or rpm method if given in argument. +- An admin user (by default `kuberoot`) on first master with some administation tools like `k9s` `kubectl` or `helm`. +- Nerdctl as complement to containerd and allow oci-archive. +- Firewalld settings if firewalld running. +- Selinux rpm if selinux enabled. +- Fetch and add kubeconfig to ansible controller in directory ./kube (and add to kubecm if present). -* HA masters with kubevip +**deploy** keeping this order, *Rancher*, *Longhorn*, *Neuvector* -* To add bootstrap with ArgoCD +- Those are simple playbooks which deploy with helm charts either in airgap or online mode. +- It use the default ingress from RKE2 *Nginx-ingress* in https (currently Self-sign certificate) +- *Rancher* need *Certmanager*, So it deploy first Certmanager -* Add a option to chooce by url mode or airgap mode +## Roadmap -Improvments: +Milestones: -* Improve collection to run as true collection +* More install customization and options +* HA masters with kubevip + +* Allow several providers (currently only DO) -# Special thanks to 📢 +# Acknowledgements -* Clemenko, for the idea [Clemenko/rke_airgap_install](https://github.com/clemenko/rke_airgap_install/blob/main/air_gap_all_the_things.sh). +## Special thanks to 📢 -* Alex Ellis, for its [Arkade project](https://github.com/alexellis/arkade). I cannot live without anymore. +* Clemenko, for the idea [Clemenko/rke_airgap_install](https://github.com/clemenko/rke_airgap_install/). 
-## Github sources +## References -[Clemenko/rke_airgap_install](https://github.com/clemenko/rke_airgap_install/blob/main/air_gap_all_the_things.sh) +- [Clemenko/rke_airgap_install](https://github.com/clemenko/rke_airgap_install/) -[rancherfederal/RKE2-ansible](https://github.com/rancherfederal/rke2-ansible) +- [rancherfederal/RKE2-ansible](https://github.com/rancherfederal/rke2-ansible) -[lablabs/ansible-role-rke2](https://github.com/lablabs/ansible-role-rke2) +- [lablabs/ansible-role-rke2](https://github.com/lablabs/ansible-role-rke2) -[rancher/RKE2](https://github.com/rancher/rke2) +- [rancher/RKE2](https://github.com/rancher/rke2) +- [rancher/quickstart](https://github.com/rancher/quickstart) -## Authors -morze.baltyk@proton.me +## Repo Activity +![Alt](https://repobeats.axiom.co/api/embed/2664e49768529526895630ae70e2a366a70de78f.svg "Repobeats analytics image") ## Project status + Still on developement diff --git a/galaxy.yml b/galaxy.yml index eea78bfd9..f7ca14026 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -2,7 +2,7 @@ ### REQUIRED namespace: mozebaltyk name: rkub -version: 1.0.2 +version: 1.0.3 readme: README.md @@ -14,16 +14,16 @@ description: > Ansible Collection to deploy a rancher RKE2 cluster. 
license: -- Apache-2.0 + - Apache-2.0 -tags: +tags: - devops - kubernetes - k8s - rancher - rke2 - longhorn - - neuvector + - neuvector - ansible - airgap - collection @@ -39,12 +39,12 @@ issues: https://github.com/MozeBaltyk/rkub/issues build_ignore: - .gitignore - - '.*.swp' - - .govmomi + - '.*.swp' + - .govmomi - '.terraform*' - 'terraform.tfstate*' - .DS_Store - .vscode - '.*.tar.gz' - '*.zip' - + - 'test' diff --git a/meta/ee-bindeps.txt b/meta/ee-bindeps.txt index 3bed7e846..98a5615bb 100644 --- a/meta/ee-bindeps.txt +++ b/meta/ee-bindeps.txt @@ -9,4 +9,12 @@ openssl [platform:dpkg] sshpass [platform:rpm] sshpass [platform:dpkg] rsync [platform:rpm] -rsync [platform:dpkg] \ No newline at end of file +rsync [platform:dpkg] +zstd [platform:rpm] +zstd [platform:dpkg] +jq [platform:rpm] +jq [platform:dpkg] +tar [platform:rpm] +tar [platform:dpkg] +gzip [platform:rpm] +gzip [platform:dpkg] diff --git a/meta/execution-environment.yml b/meta/execution-environment.yml index 65b42944a..7a7ae6148 100644 --- a/meta/execution-environment.yml +++ b/meta/execution-environment.yml @@ -1,17 +1,28 @@ --- -version: 4 +version: 3 dependencies: - python: meta/ee-requirements.txt - galaxy: meta/ee-requirements.yml - system: meta/ee-bindep.txt - # custom - arkade: meta/ee-arkade.txt - images: meta/ee-images.txt + python: ee-requirements.txt + galaxy: ee-requirements.yml + system: ee-bindeps.txt + +# Custom Dependencies +additional_build_steps: + prepend_final: | + RUN pip3 install --upgrade pip setuptools + append_final: + - RUN $(MAKE) -C ./scripts/prerequis arkade + - RUN $(MAKE) -C ./scripts/prerequis images images: base_image: name: registry.redhat.io/ansible-automation-platform-24/ee-minimal-rhel8:latest +options: + package_manager_path: /usr/bin/microdnf + # Check documentation: -# https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html \ No newline at end of file +# 
https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html + +# Run : +# ansible-builder build --tag test --container-runtime podman -f meta/execution-environment.yml diff --git a/playbooks/build.yml b/playbooks/build.yml new file mode 100644 index 000000000..9410f8b31 --- /dev/null +++ b/playbooks/build.yml @@ -0,0 +1,9 @@ +--- +- name: Build RKE2 Package with Hauler + hosts: localhost + connection: local + gather_facts: false + vars_files: ../vars/main.yml + tags: build + roles: + - {role: mozebaltyk.rkub.build_airgap_hauler, tags: hauler} diff --git a/playbooks/hauler.yml b/playbooks/hauler.yml new file mode 100644 index 000000000..e6b04e927 --- /dev/null +++ b/playbooks/hauler.yml @@ -0,0 +1,8 @@ +--- +- name: Hauler Server + hosts: "{{ hauler_ip | default('RKE2_CONTROLLERS[0]') }}" + gather_facts: false + vars_files: ../vars/main.yml + tags: [ registry, fileserver ] + roles: + - {role: mozebaltyk.rkub.deploy_hauler, tags: hauler} diff --git a/playbooks/tasks/install.yml b/playbooks/install.yml similarity index 70% rename from playbooks/tasks/install.yml rename to playbooks/install.yml index 1026926aa..3e163a149 100644 --- a/playbooks/tasks/install.yml +++ b/playbooks/install.yml @@ -6,7 +6,7 @@ vars_files: ../vars/main.yml tags: controller roles: - - {role: install_rke2_controller, tags: rke2,} + - {role: mozebaltyk.rkub.install_rke2_controller, tags: rke2} - name: Install RKE2 Workers hosts: RKE2_WORKERS @@ -15,4 +15,4 @@ vars_files: ../vars/main.yml tags: worker roles: - - {role: install_rke2_worker, tags: rke2,} \ No newline at end of file + - {role: mozebaltyk.rkub.install_rke2_worker, tags: rke2} diff --git a/playbooks/tasks/longhorn.yml b/playbooks/longhorn.yml similarity index 72% rename from playbooks/tasks/longhorn.yml rename to playbooks/longhorn.yml index 38d356d48..6754bf434 100644 --- a/playbooks/tasks/longhorn.yml +++ b/playbooks/longhorn.yml @@ -1,3 +1,4 @@ +--- - name: Install Longhorn hosts: 
RKE2_CONTROLLERS:RKE2_WORKERS gather_facts: true @@ -5,4 +6,4 @@ vars_files: ../vars/main.yml tags: [ controller, worker ] roles: - - {role: deploy_longhorn, tags: longhorn,} \ No newline at end of file + - {role: mozebaltyk.rkub.deploy_longhorn, tags: longhorn} diff --git a/playbooks/tasks/neuvector.yml b/playbooks/neuvector.yml similarity index 71% rename from playbooks/tasks/neuvector.yml rename to playbooks/neuvector.yml index 12be6a45e..903235a21 100644 --- a/playbooks/tasks/neuvector.yml +++ b/playbooks/neuvector.yml @@ -1,3 +1,4 @@ +--- - name: Install Neuvector hosts: RKE2_CONTROLLERS:RKE2_WORKERS gather_facts: true @@ -5,4 +6,4 @@ vars_files: ../vars/main.yml tags: [ controller, worker ] roles: - - {role: deploy_neuvector, tags: neuvector,} \ No newline at end of file + - {role: mozebaltyk.rkub.deploy_neuvector, tags: neuvector} diff --git a/playbooks/tasks/rancher.yml b/playbooks/rancher.yml similarity index 72% rename from playbooks/tasks/rancher.yml rename to playbooks/rancher.yml index 58d36b0ae..c9cd649ad 100644 --- a/playbooks/tasks/rancher.yml +++ b/playbooks/rancher.yml @@ -1,3 +1,4 @@ +--- - name: Install Rancher hosts: RKE2_CONTROLLERS:RKE2_WORKERS gather_facts: true @@ -5,4 +6,4 @@ vars_files: ../vars/main.yml tags: [ controller, worker ] roles: - - {role: deploy_rancher, tags: rancher,} \ No newline at end of file + - {role: mozebaltyk.rkub.deploy_rancher, tags: rancher} diff --git a/playbooks/tasks/uninstall.yml b/playbooks/uninstall.yml similarity index 61% rename from playbooks/tasks/uninstall.yml rename to playbooks/uninstall.yml index e00093440..4b09d0218 100644 --- a/playbooks/tasks/uninstall.yml +++ b/playbooks/uninstall.yml @@ -1,8 +1,9 @@ -- name: uninstall RKE2 +--- +- name: Uninstall RKE2 hosts: RKE2_CONTROLLERS:RKE2_WORKERS gather_facts: false become: true vars_files: ../vars/main.yml tags: controler, worker roles: - - {role: uninstall_rkub, tags: uninstall,} \ No newline at end of file + - {role: mozebaltyk.rkub.uninstall_rkub, 
tags: uninstall} diff --git a/playbooks/upload.yml b/playbooks/upload.yml new file mode 100644 index 000000000..b861e8a9f --- /dev/null +++ b/playbooks/upload.yml @@ -0,0 +1,8 @@ +--- +- name: Dowload Rkub package on first controler + hosts: RKE2_CONTROLLERS[0] + gather_facts: false + vars_files: ../vars/main.yml + tags: controler + roles: + - {role: mozebaltyk.rkub.upload_package_zst, tags: upload} diff --git a/playbooks/vars/main.yml b/playbooks/vars/main.yml index 09623ea00..fe3e262ad 100644 --- a/playbooks/vars/main.yml +++ b/playbooks/vars/main.yml @@ -1,41 +1,71 @@ +--- +# OLD Version products +global_rke2_version: "1.27.12" +global_kubevip_version: "0.7.0" +global_helm_version: "3.14.0" +global_CERT_VERSION: "1.14.1" +global_RANCHER_VERSION: "2.8.1" +global_LONGHORN_VERSION: "1.6.0" +global_NEU_VERSION: "2.7.2" + # Version products -global_rke2_version: "1.26.11" -global_helm_version: "3.11.3" -global_CERT_VERSION: "1.13.3" -global_RANCHER_VERSION: "2.8.0" -global_LONGHORN_VERSION: "1.5.3" -global_NEU_VERSION: "2.6.6" - -# extras RPM -global_rke2_common_repo_version: "v{{ rke2_version }}%2Brke2r1" #.stable.0 -global_rke2_common_rpm_version: "rke2-common-{{ rke2_version }}.rke2r1-0" #.el{{ ansible_distribution_major_version }}.x86_64.rpm -global_rke2_selinux_repo_version: "v0.17.stable.1" -global_rke2_selinux_rpm_version: "rke2-selinux-0.17-1" #.el{{ ansible_distribution_major_version }}.noarch.rpm +global_stable_channel: "{{ stable | default('false') }}" +global_all_wanted: "{{ all | default('false') }}" +global_extras_components: + kubevip: "{{ kubevip | default(global_all_wanted) }}" + rancher: "{{ rancher | default(global_all_wanted) }}" + longhorn: "{{ longhorn | default(global_all_wanted) }}" + neuvector: "{{ neuvector | default(global_all_wanted) }}" + +global_versions: + rke2: "1.27.12" + kubevip: "0.8.0" + cert_manager: "1.14.1" + rancher: "2.8.1" + longhorn: "1.6.0" + neuvector: "2.7.2" + helm: "3.14.0" + nerdctl: "1.7.6" + k9s: "0.32.4" + hauler: 
"1.0.3" + +global_rhel_version: "{{ el }}" # General global_install_user: "{{ install_user | default('kuberoot') }}" -global_directory_package_build: "{{ dir_build | default('$HOME/rkub') }}" +global_method_install: "{{ method | default('tarball') }}" +global_rpm_install: "{{ global_method_install == 'rpm' }}" +global_tarball_install: "{{ global_method_install == 'tarball' }}" +global_airgap_install: "{{ airgap | default('true') }}" # Local -global_package_name: "{{ package_name | default('rke2_rancher_longhorn.zst') }}" +global_directory_package_build: "{{ dir_build | default('$HOME/rkub') }}" +global_archive_tar_zst_bool: "{{ archive | default('true') | bool }}" +global_package_name: "{{ package_name | default('rkub.zst') }}" global_path_to_package_zst: "{{ global_directory_package_build }}/../{{ global_package_name }}" # Target -global_directory_package_target: "{{ dir_target | default('/opt') }}" -global_directory_mount: "{{ dir_mount | default('/mnt/rkub') }}" +global_directory_package_target: "{{ dir_target | default('/opt/rkub') }}" -# Options RKE2 +# Options General +global_hauler_ip: "{{ hauler_ip | default(hostvars[groups['RKE2_CONTROLLERS'][0]]['ansible_default_ipv4']['address']) }}" global_master_ip: "{{ master_ip | default(hostvars[groups['RKE2_CONTROLLERS'][0]]['ansible_default_ipv4']['address']) }}" -global_domain: "{{ domain | default(hostvars[groups['RKE2_CONTROLLERS'][0]]['ansible_domain']) }}" +global_domain: "{{ domain | default(hostvars[groups['RKE2_CONTROLLERS'][0]]['ansible_default_ipv4']['address'] + '.sslip.io') }}" -# Options - Not used yet -global_data_dir: "/rke2" -global_cluster_cidr: "10.42.0.0/16" #Default Value -global_service_cidr: "10.43.0.0/16" #Default Value +# Options RKE2 +global_rke2_data_dir: "{{ data_dir | default('/var/lib/rancher/rke2') }}" +global_rke2_ha_mode: false +global_rke2_api_ip: "{{ vip | default(global_master_ip) }}" +global_rke2_cluster_cidr: "10.42.0.0/16" #Default Value +global_rke2_service_cidr: 
"10.43.0.0/16" #Default Value +global_rke2_cni: "canal" +global_rke2_profile_activated: "{{ profile_cis | default('false') }}" +global_rke2_disable: "{{ disable | default(omit) }}" # Valid items to not deploy: rke2-canal, rke2-coredns, rke2-ingress-nginx, rke2-metrics-server -# Longhorn -default_longhorn_datapath: "{{ global_directory_package_target }}/longhorn" +# Longhorn +default_longhorn_datapath: "/data/longhorn" global_longhorn_datapath: "{{ datapath | default(default_longhorn_datapath) }}" +global_longhorn_replica: 2 # Rancher -global_rancher_password: "{{ password | default('BootStrapAllTheThings') }}" \ No newline at end of file +global_rancher_password: "{{ password | default('BootStrapAllTheThings') }}" diff --git a/roles/build_airgap_hauler/README.md b/roles/build_airgap_hauler/README.md new file mode 100644 index 000000000..99e6e69c4 --- /dev/null +++ b/roles/build_airgap_hauler/README.md @@ -0,0 +1,72 @@ +Role Name +========= + +Role to build an airgap package with Hauler. + +Requirements +------------ + +*Example below show that the roles have two flavors and different requirements in functions of what you want* + +if idm set to true: +- Access to a IDM server if you want to create users account. 
+- Credentials access to connect to IDM + +if idm set to false: +- create local account on Linux servers + +Role Variables +-------------- + +| **VarName** | **Type** | **Content** | **Mandatory** | +|--------------------|----------|---------------------------|:-------------:| +| idm | boolean | true / false | x | +| svc_account | string | Service Account | x | +| svc_account_passwd | string | pwd (can be omited) | | +| svc_group | string | Group | | +| svc_owner | string | Owner of the account | if idm true | +| list_svc_account | list | Users which goes in group | if idm true | +| idm_server | string | Service Account PWD | if idm true | +| idm_pwd | string | sudo group | if idm true | + +**Mandatory** is the minimum variables that need to be set to make the role work +*the variables not mandatory either have a default value defined or can be omited* + +Dependencies +------------ + +Dependencies with some others roles (if there is some). + +Example Playbook +---------------- +Give some example about how to use or implement your Roles + + +```yml +- name: Trigger Role Example in a Playbooks + hosts: RANDOM_GROUP_DEFINED_IN_YOUR_INVENTORY + remote_user: ansible + become: true + + roles: + - { role: 'example', tags: 'example' } +``` + +```yml +# Example for one user +- import_role: + name: "example" + vars: + svc_account: "{{ tomcat_svc_account }}" + svc_group: "{{ tomcat_svc_group }}" +``` + +License +------- + +Apache-2.0 + +Author Information +------------------ + +morze.baltyk@proton.me diff --git a/roles/build_airgap_hauler/defaults/main.yml b/roles/build_airgap_hauler/defaults/main.yml new file mode 100644 index 000000000..efa140f9e --- /dev/null +++ b/roles/build_airgap_hauler/defaults/main.yml @@ -0,0 +1,9 @@ +--- +# defaults file for build_airgap_hauler +hauler_version: "{{ global_versions['hauler'] }}" +rpm_install: "{{ global_rpm_install | bool }}" +tarball_install: "{{ global_tarball_install | bool }}" +directory_package: "{{ 
global_directory_package_build }}" +tar_zst_name: "{{ global_package_name }}" +path_to_package_zst: "{{ global_path_to_package_zst }}" +archive_wanted: "{{ global_archive_tar_zst_bool }}" diff --git a/roles/install_utils_registry/meta/main.yml b/roles/build_airgap_hauler/meta/main.yml similarity index 94% rename from roles/install_utils_registry/meta/main.yml rename to roles/build_airgap_hauler/meta/main.yml index 59a11d8ba..880eeef3f 100644 --- a/roles/install_utils_registry/meta/main.yml +++ b/roles/build_airgap_hauler/meta/main.yml @@ -2,7 +2,7 @@ galaxy_info: standalone: false # Part of a collection author: morze.baltyk@proton.me - description: Install a minimal localhost docker registry + description: Role to build an airgap package with Hauler. company: Opensource # If the issue tracker for your role is not on github, uncomment the @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/build_airgap_hauler/tasks/hauler.yml b/roles/build_airgap_hauler/tasks/hauler.yml new file mode 100644 index 000000000..73dd9d3fd --- /dev/null +++ b/roles/build_airgap_hauler/tasks/hauler.yml @@ -0,0 +1,63 @@ +--- +- name: Append lists together + ansible.builtin.set_fact: + list_images: "{{ list_images_longhorn | default([]) + list_images_certmanager | default([]) + list_images_rancher_latest | default([]) + list_images_neuvector | default([]) + list_images_kubevip | default([]) }}" + +- name: Append lists for RPM install with custom registry + ansible.builtin.set_fact: + list_images: "{{ list_images | default([]) + list_images_rke2 }}" + when: rpm_install + +- name: Install Hauler if not present + ansible.builtin.shell: + cmd: | + set -o pipefail + curl -sfL https://get.hauler.dev | HAULER_VERSION={{ hauler_version }} bash + executable: /bin/bash + creates: /usr/local/bin/hauler + changed_when: false + +- name: Download and Unarchive hauler from URL + ansible.builtin.unarchive: + src: "https://github.com/rancherfederal/hauler/releases/download/v{{ hauler_version }}/hauler_{{ hauler_version }}_linux_amd64.tar.gz" + dest: "{{ directory_package }}" + remote_src: true + +- name: Clean up files + ansible.builtin.file: + path: "{{ directory_package }}/{{ item }}" + state: absent + loop: + - "LICENSE" + - "README.md" + +- name: Push template + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ directory_package }}/{{ item | basename | regex_replace('.j2$', '') }}" + mode: 0660 + loop: + - "airgap_hauler.yaml.j2" + +# Hauler Store +- name: Display Info + ansible.builtin.debug: + msg: "Start Hauler store - this step may take some times..." 
+ +- name: Hauler store the all things + ansible.builtin.shell: + cmd: | + set -o pipefail + hauler store sync -f {{ directory_package }}/airgap_hauler.yaml + executable: /bin/bash + args: + chdir: "{{ directory_package }}" + changed_when: false + +- name: Compress files using zstd and create an archive + ansible.builtin.command: + "hauler store save -f ../{{ tar_zst_name }}" + args: + chdir: "{{ directory_package }}" + changed_when: false + when: archive_wanted diff --git a/roles/build_airgap_hauler/tasks/kubevip.yml b/roles/build_airgap_hauler/tasks/kubevip.yml new file mode 100644 index 000000000..7c898d905 --- /dev/null +++ b/roles/build_airgap_hauler/tasks/kubevip.yml @@ -0,0 +1,6 @@ +--- +# https://kube-vip.io/docs/installation/daemonset/#generating-a-manifest +- name: Create a list for Kube-VIP + ansible.builtin.set_fact: + list_images_kubevip: + - "ghcr.io/kube-vip/kube-vip:v{{ kubevip_version }}" diff --git a/roles/build_airgap_hauler/tasks/longhorn.yml b/roles/build_airgap_hauler/tasks/longhorn.yml new file mode 100644 index 000000000..0a3cd04fe --- /dev/null +++ b/roles/build_airgap_hauler/tasks/longhorn.yml @@ -0,0 +1,5 @@ +--- +# for i in $(curl -sL https://github.com/longhorn/longhorn/releases/download/$LONGHORN_VERSION/longhorn-images.txt); do echo " - name: "$i >> airgap_hauler.yaml; done +- name: Add longhorn images to list_images variable + ansible.builtin.set_fact: + list_images_longhorn: "{{ lookup('ansible.builtin.url', 'https://raw.githubusercontent.com/longhorn/longhorn/v{{ longhorn_version }}/deploy/longhorn-images.txt', wantlist=True) }}" diff --git a/roles/build_airgap_hauler/tasks/main.yml b/roles/build_airgap_hauler/tasks/main.yml new file mode 100644 index 000000000..7cb1b9b45 --- /dev/null +++ b/roles/build_airgap_hauler/tasks/main.yml @@ -0,0 +1,31 @@ +--- +# tasks file for build_airgap_hauler +- name: First set variables + ansible.builtin.import_role: + name: set_versions + tasks_from: main + +- name: Prerequisites + 
ansible.builtin.import_tasks: prerequis.yml + +- name: Get list images RKE2 + ansible.builtin.import_tasks: rke2.yml + +- name: Get list images Longhorn + ansible.builtin.import_tasks: longhorn.yml + when: longhorn_wanted + +- name: Get list images Rancher + ansible.builtin.import_tasks: rancher.yml + when: rancher_wanted + +- name: Get list images Neuvector + ansible.builtin.import_tasks: neuvector.yml + when: neuvector_wanted + +- name: Get list images Kube-vip + ansible.builtin.import_tasks: kubevip.yml + when: kubevip_wanted + +- name: Hauler install and store + ansible.builtin.import_tasks: hauler.yml diff --git a/roles/build_airgap_hauler/tasks/neuvector.yml b/roles/build_airgap_hauler/tasks/neuvector.yml new file mode 100644 index 000000000..ef8e28e8c --- /dev/null +++ b/roles/build_airgap_hauler/tasks/neuvector.yml @@ -0,0 +1,20 @@ +--- +# for i in $(helm template neuvector/core --version $NEU_VERSION | awk '$1 ~ /image:/ {print $2}' | sed -e 's/\"//g'); do echo " - name: "$i >> airgap_hauler.yaml; done +- name: Add neuvector chart repo + kubernetes.core.helm_repository: + name: neuvector + repo_url: "https://neuvector.github.io/neuvector-helm/" + force_update: true + +- name: Neuvector List - helm template + ansible.builtin.shell: + cmd: | + set -o pipefail + helm template neuvector/core --version {{ neuvector_version }} | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g + executable: /bin/bash + changed_when: false + register: neuvector_images + +- name: Create a list from output + ansible.builtin.set_fact: + list_images_neuvector: "{{ neuvector_images['stdout'].splitlines() }}" diff --git a/roles/build_airgap_hauler/tasks/prerequis.yml b/roles/build_airgap_hauler/tasks/prerequis.yml new file mode 100644 index 000000000..49318f738 --- /dev/null +++ b/roles/build_airgap_hauler/tasks/prerequis.yml @@ -0,0 +1,50 @@ +--- +# NB: Collection's Prerequisites are defined in ./meta but I put some here to make the playbook works on a target server and not only 
localhost. +- name: Gather facts + ansible.builtin.setup: + gather_subset: + - "distribution" + - "distribution_major_version" + - "!min" + when: > + ansible_os_family is not defined + +# for RHEL-like +- name: Install zstd and jq + ansible.builtin.dnf: + name: "{{ item }}" + state: present + with_items: + - zstd + - jq + when: + - ansible_os_family == "RedHat" + become: true + +# for Debian-like +- name: Install zstd and jq + ansible.builtin.apt: + name: "{{ item }}" + state: present + with_items: + - zstd + - jq + when: + - ansible_os_family == "Debian" + become: true + +# check command and install if not present +- name: Ensure Helm 3 is present + ansible.builtin.shell: + cmd: | + set -o pipefail + command -v helm || curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + executable: /bin/bash + changed_when: false + +# Dir +- name: Ensure package directory exists + ansible.builtin.file: + path: "{{ directory_package }}" + state: directory + recurse: true diff --git a/roles/build_airgap_hauler/tasks/rancher.yml b/roles/build_airgap_hauler/tasks/rancher.yml new file mode 100644 index 000000000..99ecc1142 --- /dev/null +++ b/roles/build_airgap_hauler/tasks/rancher.yml @@ -0,0 +1,55 @@ +--- +# for i in $(helm template jetstack/cert-manager --version $CERT_VERSION | awk '$1 ~ /image:/ {print $2}' | sed 's/\"//g'); do echo " - name: "$i >> airgap_hauler.yaml; done +- name: Add jetstack helm repo + kubernetes.core.helm_repository: + name: jetstack + repo_url: "https://charts.jetstack.io" + force_update: true + +- name: Cert-manager List - helm template + ansible.builtin.shell: + cmd: | + set -o pipefail + helm template jetstack/cert-manager --version {{ cert_manager_version }} | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g + executable: /bin/bash + changed_when: false + register: certmanager_images + +- name: Create a list from output + ansible.builtin.set_fact: + list_images_certmanager: "{{ certmanager_images['stdout'].splitlines() }}" + 
+# Rancher +- name: Get Rancher images from URL + ansible.builtin.uri: + url: "https://github.com/rancher/rancher/releases/download/v{{ rancher_version }}/rancher-images.txt" + method: GET + return_content: true + register: rancher_images + +- name: Create a list and Exclude images not needed for Rancher + ansible.builtin.set_fact: + list_images_rancher: "{{ rancher_images['content'].splitlines() | select('match', '^(?!.*(neuvector|minio|gke|aks|eks|sriov|harvester|mirrored|longhorn|thanos|tekton|istio|hyper|jenkins|windows)).*$') }}" + +- name: Add back needed images to list_images_rancher variable + ansible.builtin.set_fact: + list_images_rancher: "{{ list_images_rancher + rancher_images['content'].splitlines() | select('match', '^(.*(cluster-api|kubectl)).*$') }}" + +- name: Keep only the latest version of each image + ansible.builtin.set_fact: + latest_images: "{{ latest_images | default({}) | combine({item.split(':')[0]: item.split(':')[1]}) }}" + loop: "{{ list_images_rancher }}" + when: item.split(':')[0] not in latest_images or item.split(':')[1] is ansible.builtin.version(latest_images[item.split(':')[0]], '>') + vars: + latest_images: {} + +- name: Convert dictionary to list of "name:version" strings + ansible.builtin.set_fact: + list_images_rancher_latest: "{{ list_images_rancher_latest | default([]) + [item.key + ':' + item.value] }}" + loop: "{{ latest_images | dict2items }}" + vars: + list_images_rancher_latest: [] + +- name: Kubectl fix + ansible.builtin.set_fact: + list_images_rancher_latest: "{{ list_images_rancher_latest | default([]) + ['rancher/kubectl:v1.20.2'] }}" diff --git a/roles/build_airgap_hauler/tasks/rke2.yml b/roles/build_airgap_hauler/tasks/rke2.yml new file mode 100644 index 000000000..70742db13 --- /dev/null +++ b/roles/build_airgap_hauler/tasks/rke2.yml @@ -0,0 +1,12 @@ +--- +# for i in $(curl -sL https://github.com/rancher/rke2/releases/download/v$RKE_VERSION%2Brke2r1/rke2-images-all.linux-amd64.txt|grep -v 
"sriov\|cilium\|vsphere"); do echo " - name: "$i >> airgap_hauler.yaml ; done +- name: Get rke2 images from URL + ansible.builtin.uri: + url: "https://github.com/rancher/rke2/releases/download/v{{ rke2_version }}%2Brke2r1/rke2-images-all.linux-amd64.txt" + method: GET + return_content: true + register: rke2_images + +- name: Add rke2 images to list_images variable + ansible.builtin.set_fact: + list_images_rke2: "{{ rke2_images['content'].splitlines() | select('match', '^(?!.*(sriov|cilium|vsphere)).*$') }}" diff --git a/roles/build_airgap_hauler/templates/airgap_hauler.yaml.j2 b/roles/build_airgap_hauler/templates/airgap_hauler.yaml.j2 new file mode 100644 index 000000000..ddc2de512 --- /dev/null +++ b/roles/build_airgap_hauler/templates/airgap_hauler.yaml.j2 @@ -0,0 +1,74 @@ +{% if list_images|length > 0 %} +# images +apiVersion: content.hauler.cattle.io/v1alpha1 +kind: Images +metadata: + name: rancher-images + annotations: + hauler.dev/platform: linux/amd64 +spec: + images: +{% for item in list_images %} + - name: {{ item }} +{% endfor %} +{% endif %} +{% if rancher_wanted or longhorn_wanted or neuvector_wanted %} +# Helm Charts +--- +apiVersion: content.hauler.cattle.io/v1alpha1 +kind: Charts +metadata: + name: rancher-charts +spec: + charts: +{% if rancher_wanted %} + - name: rancher + repoURL: https://releases.rancher.com/server-charts/latest + version: {{ rancher_version }} + - name: cert-manager + repoURL: https://charts.jetstack.io + version: {{ cert_manager_version }} +{% endif %} +{% if longhorn_wanted %} + - name: longhorn + repoURL: https://charts.longhorn.io + version: {{ longhorn_version }} +{% endif %} +{% if neuvector_wanted %} + - name: core + repoURL: https://neuvector.github.io/neuvector-helm/ + version: {{ neuvector_version }} +{% endif %} +{% endif %} +# Files and RPM +--- +apiVersion: content.hauler.cattle.io/v1alpha1 +kind: Files +metadata: + name: rancher-files +spec: + files: + # RPM Common to both method + - path: 
https://github.com/rancher/rke2-packaging/releases/download/v{{ rke2_version }}%2Brke2r1.stable.0/rke2-common-{{ rke2_version }}.rke2r1-0.el{{ rhel_version }}.x86_64.rpm + - path: https://github.com/rancher/rke2-selinux/releases/download/v0.17.stable.1/rke2-selinux-0.17-1.el{{ rhel_version }}.noarch.rpm + - path: https://rpm.rancher.io/public.key + {% if rpm_install -%} + # RPM method + - path: https://github.com/rancher/rke2-packaging/releases/download/v{{ rke2_version }}%2Brke2r1.stable.0/rke2-agent-{{ rke2_version }}.rke2r1-0.el{{ rhel_version }}.x86_64.rpm + - path: https://github.com/rancher/rke2-packaging/releases/download/v{{ rke2_version }}%2Brke2r1.stable.0/rke2-server-{{ rke2_version }}.rke2r1-0.el{{ rhel_version }}.x86_64.rpm + {% endif -%} + {% if tarball_install -%} + # Tarball method + - path: https://github.com/rancher/rke2/releases/download/v{{ rke2_version }}%2Brke2r1/rke2-images.linux-amd64.tar.zst + - path: https://github.com/rancher/rke2/releases/download/v{{ rke2_version }}%2Brke2r1/rke2.linux-amd64.tar.gz + - path: https://github.com/rancher/rke2/releases/download/v{{ rke2_version }}%2Brke2r1/sha256sum-amd64.txt + - name: install.sh + path: https://get.rke2.io + {% endif -%} + # Addons + - name: helm.tar.gz + path: https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz + - name: nerdctl.tar.gz + path: https://github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-linux-amd64.tar.gz + - name: k9s.tar.gz + path: https://github.com/derailed/k9s/releases/download/v{{ k9s_version }}/k9s_Linux_amd64.tar.gz diff --git a/roles/deploy_certmanager/tests/inventory b/roles/build_airgap_hauler/tests/inventory similarity index 90% rename from roles/deploy_certmanager/tests/inventory rename to roles/build_airgap_hauler/tests/inventory index 878877b07..2fbb50c4a 100644 --- a/roles/deploy_certmanager/tests/inventory +++ b/roles/build_airgap_hauler/tests/inventory @@ -1,2 +1 @@ localhost - diff --git 
a/roles/build_airgap_hauler/tests/test.yml b/roles/build_airgap_hauler/tests/test.yml new file mode 100644 index 000000000..05b80154a --- /dev/null +++ b/roles/build_airgap_hauler/tests/test.yml @@ -0,0 +1,6 @@ +--- +- name: Test + hosts: localhost + remote_user: root + roles: + - build_airgap_hauler diff --git a/roles/build_airgap_package/defaults/main.yml b/roles/build_airgap_package/defaults/main.yml index f57d7fca0..31f920f0a 100644 --- a/roles/build_airgap_package/defaults/main.yml +++ b/roles/build_airgap_package/defaults/main.yml @@ -1,6 +1,7 @@ --- # defaults file for build_airgap rke2_version: "{{ global_rke2_version }}" +kubevip_version: "{{ global_kubevip_version }}" helm_version: "{{ global_helm_version }}" CERT_VERSION: "{{ global_CERT_VERSION }}" RANCHER_VERSION: "{{ global_RANCHER_VERSION }}" @@ -9,6 +10,7 @@ NEU_VERSION: "{{ global_NEU_VERSION }}" directory_package: "{{ global_directory_package_build }}" tar_zst_name: "{{ global_package_name }}" path_to_package_zst: "{{ global_path_to_package_zst }}" +archive_wanted: "{{ global_archive_tar_zst_bool }}" list_directory_package: - "{{ directory_package }}/rke2_{{ rke2_version }}" - "{{ directory_package }}/helm" @@ -18,9 +20,10 @@ list_directory_package: - "{{ directory_package }}/images/longhorn" - "{{ directory_package }}/images/registry" - "{{ directory_package }}/images/neuvector" + - "{{ directory_package }}/images/kubevip" # extras RPM rke2_common_repo_version: "{{ global_rke2_common_repo_version }}" rke2_common_rpm_version: "{{ global_rke2_common_rpm_version }}" rke2_selinux_repo_version: "{{ global_rke2_selinux_repo_version }}" -rke2_selinux_rpm_version: "{{ global_rke2_selinux_rpm_version }}" \ No newline at end of file +rke2_selinux_rpm_version: "{{ global_rke2_selinux_rpm_version }}" diff --git a/roles/build_airgap_package/meta/main.yml b/roles/build_airgap_package/meta/main.yml index d8c3e0913..6e839e1c6 100644 --- a/roles/build_airgap_package/meta/main.yml +++ 
b/roles/build_airgap_package/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: diff --git a/roles/build_airgap_package/tasks/compress.yml b/roles/build_airgap_package/tasks/compress.yml index b4d7067c8..a2bbe67ac 100644 --- a/roles/build_airgap_package/tasks/compress.yml +++ b/roles/build_airgap_package/tasks/compress.yml @@ -1,5 +1,8 @@ +--- - name: Compress files using zstd and create an archive ansible.builtin.command: "tar -I zstd -vcf {{ tar_zst_name }} -C {{ directory_package }} ." args: chdir: "{{ directory_package }}/.." + changed_when: false + when: archive_wanted diff --git a/roles/build_airgap_package/tasks/helm.yml b/roles/build_airgap_package/tasks/helm.yml index e08a32682..43560928b 100644 --- a/roles/build_airgap_package/tasks/helm.yml +++ b/roles/build_airgap_package/tasks/helm.yml @@ -1,8 +1,10 @@ -# Download/Install helm on build server +--- +# Download/Install helm on build server - name: Download Helm binary ansible.builtin.get_url: url: "https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz" dest: "{{ directory_package }}/helm/helm-v{{ helm_version }}-linux-amd64.tar.gz" + mode: 0750 - name: Install Helm binary ansible.builtin.shell: | @@ -11,6 +13,7 @@ rm -rf linux-amd64 > /dev/null 2>&1 args: chdir: "{{ directory_package }}/helm/" + changed_when: false # Add charts repository - name: Add jetstack chart repo @@ -37,7 +40,7 @@ repo_url: "https://neuvector.github.io/neuvector-helm/" force_update: true -# Pull charts +# Pull charts - name: Download chart using chart url kubernetes.core.helm_pull: chart_ref: jetstack/cert-manager diff --git a/roles/build_airgap_package/tasks/images.yml b/roles/build_airgap_package/tasks/images.yml index d373f8dd2..9a9ec81b0 100644 --- a/roles/build_airgap_package/tasks/images.yml +++ 
b/roles/build_airgap_package/tasks/images.yml @@ -1,24 +1,26 @@ -## List images +--- +## List images - name: Rancher List - Download rancher-images.txt ansible.builtin.get_url: url: "https://github.com/rancher/rancher/releases/download/v{{ RANCHER_VERSION }}/rancher-images.txt" dest: "{{ directory_package }}/images/rancher/orig_rancher-images.txt" + mode: "0640" - name: Rancher List - Fix 1 Library tags ansible.builtin.lineinfile: path: "{{ directory_package }}/images/rancher/orig_rancher-images.txt" regexp: '^(.*)busybox(.*)$' line: '\1library\/busybox\2' - firstmatch: yes - backrefs: yes + firstmatch: true + backrefs: true - name: Rancher List - Fix 2 Library tags ansible.builtin.lineinfile: path: "{{ directory_package }}/images/rancher/orig_rancher-images.txt" regexp: '^(.*)registry(.*)$' line: '\1library\/registry\2' - firstmatch: yes - backrefs: yes + firstmatch: true + backrefs: true - name: Rancher List - Remove things that are not needed and overlapped ansible.builtin.lineinfile: @@ -28,57 +30,101 @@ # this one could be probably improved - name: Rancher List - Get latest version - ansible.builtin.shell: | - for i in $(cat {{ directory_package }}/images/rancher/orig_rancher-images.txt|awk -F: '{print $1}'); do - grep -w $i {{ directory_package }}/images/rancher/orig_rancher-images.txt | sort -Vr| head -1 >> {{ directory_package }}/images/rancher/version_unsorted.txt; - done + ansible.builtin.shell: + cmd: | + set -o pipefail + for i in $(cat {{ directory_package }}/images/rancher/orig_rancher-images.txt|awk -F: '{print $1}'); do + grep -w $i {{ directory_package }}/images/rancher/orig_rancher-images.txt | sort -Vr| head -1 >> {{ directory_package }}/images/rancher/version_unsorted.txt; + done + executable: /bin/bash + changed_when: false - name: Rancher List - Final Sort - ansible.builtin.shell: | - cat {{ directory_package }}/images/rancher/version_unsorted.txt | sort -u > {{ directory_package }}/images/rancher/images.txt + ansible.builtin.shell: + cmd: | + 
set -o pipefail + cat {{ directory_package }}/images/rancher/version_unsorted.txt | sort -u > {{ directory_package }}/images/rancher/images.txt + executable: /bin/bash + changed_when: false - name: Rancher List - Due to version 2.8.0 - ansible.builtin.shell: | - echo "rancher/mirrored-cluster-api-controller:v1.4.4" >> {{ directory_package }}/images/rancher/images.txt - echo "rancher/kubectl:v1.20.2" >> {{ directory_package }}/images/rancher/images.txt + ansible.builtin.shell: + cmd: | + set -o pipefail + echo "rancher/mirrored-cluster-api-controller:v1.4.4" >> {{ directory_package }}/images/rancher/images.txt + echo "rancher/kubectl:v1.20.2" >> {{ directory_package }}/images/rancher/images.txt + executable: /bin/bash + changed_when: false - name: Longhorn List - Download longhorn-images.txt ansible.builtin.get_url: url: "https://raw.githubusercontent.com/longhorn/longhorn/v{{ LONGHORN_VERSION }}/deploy/longhorn-images.txt" dest: "{{ directory_package }}/images/longhorn/images.txt" + mode: "0640" - name: Cert-manager List - helm template - ansible.builtin.shell: | - helm template {{ directory_package }}/helm/cert-manager-v{{ CERT_VERSION }}.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g > {{ directory_package }}/images/cert/images.txt + ansible.builtin.shell: + cmd: | + set -o pipefail + helm template {{ directory_package }}/helm/cert-manager-v{{ CERT_VERSION }}.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g > {{ directory_package }}/images/cert/images.txt + executable: /bin/bash + changed_when: false - name: Neuvector List - helm template - ansible.builtin.shell: | - helm template {{ directory_package }}/helm/core-{{ NEU_VERSION }}.tgz | awk '$1 ~ /image:/ {print $2}' | sed -e 's/\"//g' > {{ directory_package }}/images/neuvector/images.txt + ansible.builtin.shell: + cmd: | + set -o pipefail + helm template {{ directory_package }}/helm/core-{{ NEU_VERSION }}.tgz | awk '$1 ~ /image:/ {print $2}' | sed -e 's/\"//g' > {{ directory_package 
}}/images/neuvector/images.txt + executable: /bin/bash + changed_when: false -## Import images +## Import images - name: Upload images from list ($2) - ansible.builtin.shell: > - for i in $(cat {{ item }}/images.txt); do - if ( ! ls {{ item }}/$(echo $i| awk -F/ '{print $2}'|sed 's/:/_/g').tar > /dev/null); then - skopeo copy docker://$i docker-archive:{{ item }}/$(echo $i| awk -F/ '{print $2}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $2}'); - fi; - done + ansible.builtin.shell: + cmd: | + set -o pipefail + for i in $(cat {{ item }}/images.txt); do + if ( ! ls {{ item }}/$(echo $i| awk -F/ '{print $2}'|sed 's/:/_/g').tar > /dev/null); then + skopeo copy docker://$i docker-archive:{{ item }}/$(echo $i| awk -F/ '{print $2}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $2}'); + fi; + done + executable: /bin/bash + changed_when: false loop: - "{{ directory_package }}/images/rancher" - "{{ directory_package }}/images/longhorn" - name: Upload images from list ($3) - ansible.builtin.shell: > - for i in $(cat {{ item }}/images.txt); do - if ( ! ls {{ item }}/$(echo $i| awk -F/ '{print $3}'|sed 's/:/_/g').tar > /dev/null); then - skopeo copy docker://$i docker-archive:{{ item }}/$(echo $i| awk -F/ '{print $3}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $3}'); - fi; - done + ansible.builtin.shell: + cmd: | + set -o pipefail + for i in $(cat {{ item }}/images.txt); do + if ( ! 
ls {{ item }}/$(echo $i| awk -F/ '{print $3}'|sed 's/:/_/g').tar > /dev/null); then + skopeo copy docker://$i docker-archive:{{ item }}/$(echo $i| awk -F/ '{print $3}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $3}'); + fi; + done + executable: /bin/bash + changed_when: false loop: - "{{ directory_package }}/images/cert" - "{{ directory_package }}/images/neuvector" - name: Upload registry image - ansible.builtin.get_url: - url: "https://github.com/clemenko/rke_airgap_install/raw/main/registry.tar" - dest: "{{ directory_package }}/images/registry/registry.tar" + ansible.builtin.shell: + cmd: | + set -o pipefail + if ( ! ls {{ directory_package }}/images/registry/registry.tar > /dev/null); then + skopeo copy --additional-tag registry:latest docker://registry:latest docker-archive:{{ directory_package }}/images/registry/registry.tar + fi; + executable: /bin/bash + changed_when: false + +- name: Upload kubevip image + ansible.builtin.shell: + cmd: | + set -o pipefail + if ( ! ls {{ directory_package }}/images/kubevip/kubevip_v{{ kubevip_version }}.tar > /dev/null); then + skopeo copy docker://ghcr.io/kube-vip/kube-vip:v{{ kubevip_version }} docker-archive:{{ directory_package }}/images/kubevip/kubevip_v{{ kubevip_version }}.tar + fi; + executable: /bin/bash + changed_when: false diff --git a/roles/build_airgap_package/tasks/main.yml b/roles/build_airgap_package/tasks/main.yml index a3148e0b1..4ad006a3b 100644 --- a/roles/build_airgap_package/tasks/main.yml +++ b/roles/build_airgap_package/tasks/main.yml @@ -1,28 +1,28 @@ --- # tasks file for build_airgap_package - name: Gather facts - setup: + ansible.builtin.setup: gather_subset: - "distribution" - "distribution_major_version" - "!min" when: > ansible_os_family is not defined - + - name: Prerequisites - import_tasks: prerequis.yml + ansible.builtin.import_tasks: prerequis.yml - name: Import utils binaries - import_tasks: utils.yml + ansible.builtin.import_tasks: utils.yml - name: Import RKE2 - import_tasks: 
rke2.yml + ansible.builtin.import_tasks: rke2.yml - name: Import helm charts - import_tasks: helm.yml + ansible.builtin.import_tasks: helm.yml - name: Import images - import_tasks: images.yml + ansible.builtin.import_tasks: images.yml -- name: Compress the all thing - import_tasks: compress.yml \ No newline at end of file +- name: Compress the all thing + ansible.builtin.import_tasks: compress.yml \ No newline at end of file diff --git a/roles/build_airgap_package/tasks/prerequis.yml b/roles/build_airgap_package/tasks/prerequis.yml index a515aa64d..14a89cef6 100644 --- a/roles/build_airgap_package/tasks/prerequis.yml +++ b/roles/build_airgap_package/tasks/prerequis.yml @@ -1,3 +1,4 @@ +--- # for RHEL-like - name: Install zstd and skopeo ansible.builtin.dnf: @@ -8,6 +9,7 @@ - skopeo when: - ansible_os_family == "RedHat" + become: true # for Debian-like - name: Install zstd and skopeo @@ -19,6 +21,7 @@ - skopeo when: - ansible_os_family == "Debian" + become: true - name: Create package directories ansible.builtin.file: diff --git a/roles/build_airgap_package/tasks/rke2.yml b/roles/build_airgap_package/tasks/rke2.yml index af1499918..54c27a42b 100644 --- a/roles/build_airgap_package/tasks/rke2.yml +++ b/roles/build_airgap_package/tasks/rke2.yml @@ -1,53 +1,84 @@ - +--- - name: Download rke2-images.linux-amd64.tar.zst - get_url: + ansible.builtin.get_url: url: "https://github.com/rancher/rke2/releases/download/{{ rke2_common_repo_version }}/rke2-images.linux-amd64.tar.zst" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/rke2-images.linux-amd64.tar.zst" + mode: "0750" - name: Download rke2.linux-amd64.tar.gz - get_url: + ansible.builtin.get_url: url: "https://github.com/rancher/rke2/releases/download/{{ rke2_common_repo_version }}/rke2.linux-amd64.tar.gz" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/rke2.linux-amd64.tar.gz" + mode: "0750" - name: Download sha256sum-amd64.txt - get_url: + ansible.builtin.get_url: url: 
"https://github.com/rancher/rke2/releases/download/{{ rke2_common_repo_version }}/sha256sum-amd64.txt" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/sha256sum-amd64.txt" + mode: "0750" - name: Download repo GPG key - get_url: + ansible.builtin.get_url: url: "https://rpm.rancher.io/public.key" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/public.key" + mode: "0750" - name: Download get.rke2.io - get_url: + ansible.builtin.get_url: url: "https://get.rke2.io" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/install.sh" + mode: "0750" # if localhost is RHEL-like take packages for the current major version -- block: +- name: Download if localhost RHEL-like and take same version than localhost + when: + - ansible_os_family == "RedHat" + block: - name: Download rke2-common RPM - get_url: + ansible.builtin.get_url: url: "https://github.com/rancher/rke2-packaging/releases/download/{{ rke2_common_repo_version }}.stable.0/{{ rke2_common_rpm_version }}.el{{ ansible_distribution_major_version }}.x86_64.rpm" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/{{ rke2_common_rpm_version }}.el{{ ansible_distribution_major_version }}.x86_64.rpm" + mode: "0750" - name: Download rke2-selinux RPM - get_url: + ansible.builtin.get_url: url: "https://github.com/rancher/rke2-selinux/releases/download/{{ rke2_selinux_repo_version }}/{{ rke2_selinux_rpm_version }}.el{{ ansible_distribution_major_version }}.noarch.rpm" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/{{ rke2_selinux_rpm_version }}.el{{ ansible_distribution_major_version }}.noarch.rpm" - when: - - ansible_os_family == "RedHat" + mode: "0750" + + - name: Download rke2 RPM + ansible.builtin.get_url: + url: "https://github.com/rancher/rke2-packaging/releases/download/v{{ rke2_version }}%2Brke2r1.stable.0/{{ item }}" + dest: "{{ directory_package }}/rke2_{{ rke2_version }}/{{ item }}" + mode: "0750" + loop: + - rke2-common-{{ rke2_version }}.rke2r1-0.el{{ ansible_distribution_major_version 
}}.x86_64.rpm + - rke2-agent-{{ rke2_version }}.rke2r1-0.el{{ ansible_distribution_major_version }}.x86_64.rpm + - rke2-server-{{ rke2_version }}.rke2r1-0.el{{ ansible_distribution_major_version }}.x86_64.rpm # if localhost is not a RHEL-like take el8 -- block: +- name: Download if localhost not a RHEL-like and take by default el8 + when: + - ansible_os_family != "RedHat" + block: - name: Download rke2-common RPM - get_url: + ansible.builtin.get_url: url: "https://github.com/rancher/rke2-packaging/releases/download/{{ rke2_common_repo_version }}.stable.0/{{ rke2_common_rpm_version }}.el8.x86_64.rpm" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/{{ rke2_common_rpm_version }}.el8.x86_64.rpm" + mode: "0750" - name: Download rke2-selinux RPM - get_url: + ansible.builtin.get_url: url: "https://github.com/rancher/rke2-selinux/releases/download/{{ rke2_selinux_repo_version }}/{{ rke2_selinux_rpm_version }}.el8.noarch.rpm" dest: "{{ directory_package }}/rke2_{{ rke2_version }}/{{ rke2_selinux_rpm_version }}.el8.noarch.rpm" - when: - - ansible_os_family != "RedHat" \ No newline at end of file + mode: "0750" + + - name: Download rke2 RPM + ansible.builtin.get_url: + url: "https://github.com/rancher/rke2-packaging/releases/download/v{{ rke2_version }}%2Brke2r1.stable.0/{{ item }}" + dest: "{{ directory_package }}/rke2_{{ rke2_version }}/{{ item }}" + mode: "0750" + loop: + - rke2-common-{{ rke2_version }}.rke2r1-0.el8.x86_64.rpm + - rke2-agent-{{ rke2_version }}.rke2r1-0.el8.x86_64.rpm + - rke2-server-{{ rke2_version }}.rke2r1-0.el8.x86_64.rpm diff --git a/roles/build_airgap_package/tasks/utils.yml b/roles/build_airgap_package/tasks/utils.yml index bccae399b..1d050cc68 100644 --- a/roles/build_airgap_package/tasks/utils.yml +++ b/roles/build_airgap_package/tasks/utils.yml @@ -1,3 +1,4 @@ +--- - name: Get Latest version of arkade ansible.builtin.get_url: url: "{{ lookup('url', 'https://api.github.com/repos/alexellis/arkade/releases/latest', split_lines=false) | 
regex_search('browser_download_url.*(https://github.com/alexellis/arkade/releases/download(.*?)/arkade)', '\\1') | first }}" @@ -7,7 +8,8 @@ - name: Arkade get packages ansible.builtin.shell: "{{ directory_package }}/utils/arkade get --progress=false --path {{ directory_package }}/utils/ {{ item }} > /dev/null" - loop: + changed_when: false + loop: - yq - jq - helm diff --git a/roles/build_airgap_package/tests/test.yml b/roles/build_airgap_package/tests/test.yml index 38b2e13ba..12d9eee0a 100644 --- a/roles/build_airgap_package/tests/test.yml +++ b/roles/build_airgap_package/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - - build_airgap + - build_airgap_package diff --git a/roles/deploy_certmanager/defaults/main.yml b/roles/deploy_certmanager/defaults/main.yml deleted file mode 100644 index 16a2b98bb..000000000 --- a/roles/deploy_certmanager/defaults/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# defaults file for deploy_certmanager -cert_version: "v{{ global_CERT_VERSION }}" -cert_charts: "cert-manager-{{ cert_version }}.tgz" - -# General -admin_user: "{{ global_install_user }}" -mount_path: "{{ global_directory_mount }}" -mount_helm_path: "{{ mount_path }}/helm" \ No newline at end of file diff --git a/roles/deploy_certmanager/handlers/main.yml b/roles/deploy_certmanager/handlers/main.yml deleted file mode 100644 index 8664e076c..000000000 --- a/roles/deploy_certmanager/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for deploy_cert diff --git a/roles/deploy_certmanager/tasks/deploy.yml b/roles/deploy_certmanager/tasks/deploy.yml deleted file mode 100644 index 14b557b10..000000000 --- a/roles/deploy_certmanager/tasks/deploy.yml +++ /dev/null @@ -1,29 +0,0 @@ -- name: "Deploy Certmanager" - run_once: true - become: true - become_user: "{{ admin_user }}" - become_method: sudo - become_flags: "-i" - block: - - - name: Deploy helm charts - kubernetes.core.helm: - atomic: true - 
name: "cert-manager" - chart_ref: "{{ mount_helm_path }}/{{ cert_charts }}" - release_namespace: "cert-manager" - create_namespace: true - values: - installCRDs: true - image: - repository: localhost:5000/cert/cert-manager-controller - webhook: - image: - repository: localhost:5000/cert/cert-manager-webhook - cainjector: - image: - repository: localhost:5000/cert/cert-manager-cainjector - startupapicheck: - image: - repository: localhost:5000/cert/cert-manager-ctl - kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" diff --git a/roles/deploy_certmanager/tasks/main.yml b/roles/deploy_certmanager/tasks/main.yml deleted file mode 100644 index 04640d714..000000000 --- a/roles/deploy_certmanager/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# tasks file for deploy_cert - -- name: Deploy with the Helm Charts on master - import_tasks: deploy.yml - when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] \ No newline at end of file diff --git a/roles/deploy_certmanager/tests/test.yml b/roles/deploy_certmanager/tests/test.yml deleted file mode 100644 index d377fd3e2..000000000 --- a/roles/deploy_certmanager/tests/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - deploy_cert diff --git a/roles/deploy_certmanager/vars/main.yml b/roles/deploy_certmanager/vars/main.yml deleted file mode 100644 index 46e8b38e6..000000000 --- a/roles/deploy_certmanager/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for deploy_cert diff --git a/roles/deploy_hauler/README.md b/roles/deploy_hauler/README.md new file mode 100644 index 000000000..66f5ac399 --- /dev/null +++ b/roles/deploy_hauler/README.md @@ -0,0 +1,72 @@ +Role Name +========= + +Role to deploy hauler on first controller. + +Requirements +------------ + +*Example below show that the roles have two flavors and different requirements in functions of what you want* + +if idm set to true: +- Access to a IDM server if you want to create users account. 
+- Credentials access to connect to IDM + +if idm set to false: +- create local account on Linux servers + +Role Variables +-------------- + +| **VarName** | **Type** | **Content** | **Mandatory** | +|--------------------|----------|---------------------------|:-------------:| +| idm | boolean | true / false | x | +| svc_account | string | Service Account | x | +| svc_account_passwd | string | pwd (can be omited) | | +| svc_group | string | Group | | +| svc_owner | string | Owner of the account | if idm true | +| list_svc_account | list | Users which goes in group | if idm true | +| idm_server | string | Service Account PWD | if idm true | +| idm_pwd | string | sudo group | if idm true | + +**Mandatory** is the minimum variables that need to be set to make the role work +*the variables not mandatory either have a default value defined or can be omited* + +Dependencies +------------ + +Dependencies with some others roles (if there is some). + +Example Playbook +---------------- +Give some example about how to use or implement your Roles + + +```yml +- name: Trigger Role Example in a Playbooks + hosts: RANDOM_GROUP_DEFINED_IN_YOUR_INVENTORY + remote_user: ansible + become: true + + roles: + - { role: 'example', tags: 'example' } +``` + +```yml +# Example for one user +- import_role: + name: "example" + vars: + svc_account: "{{ tomcat_svc_account }}" + svc_group: "{{ tomcat_svc_group }}" +``` + +License +------- + +Apache-2.0 + +Author Information +------------------ + +morze.baltyk@proton.me diff --git a/roles/deploy_hauler/defaults/main.yml b/roles/deploy_hauler/defaults/main.yml new file mode 100644 index 000000000..a0fecf6ce --- /dev/null +++ b/roles/deploy_hauler/defaults/main.yml @@ -0,0 +1,13 @@ +--- +# defaults file for deploy_hauler +deploy_hauler_directory: "{{ global_directory_package_target }}" +deploy_hauler_server: "{{ global_hauler_ip }}" + +# Firewall rules +hauler_firewalld_rules: + inbound: + - name: hauler + zone: public + ports: + - {port: 5000, 
protocol: tcp} + - {port: 8080, protocol: tcp} diff --git a/roles/deploy_hauler/handlers/main.yml b/roles/deploy_hauler/handlers/main.yml new file mode 100644 index 000000000..bad663f62 --- /dev/null +++ b/roles/deploy_hauler/handlers/main.yml @@ -0,0 +1,5 @@ +--- +# handlers file for deploy_hauler +- name: Systemd_reload + ansible.builtin.systemd: + daemon_reload: true diff --git a/roles/set_nfs_mount/meta/main.yml b/roles/deploy_hauler/meta/main.yml similarity index 94% rename from roles/set_nfs_mount/meta/main.yml rename to roles/deploy_hauler/meta/main.yml index ca0b7087d..044dd105f 100644 --- a/roles/set_nfs_mount/meta/main.yml +++ b/roles/deploy_hauler/meta/main.yml @@ -2,7 +2,7 @@ galaxy_info: standalone: false # Part of a collection author: morze.baltyk@proton.me - description: mount share nfs on workers to share images + description: Role to deploy hauler on first controller. company: Opensource # If the issue tracker for your role is not on github, uncomment the @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/deploy_hauler/tasks/main.yml b/roles/deploy_hauler/tasks/main.yml new file mode 100644 index 000000000..5b4e1a24e --- /dev/null +++ b/roles/deploy_hauler/tasks/main.yml @@ -0,0 +1,37 @@ +--- +# tasks file for deploy_hauler +- name: Gather facts + ansible.builtin.setup: + gather_subset: + - "distribution" + - "distribution_major_version" + - "default_ipv4" + - "!all,!min" + when: > + ansible_os_family is not defined + tags: [always] + +- name: Populate service facts + ansible.builtin.service_facts: {} + tags: [always] + +- name: Hauler firewalld + vars: + firewalld_rules: "{{ hauler_firewalld_rules }}" + ansible.builtin.import_role: + name: set_firewalld + tasks_from: main + when: + - ansible_facts['services']['firewalld.service'] is defined + - ansible_facts['services']['firewalld.service']['state'] == "running" + tags: firewalld + +- name: Hauler Install and settings for RHEL-like OS + ansible.builtin.import_tasks: rhel.yml + when: ansible_os_family == "RedHat" + +- name: Send fail message if not a Redhat-like OS + ansible.builtin.fail: + msg: > + "deploy_hauler role currently apply only on Redhat-like OS" + when: ansible_os_family != "RedHat" diff --git a/roles/deploy_hauler/tasks/rhel.yml b/roles/deploy_hauler/tasks/rhel.yml new file mode 100644 index 000000000..adb1bc390 --- /dev/null +++ b/roles/deploy_hauler/tasks/rhel.yml @@ -0,0 +1,70 @@ +--- +# as root +- name: Install and create repo for RPM type + become: true + block: + - name: Copy the hauler binary to /usr/local/bin + ansible.builtin.copy: + src: "{{ deploy_hauler_directory }}/hauler" + dest: /usr/local/bin/hauler + mode: '0755' + remote_src: true + + - name: Push template hauler service + ansible.builtin.template: + src: "{{ item }}" + dest: "/etc/systemd/system/hauler@.service" + mode: 0660 + loop: + - "hauler.service.j2" + notify: Systemd_reload + + - name: Enable and start the hauler services + ansible.builtin.systemd: + name: "{{ item }}" + 
state: "started" + enabled: true + loop: + - "hauler@fileserver" + - "hauler@registry" + + - name: Pause for 5 seconds + ansible.builtin.pause: + seconds: 5 + + - name: Wait until 'hauler store info' command succeeds + ansible.builtin.shell: + cmd: "hauler store info > /dev/null 2>&1" + executable: /bin/bash + args: + chdir: "{{ deploy_hauler_directory }}" + changed_when: false + register: hauler_store_info + until: hauler_store_info.rc == 0 + retries: 30 + delay: 10 + + - name: Install createrepo + ansible.builtin.dnf: + name: "{{ item }}" + state: present + with_items: + - createrepo + + - name: Save hauler index to fileserver + ansible.builtin.copy: + dest: "{{ deploy_hauler_directory }}/fileserver/_hauler_index.txt" + content: "{{ hauler_store_info['stdout'] }}" + + - name: Save hauler repo to fileserver + ansible.builtin.template: + src: hauler.repo.j2 + dest: "{{ deploy_hauler_directory }}/fileserver/hauler.repo" + + - name: Createrepo on fileserver + ansible.builtin.shell: + cmd: | + set -o pipefail + createrepo {{ deploy_hauler_directory }}/fileserver + executable: /bin/bash + changed_when: false diff --git a/roles/deploy_hauler/templates/hauler.repo.j2 b/roles/deploy_hauler/templates/hauler.repo.j2 new file mode 100644 index 000000000..cb17d38f5 --- /dev/null +++ b/roles/deploy_hauler/templates/hauler.repo.j2 @@ -0,0 +1,5 @@ +name=Hauler Air Gap Server +baseurl=http://{{ deploy_hauler_server }}:8080 +enabled=1 +gpgcheck=1 +gpgkey=http://{{ deploy_hauler_server }}:8080/public.key diff --git a/roles/deploy_hauler/templates/hauler.service.j2 b/roles/deploy_hauler/templates/hauler.service.j2 new file mode 100644 index 000000000..18ec684eb --- /dev/null +++ b/roles/deploy_hauler/templates/hauler.service.j2 @@ -0,0 +1,11 @@ +# /etc/systemd/system/hauler.service +[Unit] +Description=Hauler Serve %I Service + +[Service] +Environment="HOME={{ deploy_hauler_directory }}" +ExecStart=/usr/local/bin/hauler store serve %i +WorkingDirectory={{ deploy_hauler_directory }} 
+ +[Install] +WantedBy=multi-user.target diff --git a/roles/install_utils_registry/tests/inventory b/roles/deploy_hauler/tests/inventory similarity index 90% rename from roles/install_utils_registry/tests/inventory rename to roles/deploy_hauler/tests/inventory index 878877b07..2fbb50c4a 100644 --- a/roles/install_utils_registry/tests/inventory +++ b/roles/deploy_hauler/tests/inventory @@ -1,2 +1 @@ localhost - diff --git a/roles/deploy_hauler/tests/test.yml b/roles/deploy_hauler/tests/test.yml new file mode 100644 index 000000000..137b354f8 --- /dev/null +++ b/roles/deploy_hauler/tests/test.yml @@ -0,0 +1,6 @@ +--- +- name: Test + hosts: localhost + remote_user: root + roles: + - deploy_hauler diff --git a/roles/deploy_longhorn/defaults/main.yml b/roles/deploy_longhorn/defaults/main.yml index 3952b0db0..157c10fc8 100644 --- a/roles/deploy_longhorn/defaults/main.yml +++ b/roles/deploy_longhorn/defaults/main.yml @@ -1,14 +1,12 @@ --- # defaults file for deploy_longhorn -longhorn_version: "{{ global_LONGHORN_VERSION }}" -longhorn_charts: "longhorn-{{ longhorn_version }}.tgz" longhorn_servicename: "longhorn" longhorn_domain: "{{ global_domain }}" longhorn_url: "{{ longhorn_servicename }}.{{ longhorn_domain }}" longhorn_datapath: "{{ global_longhorn_datapath }}" +longhorn_replica: "{{ global_longhorn_replica }}" # General admin_user: "{{ global_install_user }}" master: "{{ global_master_ip }}" -mount_path: "{{ global_directory_mount }}" -mount_helm_path: "{{ mount_path }}/helm" \ No newline at end of file +airgap_install: "{{ global_airgap_install | bool }}" diff --git a/roles/deploy_longhorn/handlers/main.yml b/roles/deploy_longhorn/handlers/main.yml deleted file mode 100644 index 963115ec7..000000000 --- a/roles/deploy_longhorn/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for deploy_longhorn diff --git a/roles/deploy_longhorn/meta/main.yml b/roles/deploy_longhorn/meta/main.yml index 1907b01eb..8b5b58f3a 100644 --- 
a/roles/deploy_longhorn/meta/main.yml +++ b/roles/deploy_longhorn/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: diff --git a/roles/deploy_longhorn/tasks/deploy.yml b/roles/deploy_longhorn/tasks/deploy.yml index 22fd372b2..bd142c576 100644 --- a/roles/deploy_longhorn/tasks/deploy.yml +++ b/roles/deploy_longhorn/tasks/deploy.yml @@ -1,25 +1,75 @@ +--- +# Airgap - name: "Deploy Longhorn" run_once: true become: true become_user: "{{ admin_user }}" - become_method: sudo + become_method: ansible.builtin.sudo become_flags: "-i" - block: + when: airgap_install + block: + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "longhorn" + chart_ref: "oci://{{ master }}:5000/hauler/longhorn" + release_namespace: "longhorn-system" + create_namespace: true + values: + global: + cattle: + systemDefaultRegistry: "{{ master }}:5000" + ingress: + enabled: true + host: "{{ longhorn_url }}" + defaultSettings: + defaultDataPath: "{{ longhorn_datapath }}" + nodeDownPodDeletionPolicy: delete-both-statefulset-and-deployment-pod + persistence: + defaultClassReplicaCount: "{{ longhorn_replica }}" + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" - - name: Deploy helm charts - kubernetes.core.helm: - atomic: true - name: "longhorn" - chart_ref: "{{ mount_helm_path }}/{{ longhorn_charts }}" - release_namespace: "longhorn-system" - create_namespace: true - values: - global: - cattle: - systemDefaultRegistry: "localhost:5000" - ingress: - enabled: true - host: "{{ longhorn_url }}" - defaultSettings: - defaultDataPath: "{{ longhorn_datapath }}" - kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" \ No newline at end of file +# Non-Airgap +- name: "Deploy Longhorn" + run_once: true + become: true + become_user: "{{ 
admin_user }}" + become_method: ansible.builtin.sudo + become_flags: "-i" + when: not airgap_install + block: + - name: Longhorn Versions needed + ansible.builtin.import_role: + name: set_versions + tasks_from: only_longhorn + run_once: true + + - name: Add longhorn chart repo + kubernetes.core.helm_repository: + name: longhorn + repo_url: "https://charts.longhorn.io" + force_update: true + + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "longhorn" + chart_ref: "longhorn/longhorn" + chart_version: "{{ longhorn_version }}" + release_namespace: "longhorn-system" + create_namespace: true + values: + ingress: + enabled: true + host: "{{ longhorn_url }}" + defaultSettings: + defaultDataPath: "{{ longhorn_datapath }}" + nodeDownPodDeletionPolicy: delete-both-statefulset-and-deployment-pod + persistence: + defaultClassReplicaCount: "{{ longhorn_replica }}" + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + +# Common +- name: Display Longhorn URL + ansible.builtin.debug: + msg: "Longhorn URL : {{ longhorn_url }}" diff --git a/roles/deploy_longhorn/tasks/install.yml b/roles/deploy_longhorn/tasks/install.yml index 538ce7aff..3b1fada99 100644 --- a/roles/deploy_longhorn/tasks/install.yml +++ b/roles/deploy_longhorn/tasks/install.yml @@ -1,24 +1,25 @@ -# Install and start iscsid -- block: - - name: Ensure iscsi-initiator-utils package is installed on Redhat based OS - dnf: - name: iscsi-initiator-utils - state: present - - - name: Start and enable iscsid - systemd: - name: iscsid - state: started - enabled: yes - +--- +# Install and start iscsid on RHEL 8 +- name: Install and start iscsid on RHEL 8 become: true when: - ansible_os_family == "RedHat" - ansible_distribution_major_version | int >= 8 + block: + - name: Ensure iscsi-initiator-utils package is installed on Redhat based OS + ansible.builtin.dnf: + name: iscsi-initiator-utils + state: present + + - name: Start and enable iscsid + ansible.builtin.systemd: + name: 
iscsid + state: started + enabled: true - name: Ensure longhorn directory exist ansible.builtin.file: path: "{{ longhorn_datapath }}" state: directory recurse: true - mode: '0750' \ No newline at end of file + mode: '0750' diff --git a/roles/deploy_longhorn/tasks/main.yml b/roles/deploy_longhorn/tasks/main.yml index 1503a0749..1d65fcdb3 100644 --- a/roles/deploy_longhorn/tasks/main.yml +++ b/roles/deploy_longhorn/tasks/main.yml @@ -1,17 +1,19 @@ --- # tasks file for deploy_longhorn - name: Gather facts - setup: + ansible.builtin.setup: gather_subset: - "distribution" - "distribution_major_version" + - "default_ipv4" - "!min" when: > ansible_os_family is not defined + tags: [always] -- name: Tasks for RHEL-like OS - import_tasks: install.yml +- name: Prerequis needed on all nodes + ansible.builtin.import_tasks: install.yml - name: Deploy with the Helm Charts on master - import_tasks: deploy.yml - when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] \ No newline at end of file + ansible.builtin.import_tasks: deploy.yml + when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] diff --git a/roles/deploy_longhorn/tests/test.yml b/roles/deploy_longhorn/tests/test.yml index b347a1fdc..90171086d 100644 --- a/roles/deploy_longhorn/tests/test.yml +++ b/roles/deploy_longhorn/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - deploy_longhorn diff --git a/roles/deploy_neuvector/defaults/main.yml b/roles/deploy_neuvector/defaults/main.yml index b08038e02..c46379ba4 100644 --- a/roles/deploy_neuvector/defaults/main.yml +++ b/roles/deploy_neuvector/defaults/main.yml @@ -1,7 +1,5 @@ --- # defaults file for deploy_neuvector -neuvector_version: "{{ global_NEU_VERSION }}" -neuvector_charts: "core-{{ neuvector_version }}.tgz" neuvector_servicename: "neuvector" neuvector_domain: "{{ global_domain }}" neuvector_url: "{{ neuvector_servicename }}.{{ neuvector_domain }}" @@ -9,5 +7,4 @@ neuvector_url: "{{ 
neuvector_servicename }}.{{ neuvector_domain }}" # General admin_user: "{{ global_install_user }}" master: "{{ global_master_ip }}" -mount_path: "{{ global_directory_mount }}" -mount_helm_path: "{{ mount_path }}/helm" \ No newline at end of file +airgap_install: "{{ global_airgap_install | bool }}" diff --git a/roles/deploy_neuvector/handlers/main.yml b/roles/deploy_neuvector/handlers/main.yml deleted file mode 100644 index 0cd74fa5a..000000000 --- a/roles/deploy_neuvector/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for deploy_neuvector diff --git a/roles/deploy_neuvector/meta/main.yml b/roles/deploy_neuvector/meta/main.yml index 4520372b4..32757d498 100644 --- a/roles/deploy_neuvector/meta/main.yml +++ b/roles/deploy_neuvector/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: diff --git a/roles/deploy_neuvector/tasks/deploy.yml b/roles/deploy_neuvector/tasks/deploy.yml index fe63b47e7..242c01d3e 100644 --- a/roles/deploy_neuvector/tasks/deploy.yml +++ b/roles/deploy_neuvector/tasks/deploy.yml @@ -1,47 +1,113 @@ +--- +# Airgap - name: "Deploy Neuvector" run_once: true become: true become_user: "{{ admin_user }}" - become_method: sudo + become_method: ansible.builtin.sudo become_flags: "-i" + when: airgap_install block: - - - name: Deploy helm charts - kubernetes.core.helm: - atomic: true - name: "neuvector" - chart_ref: "{{ mount_helm_path }}/{{ neuvector_charts }}" - release_namespace: "neuvector" - create_namespace: true - values: - imagePullSecrets: regsecret - registry: localhost:5000 - k3s: - enabled: true - runtimePath: /run/k3s/containerd/containerd.sock - manager: - image: - repository: neuvector/manager - ingress: + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "neuvector" + chart_ref: 
"neuvector/core" + release_namespace: "neuvector" + create_namespace: true + values: + registry: "{{ master }}:5000" + k3s: enabled: true + runtimePath: /run/k3s/containerd/containerd.sock + manager: + image: + repository: neuvector/manager ingress: - host: "{{ neuvector_url }}" - svc: - type: ClusterIP - controller: - image: - repository: neuvector/controller - pvc: - enabled: true - capacity: 500Mi - enforcer: - image: - repository: neuvector/enforcer - internal: - certmanager: + enabled: true + ingress: + host: "{{ neuvector_url }}" + svc: + type: ClusterIP + controller: + image: + repository: neuvector/controller + pvc: + enabled: true + capacity: 500Mi + enforcer: + image: + repository: neuvector/enforcer + internal: + certmanager: + enabled: true + cve: + updater: + image: + repository: neuvector/updater + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + +# Non-Airgap +- name: "Deploy Neuvector" + run_once: true + become: true + become_user: "{{ admin_user }}" + become_method: ansible.builtin.sudo + become_flags: "-i" + when: not airgap_install + block: + - name: Neuvector Versions needed + ansible.builtin.import_role: + name: set_versions + tasks_from: only_neuvector + run_once: true + + - name: Add Neuvector chart repo + kubernetes.core.helm_repository: + name: neuvector + repo_url: "https://neuvector.github.io/neuvector-helm/" + force_update: true + + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "neuvector" + chart_ref: "neuvector/core" + chart_version: "{{ neuvector_version }}" + release_namespace: "neuvector" + create_namespace: true + values: + k3s: enabled: true - cve: - updater: + runtimePath: /run/k3s/containerd/containerd.sock + manager: image: - repository: neuvector/updater - kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + repository: neuvector/manager + ingress: + enabled: true + ingress: + host: "{{ neuvector_url }}" + svc: + type: ClusterIP + controller: + image: 
+ repository: neuvector/controller + pvc: + enabled: true + capacity: 500Mi + enforcer: + image: + repository: neuvector/enforcer + internal: + certmanager: + enabled: true + cve: + updater: + image: + repository: neuvector/updater + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + +# Common +- name: Display Neuvector URL + ansible.builtin.debug: + msg: "Neuvector URL : {{ neuvector_url }}" diff --git a/roles/deploy_neuvector/tasks/main.yml b/roles/deploy_neuvector/tasks/main.yml index fad70591e..124b2fbd1 100644 --- a/roles/deploy_neuvector/tasks/main.yml +++ b/roles/deploy_neuvector/tasks/main.yml @@ -1,12 +1,14 @@ --- # tasks file for deploy_neuvector - -- name: Install Cert-manager as Neuvector prerequisites - ansible.builtin.import_role: - name: deploy_certmanager - tasks_from: main - tags: certmanager +- name: Gather facts + ansible.builtin.setup: + gather_subset: + - "default_ipv4" + - "!all,!min" + when: > + ansible_default_ipv4 is not defined + tags: [always] - name: Deploy with the Helm Charts on master - import_tasks: deploy.yml - when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] \ No newline at end of file + ansible.builtin.import_tasks: deploy.yml + when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] diff --git a/roles/deploy_neuvector/tests/test.yml b/roles/deploy_neuvector/tests/test.yml index 77d871a9d..851c514ce 100644 --- a/roles/deploy_neuvector/tests/test.yml +++ b/roles/deploy_neuvector/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - deploy_neuvector diff --git a/roles/deploy_rancher/defaults/main.yml b/roles/deploy_rancher/defaults/main.yml index 09efefaf9..902778ba2 100644 --- a/roles/deploy_rancher/defaults/main.yml +++ b/roles/deploy_rancher/defaults/main.yml @@ -1,7 +1,5 @@ --- # defaults file for deploy_rancher -rancher_version: "{{ global_RANCHER_VERSION }}" -rancher_charts: "rancher-{{ rancher_version }}.tgz" rancher_servicename: 
"rancher" rancher_domain: "{{ global_domain }}" rancher_url: "{{ rancher_servicename }}.{{ rancher_domain }}" @@ -10,5 +8,4 @@ rancher_password: "{{ global_rancher_password }}" # General admin_user: "{{ global_install_user }}" master: "{{ global_master_ip }}" -mount_path: "{{ global_directory_mount }}" -mount_helm_path: "{{ mount_path }}/helm" \ No newline at end of file +airgap_install: "{{ global_airgap_install | bool }}" diff --git a/roles/deploy_rancher/handlers/main.yml b/roles/deploy_rancher/handlers/main.yml deleted file mode 100644 index 2bf8e91f3..000000000 --- a/roles/deploy_rancher/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for deploy_rancher diff --git a/roles/deploy_rancher/meta/main.yml b/roles/deploy_rancher/meta/main.yml index 7be14f8d5..93d21467c 100644 --- a/roles/deploy_rancher/meta/main.yml +++ b/roles/deploy_rancher/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/deploy_rancher/tasks/deploy.yml b/roles/deploy_rancher/tasks/deploy.yml index f79e600da..7a015736d 100644 --- a/roles/deploy_rancher/tasks/deploy.yml +++ b/roles/deploy_rancher/tasks/deploy.yml @@ -1,26 +1,120 @@ +--- +# Airgap +- name: "Deploy Cert-manager and Rancher" + run_once: true + become: true + become_user: "{{ admin_user }}" + become_method: ansible.builtin.sudo + become_flags: "-i" + when: airgap_install + block: + # Cert-manager + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "cert-manager" + chart_ref: "oci://{{ master }}:5000/hauler/cert-manager" + release_namespace: "cert-manager" + create_namespace: true + values: + installCRDs: true + image: + repository: "{{ master }}:5000/cert/cert-manager-controller" + webhook: + image: + repository: "{{ master }}:5000/cert/cert-manager-webhook" + cainjector: + image: + repository: "{{ master }}:5000/cert/cert-manager-cainjector" + startupapicheck: + image: + repository: "{{ master }}:5000/cert/cert-manager-ctl" + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + + # Rancher + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "rancher" + chart_ref: "oci://{{ master }}:5000/hauler/rancher" + release_namespace: "cattle-system" + create_namespace: true + values: + useBundledSystemChart: true + bootstrapPassword: "{{ rancher_password }}" + replicas: 1 + systemDefaultRegistry: "{{ master }}:5000" + rancherImage: "{{ master }}:5000/rancher/rancher" + hostname: "{{ rancher_url }}" + auditLog: + level: 2 + destination: hostPath + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + +# Non-Airgap - name: "Deploy Rancher" run_once: true become: true become_user: "{{ admin_user }}" - become_method: sudo + become_method: ansible.builtin.sudo become_flags: "-i" + when: not airgap_install block: + # Get Versions + - name: Rancher Versions needed + ansible.builtin.import_role: + 
name: set_versions + tasks_from: only_rancher + run_once: true + + # Cert-manager + - name: Add Cert-Manager chart repo + kubernetes.core.helm_repository: + name: jetstack + repo_url: "https://charts.jetstack.io" + force_update: true + + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "cert-manager" + chart_ref: "jetstack/cert-manager" + chart_version: "{{ cert_manager_version }}" + release_namespace: "cert-manager" + create_namespace: true + values: + installCRDs: true + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + + # Rancher + - name: Add Rancher chart repo + kubernetes.core.helm_repository: + name: rancher + repo_url: "https://releases.rancher.com/server-charts/latest" + force_update: true + + + - name: Deploy helm charts + kubernetes.core.helm: + atomic: true + name: "rancher" + chart_ref: "rancher/rancher" + chart_version: "{{ rancher_version }}" + release_namespace: "cattle-system" + create_namespace: true + values: + useBundledSystemChart: true + bootstrapPassword: "{{ rancher_password }}" + replicas: 1 + #systemDefaultRegistry: "{{ master }}:5000" + #rancherImage: "{{ master }}:5000/rancher/rancher" + hostname: "{{ rancher_url }}" + auditLog: + level: 2 + destination: hostPath + kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" - - name: Deploy helm charts - kubernetes.core.helm: - atomic: true - name: "rancher" - chart_ref: "{{ mount_helm_path }}/{{ rancher_charts }}" - release_namespace: "cattle-system" - create_namespace: true - values: - useBundledSystemChart: true - bootstrapPassword: "{{ rancher_password }}" - replicas: 1 - systemDefaultRegistry: localhost:5000 - rancherImage: localhost:5000/rancher/rancher - hostname: "{{ rancher_url }}" - auditLog: - level: 2 - destination: hostPath - kubeconfig: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" +# Common +- name: Display Rancher URL + ansible.builtin.debug: + msg: "Rancher URL : {{ rancher_url }}" diff --git 
a/roles/deploy_rancher/tasks/main.yml b/roles/deploy_rancher/tasks/main.yml index 6b5ddd861..48b0ffadc 100644 --- a/roles/deploy_rancher/tasks/main.yml +++ b/roles/deploy_rancher/tasks/main.yml @@ -1,12 +1,14 @@ --- # tasks file for deploy_rancher - -- name: Install Cert-manager as Rancher prerequisites - ansible.builtin.import_role: - name: deploy_certmanager - tasks_from: main - tags: certmanager +- name: Gather facts + ansible.builtin.setup: + gather_subset: + - "default_ipv4" + - "!all,!min" + when: > + ansible_default_ipv4 is not defined + tags: [always] - name: Deploy with the Helm Charts on master - import_tasks: deploy.yml - when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] \ No newline at end of file + ansible.builtin.import_tasks: deploy.yml + when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] diff --git a/roles/deploy_rancher/tests/test.yml b/roles/deploy_rancher/tests/test.yml index 353fc6766..412a2dc5f 100644 --- a/roles/deploy_rancher/tests/test.yml +++ b/roles/deploy_rancher/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - deploy_rancher diff --git a/roles/install_rke2_common/defaults/main.yml b/roles/install_rke2_common/defaults/main.yml index 8546a2855..bc83c9918 100644 --- a/roles/install_rke2_common/defaults/main.yml +++ b/roles/install_rke2_common/defaults/main.yml @@ -1,3 +1,2 @@ --- # defaults file for install_common -admin_user: "{{ global_install_user }}" diff --git a/roles/install_rke2_common/handlers/main.yml b/roles/install_rke2_common/handlers/main.yml index 224d858d7..0e3edc447 100644 --- a/roles/install_rke2_common/handlers/main.yml +++ b/roles/install_rke2_common/handlers/main.yml @@ -21,4 +21,4 @@ ansible.builtin.systemd: name: rke2-agent.service state: restarted - notify: "Service (re)started" \ No newline at end of file + notify: "Service (re)started" diff --git a/roles/install_rke2_common/meta/main.yml b/roles/install_rke2_common/meta/main.yml index 
5c30fda1b..3ce0052bc 100644 --- a/roles/install_rke2_common/meta/main.yml +++ b/roles/install_rke2_common/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: diff --git a/roles/install_rke2_common/tasks/common.yml b/roles/install_rke2_common/tasks/common.yml new file mode 100644 index 000000000..264ab92a4 --- /dev/null +++ b/roles/install_rke2_common/tasks/common.yml @@ -0,0 +1,60 @@ +--- +# Prerequisites +- name: Ensure admin_user exist + ansible.builtin.user: + name: "{{ admin_user }}" + comment: "rke2 user for administration" + createhome: true + +- name: Create sysctl.conf from template + ansible.builtin.template: + src: rke2.conf + dest: /etc/sysctl.d/rke2.conf + mode: '0600' + +- name: Restart_sysctl + ansible.builtin.systemd: + state: restarted + name: systemd-sysctl + +# Prepare Dir +- name: Ensure data_dir exists + ansible.builtin.file: + path: "{{ rke2_data_dir }}" + state: directory + mode: "0640" + +- name: Ensure dir /var/lib/rancher exists + ansible.builtin.file: + path: "/var/lib/rancher" + state: directory + mode: "0640" + +- name: Create Symlink between rke2_data_dir and /var/lib/rancher/rke2 + ansible.builtin.file: + src: "{{ rke2_data_dir }}" + dest: /var/lib/rancher/rke2 + state: link + when: rke2_data_dir != "/var/lib/rancher/rke2" + +# Common packages on all nodes +- name: Install packages common + ansible.builtin.dnf: + name: "{{ item }}" + state: present + with_items: + - zstd + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + +- name: Install packages common + ansible.builtin.apt: + name: "{{ item }}" + state: present + with_items: + - zstd + become: true + when: + - ansible_os_family == "Debian" diff --git a/roles/install_rke2_common/tasks/install.yml 
b/roles/install_rke2_common/tasks/install.yml deleted file mode 100644 index 28a6e3f9d..000000000 --- a/roles/install_rke2_common/tasks/install.yml +++ /dev/null @@ -1,27 +0,0 @@ -- name: Ensure admin_user exist - ansible.builtin.user: - name: "{{ admin_user }}" - comment: "rke2 user for administration" - createhome: true - -- name: Install common packages - ansible.builtin.dnf: - name: "{{ item }}" - state: present - with_items: - - iptables - - container-selinux - - libnetfilter_conntrack - - libnfnetlink - - libnftnl - - policycoreutils-python-utils - - cryptsetup - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - -- name: Create sysctl.conf from template - ansible.builtin.template: - src: rke2.conf - dest: /etc/sysctl.d/rke2.conf - notify: Restart systemd-sysctl diff --git a/roles/install_rke2_common/tasks/main.yml b/roles/install_rke2_common/tasks/main.yml index 4be2b8246..2dbf31f1f 100644 --- a/roles/install_rke2_common/tasks/main.yml +++ b/roles/install_rke2_common/tasks/main.yml @@ -8,9 +8,22 @@ - "!min" when: > ansible_os_family is not defined + tags: [always] + +- name: Populate service facts + ansible.builtin.service_facts: {} + when: > + ansible_facts['services'] is not defined + tags: [always] + +- name: Specific config for NetworkManager + ansible.builtin.import_tasks: networkmanager.yml + when: + - ansible_facts['services']['NetworkManager.service'] is defined + tags: networkmanager - name: Tasks common to Linux servers for RKE2 installation - ansible.builtin.import_tasks: install.yml + ansible.builtin.import_tasks: common.yml - name: Get Token if one exist ansible.builtin.import_tasks: token.yml @@ -18,4 +31,6 @@ - name: RKE2 Install specific for RHEL-like OS ansible.builtin.import_tasks: rhel.yml when: ansible_os_family == "RedHat" - tags: rhel + +- name: Flush handlers + ansible.builtin.meta: flush_handlers diff --git a/roles/install_rke2_common/tasks/networkmanager.yml 
b/roles/install_rke2_common/tasks/networkmanager.yml new file mode 100644 index 000000000..b60f9a055 --- /dev/null +++ b/roles/install_rke2_common/tasks/networkmanager.yml @@ -0,0 +1,23 @@ +--- +# NetworkManager config +- name: Configure NetworkManager for RKE2 and Canal + ansible.builtin.copy: + content: | + [keyfile] + unmanaged-devices=interface-name:cali*;interface-name:flannel* + dest: /etc/NetworkManager/conf.d/rke2-canal.conf + mode: '0600' + +- name: Set rke2-canal.conf file permissions + ansible.builtin.file: + path: /etc/NetworkManager/conf.d/rke2-canal.conf + mode: '0600' + owner: root + group: root + +- name: Reload NetworkManager + ansible.builtin.systemd: + name: NetworkManager + state: reloaded + when: + - ansible_facts['services']['NetworkManager.service']['state'] == "running" diff --git a/roles/install_rke2_common/tasks/rhel.yml b/roles/install_rke2_common/tasks/rhel.yml index 086fbf357..741abd804 100644 --- a/roles/install_rke2_common/tasks/rhel.yml +++ b/roles/install_rke2_common/tasks/rhel.yml @@ -1,14 +1,16 @@ +--- # For RHEL >= 8 as root -- block: - - name: Display message - debug: - msg: "Specific actions regarding servers in RHEL 8/9" - +- name: Specific actions to be done on RHEL 8 + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + block: # Desactivate Cloud services - name: Disable service nm-cloud-setup ansible.builtin.systemd: name: nm-cloud-setup.service - enabled: no + enabled: false state: stopped when: ansible_facts['services']['nm-cloud-setup.service'] is defined @@ -16,46 +18,16 @@ ansible.builtin.systemd: name: nm-cloud-setup.timer state: stopped - enabled: no + enabled: false when: ansible_facts['services']['nm-cloud-setup.service'] is defined - # Networker config - - name: Configure NetworkManager for RKE2 and Canal - ansible.builtin.copy: - content: | - [keyfile] - unmanaged-devices=interface-name:cali*;interface-name:flannel* - dest: 
/etc/NetworkManager/conf.d/rke2-canal.conf - when: ansible_facts['services']['NetworkManager.service'] is defined - - - name: Set rke2-canal.conf file permissions - ansible.builtin.file: - path: /etc/NetworkManager/conf.d/rke2-canal.conf - mode: '0600' - owner: root - group: root - when: ansible_facts['services']['NetworkManager.service'] is defined - - - name: Reload NetworkManager - ansible.builtin.systemd: - name: NetworkManager - state: reloaded - when: - - ansible_facts['services']['NetworkManager.service'] is defined - - ansible_facts['services']['NetworkManager.service']['state'] == "running" - - become: true - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - # For RHEL 7 as root -- block: - - name: Display message - debug: - msg: "Specific actions regarding servers in RHEL 7" - +- name: Specific actions to be done on RHEL 7 become: true when: - ansible_os_family == "RedHat" - ansible_distribution_major_version | int == 7 + block: + - name: Display message + ansible.builtin.debug: + msg: "Specific actions regarding servers in RHEL 7" diff --git a/roles/install_rke2_common/tasks/rpm_install.yml b/roles/install_rke2_common/tasks/rpm_install.yml new file mode 100644 index 000000000..87fe4faa4 --- /dev/null +++ b/roles/install_rke2_common/tasks/rpm_install.yml @@ -0,0 +1,102 @@ +--- +# NB: +# - Airgap does not need to have rke2 version variable, since it take what is present in Hauler repo. 
+# - RPM apply only for Redhat-like OS + +# Define which type of node +- name: Define server install + ansible.builtin.set_fact: + type_node: "server" + when: caller_role_name == "controller" + +- name: Define agent install + ansible.builtin.set_fact: + type_node: "agent" + when: caller_role_name == "worker" + +# Not airgap +- name: Set repo for RPM with internet access + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + - not airgap_install + block: + - name: TARBALL | Versions needed to set repo + ansible.builtin.import_role: + name: set_versions + tasks_from: only_rke2 + run_once: true + + - name: Set Official RKE2 common repo + ansible.builtin.yum_repository: + name: rancher-rke2-common-latest + description: "Rancher RKE2 Common Latest" + baseurl: "https://rpm.rancher.io/rke2/latest/common/centos/{{ ansible_distribution_major_version }}/noarch" + gpgcheck: true + gpgkey: "https://rpm.rancher.io/public.key" + + - name: Set Official RKE2 version repo + ansible.builtin.yum_repository: + name: rancher-rke2-latest + description: "Rancher RKE2 Latest" + baseurl: "https://rpm.rancher.io/rke2/latest/{{ rke2_version.split('.')[:2] | join('.') }}/centos/{{ ansible_distribution_major_version }}/x86_64" + gpgcheck: true + gpgkey: "https://rpm.rancher.io/public.key" + +# Airgap +- name: Set Hauler as an airgap repo and registry for RPM + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + - airgap_install + block: + - name: Set repo Hauler Airgap Server + ansible.builtin.yum_repository: + name: hauler + description: "Hauler Airgap Server" + baseurl: "http://{{ hauler_server }}:8080" + gpgcheck: true + gpgkey: "http://{{ hauler_server }}:8080/public.key" + + - name: Set mirror registry (needed for rpm_install in airgap) + ansible.builtin.template: + src: "{{ item }}" + dest: "/etc/rancher/rke2/{{ item | basename | regex_replace('.j2$', '') }}" + mode: "0640" + 
loop: + - "registries.yaml.j2" + +# Install needed +- name: Install RKE2 with rpm method + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + block: + - name: Install RKE2 rpm packages + ansible.builtin.dnf: + name: "{{ item }}" + state: present + loop: + - "rke2-{{ type_node }}" + + # https://docs.rke2.io/install/airgap + # - only if selinux activated + # - common to tarball and rpm install + - name: Install common packages + ansible.builtin.dnf: + name: "{{ item }}" + state: present + with_items: + - iptables + - container-selinux + - libnetfilter_conntrack + - libnfnetlink + - libnftnl + - policycoreutils-python-utils + - cryptsetup + - rke2-common + - rke2-selinux + when: ansible_selinux['status'] == 'enabled' diff --git a/roles/install_rke2_common/tasks/tarball_install.yml b/roles/install_rke2_common/tasks/tarball_install.yml new file mode 100644 index 000000000..dcf11444e --- /dev/null +++ b/roles/install_rke2_common/tasks/tarball_install.yml @@ -0,0 +1,146 @@ +--- +# NB: +# - Airgap does not need to have rke2 version variable, since it take what is present in Hauler repo. 
+# - Tarball Should work for all Linux + +# Define which type of node +- name: Define server install + ansible.builtin.set_fact: + type_node: "server" + when: caller_role_name == "controller" + +- name: Define agent install + ansible.builtin.set_fact: + type_node: "agent" + when: caller_role_name == "worker" + +# Not Airgap +- name: Install with tarball method + become: true + when: + - not airgap_install + block: + - name: TARBALL | Versions needed to set repo + ansible.builtin.import_role: + name: set_versions + tasks_from: only_rke2 + run_once: true + + - name: TARBALL | Install RKE2 node + ansible.builtin.shell: + cmd: | + set -o pipefail + curl -sfL https://get.rke2.io | INSTALL_RKE2_METHOD=tar INSTALL_RKE2_TYPE={{ type_node }} INSTALL_RKE2_VERSION=v{{ rke2_version }}+rke2r1 sh - + executable: /bin/bash + register: install_output + failed_when: false + changed_when: false + + - name: TARBALL | Display install output + ansible.builtin.debug: + var: install_output['stdout'] + +# Airgap +- name: Install with tarball method + become: true + when: + - airgap_install + block: + - name: TARBALL | Make temp dir + ansible.builtin.tempfile: + state: directory + suffix: "-rke2-install" + path: "{{ tarball_tmp_dir | default(omit) }}" + register: temp_dir + + - name: TARBALL | Get RKE2 Artifacts from URL + ansible.builtin.get_url: + url: "http://{{ hauler_server }}:8080/{{ item }}" + dest: "{{ temp_dir['path'] }}/{{ item }}" + validate_certs: false + loop: + - rke2-images.linux-amd64.tar.zst + - rke2.linux-amd64.tar.gz + - sha256sum-amd64.txt + - install.sh + + - name: TARBALL | Install RKE2 server node + ansible.builtin.shell: + cmd: | + set -o pipefail + INSTALL_RKE2_ARTIFACT_PATH={{ temp_dir['path'] }} INSTALL_RKE2_TYPE={{ type_node }} sh {{ temp_dir['path'] }}/install.sh + chdir: "{{ temp_dir['path'] }}" + executable: /bin/bash + register: install_server_output + failed_when: false + changed_when: false + + - name: TARBALL | Display install output + 
ansible.builtin.debug: + var: install_server_output['stdout_lines'] + +### RPM apply only for Redhat-like OS ### + +# Not airgap +- name: Set repo for RPM with internet access + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + - not airgap_install + block: + - name: Set Official RKE2 common repo + ansible.builtin.yum_repository: + name: rancher-rke2-common-latest + description: "Rancher RKE2 Common Latest" + baseurl: "https://rpm.rancher.io/rke2/latest/common/centos/{{ ansible_distribution_major_version }}/noarch" + gpgcheck: true + gpgkey: "https://rpm.rancher.io/public.key" + + - name: Set Official RKE2 version repo + ansible.builtin.yum_repository: + name: rancher-rke2-latest + description: "Rancher RKE2 Latest" + baseurl: "https://rpm.rancher.io/rke2/latest/{{ rke2_version.split('.')[:2] | join('.') }}/centos/{{ ansible_distribution_major_version }}/x86_64" + gpgcheck: true + gpgkey: "https://rpm.rancher.io/public.key" + +# Airgap +- name: Set Hauler as an airgap repo for RPM + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + - airgap_install + block: + - name: Set repo Hauler Airgap Server + ansible.builtin.yum_repository: + name: hauler + description: "Hauler Airgap Server" + baseurl: "http://{{ hauler_server }}:8080" + gpgcheck: true + gpgkey: "http://{{ hauler_server }}:8080/public.key" + +# Install needed only if selinux enabled +- name: Set Hauler as an airgap repo for RPM + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + - ansible_selinux['status'] == 'enabled' + block: + # https://docs.rke2.io/install/airgap => only if selinux activated common to tarball or rpm install + - name: Install common packages + ansible.builtin.dnf: + name: "{{ item }}" + state: present + with_items: + - iptables + - container-selinux + - libnetfilter_conntrack + - libnfnetlink + - libnftnl + - 
policycoreutils-python-utils + - cryptsetup + - rke2-common + - rke2-selinux diff --git a/roles/install_rke2_common/tasks/token.yml b/roles/install_rke2_common/tasks/token.yml index 564a0fce3..1497af433 100644 --- a/roles/install_rke2_common/tasks/token.yml +++ b/roles/install_rke2_common/tasks/token.yml @@ -1,16 +1,22 @@ -## Get Token if one exist -- name: Check if token already exist - ansible.builtin.stat: - path: "{{ mount_path }}/token" - register: token +--- +## Get Token if one exist from Hauler +- name: Get token from URL if exist + ansible.builtin.uri: + url: "http://{{ hauler_server }}:8080/token" + method: GET + validate_certs: false + return_content: true + register: token_response + failed_when: false -- block: - - name: Read node-token from NFS share - ansible.builtin.slurp: - src: "{{ mount_path }}/token" - register: node_token +- name: Set variable with token + ansible.builtin.set_fact: + rke2_config_token: "{{ token_response['content'] }}" + when: token_response.status == 200 - - name: Store Master node-token - ansible.builtin.set_fact: - rke2_config_token: "{{ node_token['content'] | b64decode | regex_replace('\n', '') }}" - when: token['stat'].exists \ No newline at end of file +## Get Token from first master +- name: Set token from first master for other nodes + ansible.builtin.set_fact: + rke2_config_token: "{{ hostvars[groups['RKE2_CONTROLLERS'][0]].rke2_config_token }}" + when: + - inventory_hostname not in groups['RKE2_CONTROLLERS'][0] diff --git a/roles/install_rke2_common/templates/registries.yaml.j2 b/roles/install_rke2_common/templates/registries.yaml.j2 new file mode 100644 index 000000000..a12a54aac --- /dev/null +++ b/roles/install_rke2_common/templates/registries.yaml.j2 @@ -0,0 +1,7 @@ +mirrors: + docker.io: + endpoint: + - http://{{ hauler_server }}:5000 + {{ hauler_server }}:5000: + endpoint: + - http://{{ hauler_server }}:5000 diff --git a/roles/install_rke2_common/tests/test.yml b/roles/install_rke2_common/tests/test.yml index 
8844a1bff..2cdc275d8 100644 --- a/roles/install_rke2_common/tests/test.yml +++ b/roles/install_rke2_common/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - - install_common + - install_rke2_common diff --git a/roles/install_rke2_controller/defaults/main.yml b/roles/install_rke2_controller/defaults/main.yml index e1c5be572..d69b36a9e 100644 --- a/roles/install_rke2_controller/defaults/main.yml +++ b/roles/install_rke2_controller/defaults/main.yml @@ -1,25 +1,36 @@ --- # defaults file for install_controler -rke2_version: "{{ global_rke2_version }}" admin_user: "{{ global_install_user }}" master: "{{ global_master_ip }}" +control_plane_endpoint: "{{ global_rke2_api_ip }}" +rpm_install: "{{ global_rpm_install | bool }}" +tarball_install: "{{ global_tarball_install | bool }}" +airgap_install: "{{ global_airgap_install | bool }}" -# Mount share -mount_path: "{{ global_directory_mount }}" -mount_rke2_path: "{{ mount_path }}/rke2_{{ rke2_version }}" -mount_utils_path: "{{ mount_path }}/utils" -mount_rke2_common_rpm_path: "{{ mount_rke2_path }}/{{ global_rke2_common_rpm_version }}.el{{ ansible_distribution_major_version }}.x86_64.rpm" -mount_rke2_selinux_rpm_path: "{{ mount_rke2_path }}/{{ global_rke2_selinux_rpm_version }}.el{{ ansible_distribution_major_version }}.noarch.rpm" +# Fileserver +hauler_server: "{{ global_hauler_ip }}" + +# RKE2 config +rke2_kubeconfig_file: "/etc/rancher/rke2/rke2.yaml" +rkub_local_kubeconfig: "~/.kube/rkub-{{ inventory_hostname }}.yaml" +rkub_context_name: "rkub-{{ inventory_hostname }}" + +# Controller options +rke2_data_dir: "{{ global_rke2_data_dir }}" +rke2_cluster_cidr: "{{ global_rke2_cluster_cidr }}" +rke2_service_cidr: "{{ global_rke2_service_cidr }}" +rke2_cni: "{{ global_rke2_cni }}" +rke2_profile_activated: "{{ global_rke2_profile_activated | bool }}" # Firewall rules -controller_firewalld_rules: +controller_firewalld_rules: inbound: - - name: rke2 - zone: public - 
ports: - - {port: 443, protocol: tcp} - - {port: 6443, protocol: tcp} - - {port: 2379, protocol: tcp} - - {port: 9345, protocol: tcp} - - {port: 10250, protocol: tcp} - - {port: 8472, protocol: udp} + - name: rke2 + zone: public + ports: + - {port: 443, protocol: tcp} + - {port: 6443, protocol: tcp} + - {port: 2379, protocol: tcp} + - {port: 9345, protocol: tcp} + - {port: 10250, protocol: tcp} + - {port: 8472, protocol: udp} diff --git a/roles/install_rke2_controller/handlers/main.yml b/roles/install_rke2_controller/handlers/main.yml index 75dd96ff6..d31d844ca 100644 --- a/roles/install_rke2_controller/handlers/main.yml +++ b/roles/install_rke2_controller/handlers/main.yml @@ -14,11 +14,18 @@ - name: Restart rke2-server ansible.builtin.systemd: name: rke2-server.service - state: restarted + state: stopped + notify: "Start rke2-server" + +- name: Start rke2-server + ansible.builtin.systemd: + name: rke2-server.service + state: started + enabled: true notify: "Service (re)started" - name: Restart rke2-agent ansible.builtin.systemd: name: rke2-agent.service state: restarted - notify: "Service (re)started" \ No newline at end of file + notify: "Service (re)started" diff --git a/roles/install_rke2_controller/meta/main.yml b/roles/install_rke2_controller/meta/main.yml index b00fd4401..e83fc63a0 100644 --- a/roles/install_rke2_controller/meta/main.yml +++ b/roles/install_rke2_controller/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/install_rke2_controller/tasks/admin.yml b/roles/install_rke2_controller/tasks/admin.yml index 7263a48eb..6b834eef9 100644 --- a/roles/install_rke2_controller/tasks/admin.yml +++ b/roles/install_rke2_controller/tasks/admin.yml @@ -1,41 +1,200 @@ -# Admin setup -- block: - - name: Remove old Kubeconfig file - ansible.builtin.file: - path: "$HOME/.kube/{{ inventory_hostname }}.yaml" - state: absent - become_user: "{{ admin_user }}" - - - name: Ensure .kube dir exist - ansible.builtin.file: - path: "$HOME/.kube" - state: directory - mode: 0700 - recurse: yes - become_user: "{{ admin_user }}" - - - name: Copy kubeconfig in Admin home dir - ansible.builtin.copy: - src: "/etc/rancher/rke2/rke2.yaml" - dest: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" - owner: "{{ admin_user }}" - group: "{{ admin_user }}" - force: true - remote_src: true - mode: '0600' - become: true - - - name: Change localhost with master ip in kubeconfig - ansible.builtin.lineinfile: - path: "$HOME/.kube/{{ inventory_hostname }}.yaml" - search_string: '127.0.0.1' - line: " server: https://{{ master }}:6443" - become_user: "{{ admin_user }}" - - - name: Update Admin .bashrc - ansible.builtin.blockinfile: - path: "$HOME/.bashrc" - block: | - export KUBECONFIG="~/.kube/{{ inventory_hostname }}.yaml" - marker: "# {mark} ANSIBLE setup Kubeconfig" - become_user: "{{ admin_user }}" \ No newline at end of file +--- +# Admin setup on first master +- name: Admin setup + become: true + block: + - name: Remove old Kubeconfig file + ansible.builtin.file: + path: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + state: absent + + - name: Ensure .kube dir exist + ansible.builtin.file: + path: "/home/{{ admin_user }}/.kube" + state: directory + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + mode: 0700 + recurse: true + + - name: Update Admin .bashrc + ansible.builtin.blockinfile: + path: "/home/{{ admin_user }}/.bashrc" + block: 
| + export KUBECONFIG=$HOME/.kube/{{ inventory_hostname }}.yaml + export CRI_CONFIG_FILE={{ rke2_data_dir }}/agent/etc/crictl.yaml + marker: "# {mark} ANSIBLE setup Kubeconfig and RKE2" + + - name: Copy kubeconfig to admin homedir + ansible.builtin.copy: + src: "{{ rke2_kubeconfig_file }}" + dest: "/home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml" + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + force: true + remote_src: true + mode: '0600' + + - name: Change localhost with control_plane_endpoint in kubeconfig + ansible.builtin.lineinfile: + path: /home/{{ admin_user }}/.kube/{{ inventory_hostname }}.yaml + search_string: '127.0.0.1' + line: " server: https://{{ control_plane_endpoint }}:6443" + +# Push kubectl on admin user +- name: Push Kubectl + become: true + block: + - name: Ensure dir exist + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + mode: 0750 + recurse: true + loop: + - "/home/{{ admin_user }}/.local" + - "/home/{{ admin_user }}/.local/bin" + + - name: Copy kubectl binary to admin user + ansible.builtin.copy: + src: "{{ rke2_data_dir }}/bin/kubectl" + dest: "/home/{{ admin_user }}/.local/bin/kubectl" + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + force: true + remote_src: true + mode: '0750' + + +# Install helm / k9s non-airgap +- name: Install admin tools not in Airgap + become: true + when: + - not airgap_install + block: + # helm + - name: Download Helm command line tool + ansible.builtin.uri: + url: https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + return_content: true + register: helm_installer + + - name: Install Helm + ansible.builtin.command: + cmd: bash + stdin: "{{ helm_installer.content }}" + creates: /usr/local/bin/helm + environment: + DESIRED_VERSION: "{{ helm_version | default('') }}" + + # k9s + - name: Get K9S binary latest release + ansible.builtin.uri: + url: 
"https://api.github.com/repos/derailed/k9s/releases/latest" + method: GET + return_content: true + register: k9s_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + k9s_version: "{{ k9s_release.json.tag_name | regex_replace('^v', '') }}" + + - name: Ensure .local/bin dir exist + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: 0700 + recurse: true + loop: + - "/tmp/k9s" + + - name: Download k9s bin to .local/bin + ansible.builtin.unarchive: + src: "https://github.com/derailed/k9s/releases/download/v{{ k9s_version }}/k9s_Linux_amd64.tar.gz" + dest: "/tmp/k9s" + mode: '0750' + remote_src: true + validate_certs: false + + - name: Copy k9s binary file + ansible.builtin.copy: + src: "/tmp/k9s/k9s" + dest: "/home/{{ admin_user }}/.local/bin/k9s" + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + force: true + remote_src: true + mode: '0750' + + - name: Cleanup tmp dir + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - "/tmp/k9s" + +# Install helm / k9s airgap +- name: Install admin tools in Airgap + become_user: "{{ admin_user }}" + become: true + when: + - airgap_install + block: + - name: Ensure dir exist + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: 0700 + recurse: true + loop: + - "/tmp/helm" + - "/tmp/k9s" + - "$HOME/.local/bin" + + - name: Download helm bin to .local/bin + ansible.builtin.unarchive: + src: "http://{{ hauler_server }}:8080/helm.tar.gz" + dest: "/tmp/helm" + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + mode: '0750' + remote_src: true + validate_certs: false + + - name: Copy helm binary file + ansible.builtin.copy: + src: "/tmp/helm/linux-amd64/helm" + dest: "$HOME/.local/bin/helm" + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + mode: '0750' + remote_src: true + + - name: Download k9s bin to .local/bin + ansible.builtin.unarchive: + src: "http://{{ hauler_server }}:8080/k9s.tar.gz" + dest: "/tmp/k9s" + owner: "{{ admin_user }}" + 
group: "{{ admin_user }}" + mode: '0750' + remote_src: true + validate_certs: false + + - name: Copy k9s binary file + ansible.builtin.copy: + src: "/tmp/k9s/k9s" + dest: "$HOME/.local/bin/k9s" + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + mode: '0750' + remote_src: true + + - name: Cleanup tmp dir + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - "/tmp/helm" + - "/tmp/k9s" diff --git a/roles/install_rke2_controller/tasks/arkade.yml b/roles/install_rke2_controller/tasks/arkade.yml deleted file mode 100644 index cf8a8b28a..000000000 --- a/roles/install_rke2_controller/tasks/arkade.yml +++ /dev/null @@ -1,51 +0,0 @@ -# As root -- name: Ensure that admin user can access - ansible.builtin.file: - path: "{{ mount_path }}/.." - mode: "0755" - recurse: true - become: true - -- name: Ensure admin user access mount_utils_path - ansible.builtin.file: - path: "{{ mount_utils_path }}/" - owner: "{{ admin_user }}" - group: "{{ admin_user }}" - recurse: true - become: true - -# As Admin User -- block: - - name: Ensure Arkade directory exist - ansible.builtin.file: - path: "$HOME/.arkade/bin" - state: directory - recurse: true - owner: "{{ admin_user }}" - group: "{{ admin_user }}" - mode: '0700' - - - name: Copy utils into .arkade/bin - ansible.builtin.copy: - src: "{{ mount_utils_path }}/{{ item }}" - dest: "$HOME/.arkade/bin/{{ item }}" - force: true - remote_src: true - mode: '0700' - loop: - - yq - - jq - - helm - - just - - kubectl - - k9s - - - name: Update .bashrc - ansible.builtin.blockinfile: - path: "$HOME/.bashrc" - block: | - export PATH=$PATH:$HOME/.arkade/bin - marker: "# {mark} ANSIBLE install arkade utils" - - become: true - become_user: "{{ admin_user }}" \ No newline at end of file diff --git a/roles/install_rke2_controller/tasks/config.yml b/roles/install_rke2_controller/tasks/config.yml new file mode 100644 index 000000000..64a5d2fb8 --- /dev/null +++ b/roles/install_rke2_controller/tasks/config.yml @@ -0,0 +1,64 @@ +--- +# 
As root +- name: Controller install as root + become: true + block: + # prerequis + - name: Create etcd group + ansible.builtin.group: + name: etcd + state: present + + - name: Create etcd user + ansible.builtin.user: + name: etcd + comment: "etcd user" + shell: /sbin/nologin + system: true + createhome: false + + - name: Create directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + recurse: true + mode: '0750' + with_items: + - /etc/rancher/rke2/ + - "{{ rke2_data_dir }}/server/manifests/" + - "{{ rke2_data_dir }}/agent/images" + + # Config + - name: Configure RKE2 config.yaml + ansible.builtin.template: + src: config.yaml.j2 + dest: /etc/rancher/rke2/config.yaml + mode: "0640" + + - name: Set up audit policy file + ansible.builtin.copy: + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: RequestResponse + dest: /etc/rancher/rke2/audit-policy.yaml + mode: "0640" + + - name: Set up ssl passthrough for nginx + ansible.builtin.copy: + content: | + apiVersion: helm.cattle.io/v1 + kind: HelmChartConfig + metadata: + name: rke2-ingress-nginx + namespace: kube-system + spec: + valuesContent: |- + controller: + config: + use-forwarded-headers: true + extraArgs: + enable-ssl-passthrough: true + dest: /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml + mode: "0640" diff --git a/roles/install_rke2_controller/tasks/install.yml b/roles/install_rke2_controller/tasks/install.yml index 02551bf64..3a1742e3d 100644 --- a/roles/install_rke2_controller/tasks/install.yml +++ b/roles/install_rke2_controller/tasks/install.yml @@ -1,141 +1,16 @@ -# As root -- block: - - - name: Create etcd group - ansible.builtin.group: - name: etcd - state: present - - - name: Create etcd user - ansible.builtin.user: - name: etcd - comment: "etcd user" - shell: /sbin/nologin - system: yes - createhome: false - - - name: Create directories - ansible.builtin.file: - path: "{{ item }}" - state: directory - recurse: true - mode: '0750' - 
with_items: - - /etc/rancher/rke2/ - - /var/lib/rancher/rke2/server/manifests/ - - /var/lib/rancher/rke2/agent/images - - - name: Configure RKE2 config.yaml - ansible.builtin.template: - src: config.yaml.j2 - dest: /etc/rancher/rke2/config.yaml - - - name: Set up audit policy file - ansible.builtin.copy: - content: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - - level: RequestResponse - dest: /etc/rancher/rke2/audit-policy.yaml - - - name: Set up ssl passthrough for nginx - ansible.builtin.copy: - content: | - apiVersion: helm.cattle.io/v1 - kind: HelmChartConfig - metadata: - name: rke2-ingress-nginx - namespace: kube-system - spec: - valuesContent: |- - controller: - config: - use-forwarded-headers: true - extraArgs: - enable-ssl-passthrough: true - dest: /var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml - - - name: Install RKE2 server node - ansible.builtin.shell: - cmd: "INSTALL_RKE2_ARTIFACT_PATH={{ mount_rke2_path }} INSTALL_RKE2_TYPE=server sh {{ mount_rke2_path }}/install.sh" - chdir: "{{ mount_rke2_path }}" - register: install_server_output - failed_when: false - - - ansible.builtin.debug: - var: install_server_output['stdout_lines'] - - # RPM - - name: Install packages common to controlers - ansible.builtin.dnf: - name: "{{ item }}" - state: present - with_items: - - zstd - - skopeo - become: true - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - - - name: Import a key from a file - ansible.builtin.rpm_key: - state: present - key: "{{ mount_rke2_path }}/public.key" - when: - - ansible_os_family == "RedHat" - - - name: Install RKE2 selinux packages (dependency for RKE2 common) - ansible.builtin.dnf: - name: "{{ mount_rke2_selinux_rpm_path }}" - state: present - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - - - name: Install RKE2 common packages - ansible.builtin.dnf: - name: "{{ mount_rke2_common_rpm_path }}" - state: present - when: - 
- ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - - # Service - - name: Mask RKE2 agent service on the first server - ansible.builtin.systemd: - name: "rke2-agent.service" - enabled: false - masked: true - - - name: Start and enable rke2-server - ansible.builtin.systemd: - name: rke2-server.service - state: started - enabled: yes - notify: "Service (re)started" - - - name: Wait for k8s apiserver - ansible.builtin.wait_for: - host: localhost - port: "6443" - state: present - timeout: 300 - - - name: Create symlink for containerd.sock - ansible.builtin.file: - src: /var/run/k3s/containerd/containerd.sock - dest: /var/run/containerd/containerd.sock - state: link - - - name: Update root .bashrc - ansible.builtin.blockinfile: - path: ~/.bashrc - block: | - export KUBECONFIG=/etc/rancher/rke2/rke2.yaml - export CRI_CONFIG_FILE=/var/lib/rancher/rke2/agent/etc/crictl.yaml - PATH=$PATH:/var/lib/rancher/rke2/bin - marker: "# {mark} ANSIBLE install_rke2_controler" - - become: true \ No newline at end of file +--- +- name: RKE2 Install tarball method + vars: + caller_role_name: controller + ansible.builtin.import_role: + name: install_rke2_common + tasks_from: tarball_install + when: tarball_install + +- name: RKE2 Install rpm method + vars: + caller_role_name: controller + ansible.builtin.import_role: + name: install_rke2_common + tasks_from: rpm_install + when: rpm_install diff --git a/roles/install_rke2_controller/tasks/localhost.yml b/roles/install_rke2_controller/tasks/localhost.yml index 20d7c7439..126478392 100644 --- a/roles/install_rke2_controller/tasks/localhost.yml +++ b/roles/install_rke2_controller/tasks/localhost.yml @@ -1,59 +1,74 @@ -# All tasks to do realize on ansible controler +--- +# All tasks on ansible controller +# kubeconfig to localhost - name: Fetch RKE2 kubeconfig to localhost - fetch: + ansible.builtin.fetch: src: "$HOME/.kube/{{ inventory_hostname }}.yaml" - dest: "~/.kube/{{ inventory_hostname }}.yaml" + dest: "{{ 
rkub_local_kubeconfig }}" flat: true validate_checksum: true become_user: "{{ admin_user }}" + become: true # check kubecm is installed on your localhost -- block: - - name: Check kubecm is installed (part of prerequisites) - shell: type kubecm - register: is_installed - - - block: - - name: Test if default context already exist in your kubeconfig. - shell: "kubecm list default > /dev/null 2>&1" - - - name: Message to you - debug: - msg: "Kubeconfig was added to your kubecm." +- name: Check if kubecm is installed on localhost + delegate_to: localhost + become: false + block: + - name: Check kubecm is installed (part of prerequisites) + ansible.builtin.command: which kubecm + register: is_installed + changed_when: false + failed_when: false - rescue: - - name: Add to kubecm - shell: "kubecm add -c -f ~/.kube/{{ inventory_hostname }}.yaml" - - - name: Switch to default - shell: "kubecm switch default" + - name: Add kubeconfig to kubecm + when: is_installed.rc == 0 + block: + - name: Test if context_name already exist in your kubeconfig. + ansible.builtin.command: "kubecm list {{ rkub_context_name }} > /dev/null 2>&1" + register: context_name_exist + changed_when: false + failed_when: false - rescue: - - name: No Kubecm - debug: - msg: "Kubecm is not installed on your localhost! Not a big problem, but I did not add it to your local kubeconfig." + - name: Add to kubecm + ansible.builtin.command: "kubecm add -cf {{ rkub_local_kubeconfig }} --context-name {{ rkub_context_name }}" + changed_when: false + when: context_name_exist.rc != 0 - always: - - name: Message to you - debug: - msg: "Kubeconfig of this cluster was imported in your localhost in ~/.kube/{{ inventory_hostname }}.yaml" + - name: Switch to new cluster + ansible.builtin.command: "kubecm switch {{ rkub_context_name }}" + changed_when: false - delegate_to: localhost - become: false + always: + - name: Message to you + ansible.builtin.debug: + msg: "Kubeconfig was added to your kubecm." 
+ - name: No Kubecm + when: is_installed.rc != 0 + block: + - name: No Kubecm + ansible.builtin.debug: + msg: "Kubecm is not installed on your localhost! Not a big problem, but I did not add it to your local kubeconfig." + + always: + - name: Message to you + ansible.builtin.debug: + msg: "Kubeconfig of this cluster was imported in your localhost in {{ rkub_local_kubeconfig }}" # Check Flux to Kube API - name: Check if flux 6443 is open between localhost and master delegate_to: localhost become: false - block: - - name: "Check" - uri: + block: + - name: Check kube api + ansible.builtin.uri: url: "https://{{ master }}:6443" validate_certs: false status_code: 401 rescue: - - fail: - msg: > - "Something wrong in your network since localhost does not reach master on 6443" + - name: Send fail message + ansible.builtin.fail: + msg: > + "Something wrong in your network since localhost does not reach master on 6443" diff --git a/roles/install_rke2_controller/tasks/main.yml b/roles/install_rke2_controller/tasks/main.yml index fbbf82761..85386a98b 100644 --- a/roles/install_rke2_controller/tasks/main.yml +++ b/roles/install_rke2_controller/tasks/main.yml @@ -6,6 +6,7 @@ - "distribution" - "distribution_major_version" - "default_ipv4" + - "selinux" - "!all,!min" when: > ansible_os_family is not defined @@ -26,23 +27,8 @@ - ansible_facts['services']['firewalld.service']['state'] == "running" tags: firewalld -# Here, we start export NFS on first controler and mounting on all other controlers -- name: Set NFS export on first node - ansible.builtin.import_role: - name: set_nfs_export - tasks_from: main - when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] - tags: nfs_export - -- name: Set NFS mount on other nodes - ansible.builtin.import_role: - name: set_nfs_mount - tasks_from: main - when: inventory_hostname in groups['RKE2_CONTROLLERS'][1:] - tags: nfs_mount - # Start install -- name: RKE2 common to worker and controler tasks +- name: RKE2 common tasks vars: 
caller_role_name: controller ansible.builtin.import_role: @@ -50,22 +36,27 @@ tasks_from: main tags: common +# Install first node +- name: RKE2 config controler before install + ansible.builtin.import_tasks: config.yml + when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] + tags: config + - name: RKE2 Install controler ansible.builtin.import_tasks: install.yml when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] tags: install +- name: RKE2 start controller + ansible.builtin.import_tasks: start.yml + when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] + tags: start + - name: RKE2 Get Token ansible.builtin.import_tasks: token.yml when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] tags: token -# Admin setup -- name: Arkade utils push in admin user - ansible.builtin.import_tasks: arkade.yml - when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] - tags: arkade - - name: Admin user setup ansible.builtin.import_tasks: admin.yml when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] @@ -76,20 +67,14 @@ when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] tags: localhost -# Utils -- name: Install registry on first node - vars: - caller_role_name: controller - ansible.builtin.import_role: - name: install_utils_registry - tasks_from: main - when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] - tags: registry +# Install other controllers +# ... 
+# Utils - name: RKE2 install nerdctl vars: caller_role_name: controller ansible.builtin.import_role: name: install_utils_nerdctl tasks_from: main - tags: nerdctl \ No newline at end of file + tags: nerdctl diff --git a/roles/install_rke2_controller/tasks/start.yml b/roles/install_rke2_controller/tasks/start.yml new file mode 100644 index 000000000..464c0b0f0 --- /dev/null +++ b/roles/install_rke2_controller/tasks/start.yml @@ -0,0 +1,43 @@ +--- +# Finish install after selinux +- name: Post install + become: true + block: + # Service + - name: Mask RKE2 agent service on the first server + ansible.builtin.systemd: + name: "rke2-agent.service" + enabled: false + masked: true + retries: 5 + + - name: Start and enable rke2-server + ansible.builtin.systemd: + name: rke2-server.service + state: started + enabled: true + notify: "Service (re)started" + async: 600 + poll: 60 + + - name: Wait for k8s apiserver + ansible.builtin.wait_for: + host: localhost + port: "6443" + state: present + timeout: 300 + + - name: Create symlink for containerd.sock + ansible.builtin.file: + src: /var/run/k3s/containerd/containerd.sock + dest: /var/run/containerd/containerd.sock + state: link + + - name: Update root .bashrc + ansible.builtin.blockinfile: + path: ~/.bashrc + block: | + export KUBECONFIG={{ rke2_kubeconfig_file }} + export CRI_CONFIG_FILE={{ rke2_data_dir }}/agent/etc/crictl.yaml + PATH=$PATH:{{ rke2_data_dir }}/bin + marker: "# {mark} ANSIBLE install_rke2_controler" diff --git a/roles/install_rke2_controller/tasks/token.yml b/roles/install_rke2_controller/tasks/token.yml index 7ce87f536..dd9061596 100644 --- a/roles/install_rke2_controller/tasks/token.yml +++ b/roles/install_rke2_controller/tasks/token.yml @@ -1,24 +1,18 @@ +--- +# On controller[0] as root +- name: Get token on first master + when: inventory_hostname in groups['RKE2_CONTROLLERS'][0] + become: true + block: + - name: Wait for node-token + ansible.builtin.wait_for: + path: "{{ rke2_data_dir }}/server/node-token" 
-# As root -- block: - # Get and Write Token - - name: Wait for node-token - ansible.builtin.wait_for: - path: /var/lib/rancher/rke2/server/node-token - - - name: Read node-token from master - ansible.builtin.slurp: - src: /var/lib/rancher/rke2/server/node-token - register: node_token - - - name: Store Master node-token - ansible.builtin.set_fact: - rke2_config_token: "{{ node_token['content'] | b64decode | regex_replace('\n', '') }}" - - - name: Write token on mount path - ansible.builtin.copy: - content: "{{ rke2_config_token }}" - dest: "{{ mount_path }}/token" - follow: yes + - name: Read node-token from first master + ansible.builtin.slurp: + src: "{{ rke2_data_dir }}/server/node-token" + register: node_token - become: true + - name: Store Master node-token + ansible.builtin.set_fact: + rke2_config_token: "{{ node_token['content'] | b64decode | regex_replace('\n', '') }}" diff --git a/roles/install_rke2_controller/templates/config.yaml.j2 b/roles/install_rke2_controller/templates/config.yaml.j2 index fe733013e..92b2c3a71 100644 --- a/roles/install_rke2_controller/templates/config.yaml.j2 +++ b/roles/install_rke2_controller/templates/config.yaml.j2 @@ -1,23 +1,30 @@ -#profile: cis-1.23 +{% if inventory_hostname in groups['RKE2_CONTROLLERS'][0] %} +{% else %} +server: https://{{ master }}:9345 +{% endif %} +{% if rke2_config_token is defined %} +token: {{ rke2_config_token }} +{% endif %} +# Common +{% if rke2_profile_activated %} +profile: cis +{% endif %} +node-name: {{ inventory_hostname }} +write-kubeconfig-mode: 0600 +data-dir: {{ rke2_data_dir }} +cluster-cidr: {{ rke2_cluster_cidr }} +service-cidr: {{ rke2_service_cidr }} +# Config Controller selinux: true +{% if ( rke2_cni is defined ) and ( rke2_cni | type_debug == "list" ) %} +cni: +{% for cni in rke2_cni %} + - {{ cni }} +{% endfor %} +{% else %} +cni: {{ rke2_cni }} +{% endif %} +tls-san: + - cluster.local + - {{ control_plane_endpoint }} secrets-encryption: true -write-kubeconfig-mode: 0600 
-kube-controller-manager-arg: -- bind-address=127.0.0.1 -- use-service-account-credentials=true -- tls-min-version=VersionTLS12 -- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -kube-scheduler-arg: -- tls-min-version=VersionTLS12 -- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -kube-apiserver-arg: -- tls-min-version=VersionTLS12 -- tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -- authorization-mode=RBAC,Node -- anonymous-auth=false -- audit-policy-file=/etc/rancher/rke2/audit-policy.yaml -- audit-log-mode=blocking-strict- audit-log-maxage=30 -kubelet-arg: -- protect-kernel-defaults=true -- read-only-port=0 -- authorization-mode=Webhook \ No newline at end of file diff --git a/roles/install_rke2_controller/templates/old-config.yaml.j2 b/roles/install_rke2_controller/templates/old-config.yaml.j2 new file mode 100644 index 000000000..c10b3a1da --- /dev/null +++ b/roles/install_rke2_controller/templates/old-config.yaml.j2 @@ -0,0 +1,4 @@ +#profile: cis-1.23 +selinux: true +secrets-encryption: true +write-kubeconfig-mode: 0600 diff --git a/roles/install_rke2_controller/tests/test.yml b/roles/install_rke2_controller/tests/test.yml index d63602c6b..e5e1fd5e4 100644 --- a/roles/install_rke2_controller/tests/test.yml +++ b/roles/install_rke2_controller/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - - 
install_controler + - install_rke2_controller diff --git a/roles/install_rke2_worker/defaults/main.yml b/roles/install_rke2_worker/defaults/main.yml index 9d33bef7e..47d88bec0 100644 --- a/roles/install_rke2_worker/defaults/main.yml +++ b/roles/install_rke2_worker/defaults/main.yml @@ -1,22 +1,26 @@ --- # defaults file for install_worker -rke2_version: "{{ global_rke2_version }}" admin_user: "{{ global_install_user }}" master: "{{ global_master_ip }}" +control_plane_endpoint: "{{ global_rke2_api_ip }}" +rpm_install: "{{ global_rpm_install | bool }}" +tarball_install: "{{ global_tarball_install | bool }}" +airgap_install: "{{ global_airgap_install | bool }}" -# Mount share -mount_path: "{{ global_directory_mount }}" -mount_rke2_path: "{{ mount_path }}/rke2_{{ rke2_version }}" -mount_utils_path: "{{ mount_path }}/utils" -mount_rke2_common_rpm_path: "{{ mount_rke2_path }}/{{ global_rke2_common_rpm_version }}.el{{ ansible_distribution_major_version }}.x86_64.rpm" -mount_rke2_selinux_rpm_path: "{{ mount_rke2_path }}/{{ global_rke2_selinux_rpm_version }}.el{{ ansible_distribution_major_version }}.noarch.rpm" +# Fileserver +hauler_server: "{{ global_hauler_ip }}" + +# Worker options +rke2_data_dir: "{{ global_rke2_data_dir }}" +rke2_cluster_cidr: "{{ global_rke2_cluster_cidr }}" +rke2_service_cidr: "{{ global_rke2_service_cidr }}" +rke2_profile_activated: "{{ global_rke2_profile_activated | bool }}" # Firewall rules worker_firewalld_rules: inbound: - - name: rke2 - zone: public - ports: - - {port: 10250, protocol: tcp} - - {port: 8472, protocol: udp} - \ No newline at end of file + - name: rke2 + zone: public + ports: + - {port: 10250, protocol: tcp} + - {port: 8472, protocol: udp} diff --git a/roles/install_rke2_worker/meta/main.yml b/roles/install_rke2_worker/meta/main.yml index 2308f130e..983f829fa 100644 --- a/roles/install_rke2_worker/meta/main.yml +++ b/roles/install_rke2_worker/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - 
min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: diff --git a/roles/install_rke2_worker/tasks/config.yml b/roles/install_rke2_worker/tasks/config.yml new file mode 100644 index 000000000..d2a8e23f1 --- /dev/null +++ b/roles/install_rke2_worker/tasks/config.yml @@ -0,0 +1,20 @@ +--- +# As root +- name: Install RKE2 worker + become: true + block: + # prerequis + - name: Create directories + ansible.builtin.file: + path: "{{ item }}" + state: directory + recurse: true + mode: '0750' + with_items: + - /etc/rancher/rke2/ + + - name: Configure RKE2 config.yaml + ansible.builtin.template: + src: config.yaml.j2 + dest: /etc/rancher/rke2/config.yaml + mode: "0640" diff --git a/roles/install_rke2_worker/tasks/install.yml b/roles/install_rke2_worker/tasks/install.yml index 2d67d95a5..7a39f2188 100644 --- a/roles/install_rke2_worker/tasks/install.yml +++ b/roles/install_rke2_worker/tasks/install.yml @@ -1,64 +1,17 @@ -- block: - - name: Create directories - ansible.builtin.file: - path: "{{ item }}" - state: directory - recurse: true - mode: '0750' - with_items: - - /etc/rancher/rke2/ - - - name: Configure RKE2 config.yaml - ansible.builtin.template: - src: config.yaml.j2 - dest: /etc/rancher/rke2/config.yaml - - - name: Install RKE2 worker nodes - ansible.builtin.shell: - cmd: "INSTALL_RKE2_ARTIFACT_PATH={{ mount_rke2_path }} INSTALL_RKE2_TYPE=agent sh {{ mount_rke2_path }}/install.sh" - chdir: "{{ mount_rke2_path }}" - register: install_worker_output - failed_when: false - - - ansible.builtin.debug: - var: install_worker_output['stdout_lines'] - - # RPM - - name: Import a key from a file - ansible.builtin.rpm_key: - state: present - key: "{{ mount_rke2_path }}/public.key" - when: - - ansible_os_family == "RedHat" - - - name: Install RKE2 selinux packages (dependency for RKE2 common) - ansible.builtin.dnf: - name: "{{ mount_rke2_selinux_rpm_path }}" 
- state: present - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - - - name: Install RKE2 common packages - ansible.builtin.dnf: - name: "{{ mount_rke2_common_rpm_path }}" - state: present - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - - # Start - - name: Mask RKE2 agent service on the first server - ansible.builtin.systemd: - name: "rke2-server.service" - enabled: false - masked: true - - - name: Start and enable rke2-server - ansible.builtin.systemd: - name: rke2-agent.service - state: started - enabled: yes - notify: "Service (re)started" - - become: true \ No newline at end of file +--- +# Install regarding choosen method +- name: RKE2 Install tarball method + vars: + caller_role_name: worker + ansible.builtin.import_role: + name: install_rke2_common + tasks_from: tarball_install + when: tarball_install + +- name: RKE2 Install rpm method + vars: + caller_role_name: worker + ansible.builtin.import_role: + name: install_rke2_common + tasks_from: rpm_install + when: rpm_install diff --git a/roles/install_rke2_worker/tasks/main.yml b/roles/install_rke2_worker/tasks/main.yml index 5bdb3dc9a..035d64eb1 100644 --- a/roles/install_rke2_worker/tasks/main.yml +++ b/roles/install_rke2_worker/tasks/main.yml @@ -6,6 +6,7 @@ - "distribution" - "distribution_major_version" - "default_ipv4" + - "selinux" - "!all,!min" when: > ansible_os_family is not defined @@ -26,15 +27,8 @@ - ansible_facts['services']['firewalld.service']['state'] == "running" tags: firewalld -# NFS share -- name: Set NFS mount on other nodes - ansible.builtin.import_role: - name: set_nfs_mount - tasks_from: main - tags: nfs_mount - -# Start install -- name: RKE2 common to worker and controler tasks +# Start install workers +- name: RKE2 common tasks vars: caller_role_name: worker ansible.builtin.import_role: @@ -42,23 +36,23 @@ tasks_from: main tags: common +- name: RKE2 config worker before install + 
ansible.builtin.import_tasks: config.yml + tags: config + - name: RKE2 Install worker ansible.builtin.import_tasks: install.yml tags: install -# Utils -- name: Install registry on worker - vars: - caller_role_name: worker - ansible.builtin.import_role: - name: install_utils_registry - tasks_from: main - tags: registry +- name: RKE2 start worker + ansible.builtin.import_tasks: start.yml + tags: start +# Utils - name: RKE2 install nerdctl vars: caller_role_name: worker ansible.builtin.import_role: name: install_utils_nerdctl tasks_from: main - tags: nerdctl \ No newline at end of file + tags: nerdctl diff --git a/roles/install_rke2_worker/tasks/start.yml b/roles/install_rke2_worker/tasks/start.yml new file mode 100644 index 000000000..c49e0216f --- /dev/null +++ b/roles/install_rke2_worker/tasks/start.yml @@ -0,0 +1,22 @@ +--- +# As root +- name: Start RKE2 worker + become: true + block: + # Start + - name: Mask RKE2 server service on the first server + ansible.builtin.systemd: + name: "rke2-server.service" + enabled: false + masked: true + retries: 5 + + - name: Start and enable rke2-agent + ansible.builtin.systemd: + name: rke2-agent.service + state: started + enabled: true + notify: "Service (re)started" + register: rke2_service + until: rke2_service is succeeded + retries: 5 diff --git a/roles/install_rke2_worker/templates/config.yaml.j2 b/roles/install_rke2_worker/templates/config.yaml.j2 index 093fb65f7..8e8e3d70f 100644 --- a/roles/install_rke2_worker/templates/config.yaml.j2 +++ b/roles/install_rke2_worker/templates/config.yaml.j2 @@ -1,8 +1,18 @@ server: https://{{ master }}:9345 token: {{ rke2_config_token }} + +# Common +{% if rke2_profile_activated %} +profile: cis +{% endif %} +node-name: {{ inventory_hostname }} write-kubeconfig-mode: 0600 -#profile: cis-1.23 +data-dir: {{ rke2_data_dir }} +cluster-cidr: {{ rke2_cluster_cidr }} +service-cidr: {{ rke2_service_cidr }} + +# Config Worker kube-apiserver-arg: - "authorization-mode=RBAC,Node" kubelet-arg: -- 
"protect-kernel-defaults=true" \ No newline at end of file +- "protect-kernel-defaults=true" diff --git a/roles/install_rke2_worker/templates/old-config.yaml.j2 b/roles/install_rke2_worker/templates/old-config.yaml.j2 new file mode 100644 index 000000000..012222154 --- /dev/null +++ b/roles/install_rke2_worker/templates/old-config.yaml.j2 @@ -0,0 +1,8 @@ +server: https://{{ master }}:9345 +token: {{ rke2_config_token }} +write-kubeconfig-mode: 0600 +#profile: cis-1.23 +kube-apiserver-arg: +- "authorization-mode=RBAC,Node" +kubelet-arg: +- "protect-kernel-defaults=true" diff --git a/roles/install_rke2_worker/tests/test.yml b/roles/install_rke2_worker/tests/test.yml index 4f63b9488..a5cba8a3a 100644 --- a/roles/install_rke2_worker/tests/test.yml +++ b/roles/install_rke2_worker/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - install_rke2_worker diff --git a/roles/install_utils_kubevip/README.md b/roles/install_utils_kubevip/README.md new file mode 100644 index 000000000..13da312c7 --- /dev/null +++ b/roles/install_utils_kubevip/README.md @@ -0,0 +1,72 @@ +Role Name +========= + +Role to install kubevip for HA mode + +Requirements +------------ + +*Example below show that the roles have two flavors and different requirements in functions of what you want* + +if idm set to true: +- Access to a IDM server if you want to create users account. 
+- Credentials access to connect to IDM + +if idm set to false: +- create local account on Linux servers + +Role Variables +-------------- + +| **VarName** | **Type** | **Content** | **Mandatory** | +|--------------------|----------|---------------------------|:-------------:| +| idm | boolean | true / false | x | +| svc_account | string | Service Account | x | +| svc_account_passwd | string | pwd (can be omited) | | +| svc_group | string | Group | | +| svc_owner | string | Owner of the account | if idm true | +| list_svc_account | list | Users which goes in group | if idm true | +| idm_server | string | Service Account PWD | if idm true | +| idm_pwd | string | sudo group | if idm true | + +**Mandatory** is the minimum variables that need to be set to make the role work +*the variables not mandatory either have a default value defined or can be omited* + +Dependencies +------------ + +Dependencies with some others roles (if there is some). + +Example Playbook +---------------- +Give some example about how to use or implement your Roles + + +```yml +- name: Trigger Role Example in a Playbooks + hosts: RANDOM_GROUP_DEFINED_IN_YOUR_INVENTORY + remote_user: ansible + become: true + + roles: + - { role: 'example', tags: 'example' } +``` + +```yml +# Example for one user +- import_role: + name: "example" + vars: + svc_account: "{{ tomcat_svc_account }}" + svc_group: "{{ tomcat_svc_group }}" +``` + +License +------- + +Apache-2.0 + +Author Information +------------------ + +morze.baltyk@proton.me diff --git a/roles/install_utils_kubevip/defaults/main.yml b/roles/install_utils_kubevip/defaults/main.yml new file mode 100644 index 000000000..0b618b922 --- /dev/null +++ b/roles/install_utils_kubevip/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# defaults file for install_utils_kubevip +kubevip_version: "{{ global_kubevip_version }}" +kubevip_svc_enable: true +kubevip_ipvs_lb_enable: +kubevip_image: "ghcr.io/kube-vip/kube-vip:{{ kubevip_version }}" +#kubevip_args: 
+#rke2_kubevip_cloud_provider_image: ghcr.io/kube-vip/kube-vip-cloud-provider:v0.0.4 diff --git a/roles/install_utils_kubevip/handlers/main.yml b/roles/install_utils_kubevip/handlers/main.yml new file mode 100644 index 000000000..7d5492690 --- /dev/null +++ b/roles/install_utils_kubevip/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for install_utils_kubevip diff --git a/roles/set_nfs_export/meta/main.yml b/roles/install_utils_kubevip/meta/main.yml similarity index 94% rename from roles/set_nfs_export/meta/main.yml rename to roles/install_utils_kubevip/meta/main.yml index 39d40421a..bce3a4125 100644 --- a/roles/set_nfs_export/meta/main.yml +++ b/roles/install_utils_kubevip/meta/main.yml @@ -2,7 +2,7 @@ galaxy_info: standalone: false # Part of a collection author: morze.baltyk@proton.me - description: Export NFS volume from Controler to share images + description: Role to install kubevip for HA mode company: Opensource # If the issue tracker for your role is not on github, uncomment the @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/install_utils_kubevip/tasks/install_airgap.yml b/roles/install_utils_kubevip/tasks/install_airgap.yml new file mode 100644 index 000000000..0752c1377 --- /dev/null +++ b/roles/install_utils_kubevip/tasks/install_airgap.yml @@ -0,0 +1,19 @@ +--- +- name: Create the RKE2 manifests directory + ansible.builtin.file: + state: directory + path: "{{ rke2_data_path }}/server/manifests" + owner: root + group: root + mode: 0700 + +- name: Copy kube-vip files to first server + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ rke2_data_path }}/server/manifests/{{ item | basename | regex_replace('.j2$', '') }}" + owner: root + group: root + mode: 0664 + loop: + - "templates/airgap/kube-vip.yml.j2" + - "templates/airgap/kube-vip-rbac.yml.j2" diff --git a/roles/install_utils_kubevip/tasks/install_direct.yml b/roles/install_utils_kubevip/tasks/install_direct.yml new file mode 100644 index 000000000..e69de29bb diff --git a/roles/install_utils_kubevip/tasks/main.yml b/roles/install_utils_kubevip/tasks/main.yml new file mode 100644 index 000000000..0703b64b7 --- /dev/null +++ b/roles/install_utils_kubevip/tasks/main.yml @@ -0,0 +1,5 @@ +--- +# tasks file for install_utils_kubevip +- name: Install in airgap mode + ansible.builtin.import_tasks: install_airgap.yml + when: caller_role_name == "controller" diff --git a/roles/install_utils_kubevip/templates/airgap/kube-vip-rbac.yml.j2 b/roles/install_utils_kubevip/templates/airgap/kube-vip-rbac.yml.j2 new file mode 100644 index 000000000..0480d0ec6 --- /dev/null +++ b/roles/install_utils_kubevip/templates/airgap/kube-vip-rbac.yml.j2 @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-vip + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:kube-vip-role +rules: + - apiGroups: [""] + resources: ["services", 
"services/status", "nodes", "endpoints"] + verbs: ["list","get","watch", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["list", "get", "watch", "update", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-vip-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-vip-role +subjects: +- kind: ServiceAccount + name: kube-vip + namespace: kube-system diff --git a/roles/install_utils_kubevip/templates/airgap/kube-vip.yml.j2 b/roles/install_utils_kubevip/templates/airgap/kube-vip.yml.j2 new file mode 100644 index 000000000..a22905301 --- /dev/null +++ b/roles/install_utils_kubevip/templates/airgap/kube-vip.yml.j2 @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kube-vip-ds + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + app.kubernetes.io/name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kube-vip-ds + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: vip_interface + value: "{{ rke2_interface | default(ansible_default_ipv4.interface) }}" + - name: port + value: "{{ rke2_api_port | default('6443')}}" + - name: vip_cidr + value: "{{ rke2_api_cidr | default('24') }}" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: enableUPNP + value: "false" + - name: svc_enable + value: "{{ kubevip_svc_enable }}" + - name: svc_election + value: "true" + - name: vip_leaderelection + value: "true" + - name: 
vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: address + value: "{{ rke2_api_ip }}" + - name: prometheus_server + value: :2112 + - name: lb_enable + value: "{{ kubevip_ipvs_lb_enable }}" +{% if kubevip_args is defined %} +{% for item in kubevip_args %} + - name: {{ item.param }} + value: {{ item.value }} +{% endfor %} +{% endif %} + image: "{{ kubevip_image }}" + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + hostNetwork: true + serviceAccountName: kube-vip + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + updateStrategy: {} diff --git a/roles/set_nfs_export/tests/inventory b/roles/install_utils_kubevip/tests/inventory similarity index 90% rename from roles/set_nfs_export/tests/inventory rename to roles/install_utils_kubevip/tests/inventory index 878877b07..2fbb50c4a 100644 --- a/roles/set_nfs_export/tests/inventory +++ b/roles/install_utils_kubevip/tests/inventory @@ -1,2 +1 @@ localhost - diff --git a/roles/install_utils_kubevip/tests/test.yml b/roles/install_utils_kubevip/tests/test.yml new file mode 100644 index 000000000..945d6a17e --- /dev/null +++ b/roles/install_utils_kubevip/tests/test.yml @@ -0,0 +1,6 @@ +--- +- name: Test + hosts: localhost + remote_user: root + roles: + - install_utils_kubevip diff --git a/roles/install_utils_nerdctl/defaults/main.yml b/roles/install_utils_nerdctl/defaults/main.yml index 9571b36d0..a73761e06 100644 --- a/roles/install_utils_nerdctl/defaults/main.yml +++ b/roles/install_utils_nerdctl/defaults/main.yml @@ -1,6 +1,5 @@ --- # defaults file for install_utils_nerdctl -# Mount -mount_path: "{{ global_directory_mount }}" -mount_pkg_nerdctl: "{{ global_directory_mount }}/utils/nerdctl" \ No newline at end of file +# Fileserver +hauler_server: "{{ global_hauler_ip }}" diff --git a/roles/install_utils_nerdctl/meta/main.yml 
b/roles/install_utils_nerdctl/meta/main.yml index 25204434f..09b78780a 100644 --- a/roles/install_utils_nerdctl/meta/main.yml +++ b/roles/install_utils_nerdctl/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: diff --git a/roles/install_utils_nerdctl/tasks/install.yml b/roles/install_utils_nerdctl/tasks/install.yml index 1430066b5..7f47d9af1 100644 --- a/roles/install_utils_nerdctl/tasks/install.yml +++ b/roles/install_utils_nerdctl/tasks/install.yml @@ -1,36 +1,123 @@ -- name: Create the NERDCTL directory - ansible.builtin.file: - path: /etc/nerdctl - state: directory - mode: '0754' - -- name: Check if file does exist - ansible.builtin.stat: - path: /usr/local/bin/nerdctl - register: file_data - -- name: Copy Nerdctl bin into /usr/local/bin - ansible.builtin.copy: - src: "{{ mount_pkg_nerdctl }}" - dest: /usr/local/bin/ - owner: "{{ admin_user }}" - group: "{{ admin_user }}" - mode: '0750' - remote_src: true - when: not file_data.stat.exists - -- name: Copy NERDCTL configuration file - ansible.builtin.template: - src: nerdctl.toml - dest: /etc/nerdctl - owner: "{{ admin_user }}" - group: "{{ admin_user }}" - mode: '0644' - -- name: Copy SUDO configuration - ansible.builtin.template: - src: secure-path - dest: /etc/sudoers.d - owner: "root" - group: "root" - mode: '0440' \ No newline at end of file +--- +# Non-airgap +- name: Install Nerdctl in non-Airgap + become: true + when: + - not airgap_install + block: + - name: Create the NERDCTL directory + ansible.builtin.file: + path: /etc/nerdctl + state: directory + mode: '0754' + + - name: Check if file does exist + ansible.builtin.stat: + path: /usr/local/bin/nerdctl + register: file_data + + - name: Download and install nerdctl when not already here + when: not file_data.stat.exists + block: + - name: Get 
Nerdctl binary latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/containerd/nerdctl/releases/latest" + method: GET + return_content: true + register: nerdctl_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + nerdctl_version: "{{ nerdctl_release.json.tag_name | regex_replace('^v', '') }}" + + - name: Create tmp dir + ansible.builtin.file: + path: /tmp/nerdctl + state: directory + mode: '0750' + + - name: Download Nerdctl bin into /usr/local/bin + ansible.builtin.unarchive: + src: "https://github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-linux-amd64.tar.gz" + dest: "/tmp/nerdctl" + mode: '0750' + remote_src: true + validate_certs: true + + - name: Copy nerdctl binary file + ansible.builtin.copy: + src: "/tmp/nerdctl/nerdctl" + dest: "/usr/local/bin/nerdctl" + mode: '0750' + remote_src: true + + - name: Cleanup tmp dir + ansible.builtin.file: + path: /tmp/nerdctl + state: absent + +# Airgap +- name: Install Nerdctl in Airgap + become: true + when: + - airgap_install + block: + - name: Create the NERDCTL directory + ansible.builtin.file: + path: /etc/nerdctl + state: directory + mode: '0754' + + - name: Check if file does exist + ansible.builtin.stat: + path: /usr/local/bin/nerdctl + register: file_data + + - name: Push nerdctl when not already here + when: not file_data.stat.exists + block: + - name: Create tmp dir + ansible.builtin.file: + path: /tmp/nerdctl + state: directory + mode: '0750' + + - name: Download Nerdctl bin into /usr/local/bin + ansible.builtin.unarchive: + src: "http://{{ hauler_server }}:8080/nerdctl.tar.gz" + dest: "/tmp/nerdctl" + mode: '0750' + remote_src: true + validate_certs: false + + - name: Copy nerdctl binary file + ansible.builtin.copy: + src: "/tmp/nerdctl/nerdctl" + dest: "/usr/local/bin/nerdctl" + mode: '0750' + remote_src: true + + - name: Cleanup tmp dir + ansible.builtin.file: + path: /tmp/nerdctl + state: absent + +# Common +- 
name: Config Nerdctl + become: true + block: + - name: Copy NERDCTL configuration file + ansible.builtin.template: + src: nerdctl.toml + dest: /etc/nerdctl + owner: "{{ admin_user }}" + group: "{{ admin_user }}" + mode: '0644' + + - name: Copy SUDO configuration + ansible.builtin.template: + src: secure-path + dest: /etc/sudoers.d + owner: "root" + group: "root" + mode: '0440' diff --git a/roles/install_utils_nerdctl/tasks/main.yml b/roles/install_utils_nerdctl/tasks/main.yml index a56565618..de428d08b 100644 --- a/roles/install_utils_nerdctl/tasks/main.yml +++ b/roles/install_utils_nerdctl/tasks/main.yml @@ -1,5 +1,7 @@ --- # tasks file for install_utils_nerdctl +- name: Tasks prerequis + ansible.builtin.import_tasks: prerequis.yml - name: Tasks to install nerdctl - ansible.builtin.import_tasks: install.yml \ No newline at end of file + ansible.builtin.import_tasks: install.yml diff --git a/roles/install_utils_nerdctl/tasks/prerequis.yml b/roles/install_utils_nerdctl/tasks/prerequis.yml new file mode 100644 index 000000000..94eb82950 --- /dev/null +++ b/roles/install_utils_nerdctl/tasks/prerequis.yml @@ -0,0 +1,24 @@ +--- +# packages on all nodes needed +- name: Install packages common + ansible.builtin.dnf: + name: "{{ item }}" + state: present + with_items: + - tar + - gzip + become: true + when: + - ansible_os_family == "RedHat" + - ansible_distribution_major_version | int >= 8 + +- name: Install packages common + ansible.builtin.apt: + name: "{{ item }}" + state: present + with_items: + - tar + - gzip + become: true + when: + - ansible_os_family == "Debian" diff --git a/roles/install_utils_nerdctl/tests/test.yml b/roles/install_utils_nerdctl/tests/test.yml index d84884926..3c30556b7 100644 --- a/roles/install_utils_nerdctl/tests/test.yml +++ b/roles/install_utils_nerdctl/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - install_utils_nerdctl diff --git a/roles/install_utils_registry/README.md 
b/roles/install_utils_registry/README.md deleted file mode 100644 index f6c579a29..000000000 --- a/roles/install_utils_registry/README.md +++ /dev/null @@ -1,14 +0,0 @@ -Role Name -========= - -Install a minimal localhost docker registry - -License -------- - -Apache-2.0 - -Author Information ------------------- - -morze.baltyk@proton.me \ No newline at end of file diff --git a/roles/install_utils_registry/defaults/main.yml b/roles/install_utils_registry/defaults/main.yml deleted file mode 100644 index 52145ba8a..000000000 --- a/roles/install_utils_registry/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# defaults file for install_local_registry - -# Mount share -mount_path: "{{ global_directory_mount }}" -mount_registry_path: "{{ mount_path }}/registry" -mount_images_path: "{{ mount_path }}/images" -mount_registry_tar: "{{ mount_path }}/images/registry/registry.tar" - -# General -rke2_images_path: "/var/lib/rancher/rke2/agent/images/" -registry_namespace: "kube-registry" diff --git a/roles/install_utils_registry/handlers/main.yml b/roles/install_utils_registry/handlers/main.yml deleted file mode 100644 index e9f34c1da..000000000 --- a/roles/install_utils_registry/handlers/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# handlers file for install_local_registry -- name: Flush handlers - ansible.builtin.meta: flush_handlers - -- name: Config file changed - ansible.builtin.set_fact: - rke2_restart_needed: true - -- name: Service (re)started - ansible.builtin.set_fact: - rke2_restart_needed: false - -- name: Restart rke2-server - ansible.builtin.systemd: - name: rke2-server.service - state: restarted - notify: "Service (re)started" - -- name: Restart rke2-agent - ansible.builtin.systemd: - name: rke2-agent.service - state: restarted - notify: "Service (re)started" \ No newline at end of file diff --git a/roles/install_utils_registry/tasks/deploy.yml b/roles/install_utils_registry/tasks/deploy.yml deleted file mode 100644 index 67cb9c887..000000000 --- 
a/roles/install_utils_registry/tasks/deploy.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Kubernetes execution from Ansible controler - delegate_to: localhost - run_once: true - become: false - block: - - - name: Create Namespace - kubernetes.core.k8s: - state: present - definition: - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ registry_namespace }}" - - - name: Deploy registry manifest - kubernetes.core.k8s: - state: present - template: "registry.yaml.j2" - kubeconfig: "~/.kube/{{ inventory_hostname }}.yaml" diff --git a/roles/install_utils_registry/tasks/load.yml b/roles/install_utils_registry/tasks/load.yml deleted file mode 100644 index dfb56d6d0..000000000 --- a/roles/install_utils_registry/tasks/load.yml +++ /dev/null @@ -1,68 +0,0 @@ -- name: Flush handlers - ansible.builtin.meta: flush_handlers - -- name: Wait for k8s apiserver - ansible.builtin.wait_for: - host: localhost - port: "5000" - state: present - timeout: 600 - -# Longhorn -- name: Find longhorn images on the target server - ansible.builtin.find: - paths: "{{ mount_images_path }}/longhorn/" - patterns: "*.tar" - register: found_images - -- name: Copy longhorn images with skopeo - ansible.builtin.command: "skopeo copy docker-archive:{{ item.path }} docker://localhost:5000/longhornio/{{ item.path | basename | regex_replace('.tar', '') | regex_replace('_',':') }} --dest-tls-verify=false" - with_items: "{{ found_images['files'] }}" - -# Cert -- name: Find Cert-manager images on the target server - ansible.builtin.find: - paths: "{{ mount_images_path }}/cert/" - patterns: "*.tar" - register: found_images - -- name: Copy Cert-manager images with skopeo - ansible.builtin.command: "skopeo copy docker-archive:{{ item.path }} docker://localhost:5000/cert/{{ item.path | basename | regex_replace('.tar', '') | regex_replace('_',':') }} --dest-tls-verify=false" - with_items: "{{ found_images['files'] }}" - -# Neuvector -- name: Find Neuvector images on the target server - ansible.builtin.find: - 
paths: "{{ mount_images_path }}/neuvector/" - patterns: "*.tar" - register: found_images - -- name: Copy Neuvector images with skopeo - ansible.builtin.command: "skopeo copy docker-archive:{{ item.path }} docker://localhost:5000/neuvector/{{ item.path | basename | regex_replace('.tar', '') | regex_replace('_',':') }} --dest-tls-verify=false" - with_items: "{{ found_images['files'] }}" - -# Rancher -- name: Find Rancher images on the target server - ansible.builtin.find: - paths: "{{ mount_images_path }}/rancher/" - patterns: "*.tar" - register: found_images - -- name: Copy Rancher images with skopeo - ansible.builtin.command: "skopeo copy docker-archive:{{ item.path }} docker://localhost:5000/rancher/{{ item.path | basename | regex_replace('.tar', '') | regex_replace('_',':') }} --dest-tls-verify=false" - with_items: "{{ found_images['files'] }}" - -# Result -- name: List docker registry - ansible.builtin.shell: | - for i in $(curl -sk localhost:5000/v2/_catalog | jq -r '.repositories[]'); do - for tag in $(curl -sk localhost:5000/v2/${i}/tags/list | jq -r '.tags[]'); do - echo ${i}:${tag}; - done; - done - register: docker - -- name: Display Output - debug: - var: docker['stdout_lines'] - \ No newline at end of file diff --git a/roles/install_utils_registry/tasks/main.yml b/roles/install_utils_registry/tasks/main.yml deleted file mode 100644 index cfe711e39..000000000 --- a/roles/install_utils_registry/tasks/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -# tasks file for install_local_registry - -- name: Push registry tar in RKE2 images - ansible.builtin.import_tasks: push.yml - -- name: Flush handlers - ansible.builtin.meta: flush_handlers - -- name: Kubernetes tasks - ansible.builtin.import_tasks: deploy.yml - when: caller_role_name == "controller" - -- name: Load images in local registry - ansible.builtin.import_tasks: load.yml - when: caller_role_name == "controller" \ No newline at end of file diff --git a/roles/install_utils_registry/tasks/push.yml 
b/roles/install_utils_registry/tasks/push.yml deleted file mode 100644 index 11c02fa4c..000000000 --- a/roles/install_utils_registry/tasks/push.yml +++ /dev/null @@ -1,15 +0,0 @@ -- name: Pre-load registry image - ansible.builtin.copy: - src: "{{ mount_registry_tar }}" - dest: "{{ rke2_images_path }}" - remote_src: true - notify: Restart rke2-server - when: caller_role_name == "controller" - -- name: Pre-load registry image - ansible.builtin.copy: - src: "{{ mount_registry_tar }}" - dest: "{{ rke2_images_path }}" - remote_src: true - notify: Restart rke2-agent - when: caller_role_name == "worker" diff --git a/roles/install_utils_registry/templates/registry.yaml.j2 b/roles/install_utils_registry/templates/registry.yaml.j2 deleted file mode 100644 index 1336022a7..000000000 --- a/roles/install_utils_registry/templates/registry.yaml.j2 +++ /dev/null @@ -1,36 +0,0 @@ ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: registry - namespace: {{ registry_namespace }} - labels: - app: registry -spec: - selector: - matchLabels: - app: registry - template: - metadata: - labels: - app: registry - spec: - containers: - - name: registry - image: registry - imagePullPolicy: Never - ports: - - name: registry - containerPort: 5000 - securityContext: - capabilities: - add: - - NET_BIND_SERVICE - volumeMounts: - - name: registry - mountPath: /var/lib/registry - volumes: - - name: registry - hostPath: - path: "{{ mount_registry_path }}" - hostNetwork: true \ No newline at end of file diff --git a/roles/install_utils_registry/tests/test.yml b/roles/install_utils_registry/tests/test.yml deleted file mode 100644 index 50b7e1bef..000000000 --- a/roles/install_utils_registry/tests/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - install_local_registry diff --git a/roles/install_utils_registry/vars/main.yml b/roles/install_utils_registry/vars/main.yml deleted file mode 100644 index a9d100751..000000000 --- 
a/roles/install_utils_registry/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for install_local_registry diff --git a/roles/rolling_restart/README.md b/roles/rolling_restart/README.md new file mode 100644 index 000000000..694a1e4f5 --- /dev/null +++ b/roles/rolling_restart/README.md @@ -0,0 +1,72 @@ +Role Name +========= + +Role to restart RKE2 cluster in rolling mode + +Requirements +------------ + +*Example below show that the roles have two flavors and different requirements in functions of what you want* + +if idm set to true: +- Access to a IDM server if you want to create users account. +- Credentials access to connect to IDM + +if idm set to false: +- create local account on Linux servers + +Role Variables +-------------- + +| **VarName** | **Type** | **Content** | **Mandatory** | +|--------------------|----------|---------------------------|:-------------:| +| idm | boolean | true / false | x | +| svc_account | string | Service Account | x | +| svc_account_passwd | string | pwd (can be omited) | | +| svc_group | string | Group | | +| svc_owner | string | Owner of the account | if idm true | +| list_svc_account | list | Users which goes in group | if idm true | +| idm_server | string | Service Account PWD | if idm true | +| idm_pwd | string | sudo group | if idm true | + +**Mandatory** is the minimum variables that need to be set to make the role work +*the variables not mandatory either have a default value defined or can be omited* + +Dependencies +------------ + +Dependencies with some others roles (if there is some). 
+ +Example Playbook +---------------- +Give some example about how to use or implement your Roles + + +```yml +- name: Trigger Role Example in a Playbooks + hosts: RANDOM_GROUP_DEFINED_IN_YOUR_INVENTORY + remote_user: ansible + become: true + + roles: + - { role: 'example', tags: 'example' } +``` + +```yml +# Example for one user +- import_role: + name: "example" + vars: + svc_account: "{{ tomcat_svc_account }}" + svc_group: "{{ tomcat_svc_group }}" +``` + +License +------- + +Apache-2.0 + +Author Information +------------------ + +morze.baltyk@proton.me diff --git a/roles/rolling_restart/defaults/main.yml b/roles/rolling_restart/defaults/main.yml new file mode 100644 index 000000000..e8840f68a --- /dev/null +++ b/roles/rolling_restart/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for rolling_restart diff --git a/roles/rolling_restart/handlers/main.yml b/roles/rolling_restart/handlers/main.yml new file mode 100644 index 000000000..049ee2b94 --- /dev/null +++ b/roles/rolling_restart/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for rolling_restart diff --git a/roles/rolling_restart/meta/main.yml b/roles/rolling_restart/meta/main.yml new file mode 100644 index 000000000..832eafbff --- /dev/null +++ b/roles/rolling_restart/meta/main.yml @@ -0,0 +1,54 @@ +--- +galaxy_info: + standalone: false # Part of a collection + author: morze.baltyk@proton.me + description: Role to restart RKE2 cluster in rolling mode + company: Opensource + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: Apache-2.0 + + min_ansible_version: "2.15.0" + + # If this a Container Enabled role, provide the minimum Ansible Container version. 
+ # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. + # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. diff --git a/roles/rolling_restart/tasks/main.yml b/roles/rolling_restart/tasks/main.yml new file mode 100644 index 000000000..ea20d1f52 --- /dev/null +++ b/roles/rolling_restart/tasks/main.yml @@ -0,0 +1,71 @@ +--- +# tasks file for rolling_restart + +- name: Cordon and Drain the node {{ inventory_hostname }} + ansible.builtin.shell: | + set -o pipefail + {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \ + cordon "{{ inventory_hostname }}" && \ + {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \ + drain "{{ inventory_hostname }}" --ignore-daemonsets --delete-emptydir-data + args: + executable: /bin/bash + register: drain + until: + - drain.stdout is search('drained') + retries: 100 + delay: 15 + changed_when: false + delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}" + run_once: true + when: rke2_drain_node_during_upgrade + +- name: Restart RKE2 service on {{ inventory_hostname }} + ansible.builtin.service: + name: "{{ 
rke2_service_name }}" + state: restarted + notify: "Service (re)started" + +- name: Wait for all nodes to be ready again + ansible.builtin.shell: | + set -o pipefail + {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes | grep " Ready" | wc -l + args: + executable: /bin/bash + changed_when: false + register: all_ready_nodes + until: + - groups[rke2_cluster_group_name] | length == all_ready_nodes.stdout | int + retries: 100 + delay: 15 + delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}" + run_once: true + +- name: Uncordon the node {{ inventory_hostname }} + ansible.builtin.shell: | + set -o pipefail + {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml \ + uncordon "{{ inventory_hostname }}" + args: + executable: /bin/bash + changed_when: false + delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}" + run_once: true + when: rke2_drain_node_during_upgrade + +- name: Wait for all pods to be ready again + ansible.builtin.shell: | + set -o pipefail + {{ rke2_data_path }}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get pods -A --field-selector=metadata.namespace!=kube-system | grep -iE "crash|error|init|terminating" | wc -l + args: + executable: /bin/bash + failed_when: "all_pods_ready.rc not in [ 0, 1 ]" + changed_when: false + register: all_pods_ready + until: + 'all_pods_ready.stdout | trim == "0"' + retries: 100 + delay: 15 + delegate_to: "{{ active_server | default(groups[rke2_servers_group_name].0) }}" + run_once: true + when: rke2_wait_for_all_pods_to_be_ready diff --git a/roles/set_nfs_mount/tests/inventory b/roles/rolling_restart/tests/inventory similarity index 90% rename from roles/set_nfs_mount/tests/inventory rename to roles/rolling_restart/tests/inventory index 878877b07..2fbb50c4a 100644 --- a/roles/set_nfs_mount/tests/inventory +++ b/roles/rolling_restart/tests/inventory @@ -1,2 +1 @@ localhost - diff --git 
a/roles/rolling_restart/tests/test.yml b/roles/rolling_restart/tests/test.yml new file mode 100644 index 000000000..ab92c2e70 --- /dev/null +++ b/roles/rolling_restart/tests/test.yml @@ -0,0 +1,6 @@ +--- +- name: Test + hosts: localhost + remote_user: root + roles: + - rolling_restart diff --git a/roles/rolling_restart/vars/main.yml b/roles/rolling_restart/vars/main.yml new file mode 100644 index 000000000..bc3d39003 --- /dev/null +++ b/roles/rolling_restart/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for rolling_restart diff --git a/roles/set_firewalld/handlers/main.yml b/roles/set_firewalld/handlers/main.yml index c1076245c..cf72800cc 100644 --- a/roles/set_firewalld/handlers/main.yml +++ b/roles/set_firewalld/handlers/main.yml @@ -1,7 +1,7 @@ --- # handlers file for firewalld -- name: restart firewalld - systemd: +- name: Restart firewalld + ansible.builtin.systemd: name: firewalld state: restarted diff --git a/roles/set_firewalld/meta/main.yml b/roles/set_firewalld/meta/main.yml index 6c4a72b51..e7d3b176d 100644 --- a/roles/set_firewalld/meta/main.yml +++ b/roles/set_firewalld/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/set_firewalld/tasks/delete_service.yml b/roles/set_firewalld/tasks/delete_service.yml index 96b206f15..9f8583b85 100644 --- a/roles/set_firewalld/tasks/delete_service.yml +++ b/roles/set_firewalld/tasks/delete_service.yml @@ -1,19 +1,20 @@ +--- # Remove service from zone -- name: Delete {{ service['name'] }} firewalld service - firewalld: +- name: Delete firewalld service {{ service['name'] }} + ansible.posix.firewalld: service: "{{ service['name'] }}" - permanent: yes - immediate: yes - zone: "{{ service['zone'] | default( firewalld_default_zone ) }}" + permanent: true + immediate: true + zone: "{{ service['zone'] | default(firewalld_default_zone) }}" state: disabled # Completely delete service -- name: Delete {{ service['name'] }} firewalld service definition - file: +- name: Delete firewalld service definition {{ service['name'] }} + ansible.builtin.file: path: "{{ firewalld_path }}/services/{{ service['name'] }}.xml" state: absent - when: service['erase'] == true - notify: restart firewalld - -- meta: flush_handlers + when: service['erase'] + notify: Restart firewalld +- name: Flush handlers now + ansible.builtin.meta: flush_handlers \ No newline at end of file diff --git a/roles/set_firewalld/tasks/main.yml b/roles/set_firewalld/tasks/main.yml index 337f6547a..2473545d6 100644 --- a/roles/set_firewalld/tasks/main.yml +++ b/roles/set_firewalld/tasks/main.yml @@ -2,19 +2,19 @@ # tasks file for firewalld - name: Manage firewalld zone files - include_tasks: manage_firewalld_zone.yml - loop: "{{ firewalld_zones | default([]) }}" + ansible.builtin.include_tasks: manage_firewalld_zone.yml + loop: "{{ firewalld_zones | default([]) }}" loop_control: loop_var: zone - name: Manage inbound rules - include_tasks: manage_inbound_rule.yml + ansible.builtin.include_tasks: manage_inbound_rule.yml loop: "{{ firewalld_rules['inbound'] | default([]) }}" loop_control: loop_var: rule - name: Delete a Service Completely - 
include_tasks: delete_service.yml + ansible.builtin.include_tasks: delete_service.yml loop: "{{ firewalld_remove['inbound'] | default([]) }}" loop_control: - loop_var: service + loop_var: service \ No newline at end of file diff --git a/roles/set_firewalld/tasks/manage_firewalld_zone.yml b/roles/set_firewalld/tasks/manage_firewalld_zone.yml index ca4f3e323..32e6e0d6a 100644 --- a/roles/set_firewalld/tasks/manage_firewalld_zone.yml +++ b/roles/set_firewalld/tasks/manage_firewalld_zone.yml @@ -1,18 +1,20 @@ -- name: Manage {{ zone['name'] }} firewalld file zone definition - template: +--- +- name: Manage firewalld file zone definition {{ zone['name'] }} + ansible.builtin.template: src: zone.xml.j2 dest: "{{ firewalld_path }}/zones/{{ zone['name'] }}.xml" owner: root group: root mode: "0644" - force: yes - notify: restart firewalld + force: true + notify: Restart firewalld -- meta: flush_handlers +- name: Flush handlers now + ansible.builtin.meta: flush_handlers -- name: Manage {{ zone['name'] }} firewalld zone definition - firewalld: +- name: Manage firewalld zone definition {{ zone['name'] }} + ansible.posix.firewalld: zone: "{{ zone['name'] }}" - permanent: yes - immediate: yes - state: enabled + permanent: true + immediate: true + state: enabled \ No newline at end of file diff --git a/roles/set_firewalld/tasks/manage_inbound_rule.yml b/roles/set_firewalld/tasks/manage_inbound_rule.yml index d8b2cc46c..5d26ad838 100644 --- a/roles/set_firewalld/tasks/manage_inbound_rule.yml +++ b/roles/set_firewalld/tasks/manage_inbound_rule.yml @@ -1,19 +1,21 @@ -- name: Manage {{ rule['name'] }} firewalld service definition - template: +--- +- name: Manage firewalld service definition {{ rule['name'] }} + ansible.builtin.template: src: service.xml.j2 dest: "{{ firewalld_path }}/services/{{ rule['name'] }}.xml" owner: root group: root mode: "0644" - force: yes - notify: restart firewalld + force: true + notify: Restart firewalld -- meta: flush_handlers +- name: Flush handlers now 
+ ansible.builtin.meta: flush_handlers -- name: Manage {{ rule['name'] }} firewalld service - firewalld: +- name: Manage firewalld service {{ rule['name'] }} + ansible.posix.firewalld: service: "{{ rule['name'] }}" - permanent: yes - immediate: yes - zone: "{{ rule['zone'] | default( firewalld_default_zone ) }}" + permanent: true + immediate: true + zone: "{{ rule['zone'] | default(firewalld_default_zone) }}" state: enabled diff --git a/roles/set_firewalld/tests/test.yml b/roles/set_firewalld/tests/test.yml index 67768aa7d..1cc260b87 100644 --- a/roles/set_firewalld/tests/test.yml +++ b/roles/set_firewalld/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - - firewalld \ No newline at end of file + - set_firewalld \ No newline at end of file diff --git a/roles/set_nfs_export/README.md b/roles/set_nfs_export/README.md deleted file mode 100644 index 1e293c72b..000000000 --- a/roles/set_nfs_export/README.md +++ /dev/null @@ -1,14 +0,0 @@ -Role Name -========= - -Export NFS volume from Controler to share images - -License -------- - -Apache-2.0 - -Author Information ------------------- - -morze.baltyk@proton.me \ No newline at end of file diff --git a/roles/set_nfs_export/defaults/main.yml b/roles/set_nfs_export/defaults/main.yml deleted file mode 100644 index 5a4f9c4a0..000000000 --- a/roles/set_nfs_export/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# defaults file for export_nfs - -export_nfs_path: "{{ global_directory_package_target }}/rancher" -export_nfs_registry_path: "{{ export_nfs_path }}/registry" -symlink_mount_path: "{{ global_directory_mount }}" \ No newline at end of file diff --git a/roles/set_nfs_export/handlers/main.yml b/roles/set_nfs_export/handlers/main.yml deleted file mode 100644 index 1b5aa9dc4..000000000 --- a/roles/set_nfs_export/handlers/main.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -# handlers file for export_nfs - -- name: Restart_NFS_Server - systemd: - name: nfs-server - 
state: restarted \ No newline at end of file diff --git a/roles/set_nfs_export/tasks/firewalld.yml b/roles/set_nfs_export/tasks/firewalld.yml deleted file mode 100644 index e502edb6b..000000000 --- a/roles/set_nfs_export/tasks/firewalld.yml +++ /dev/null @@ -1,21 +0,0 @@ -- block: - - - name: firewalld NFS port enabled - ansible.posix.firewalld: - service: "{{ item }}" - permanent: true - state: enabled - loop: - - nfs - - rpc-bind - - mountd - - - name: Always reload firewalld - ansible.builtin.service: - name: firewalld - state: reloaded - - become: true - when: - - ansible_facts['services']['firewalld.service'] is defined - - ansible_facts['services']['firewalld.service']['state'] == "running" \ No newline at end of file diff --git a/roles/set_nfs_export/tasks/install.yml b/roles/set_nfs_export/tasks/install.yml deleted file mode 100644 index 6f6974e6b..000000000 --- a/roles/set_nfs_export/tasks/install.yml +++ /dev/null @@ -1,34 +0,0 @@ - -# share out directory -- name: NFS share - block: - - name: Install NFS packages - ansible.builtin.dnf: - name: "{{ item }}" - state: present - with_items: - - nfs-utils - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - - - name: Share out directory via NFS - ansible.builtin.lineinfile: - path: /etc/exports - line: "{{ export_nfs_path }} *(ro)" - notify: Restart_NFS_Server - - - name: Enable and start NFS server service - ansible.builtin.systemd: - name: nfs-server - enabled: yes - state: started - - - name: Create Symlink to get same path than other server - ansible.builtin.file: - src: "{{ export_nfs_path }}" - dest: "{{ symlink_mount_path }}" - state: link - when: export_nfs_path != symlink_mount_path - - become: true \ No newline at end of file diff --git a/roles/set_nfs_export/tasks/main.yml b/roles/set_nfs_export/tasks/main.yml deleted file mode 100644 index d56b55d0e..000000000 --- a/roles/set_nfs_export/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# tasks file for 
export_nfs -- name: Gather facts - setup: - gather_subset: - - "distribution" - - "distribution_major_version" - - "!min" - when: > - ansible_os_family is not defined - -- name: Tasks for Linux - import_tasks: selinux.yml - -- name: Tasks for Linux - import_tasks: firewalld.yml - -- name: Tasks to install and setup NFS export - import_tasks: install.yml - -- name: Flush handlers - ansible.builtin.meta: flush_handlers \ No newline at end of file diff --git a/roles/set_nfs_export/tasks/selinux.yml b/roles/set_nfs_export/tasks/selinux.yml deleted file mode 100644 index 2b84c84f6..000000000 --- a/roles/set_nfs_export/tasks/selinux.yml +++ /dev/null @@ -1,23 +0,0 @@ -- name: Create package directories - ansible.builtin.file: - path: "{{ export_nfs_registry_path }}" - state: directory - recurse: true - -- name: Test whether SELinux is enabled - ansible.builtin.command: /usr/sbin/selinuxenabled - ignore_errors: yes - register: selinux_status - -- name: SElinux config - when: selinux_status.rc == 0 - block: - # chcon system_u:object_r:container_file_t:s0 /opt/rancher/registry - - name: Ensures registry container can be executed on path - community.general.sefcontext: - target: "{{ export_nfs_registry_path }}(/.*)?" 
- setype: container_file_t - state: present - - - name: Restorecon - ansible.builtin.command: "restorecon -v {{ export_nfs_registry_path }}" diff --git a/roles/set_nfs_export/tests/test.yml b/roles/set_nfs_export/tests/test.yml deleted file mode 100644 index 1cde05bbc..000000000 --- a/roles/set_nfs_export/tests/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - export_nfs diff --git a/roles/set_nfs_export/vars/main.yml b/roles/set_nfs_export/vars/main.yml deleted file mode 100644 index 9deb28589..000000000 --- a/roles/set_nfs_export/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# vars file for export_nfs diff --git a/roles/set_nfs_mount/README.md b/roles/set_nfs_mount/README.md deleted file mode 100644 index ca61a96de..000000000 --- a/roles/set_nfs_mount/README.md +++ /dev/null @@ -1,14 +0,0 @@ -Role Name -========= - -mount share nfs on workers to share images - -License -------- - -Apache-2.0 - -Author Information ------------------- - -morze.baltyk@proton.me \ No newline at end of file diff --git a/roles/set_nfs_mount/defaults/main.yml b/roles/set_nfs_mount/defaults/main.yml deleted file mode 100644 index 6e4fbdbd3..000000000 --- a/roles/set_nfs_mount/defaults/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# defaults file for mount_nfs -master: "{{ global_master_ip }}" -export_nfs_path: "{{ global_directory_package_target }}/rancher" -nfs_mount_path: "{{ global_directory_mount }}" \ No newline at end of file diff --git a/roles/set_nfs_mount/handlers/main.yml b/roles/set_nfs_mount/handlers/main.yml deleted file mode 100644 index 0f1387fea..000000000 --- a/roles/set_nfs_mount/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# handlers file for mount_nfs diff --git a/roles/set_nfs_mount/tasks/main.yml b/roles/set_nfs_mount/tasks/main.yml deleted file mode 100644 index 989cfc4a7..000000000 --- a/roles/set_nfs_mount/tasks/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -# tasks file for mount_nfs -- name: Gather facts 
- setup: - gather_subset: - - "distribution" - - "distribution_major_version" - - "!min" - when: > - ansible_os_family is not defined - -- name: Tasks for RHEL-like OS - import_tasks: rhel.yml - when: - - ansible_os_family == "RedHat" \ No newline at end of file diff --git a/roles/set_nfs_mount/tasks/rhel.yml b/roles/set_nfs_mount/tasks/rhel.yml deleted file mode 100644 index 6b1c4d074..000000000 --- a/roles/set_nfs_mount/tasks/rhel.yml +++ /dev/null @@ -1,32 +0,0 @@ -# mount NFS directory -- block: - - name: Install NFS packages - dnf: - name: "{{ item }}" - state: present - with_items: - - nfs-utils - - - name: Create mount directory - ansible.builtin.file: - path: "{{ nfs_mount_path }}" - state: directory - recurse: yes - - - name: Add NFS entry to /etc/fstab - ansible.builtin.lineinfile: - path: /etc/fstab - line: "{{ master }}:{{ export_nfs_path }} {{ nfs_mount_path }} nfs rw,hard,rsize=1048576,wsize=1048576 0 0" - - - name: Mount NFS share - ansible.posix.mount: - path: "{{ nfs_mount_path }}" - src: "{{ master }}:{{ export_nfs_path }}" - fstype: nfs - opts: rw,hard,rsize=1048576,wsize=1048576 - state: mounted - - become: true - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 \ No newline at end of file diff --git a/roles/set_nfs_mount/tests/test.yml b/roles/set_nfs_mount/tests/test.yml deleted file mode 100644 index 10177c473..000000000 --- a/roles/set_nfs_mount/tests/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - mount_nfs diff --git a/roles/deploy_certmanager/README.md b/roles/set_versions/README.md similarity index 97% rename from roles/deploy_certmanager/README.md rename to roles/set_versions/README.md index f07bc84ab..fd3c3682d 100644 --- a/roles/deploy_certmanager/README.md +++ b/roles/set_versions/README.md @@ -1,7 +1,7 @@ Role Name ========= -Role to deploy Cert-manager +Role to get and set versions. 
Requirements ------------ @@ -69,4 +69,4 @@ Apache-2.0 Author Information ------------------ -morze.baltyk@proton.me \ No newline at end of file +morze.baltyk@proton.me diff --git a/roles/set_versions/defaults/main.yml b/roles/set_versions/defaults/main.yml new file mode 100644 index 000000000..bc66fb75a --- /dev/null +++ b/roles/set_versions/defaults/main.yml @@ -0,0 +1,7 @@ +--- +rhel_version: "{{ global_rhel_version | default('false') }}" +stable_channel_wanted: "{{ global_stable_channel | bool }}" +kubevip_wanted: "{{ global_extras_components['kubevip'] | bool }}" +longhorn_wanted: "{{ global_extras_components['longhorn'] | bool }}" +rancher_wanted: "{{ global_extras_components['rancher'] | bool }}" +neuvector_wanted: "{{ global_extras_components['neuvector'] | bool }}" diff --git a/roles/deploy_certmanager/meta/main.yml b/roles/set_versions/meta/main.yml similarity index 95% rename from roles/deploy_certmanager/meta/main.yml rename to roles/set_versions/meta/main.yml index 9a6653358..fa440af90 100644 --- a/roles/deploy_certmanager/meta/main.yml +++ b/roles/set_versions/meta/main.yml @@ -2,7 +2,7 @@ galaxy_info: standalone: false # Part of a collection author: morze.baltyk@proton.me - description: Role to deploy Cert-manager + description: Role to get and set versions. company: Opensource # If the issue tracker for your role is not on github, uncomment the @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/set_versions/tasks/defined_versions.yml b/roles/set_versions/tasks/defined_versions.yml new file mode 100644 index 000000000..3f7eb10ad --- /dev/null +++ b/roles/set_versions/tasks/defined_versions.yml @@ -0,0 +1,25 @@ +--- +- name: Set Versions variables + ansible.builtin.set_fact: + rke2_version: "{{ global_versions['rke2'] }}" + kubevip_version: "{{ global_versions['kubevip'] }}" + longhorn_version: "{{ global_versions['longhorn'] }}" + cert_manager_version: "{{ global_versions['cert_manager'] }}" + rancher_version: "{{ global_versions['rancher'] }}" + neuvector_version: "{{ global_versions['neuvector'] }}" + helm_version: "{{ global_versions['helm'] }}" + nerdctl_version: "{{ global_versions['nerdctl'] }}" + k9s_version: "{{ global_versions['k9s'] }}" + +- name: Display + ansible.builtin.debug: + msg: + - "Defined RKE2 version in this ansible collection is {{ rke2_version }}" + - "Defined Helm version in this ansible collection is {{ helm_version }}" + - "Defined Nerdctl version in this ansible collection is {{ nerdctl_version }}" + - "Defined k9s version in this ansible collection is {{ k9s_version }}" + - "{% if kubevip_wanted %}Defined Kube-vip version in this ansible collection is {{ kubevip_version }}{% endif %}" + - "{% if longhorn_wanted %}Defined Longhorn version in this ansible collection is {{ longhorn_version }}{% endif %}" + - "{% if rancher_wanted %}Defined Cert-manager version in this ansible collection is {{ cert_manager_version }}{% endif %}" + - "{% if rancher_wanted %}Defined Rancher version in this ansible collection is {{ rancher_version }}{% endif %}" + - "{% if neuvector_wanted %}Defined Neuvector version in this ansible collection is {{ neuvector_version }}{% endif %}" diff --git a/roles/set_versions/tasks/main.yml b/roles/set_versions/tasks/main.yml new file mode 100644 index 000000000..5ff35e063 --- /dev/null +++ b/roles/set_versions/tasks/main.yml @@ -0,0 +1,12 @@ +--- +# tasks file 
for set_versions +- name: Set RHEL Versions variables + ansible.builtin.import_tasks: rhel_version.yml + +- name: Set Versions variables + ansible.builtin.import_tasks: stable_channels.yml + when: stable_channel_wanted + +- name: Set Versions variables + ansible.builtin.import_tasks: defined_versions.yml + when: not stable_channel_wanted diff --git a/roles/set_versions/tasks/only_kubevip.yml b/roles/set_versions/tasks/only_kubevip.yml new file mode 100644 index 000000000..8f502f87c --- /dev/null +++ b/roles/set_versions/tasks/only_kubevip.yml @@ -0,0 +1,27 @@ +--- +# Stable version from URL +- name: Set Kube-vip Versions variables + when: stable_channel_wanted + block: + # export KUBEVIP_VERSION=$(curl -s https://api.github.com/repos/kube-vip/kube-vip/releases/latest | jq -r .tag_name) + - name: Get Kube-vip latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/kube-vip/kube-vip/releases/latest" + method: GET + return_content: true + register: kubevip_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + kubevip_version: "{{ kubevip_release.json.tag_name | regex_replace('^v', '') }}" + +# Version defined in this Ansible collection +- name: Set Kube-vip Versions variables + ansible.builtin.set_fact: + kubevip_version: "{{ global_versions['kubevip'] }}" + when: not stable_channel_wanted + +# Common +- name: Display kube-vip version + ansible.builtin.debug: + msg: "Kube-vip version to be installed is {{ kubevip_version }}" diff --git a/roles/set_versions/tasks/only_longhorn.yml b/roles/set_versions/tasks/only_longhorn.yml new file mode 100644 index 000000000..5b90467d3 --- /dev/null +++ b/roles/set_versions/tasks/only_longhorn.yml @@ -0,0 +1,27 @@ +--- +# Stable version from URL +- name: Set Longhorn Versions variables + when: stable_channel_wanted + block: + # export LONGHORN_VERSION=$(curl -s https://api.github.com/repos/longhorn/longhorn/releases/latest | jq -r .tag_name) + - name: Get Longhorn latest stable version + 
ansible.builtin.uri: + url: "https://api.github.com/repos/longhorn/longhorn/releases/latest" + method: GET + return_content: true + register: longhorn_release + + - name: Extract latest stable version + ansible.builtin.set_fact: + longhorn_version: "{{ longhorn_release.json.tag_name | regex_replace('^v', '') }}" + +# Version defined in this Ansible collection +- name: Set Longhorn Versions variables + ansible.builtin.set_fact: + longhorn_version: "{{ global_versions['longhorn'] }}" + when: not stable_channel_wanted + +# Common +- name: Display Longhorn version + ansible.builtin.debug: + msg: "Longhorn version to be installed is {{ longhorn_version }}" diff --git a/roles/set_versions/tasks/only_neuvector.yml b/roles/set_versions/tasks/only_neuvector.yml new file mode 100644 index 000000000..698ad8730 --- /dev/null +++ b/roles/set_versions/tasks/only_neuvector.yml @@ -0,0 +1,27 @@ +--- +# Stable version from URL +- name: Set Neuvector Versions variables + when: stable_channel_wanted + block: + # export NEU_VERSION=$(curl -s https://api.github.com/repos/neuvector/neuvector-helm/releases/latest | jq -r .tag_name) + - name: Get Neuvector latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/neuvector/neuvector-helm/releases/latest" + method: GET + return_content: true + register: neuvector_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + neuvector_version: "{{ neuvector_release.json.tag_name | regex_replace('^v', '') }}" + +# Version defined in this Ansible collection +- name: Set Neuvector Versions variables + ansible.builtin.set_fact: + neuvector_version: "{{ global_versions['neuvector'] }}" + when: not stable_channel_wanted + +# Common +- name: Display Neuvector version + ansible.builtin.debug: + msg: "Neuvector version to be installed is {{ neuvector_version }}" diff --git a/roles/set_versions/tasks/only_rancher.yml b/roles/set_versions/tasks/only_rancher.yml new file mode 100644 index 000000000..e97880082 --- 
/dev/null +++ b/roles/set_versions/tasks/only_rancher.yml @@ -0,0 +1,42 @@ +--- +# Stable version from URL +- name: Set Rancher Versions variables + when: stable_channel_wanted + block: + # export CERT_VERSION=$(curl -s https://api.github.com/repos/cert-manager/cert-manager/releases/latest | jq -r .tag_name) + - name: Get Cert-Manager latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/cert-manager/cert-manager/releases/latest" + method: GET + return_content: true + register: cert_manager_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + cert_manager_version: "{{ cert_manager_release.json.tag_name | regex_replace('^v', '') }}" + + # export RANCHER_VERSION=$(curl -s https://api.github.com/repos/rancher/rancher/releases/latest | jq -r .tag_name) + - name: Get Rancher latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/rancher/rancher/releases/latest" + method: GET + return_content: true + register: rancher_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + rancher_version: "{{ rancher_release.json.tag_name | regex_replace('^v', '') }}" + +# Version defined in this Ansible collection +- name: Set Rancher Versions variables + ansible.builtin.set_fact: + cert_manager_version: "{{ global_versions['cert_manager'] }}" + rancher_version: "{{ global_versions['rancher'] }}" + when: not stable_channel_wanted + +# Common +- name: Display Rancher version + ansible.builtin.debug: + msg: + - "Cert-manager version to be installed is {{ cert_manager_version }}" + - "Rancher version to be installed is {{ rancher_version }}" diff --git a/roles/set_versions/tasks/only_rke2.yml b/roles/set_versions/tasks/only_rke2.yml new file mode 100644 index 000000000..3a9de92da --- /dev/null +++ b/roles/set_versions/tasks/only_rke2.yml @@ -0,0 +1,27 @@ +--- +# Stable version from URL +- name: Set RKE2 Versions variables + when: stable_channel_wanted + block: + # export RKE_VERSION=$(curl -s 
https://update.rke2.io/v1-release/channels | jq -r '.data[] | select(.id=="stable") | .latest' | awk -F"+" '{print $1}'| sed 's/v//') + - name: Get RKE2 latest stable version + ansible.builtin.uri: + url: "https://update.rke2.io/v1-release/channels" + method: GET + return_content: true + register: rke2_channels + + - name: Extract latest stable version + ansible.builtin.set_fact: + rke2_version: "{{ rke2_channels.json.data | selectattr('id', 'equalto', 'stable') | map(attribute='latest') | first | regex_replace('\\+.*', '') | regex_replace('^v', '') }}" + +# Version defined in this Ansible collection +- name: Set RKE2 Versions variables + ansible.builtin.set_fact: + rke2_version: "{{ global_versions['rke2'] }}" + when: not stable_channel_wanted + +# Common +- name: Display RKE2 version + ansible.builtin.debug: + msg: "RKE2 version to be installed is {{ rke2_version }}" diff --git a/roles/set_versions/tasks/rhel_version.yml b/roles/set_versions/tasks/rhel_version.yml new file mode 100644 index 000000000..e2c087b6b --- /dev/null +++ b/roles/set_versions/tasks/rhel_version.yml @@ -0,0 +1,26 @@ +--- +- name: RHEL version was not given in command line so need to be defined + when: rhel_version == 'false' + block: + - name: Gather facts + ansible.builtin.setup: + gather_subset: + - "distribution" + - "distribution_major_version" + - "!min" + when: > + ansible_os_family is not defined + + - name: Set rhel_version variable from non-RedHat systems + ansible.builtin.set_fact: + rhel_version: 8 + when: ansible_os_family != "RedHat" + + - name: Set rhel_version variable from a RedHat systems and take same version + ansible.builtin.set_fact: + rhel_version: "{{ ansible_distribution_major_version }}" + when: ansible_os_family == "RedHat" + +- name: Display RHEL version + ansible.builtin.debug: + msg: "Rkub Package is set now for RHEL version {{ rhel_version }}" diff --git a/roles/set_versions/tasks/stable_channels.yml b/roles/set_versions/tasks/stable_channels.yml new file mode 
100644 index 000000000..41c0c69b7 --- /dev/null +++ b/roles/set_versions/tasks/stable_channels.yml @@ -0,0 +1,147 @@ +--- +# RKE2 +- name: Block RKE2 + block: + # export RKE_VERSION=$(curl -s https://update.rke2.io/v1-release/channels | jq -r '.data[] | select(.id=="stable") | .latest' | awk -F"+" '{print $1}'| sed 's/v//') + - name: Get RKE2 latest stable version + ansible.builtin.uri: + url: "https://update.rke2.io/v1-release/channels" + method: GET + return_content: true + register: rke2_channels + + - name: Extract latest stable version + ansible.builtin.set_fact: + rke2_version: "{{ rke2_channels.json.data | selectattr('id', 'equalto', 'stable') | map(attribute='latest') | first | regex_replace('\\+.*', '') | regex_replace('^v', '') }}" + +# Kubevip +- name: Block Kubevip + when: kubevip_wanted + block: + # export KUBEVIP_VERSION=$(curl -s https://api.github.com/repos/kube-vip/kube-vip/releases/latest | jq -r .tag_name) + - name: Get Kube-vip latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/kube-vip/kube-vip/releases/latest" + method: GET + return_content: true + register: kubevip_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + kubevip_version: "{{ kubevip_release.json.tag_name | regex_replace('^v', '') }}" + +# Longhorn +- name: Block Longhorn + when: longhorn_wanted + block: + # export LONGHORN_VERSION=$(curl -s https://api.github.com/repos/longhorn/longhorn/releases/latest | jq -r .tag_name) + - name: Get Longhorn latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/longhorn/longhorn/releases/latest" + method: GET + return_content: true + register: longhorn_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + longhorn_version: "{{ longhorn_release.json.tag_name | regex_replace('^v', '') }}" + +# Rancher +- name: Block Rancher + when: rancher_wanted + block: + # export CERT_VERSION=$(curl -s 
https://api.github.com/repos/cert-manager/cert-manager/releases/latest | jq -r .tag_name) + - name: Get Cert-Manager latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/cert-manager/cert-manager/releases/latest" + method: GET + return_content: true + register: cert_manager_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + cert_manager_version: "{{ cert_manager_release.json.tag_name | regex_replace('^v', '') }}" + + # export RANCHER_VERSION=$(curl -s https://api.github.com/repos/rancher/rancher/releases/latest | jq -r .tag_name) + - name: Get Rancher latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/rancher/rancher/releases/latest" + method: GET + return_content: true + register: rancher_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + rancher_version: "{{ rancher_release.json.tag_name | regex_replace('^v', '') }}" + +# Neuvector +- name: Block Neuvector + when: neuvector_wanted + block: + # export NEU_VERSION=$(curl -s https://api.github.com/repos/neuvector/neuvector-helm/releases/latest | jq -r .tag_name) + - name: Get Neuvector latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/neuvector/neuvector-helm/releases/latest" + method: GET + return_content: true + register: neuvector_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + neuvector_version: "{{ neuvector_release.json.tag_name | regex_replace('^v', '') }}" + +# Helm binary +- name: Block Helm + block: + - name: Get Helm binary latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/helm/helm/releases/latest" + method: GET + return_content: true + register: helm_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + helm_version: "{{ helm_release.json.tag_name | regex_replace('^v', '') }}" + +# Nerdctl binary +- name: Block Nerdctl + block: + - name: Get Nerdctl binary latest release + ansible.builtin.uri: + url: 
"https://api.github.com/repos/containerd/nerdctl/releases/latest" + method: GET + return_content: true + register: nerdctl_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + nerdctl_version: "{{ nerdctl_release.json.tag_name | regex_replace('^v', '') }}" + +# K9S binary +- name: Block K9S + block: + - name: Get K9S binary latest release + ansible.builtin.uri: + url: "https://api.github.com/repos/derailed/k9s/releases/latest" + method: GET + return_content: true + register: k9s_release + + - name: Extract latest release tag + ansible.builtin.set_fact: + k9s_version: "{{ k9s_release.json.tag_name | regex_replace('^v', '') }}" + +# Display result +- name: Display versions + ansible.builtin.debug: + msg: + - "Latest stable RKE2 version is {{ rke2_version }}" + - "Latest Helm release tag is {{ helm_version }}" + - "Latest Nerdctl release tag is {{ nerdctl_version }}" + - "Latest K9S release tag is {{ k9s_version }}" + - "{% if kubevip_wanted %}Latest Kube-vip release tag is {{ kubevip_version | default('') }}{% endif %}" + - "{% if longhorn_wanted %}Latest Longhorn release tag is {{ longhorn_version | default('') }}{% endif %}" + - "{% if rancher_wanted %}Latest Cert-manager release tag is {{ cert_manager_version | default('') }}{% endif %}" + - "{% if rancher_wanted %}Latest Rancher release tag is {{ rancher_version | default('') }}{% endif %}" + - "{% if neuvector_wanted %}Latest Neuvector release tag is {{ neuvector_version | default('') }}{% endif %}" diff --git a/roles/set_versions/tests/inventory b/roles/set_versions/tests/inventory new file mode 100644 index 000000000..2fbb50c4a --- /dev/null +++ b/roles/set_versions/tests/inventory @@ -0,0 +1 @@ +localhost diff --git a/roles/set_versions/tests/test.yml b/roles/set_versions/tests/test.yml new file mode 100644 index 000000000..0b31f6829 --- /dev/null +++ b/roles/set_versions/tests/test.yml @@ -0,0 +1,6 @@ +--- +- name: Test + hosts: localhost + remote_user: root + roles: + - set_versions 
diff --git a/roles/uninstall_rkub/defaults/main.yml b/roles/uninstall_rkub/defaults/main.yml index d3aba5595..c69355fc0 100644 --- a/roles/uninstall_rkub/defaults/main.yml +++ b/roles/uninstall_rkub/defaults/main.yml @@ -1,12 +1,10 @@ --- # defaults file for uninstall_rke2 admin_user: "{{ global_install_user }}" -export_nfs_path: "{{ global_directory_package_target }}/rancher" -nfs_mount_path: "{{ global_directory_mount }}" longhorn_datapath: "{{ global_longhorn_datapath }}" -firewalld_rules_to_remove: +firewalld_rules_to_remove: inbound: - - name: rke2 - zone: public - erase: true \ No newline at end of file + - name: rke2 + zone: public + erase: true diff --git a/roles/uninstall_rkub/meta/main.yml b/roles/uninstall_rkub/meta/main.yml index 89894653b..bb4399bd7 100644 --- a/roles/uninstall_rkub/meta/main.yml +++ b/roles/uninstall_rkub/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. 
# min_ansible_container_version: diff --git a/roles/uninstall_rkub/tasks/admin.yml b/roles/uninstall_rkub/tasks/admin.yml index cc9828640..1a7e8ccf9 100644 --- a/roles/uninstall_rkub/tasks/admin.yml +++ b/roles/uninstall_rkub/tasks/admin.yml @@ -1,9 +1,10 @@ +--- - name: Remove admin user (if different from root) ansible.builtin.user: name: "{{ admin_user }}" state: absent - remove: yes + remove: true failed_when: false - when: - - admin_user is defined - - admin_user != "root" \ No newline at end of file + when: + - admin_user is defined + - admin_user != "root" \ No newline at end of file diff --git a/roles/uninstall_rkub/tasks/main.yml b/roles/uninstall_rkub/tasks/main.yml index 16ff799ca..8b6c04a85 100644 --- a/roles/uninstall_rkub/tasks/main.yml +++ b/roles/uninstall_rkub/tasks/main.yml @@ -7,8 +7,8 @@ - "distribution_major_version" - "default_ipv4" - "!all,!min" - when: > - ansible_os_family is not defined + when: + - ansible_os_family is not defined tags: [always] - name: Populate service facts @@ -24,9 +24,6 @@ - name: Admin Uninstall ansible.builtin.import_tasks: admin.yml -- name: NFS Uninstall - ansible.builtin.import_tasks: nfs.yml - - name: RKE2 firewalld remove vars: firewalld_remove: "{{ firewalld_rules_to_remove }}" @@ -35,4 +32,4 @@ tasks_from: main when: - ansible_facts['services']['firewalld.service'] is defined - - ansible_facts['services']['firewalld.service']['state'] == "running" \ No newline at end of file + - ansible_facts['services']['firewalld.service']['state'] == "running" diff --git a/roles/uninstall_rkub/tasks/nfs.yml b/roles/uninstall_rkub/tasks/nfs.yml deleted file mode 100644 index fc91f6a24..000000000 --- a/roles/uninstall_rkub/tasks/nfs.yml +++ /dev/null @@ -1,58 +0,0 @@ -- block: - - - name: Unmount NFS share - ansible.posix.mount: - path: "{{ nfs_mount_path }}" - state: unmounted - - - name: Remove NFS entry from /etc/fstab - ansible.builtin.lineinfile: - path: /etc/fstab - regexp: ".*{{ nfs_mount_path }} nfs.*" - state: absent 
- - - name: Remove mount directory - ansible.builtin.file: - path: "{{ nfs_mount_path }}" - state: absent - - - name: Share out directory via NFS - ansible.builtin.lineinfile: - path: /etc/exports - line: "{{ export_nfs_path }} *(ro)" - state: absent - - - name: Remove NFS packages - ansible.builtin.dnf: - name: "{{ item }}" - state: absent - with_items: - - nfs-utils - when: - - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 - - become: true - -# Remove NFS firewalld -- block: - - - name: firewalld NFS port enabled - ansible.posix.firewalld: - service: "{{ item }}" - permanent: true - state: disabled - loop: - - nfs - - mountd - - rpc-bind - - - name: Always reload firewalld - ansible.builtin.service: - name: firewalld - state: reloaded - - become: true - when: - - ansible_facts['services']['firewalld.service'] is defined - - ansible_facts['services']['firewalld.service']['state'] == "running" \ No newline at end of file diff --git a/roles/uninstall_rkub/tasks/uninstall.yml b/roles/uninstall_rkub/tasks/uninstall.yml index 57c05718d..7361bd230 100644 --- a/roles/uninstall_rkub/tasks/uninstall.yml +++ b/roles/uninstall_rkub/tasks/uninstall.yml @@ -1,46 +1,51 @@ +--- - name: Stop and uninstall RKE2 become: true - block: - - name: Stop RKE2 server - ansible.builtin.systemd: - name: "rke2-server.service" - state: stopped - failed_when: false + block: + - name: Stop RKE2 server + ansible.builtin.systemd: + name: "rke2-server.service" + state: stopped + failed_when: false - - name: Stop RKE2 agent - ansible.builtin.systemd: - name: "rke2-agent.service" - state: stopped - failed_when: false + - name: Stop RKE2 agent + ansible.builtin.systemd: + name: "rke2-agent.service" + state: stopped + failed_when: false - - name: Uninstall RKE2 - ansible.builtin.shell: /usr/local/bin/rke2-uninstall.sh - failed_when: false + - name: Uninstall RKE2 + ansible.builtin.shell: + cmd: | + set -o pipefail && /usr/local/bin/rke2-uninstall.sh + executable: 
/bin/bash + failed_when: false + changed_when: false # Longhorn removal -- block: - - name: 'collect longhorn files' - find: - paths: "{{ longhorn_datapath }}" - hidden: True - recurse: True - # file_type: any # Added in ansible 2.3 - register: collected_files +- name: Longhorn removal + block: + - name: Collect longhorn files + ansible.builtin.find: + paths: "{{ longhorn_datapath }}" + hidden: true + recurse: true + register: collected_files - - name: 'collect longhorn directories' - find: - paths: "{{ longhorn_datapath }}" - hidden: True - recurse: True - file_type: directory - register: collected_directories + - name: Collect longhorn directories + ansible.builtin.find: + paths: "{{ longhorn_datapath }}" + hidden: true + recurse: true + file_type: directory + register: collected_directories - - name: remove collected files and directories - file: - path: "{{ item['path'] }}" - state: absent - with_items: > - {{ - collected_files.files - + collected_directories.files - }} + - name: Remove collected files and directories + ansible.builtin.file: + path: "{{ item['path'] }}" + state: absent + with_items: > + {{ + collected_files.files + + collected_directories.files + }} diff --git a/roles/uninstall_rkub/tasks/utils.yml b/roles/uninstall_rkub/tasks/utils.yml index f83176f8e..53d9dc4fb 100644 --- a/roles/uninstall_rkub/tasks/utils.yml +++ b/roles/uninstall_rkub/tasks/utils.yml @@ -1,4 +1,5 @@ -- name: Remove Helm +--- +- name: Remove Helm ansible.builtin.file: path: /usr/local/bin/helm state: absent diff --git a/roles/uninstall_rkub/tests/test.yml b/roles/uninstall_rkub/tests/test.yml index 5027bb43c..10d995253 100644 --- a/roles/uninstall_rkub/tests/test.yml +++ b/roles/uninstall_rkub/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - - uninstall_rke2 + - uninstall_rkub diff --git a/roles/upload_package_zst/defaults/main.yml b/roles/upload_package_zst/defaults/main.yml index 565ab9884..6dac64c88 100644 
--- a/roles/upload_package_zst/defaults/main.yml +++ b/roles/upload_package_zst/defaults/main.yml @@ -1,2 +1,4 @@ --- # defaults file for upload_package_zst +upload_package_zst_directory: "{{ global_directory_package_target }}" +upload_package_zst_name: "{{ global_package_name }}" diff --git a/roles/upload_package_zst/meta/main.yml b/roles/upload_package_zst/meta/main.yml index 08d692a71..781ca24f0 100644 --- a/roles/upload_package_zst/meta/main.yml +++ b/roles/upload_package_zst/meta/main.yml @@ -18,7 +18,7 @@ galaxy_info: # - CC-BY-4.0 license: Apache-2.0 - min_ansible_version: "2.12.0" + min_ansible_version: "2.15.0" # If this a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: diff --git a/roles/upload_package_zst/tasks/main.yml b/roles/upload_package_zst/tasks/main.yml index b3eb42671..918c913f0 100644 --- a/roles/upload_package_zst/tasks/main.yml +++ b/roles/upload_package_zst/tasks/main.yml @@ -35,11 +35,11 @@ become: true when: - ansible_os_family == "RedHat" - - ansible_distribution_major_version | int >= 8 + - ansible_distribution_major_version | int >= 8 - name: Ensure target directory exist and accessible to connexion user ansible.builtin.file: - path: "{{ global_directory_package_target }}/rancher" + path: "{{ upload_package_zst_directory }}" state: directory recurse: true owner: "{{ ansible_user }}" @@ -51,12 +51,11 @@ - name: Synchronization of Monster zst package on first controler ansible.posix.synchronize: src: "{{ package_path }}" - dest: "{{ global_directory_package_target }}" + dest: "{{ upload_package_zst_directory }}" archive: false - name: Unarchive Monster zst package on first controler ansible.builtin.unarchive: - src: "{{ global_directory_package_target }}/{{ global_package_name }}" - dest: "{{ global_directory_package_target }}/rancher" + src: "{{ upload_package_zst_directory }}/{{ upload_package_zst_name }}" + dest: "{{ upload_package_zst_directory }}" remote_src: true - \ No newline 
at end of file diff --git a/roles/upload_package_zst/tests/test.yml b/roles/upload_package_zst/tests/test.yml index fce0e4fd3..8acfbe189 100644 --- a/roles/upload_package_zst/tests/test.yml +++ b/roles/upload_package_zst/tests/test.yml @@ -1,5 +1,6 @@ --- -- hosts: localhost +- name: Test + hosts: localhost remote_user: root roles: - upload_package_zst diff --git a/scripts/docker/Containerfile b/scripts/docker/Containerfile index d804322fd..eb5db1e64 100644 --- a/scripts/docker/Containerfile +++ b/scripts/docker/Containerfile @@ -1,5 +1,5 @@ # Use a base image v4.11.3 with the desired prerequisites installed -FROM registry.access.redhat.com/ubi8:8.8 +FROM registry.access.redhat.com/ubi8:8.9 # Switch to root user USER root diff --git a/scripts/prerequis/Makefile b/scripts/prerequis/Makefile old mode 100755 new mode 100644 index c17c495d8..1dee7af68 --- a/scripts/prerequis/Makefile +++ b/scripts/prerequis/Makefile @@ -9,7 +9,7 @@ endif .PHONY: all ## all prerequisites (keep the phony order) -all: pythons bindeps collections arkade images +all: pythons bindeps collections arkade .PHONY: images ## Load images in files directories of each roles (since too big, those are in the .gitignore) @@ -26,8 +26,8 @@ arkade: collections: @printf "\e[1;34m[INFO]\e[m ## Install Ansible Collections dependencies ##\n" @ansible-galaxy install -r ../../meta/ee-requirements.yml - @printf "\e[1;34m[INFO]\e[m ## Install $(REPO) version $(VERSION) ##\n" - @ansible-galaxy collection install git+$(REPO).git,$(VERSION) + @printf "\e[1;34m[INFO]\e[m ## Install $(REPO) version $(VERSION) ##\n" + @ansible-galaxy collection install git+$(REPO).git @printf "\e[1;32m[OK]\e[m Ansible Collections installed.\n" .PHONY: pythons @@ -92,4 +92,3 @@ show-help: printf "\n"; \ }' \ | cat - diff --git a/scripts/prerequis/arkade.sh b/scripts/prerequis/arkade.sh index b2457c344..15e84c024 100755 --- a/scripts/prerequis/arkade.sh +++ b/scripts/prerequis/arkade.sh @@ -2,6 +2,11 @@ set -eo pipefail 
find_home_profile(){ + if [[ -z "$HOME" ]]; then + export user=$(whoami) + export HOME=$(awk -F":" -v v="$user" '{if ($1==v) print $6}' /etc/passwd) + fi + if [[ "$SHELL" == *"/zsh" ]]; then HOME_PROFILE="$HOME/.zshrc" elif [[ "$SHELL" == *"/bash" ]]; then @@ -41,7 +46,7 @@ install_arkade(){ printf "\e[1;34m[INFO]\e[m Checking for updates...\n" if [[ "$CURRENT_VERSION" != "$LATEST_VERSION" ]]; then printf "\e[1;33m[CHANGE]\e[m New version of arkade found, current: $CURRENT_VERSION. Updating...\n" - curl -L "${DOWNLOAD_URL}"/"${LATEST_VERSION}"/arkade --output $HOME/.arkade/new_arkade > /dev/null 2>&1: + curl -L "${DOWNLOAD_URL}"/"${LATEST_VERSION}"/arkade --output $HOME/.arkade/new_arkade > /dev/null 2>&1 mv $HOME/.arkade/new_arkade $HOME/.arkade/bin/arkade chmod +x $HOME/.arkade/bin/arkade printf "\e[1;32m[OK]\e[m arkade has been updated to version $CURRENT_VERSION.\n" diff --git a/test/Azure/infra/main.tf b/test/Azure/infra/main.tf new file mode 100644 index 000000000..e69de29bb diff --git a/test/Azure/infra/output.tf b/test/Azure/infra/output.tf new file mode 100644 index 000000000..e69de29bb diff --git a/test/Azure/infra/provider.tf b/test/Azure/infra/provider.tf new file mode 100644 index 000000000..defd386d7 --- /dev/null +++ b/test/Azure/infra/provider.tf @@ -0,0 +1,27 @@ +### +### Provider part +### +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~>3.0" + } + } + backend "s3" { + // Nothing here can be variabilized + resource_group_name = "tfstate" + storage_account_name = "" + container_name = "tfstate" + key = "terraform.tfstate" + } +} + +provider "azurerm" { + features {} + + subscription_id = var.azure_subscription_id + client_id = var.azure_client_id + client_secret = var.azure_client_secret + tenant_id = var.azure_tenant_id +} diff --git a/test/Azure/infra/variables.tf b/test/Azure/infra/variables.tf new file mode 100644 index 000000000..8f9059ea3 --- /dev/null +++ b/test/Azure/infra/variables.tf @@ -0,0 
+1,85 @@ +variable "token" { + description = "Azure API Token" +} + +### s-2vcpu-4gb +variable "instance_size" { + type = string + description = "VM size" + default = "s-2vcpu-4gb" +} + +variable "controller_count" { + type = number + description = "number of controllers" + default = "1" +} + +variable "worker_count" { + type = number + description = "number of workers" + default = "2" +} + +variable "az_user" { + type = string + description = "user created on VM" + default = "terraform" +} + +variable "az_system" { + type = string + description = "os used for VM" + default = "rockylinux-8-x64" +} + +variable "domain" { + description = "Domain given to loadbalancer and VMs" + default = "rkub.com" +} + +variable "region" { + description = "Unique bucket name for storing terraform backend data" + default = "fra1" +} + +variable "airgap" { + description = "if airgap true, mount s3 bucket with rkub package" + default = "true" +} + +variable "GITHUB_RUN_ID" { + type = string + description = "github run id" + default = "quickstart" +} + +variable "terraform_backend_bucket_name" { + description = "Unique bucket name for storing terraform backend data" + default = "terraform-backend-rkub-quickstart" +} + +variable "mount_point" { + description = "Unique bucket name for storing terraform backend data" + default = "/opt/rkub" +} + +## +## Azure credentials +## + +variable "azure_subscription_id" { + description = "Azure Subscription ID" +} + +variable "azure_client_id" { + description = "Azure Client ID" +} + +variable "azure_client_secret" { + description = "Azure Client Secret" +} + +variable "azure_tenant_id" { + description = "Azure tenant ID" +} diff --git a/test/DO/README.md b/test/DO/README.md new file mode 100644 index 000000000..2be09b27c --- /dev/null +++ b/test/DO/README.md @@ -0,0 +1,121 @@ +## Description + +This terraform and tests are part of CI with github-actions. But here a small procedure to use it manually. 
 + +The purpose of this CI is to test the integration between RKE2, longhorn, rancher and neuvector. + +## Prerequisites + +On Digital Ocean account: + +- generate a PAT (private access token) +- a set of SSH keys +- Create a Space with an access_key and a secret key + +Add inside ./test a file .key with the private ssh key generated by DO. + +## Create/delete a bucket to store backend + +```bash +export DO_PAT="dop_v1_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + +# info +s3cmd ls +s3cmd info s3://github-action-8147167750 + +# create +s3cmd mb s3://github-action-8147167750 + +# delete +s3cmd rb s3://github-action-8147167750 --recursive + +# with terraform +cd ./test/DO/backend +terraform init +terraform plan -out=terraform.tfplan \ +-var "GITHUB_RUN_ID=${GITHUB_RUN_ID}" \ +-var "do_token=${DO_PAT}" \ +-var "spaces_access_key_id=${AWS_ACCESS_KEY_ID}" \ +-var "spaces_access_key_secret=${AWS_SECRET_ACCESS_KEY}" +``` + +## Create an infra + +```bash +export DO_PAT="dop_v1_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + +## +# init with backend config +terraform init --backend-config=./backend_config.hcl +# ./backend_config.hcl +# bucket="name" +# access_key="" +# secret_key="" + +# init with one line command +terraform init \ +-backend-config="access_key=$SPACES_ACCESS_TOKEN" \ +-backend-config="secret_key=$SPACES_SECRET_KEY" \ +-backend-config="bucket=terraform-backend-github" + +# recommended method +export AWS_ACCESS_KEY_ID=DOxxxxxxxxxxxxxxxx +export AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxx +terraform init -backend-config="bucket=terraform-backend-rkub-quickstart" + +# auto-approve (default: size=s-1vcpu-1gb, 1 controller + 2 workers) +terraform apply -var "GITHUB_RUN_ID=${GITHUB_RUN_ID}" -var "do_token=${DO_PAT}" -auto-approve + +# Deploy +terraform plan -destroy -out=terraform.tfplan \ +-var "GITHUB_RUN_ID=${GITHUB_RUN_ID}" \ +-var "do_token=${DO_PAT}" \ +-var "do_worker_count=0" \ +-var "do_controller_count=1" \ +-var "do_instance_size=s-1vcpu-1gb" \ +-var 
"spaces_access_key_id=${AWS_ACCESS_KEY_ID}" \ +-var "spaces_access_key_secret=${AWS_SECRET_ACCESS_KEY}" + +# Apply +terraform apply terraform.tfplan + +# Reconciliate +terraform plan -refresh-only -out=terraform.tfplan \ +-var "GITHUB_RUN_ID=${GITHUB_RUN_ID}" \ +-var "do_token=${DO_PAT}" \ +-var "do_worker_count=1" \ +-var "do_controller_count=3" \ +-var "do_instance_size=s-1vcpu-1gb" + +# connect to a controller +ssh root@$(terraform output -json ip_address_controllers | jq -r '.[0]') -i .key + +# connect to a worker +ssh root@$(terraform output -json ip_address_workers | jq -r '.[0]') -i .key + +# Destroy +terraform plan -destroy -out=terraform.tfplan \ +-var "GITHUB_RUN_ID=${GITHUB_RUN_ID}" \ +-var "do_token=${DO_PAT}" \ +-var "do_worker_count=1" \ +-var "do_controller_count=3" \ +-var "do_instance_size=s-1vcpu-1gb" + +# Apply destroy +terraform apply terraform.tfplan +``` + +## Use Workspace + +```bash +# Create a workspace +export GITHUB_RUN_ID="777" +terraform workspace new rkub-${GITHUB_RUN_ID} + +# Get back to a workspace +terraform workspace select rkub-${GITHUB_RUN_ID} + +# Delete Workspace +terraform workspace select default +terraform workspace delete rkub-${GITHUB_RUN_ID} +``` diff --git a/test/DO/ansible/.gitignore b/test/DO/ansible/.gitignore new file mode 100644 index 000000000..49d1ef281 --- /dev/null +++ b/test/DO/ansible/.gitignore @@ -0,0 +1 @@ +terraform.tfstate* diff --git a/test/DO/ansible/main.tf b/test/DO/ansible/main.tf new file mode 100644 index 000000000..1b33df2f3 --- /dev/null +++ b/test/DO/ansible/main.tf @@ -0,0 +1,92 @@ +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "~> 2.0" + } + } +} + +provider "digitalocean" { + token = var.do_token +} + +data "digitalocean_ssh_key" "terraform" { + name = "terraform" +} + +locals { + cloud_init_config = yamlencode({ + yum_repos = { + epel-release = { + name = "Extra Packages for Enterprise Linux 8 - Release" + baseurl = 
"http://download.fedoraproject.org/pub/epel/8/Everything/$basearch" + enabled = true + failovermethod = "priority" + gpgcheck = true + gpgkey = "http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8" + } + }, + packages = [ + "epel-release", + "s3fs-fuse", + "git", + "ansible", + "make" + ], + write_files = [{ + owner = "root:root" + path = "/etc/passwd-s3fs" + permissions = "0600" + content = "${var.spaces_access_key_id}:${var.spaces_access_key_secret}" + }], + runcmd = [ + "systemctl daemon-reload", + "mkdir -p ${var.mount_point}", + "s3fs ${var.terraform_backend_bucket_name} ${var.mount_point} -o url=https://${var.region}.digitaloceanspaces.com", + "echo \"s3fs#${var.terraform_backend_bucket_name} ${var.mount_point} fuse _netdev,allow_other,nonempty,use_cache=/tmp/cache,url=https://${var.region}.digitaloceanspaces.com 0 0\" >> /etc/fstab", + "systemctl daemon-reload", + "git clone ${var.repository} ~/rkub", + "cd ~/rkub && make prerequis", + "source ~/.bashrc", + "cd ~/rkub/test && ansible-playbook playbooks/build.yml -e dir_build=\"${var.mount_point}/package\" -e package_name=\"${var.mount_point}/rke2_rancher_longhorn.zst\"", + ] + }) +} + +# Convert our cloud-init config to userdata +# Userdata runs at first boot when the droplets are created +data "cloudinit_config" "server_config" { + gzip = false + base64_encode = false + part { + content_type = "text/cloud-config" + content = local.cloud_init_config + } +} + +resource "digitalocean_droplet" "ansible" { + image = "rockylinux-8-x64" + name = "ansible" + region = var.region + size = var.do_instance_size + ssh_keys = [ data.digitalocean_ssh_key.terraform.id ] + user_data = data.cloudinit_config.server_config.rendered +# connection { +# host = self.ipv4_address +# user = "root" +# type = "ssh" +# private_key = file(pathexpand(".key")) +# timeout = "2m" +# } +# provisioner "remote-exec" { +# inline = [ +# "ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -u root -vv --private-key .key 
../../playbooks/build.yml", +# ] +# } +} + +output "ip_address_ansible" { + value = digitalocean_droplet.ansible[*].ipv4_address + description = "The public IP address of your ansible server." +} diff --git a/test/DO/ansible/variables.tf b/test/DO/ansible/variables.tf new file mode 100644 index 000000000..7b01a2f1f --- /dev/null +++ b/test/DO/ansible/variables.tf @@ -0,0 +1,43 @@ +variable "do_token" { + description = "Digital Ocean API Token" +} + +variable "spaces_access_key_id" { + description = "Digital Ocean Spaces Access ID" +} + +variable "spaces_access_key_secret" { + description = "Digital Ocean Spaces Access Key" +} + +variable "repository" { + description = "Repository to be be clone inside VM" +} + +variable "GITHUB_RUN_ID" { + type = string + description = "github run id" + default = "test" +} + +variable "terraform_backend_bucket_name" { + description = "Unique bucket name for storing terraform backend data" + default = "terraform-backend-github" +} + +variable "mount_point" { + description = "Unique bucket name for storing terraform backend data" + default = "/mnt/rkub" +} + +### s-2vcpu-4gb +variable "do_instance_size" { + type = string + description = "VM size" + default = "s-1vcpu-1gb" +} + +variable "region" { + description = "Unique bucket name for storing terraform backend data" + default = "fra1" +} diff --git a/test/DO/backend/.gitignore b/test/DO/backend/.gitignore new file mode 100644 index 000000000..49d1ef281 --- /dev/null +++ b/test/DO/backend/.gitignore @@ -0,0 +1 @@ +terraform.tfstate* diff --git a/test/DO/backend/main.tf b/test/DO/backend/main.tf new file mode 100644 index 000000000..e005b7e5e --- /dev/null +++ b/test/DO/backend/main.tf @@ -0,0 +1,32 @@ +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "~> 2.0" + } + } +} + +provider "digitalocean" { + token = var.token + spaces_access_id = var.spaces_access_key_id + spaces_secret_key = var.spaces_access_key_secret +} + +resource 
"digitalocean_spaces_bucket" "terraform_backend" { + name = var.terraform_backend_bucket_name + region = var.region + force_destroy = true +} + +output "terraform_backend_bucket_domain_name" { + value = digitalocean_spaces_bucket.terraform_backend.bucket_domain_name +} + +output "terraform_backend_bucket_name" { + value = digitalocean_spaces_bucket.terraform_backend.name +} + +output "terraform_backend_bucket_region" { + value = digitalocean_spaces_bucket.terraform_backend.region +} diff --git a/test/DO/backend/variables.tf b/test/DO/backend/variables.tf new file mode 100644 index 000000000..73049fd1c --- /dev/null +++ b/test/DO/backend/variables.tf @@ -0,0 +1,27 @@ +variable "token" { + description = "Digital Ocean API Token" +} + +variable "spaces_access_key_id" { + description = "Digital Ocean Spaces Access ID" +} + +variable "spaces_access_key_secret" { + description = "Digital Ocean Spaces Access Key" +} + +variable "GITHUB_RUN_ID" { + type = string + description = "github run id" + default = "test" +} + +variable "terraform_backend_bucket_name" { + description = "Unique bucket name for storing terraform backend data" + default = "terraform-backend-rkub-quickstart" +} + +variable "region" { + description = "Unique bucket name for storing terraform backend data" + default = "fra1" +} diff --git a/test/DO/infra/.gitignore b/test/DO/infra/.gitignore new file mode 100644 index 000000000..49d1ef281 --- /dev/null +++ b/test/DO/infra/.gitignore @@ -0,0 +1 @@ +terraform.tfstate* diff --git a/test/DO/infra/local.tf b/test/DO/infra/local.tf new file mode 100644 index 000000000..9fefd6670 --- /dev/null +++ b/test/DO/infra/local.tf @@ -0,0 +1,73 @@ +# Local resources + +### +### Cloud-init +### + +# non-airgap +locals { + cloud_init_config = yamlencode({ + packages = [ + "ansible", + "make" + ] + }) +} + +# Convert our cloud-init config to userdata +# Userdata runs at first boot when the droplets are created +data "cloudinit_config" "server_config" { + gzip = false + 
base64_encode = false + part { + content_type = "text/cloud-config" + content = local.cloud_init_config + } +} + +# airgap +locals { + cloud_init_airgap_config = yamlencode({ + yum_repos = { + epel-release = { + name = "Extra Packages for Enterprise Linux 8 - Release" + baseurl = "http://download.fedoraproject.org/pub/epel/8/Everything/$basearch" + enabled = true + failovermethod = "priority" + gpgcheck = true + gpgkey = "http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8" + } + }, + packages = [ + "epel-release", + "s3fs-fuse", + "git", + "ansible", + "make" + ], + write_files = [{ + owner = "root:root" + path = "/etc/passwd-s3fs" + permissions = "0600" + content = "${var.spaces_access_key_id}:${var.spaces_access_key_secret}" + }], + runcmd = [ + "systemctl daemon-reload", + "mkdir -p ${var.mount_point}", + "s3fs ${var.terraform_backend_bucket_name} ${var.mount_point} -o url=https://${var.region}.digitaloceanspaces.com", + "echo \"s3fs#${var.terraform_backend_bucket_name} ${var.mount_point} fuse _netdev,allow_other,nonempty,use_cache=/tmp/cache,url=https://${var.region}.digitaloceanspaces.com 0 0\" >> /etc/fstab", + "systemctl daemon-reload", + ] + }) +} + +# Convert our cloud-init config to userdata +# Userdata runs at first boot when the droplets are created +data "cloudinit_config" "server_airgap_config" { + gzip = false + base64_encode = false + part { + content_type = "text/cloud-config" + content = local.cloud_init_airgap_config + } +} diff --git a/test/DO/infra/main.tf b/test/DO/infra/main.tf new file mode 100644 index 000000000..50cbabb44 --- /dev/null +++ b/test/DO/infra/main.tf @@ -0,0 +1,117 @@ +### +### SSH +### + +# Generate an SSH key pair +resource "tls_private_key" "global_key" { + algorithm = "RSA" + rsa_bits = 4096 +} + +# Save the public key to a local file +resource "local_file" "ssh_public_key" { + filename = "${path.module}/.key.pub" + content = tls_private_key.global_key.public_key_openssh +} + +# Save the private key to a local 
file +resource "local_sensitive_file" "ssh_private_key" { + filename = "${path.module}/.key.private" + content = tls_private_key.global_key.private_key_pem + file_permission = "0600" +} + +# Upload the public key to DigitalOcean +resource "digitalocean_ssh_key" "ssh_key" { + name = "rkub-${var.GITHUB_RUN_ID}-ssh" + public_key = tls_private_key.global_key.public_key_openssh +} + +### +### VPC +### +resource "digitalocean_vpc" "rkub-project-network" { + name = "rkub-${var.GITHUB_RUN_ID}-network" + region = var.region + + timeouts { + delete = "10m" + } +} + +# https://github.com/digitalocean/terraform-provider-digitalocean/issues/446 +resource "time_sleep" "wait_200_seconds_to_destroy" { + depends_on = [digitalocean_vpc.rkub-project-network] + destroy_duration = "200s" +} +resource "null_resource" "placeholder" { + depends_on = [time_sleep.wait_200_seconds_to_destroy] +} +# + +### +### Droplet INSTANCES +### + +# Droplet Instance for RKE2 Cluster - Manager +resource "digitalocean_droplet" "controllers" { + count = var.controller_count + image = var.do_system + name = "controller${count.index}.${var.domain}" + region = var.region + size = var.instance_size + tags = [ + "rkub-${var.GITHUB_RUN_ID}", + "controller", + "${var.do_system}_controllers", + ] + vpc_uuid = digitalocean_vpc.rkub-project-network.id + ssh_keys = [ digitalocean_ssh_key.ssh_key.fingerprint ] + # if airgap, S3 bucket is mounted on master to get the resources + user_data = var.airgap ? data.cloudinit_config.server_airgap_config.rendered : null +} + +output "ip_address_controllers" { + value = digitalocean_droplet.controllers[*].ipv4_address + description = "The public IP address of your rke2 controllers." 
+} + +# Droplet Instance for RKE2 Cluster - Workers +resource "digitalocean_droplet" "workers" { + count = var.worker_count + image = var.do_system + name = "worker${count.index}.${var.domain}" + region = var.region + size = var.instance_size + tags = [ + "rkub-${var.GITHUB_RUN_ID}", + "worker", + "${var.do_system}_workers", + ] + vpc_uuid = digitalocean_vpc.rkub-project-network.id + ssh_keys = [ digitalocean_ssh_key.ssh_key.fingerprint ] +} + +### +### Project +### +resource "digitalocean_project" "rkub" { + name = "rkub-${var.GITHUB_RUN_ID}" + description = "A CI project to test the Rkub development from github." + purpose = "Cluster k8s" + environment = "Staging" + resources = flatten([digitalocean_droplet.controllers.*.urn, digitalocean_droplet.workers.*.urn ]) +} + +### +### Generate the hosts.ini file +### +resource "local_file" "ansible_inventory" { + content = templatefile("../../inventory/hosts.tpl", + { + controller_ips = digitalocean_droplet.controllers[*].ipv4_address, + worker_ips = digitalocean_droplet.workers[*].ipv4_address + } + ) + filename = "../../inventory/hosts.ini" +} diff --git a/test/DO/infra/output.tf b/test/DO/infra/output.tf new file mode 100644 index 000000000..132922ee2 --- /dev/null +++ b/test/DO/infra/output.tf @@ -0,0 +1,7 @@ +### +### Display +### +output "ip_address_workers" { + value = digitalocean_droplet.workers[*].ipv4_address + description = "The public IP address of your rke2 workers." 
+} diff --git a/test/DO/infra/provider.tf b/test/DO/infra/provider.tf new file mode 100644 index 000000000..897a9a6b5 --- /dev/null +++ b/test/DO/infra/provider.tf @@ -0,0 +1,29 @@ +### +### Provider part +### +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "~> 2.0" + } + } + backend "s3" { + // Nothing here can be variabilized + skip_region_validation = true + skip_credentials_validation = true + skip_metadata_api_check = true + skip_requesting_account_id = true + use_path_style = true + skip_s3_checksum = true + endpoints = { + s3 = "https://fra1.digitaloceanspaces.com" + } + region = "fra1" + key = "terraform.tfstate" + } +} + +provider "digitalocean" { + token = var.token +} diff --git a/test/DO/infra/removed b/test/DO/infra/removed new file mode 100644 index 000000000..fe24d6916 --- /dev/null +++ b/test/DO/infra/removed @@ -0,0 +1,35 @@ +### +### LB / Domain / DNS +### + +resource "digitalocean_loadbalancer" "www-lb" { + name = "rkub-${var.GITHUB_RUN_ID}-lb" + region = var.region + + forwarding_rule { + entry_port = 80 + entry_protocol = "http" + + target_port = 80 + target_protocol = "http" + } + + healthcheck { + port = 22 + protocol = "tcp" + } + + droplet_ids = flatten([digitalocean_droplet.controllers.*.id]) + vpc_uuid = digitalocean_vpc.rkub-project-network.id +} +resource "digitalocean_domain" "rkub-domain" { + name = var.domain + ip_address = digitalocean_loadbalancer.www-lb.ip +} + +resource "digitalocean_record" "wildcard" { + domain = "${digitalocean_domain.rkub-domain.name}" + type = "A" + name = "*" + value = digitalocean_loadbalancer.www-lb.ip +} diff --git a/test/DO/infra/variables.tf b/test/DO/infra/variables.tf new file mode 100644 index 000000000..a2ca83b33 --- /dev/null +++ b/test/DO/infra/variables.tf @@ -0,0 +1,73 @@ +variable "token" { + description = "Digital Ocean API Token" +} + +### s-2vcpu-4gb +variable "instance_size" { + type = string + description = "VM size" + default = 
"s-2vcpu-4gb" +} + +variable "controller_count" { + type = number + description = "number of controllers" + default = "1" +} + +variable "worker_count" { + type = number + description = "number of workers" + default = "2" +} + +variable "do_user" { + type = string + description = "user created on droplet" + default = "terraform" +} + +variable "do_system" { + type = string + description = "os used for droplet" + default = "rockylinux-8-x64" +} + +variable "domain" { + description = "Domain given to loadbalancer and VMs" + default = "rkub.com" +} + +variable "region" { + description = "Unique bucket name for storing terraform backend data" + default = "fra1" +} + +variable "airgap" { + description = "if airgap true, mount s3 bucket with rkub package" + default = "true" +} + +variable "GITHUB_RUN_ID" { + type = string + description = "github run id" + default = "quickstart" +} + +variable "terraform_backend_bucket_name" { + description = "Unique bucket name for storing terraform backend data" + default = "terraform-backend-rkub-quickstart" +} + +variable "mount_point" { + description = "Unique bucket name for storing terraform backend data" + default = "/opt/rkub" +} + +variable "spaces_access_key_id" { + description = "Digital Ocean Spaces Access ID" +} + +variable "spaces_access_key_secret" { + description = "Digital Ocean Spaces Access Key" +} diff --git a/test/ansible.cfg b/test/ansible.cfg new file mode 100644 index 000000000..5f750f1eb --- /dev/null +++ b/test/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +remote_user = root +inventory = ./inventory/hosts.ini +roles_path = ../roles +host_key_checking = False +display_skipped_hosts = false +deprecation_warnings = false +force_color = True +stdout_callback = yaml +private_key_file = ./DO/infra/.key.private diff --git a/test/basic_agent_tests.py b/test/basic_agent_tests.py new file mode 100644 index 000000000..91263d86a --- /dev/null +++ b/test/basic_agent_tests.py @@ -0,0 +1,20 @@ +""" +basic_tests.py - Using 
testinfra to run tests on ansible playbook rke2-ansible +""" + +import testinfra + + +def test_rke2_config(host): + rke2_config = host.file("/etc/rancher/rke2/config.yaml") + assert rke2_config.contains("token:") + assert rke2_config.contains("server:") + assert rke2_config.user == "root" + assert rke2_config.group == "root" + assert rke2_config.mode == 0o640 + + +def test_rke2_server_running_and_enabled(host): + rke2_server = host.service("rke2-agent") + assert rke2_server.is_running + assert rke2_server.is_enabled diff --git a/test/basic_server_tests.py b/test/basic_server_tests.py new file mode 100644 index 000000000..bb4720129 --- /dev/null +++ b/test/basic_server_tests.py @@ -0,0 +1,18 @@ +""" +basic_tests.py - Using testinfra to run tests on ansible playbook rke2-ansible +""" + +import testinfra + + +def test_rke2_config(host): + rke2_config = host.file("/etc/rancher/rke2/config.yaml") + assert rke2_config.user == "root" + assert rke2_config.group == "root" + assert rke2_config.mode == 0o640 + + +def test_rke2_server_running_and_enabled(host): + rke2_server = host.service("rke2-server") + assert rke2_server.is_running + assert rke2_server.is_enabled diff --git a/test/inventory/hosts.tpl b/test/inventory/hosts.tpl new file mode 100644 index 000000000..aa2272710 --- /dev/null +++ b/test/inventory/hosts.tpl @@ -0,0 +1,18 @@ +# Generated with hosts.tpl +[all] +## ALL HOSTS +localhost ansible_connection=local + +[RKE2_CONTROLLERS] +%{ for idx, ip in controller_ips ~} +controller${idx} ansible_host=${ip} # Controller${idx} +%{ endfor ~} + +[RKE2_WORKERS] +%{ for idx, ip in worker_ips ~} +worker${idx} ansible_host=${ip} # Worker${idx} +%{ endfor ~} + +[RKE2_CLUSTER:children] +RKE2_CONTROLLERS +RKE2_WORKERS diff --git a/playbooks/tasks/build.yml b/test/playbooks/build.yml similarity index 55% rename from playbooks/tasks/build.yml rename to test/playbooks/build.yml index 407453fa2..842287cdb 100644 --- a/playbooks/tasks/build.yml +++ b/test/playbooks/build.yml @@ -3,7 
+3,7 @@ hosts: localhost connection: local gather_facts: false - vars_files: ../vars/main.yml + vars_files: ../../playbooks/vars/main.yml tags: build roles: - - {role: build_airgap_package, tags: package,} \ No newline at end of file + - {role: build_airgap_package, tags: package} diff --git a/test/playbooks/hauler_build.yml b/test/playbooks/hauler_build.yml new file mode 100644 index 000000000..8c2de888f --- /dev/null +++ b/test/playbooks/hauler_build.yml @@ -0,0 +1,8 @@ +--- +- name: Build RKE2 Package with Hauler + hosts: localhost + connection: local + gather_facts: false + vars_files: ../../playbooks/vars/main.yml + roles: + - {role: build_airgap_hauler, tags: hauler} diff --git a/test/playbooks/hauler_server.yml b/test/playbooks/hauler_server.yml new file mode 100644 index 000000000..a64a02eff --- /dev/null +++ b/test/playbooks/hauler_server.yml @@ -0,0 +1,7 @@ +--- +- name: Hauler Server + hosts: "{{ hauler_ip | default('RKE2_CONTROLLERS[0]') }}" + gather_facts: false + vars_files: ../../playbooks/vars/main.yml + roles: + - {role: deploy_hauler, tags: hauler} diff --git a/test/playbooks/install.yml b/test/playbooks/install.yml new file mode 100644 index 000000000..e06d0193b --- /dev/null +++ b/test/playbooks/install.yml @@ -0,0 +1,18 @@ +--- +- name: Install RKE2 Controlers + hosts: RKE2_CONTROLLERS + gather_facts: true + become: true + vars_files: ../../playbooks/vars/main.yml + tags: controller + roles: + - {role: install_rke2_controller, tags: rke2} + +- name: Install RKE2 Workers + hosts: RKE2_WORKERS + gather_facts: true + become: true + vars_files: ../../playbooks/vars/main.yml + tags: worker + roles: + - {role: install_rke2_worker, tags: rke2} diff --git a/test/playbooks/longhorn.yml b/test/playbooks/longhorn.yml new file mode 100644 index 000000000..3ef6b0551 --- /dev/null +++ b/test/playbooks/longhorn.yml @@ -0,0 +1,8 @@ +--- +- name: Install Longhorn + hosts: RKE2_CONTROLLERS:RKE2_WORKERS + gather_facts: false + vars_files: 
../../playbooks/vars/main.yml + tags: [ controller, worker ] + roles: + - {role: deploy_longhorn, tags: longhorn} diff --git a/test/playbooks/neuvector.yml b/test/playbooks/neuvector.yml new file mode 100644 index 000000000..265af6dd5 --- /dev/null +++ b/test/playbooks/neuvector.yml @@ -0,0 +1,8 @@ +--- +- name: Install Neuvector + hosts: RKE2_CONTROLLERS + gather_facts: false + vars_files: ../../playbooks/vars/main.yml + tags: [ controller ] + roles: + - {role: deploy_neuvector, tags: neuvector} diff --git a/playbooks/tasks/ping.yml b/test/playbooks/ping.yml similarity index 100% rename from playbooks/tasks/ping.yml rename to test/playbooks/ping.yml diff --git a/test/playbooks/rancher.yml b/test/playbooks/rancher.yml new file mode 100644 index 000000000..414c04d7c --- /dev/null +++ b/test/playbooks/rancher.yml @@ -0,0 +1,8 @@ +--- +- name: Install Rancher + hosts: RKE2_CONTROLLERS + gather_facts: false + vars_files: ../../playbooks/vars/main.yml + tags: [ controller ] + roles: + - {role: deploy_rancher, tags: rancher} diff --git a/playbooks/tasks/test.yml b/test/playbooks/test.yml similarity index 100% rename from playbooks/tasks/test.yml rename to test/playbooks/test.yml diff --git a/test/playbooks/uninstall.yml b/test/playbooks/uninstall.yml new file mode 100644 index 000000000..d957f0b4c --- /dev/null +++ b/test/playbooks/uninstall.yml @@ -0,0 +1,9 @@ +--- +- name: uninstall RKE2 + hosts: RKE2_CONTROLLERS:RKE2_WORKERS + gather_facts: false + become: true + vars_files: ../../playbooks/vars/main.yml + tags: controler, worker + roles: + - {role: uninstall_rkub, tags: uninstall} diff --git a/playbooks/tasks/upload.yml b/test/playbooks/upload.yml similarity index 57% rename from playbooks/tasks/upload.yml rename to test/playbooks/upload.yml index 14ecbd00c..e911f2557 100644 --- a/playbooks/tasks/upload.yml +++ b/test/playbooks/upload.yml @@ -1,8 +1,8 @@ +--- - name: Dowload Rkub package on first controler hosts: RKE2_CONTROLLERS[0] gather_facts: false - vars_files: 
- - ../vars/main.yml + vars_files: ../../playbooks/vars/main.yml tags: controler roles: - - {role: upload_package_zst, tags: upload,} + - {role: upload_package_zst, tags: upload}