diff --git a/.github/workflows/approve_renovate_pr.yaml b/.github/workflows/approve_renovate_pr.yaml index 45011157..d25bcafa 100644 --- a/.github/workflows/approve_renovate_pr.yaml +++ b/.github/workflows/approve_renovate_pr.yaml @@ -10,6 +10,6 @@ on: jobs: approve-pr: name: Approve Renovate pull request - uses: canonical/data-platform-workflows/.github/workflows/approve_renovate_pr.yaml@v31.0.1 + uses: canonical/data-platform-workflows/.github/workflows/approve_renovate_pr.yaml@v32.0.0 permissions: pull-requests: write # Needed to approve PR diff --git a/.github/workflows/check_pr.yaml b/.github/workflows/check_pr.yaml index 84c56d20..489b5549 100644 --- a/.github/workflows/check_pr.yaml +++ b/.github/workflows/check_pr.yaml @@ -15,4 +15,4 @@ on: jobs: check-pr: name: Check pull request - uses: canonical/data-platform-workflows/.github/workflows/check_charm_pr.yaml@v31.0.1 + uses: canonical/data-platform-workflows/.github/workflows/check_charm_pr.yaml@v32.0.0 diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 8b05f897..8b55c382 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -55,7 +55,7 @@ jobs: build: name: Build charm - uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v31.0.1 + uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v32.0.0 integration-test: name: Integration test charm diff --git a/.github/workflows/promote.yaml b/.github/workflows/promote.yaml index 9f05c68c..ffd5ac7e 100644 --- a/.github/workflows/promote.yaml +++ b/.github/workflows/promote.yaml @@ -25,7 +25,7 @@ on: jobs: promote: name: Promote charm - uses: canonical/data-platform-workflows/.github/workflows/_promote_charm.yaml@v31.0.1 + uses: canonical/data-platform-workflows/.github/workflows/_promote_charm.yaml@v32.0.0 with: track: 'dpe' from-risk: ${{ inputs.from-risk }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 2b16e17f..79831343 100644 --- 
a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,6 +1,6 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -name: Release to Charmhub +name: Release to Charmhub edge on: push: @@ -13,8 +13,18 @@ on: - '.github/workflows/sync_docs.yaml' jobs: + tag: + name: Create charm refresh compatibility version git tag + uses: canonical/data-platform-workflows/.github/workflows/tag_charm_edge.yaml@v32.0.0 + with: + track: 'dpe' + permissions: + contents: write # Needed to create git tag + ci-tests: name: Tests + needs: + - tag uses: ./.github/workflows/ci.yaml secrets: inherit permissions: @@ -23,12 +33,13 @@ jobs: release: name: Release charm needs: + - tag - ci-tests - uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v31.0.1 + uses: canonical/data-platform-workflows/.github/workflows/release_charm_edge.yaml@v32.0.0 with: - channel: dpe/edge + track: ${{ needs.tag.outputs.track }} artifact-prefix: ${{ needs.ci-tests.outputs.artifact-prefix }} secrets: charmhub-token: ${{ secrets.CHARMHUB_TOKEN }} permissions: - contents: write # Needed to create GitHub release + contents: write # Needed to create git tags diff --git a/.github/workflows/sync_docs.yaml b/.github/workflows/sync_docs.yaml index e22b7025..ef3637fd 100644 --- a/.github/workflows/sync_docs.yaml +++ b/.github/workflows/sync_docs.yaml @@ -10,7 +10,7 @@ on: jobs: sync-docs: name: Sync docs from Discourse - uses: canonical/data-platform-workflows/.github/workflows/sync_docs.yaml@v31.0.1 + uses: canonical/data-platform-workflows/.github/workflows/sync_docs.yaml@v32.0.0 with: reviewers: a-velasco permissions: diff --git a/actions.yaml b/actions.yaml index 0bedd9b2..2a70f26d 100644 --- a/actions.yaml +++ b/actions.yaml @@ -1,18 +1,61 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -resume-upgrade: - description: Upgrade remaining units (after you manually verified that upgraded units are healthy). 
- -force-upgrade: +pre-refresh-check: + description: Check if charm is ready to refresh + additionalProperties: false +force-refresh-start: description: | - Potential of *data loss* and *downtime* + Potential of data loss and downtime + + Force refresh of first unit + + Must run with at least one of the parameters `=false` + params: + check-compatibility: + type: boolean + default: true + description: | + Potential of data loss and downtime + + If `false`, force refresh if new version of Router and/or charm is not compatible with previous version + run-pre-refresh-checks: + type: boolean + default: true + description: | + Potential of data loss and downtime + + If `false`, force refresh if app is unhealthy or not ready to refresh (and unit status shows "Pre-refresh check failed") + check-workload-container: + type: boolean + default: true + description: | + Potential of data loss and downtime during and after refresh - Force upgrade of this unit. + If `false`, allow refresh to Router container version that has not been validated to work with the charm revision + additionalProperties: false +resume-refresh: + description: | + Refresh next unit(s) (after you have manually verified that refreshed units are healthy) + + If the `pause_after_unit_refresh` config is set to `all`, this action will refresh the next unit. + + If `pause_after_unit_refresh` is set to `first`, this action will refresh all remaining units. + Exception: if automatic health checks fail after a unit has refreshed, the refresh will pause. - Use to - - force incompatible upgrade and/or - - continue upgrade if 1+ upgraded units have non-active status + If `pause_after_unit_refresh` is set to `none`, this action will have no effect unless it is called with `check-health-of-refreshed-units` as `false`. 
+ params: + check-health-of-refreshed-units: + type: boolean + default: true + description: | + Potential of data loss and downtime + + If `false`, force refresh (of next unit) if 1 or more refreshed units are unhealthy + + Warning: if first unit to refresh is unhealthy, consider running `force-refresh-start` action on that unit instead of using this parameter. + If first unit to refresh is unhealthy because compatibility checks, pre-refresh checks, or workload container checks are failing, this parameter is more destructive than the `force-refresh-start` action. + additionalProperties: false set-tls-private-key: description: diff --git a/charm_version b/charm_version deleted file mode 100644 index d00491fd..00000000 --- a/charm_version +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/charmcraft.yaml b/charmcraft.yaml index ee962e66..5bc60da8 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -83,18 +83,20 @@ parts: files: plugin: dump source: . + after: + - poetry-deps # Ensure poetry is installed build-packages: - git override-build: | - # Workaround to add unique identifier (git hash) to charm version while specification - # DA053 - Charm versioning - # (https://docs.google.com/document/d/1Jv1jhWLl8ejK3iJn7Q3VbCIM9GIhp8926bgXpdtx-Sg/edit?pli=1) - # is pending review. 
- python3 -c 'import pathlib; import shutil; import subprocess; git_hash=subprocess.run(["git", "describe", "--always", "--dirty"], capture_output=True, check=True, encoding="utf-8").stdout; file = pathlib.Path("charm_version"); shutil.copy(file, pathlib.Path("charm_version.backup")); version = file.read_text().strip(); file.write_text(f"{version}+{git_hash}")' + # Set `charm_version` in refresh_versions.toml from git tag + # Create venv in `..` so that git working tree is not dirty + python3 -m venv ../refresh-version-venv + source ../refresh-version-venv/bin/activate + poetry install --only build-refresh-version + write-charm-version craftctl default stage: - LICENSE - - charm_version - - workload_version + - refresh_versions.toml - templates diff --git a/config.yaml b/config.yaml index d4fc8cdc..dfe2fe89 100644 --- a/config.yaml +++ b/config.yaml @@ -2,8 +2,14 @@ # See LICENSE file for licensing details. options: - vip: description: | Virtual IP to use to front mysql router units. Used only in case of external node connection. 
type: string + pause_after_unit_refresh: + description: | + Wait for manual confirmation to resume refresh after these units refresh + + Allowed values: "all", "first", "none" + type: string + default: first diff --git a/metadata.yaml b/metadata.yaml index 93f26668..9af31278 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -55,11 +55,8 @@ peers: interface: tls cos: interface: cos - upgrade-version-a: - # Relation versioning scheme: - # DA056 - Upgrading in-place upgrade protocol - # https://docs.google.com/document/d/1H7qy5SAwLiCOKO9xMQJbbQP5_-jGV6Lhi-mJOk4gZ08/edit - interface: upgrade + refresh-v-three: + interface: refresh # DEPRECATED shared-db: Workaround for legacy "mysql-shared" interface using unit databags instead of app databag deprecated-shared-db-credentials: interface: _deprecated_shared_db_peers diff --git a/poetry.lock b/poetry.lock index ae186651..7a63bb50 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. 
[[package]] name = "allure-pytest" @@ -54,7 +54,7 @@ version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, @@ -154,7 +154,7 @@ description = "Base class for creating enumerated constants that are also subcla optional = false python-versions = ">=3.8.6,<3.11" groups = ["integration"] -markers = "python_version < \"3.11\"" +markers = "python_version == \"3.10\"" files = [ {file = "backports_strenum-1.3.1-py3-none-any.whl", hash = "sha256:cdcfe36dc897e2615dc793b7d3097f54d359918fc448754a517e6f23044ccf83"}, {file = "backports_strenum-1.3.1.tar.gz", hash = "sha256:77c52407342898497714f0596e86188bb7084f89063226f4ba66863482f42414"}, @@ -291,6 +291,71 @@ markers = {charm-libs = "platform_python_implementation != \"PyPy\""} [package.dependencies] pycparser = "*" +[[package]] +name = "charm-api" +version = "0.1.1" +description = "" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "charm_api-0.1.1-py3-none-any.whl", hash = "sha256:2fb02cee06a198e025a9a25f9e2b80bdecac62e07f0e0b9dca217031328184aa"}, + {file = "charm_api-0.1.1.tar.gz", hash = "sha256:8e55e6ae4b484548a6c48eb83d68af39a77910c1aff9596b13ddc7c1e319fabc"}, +] + +[[package]] +name = "charm-json" +version = "0.1.1" +description = "" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "charm_json-0.1.1-py3-none-any.whl", hash = "sha256:a3fac62d45821d1a8c14058632e21333ec4e2cd41d0d00d6a73d00fc9a656eef"}, + {file = "charm_json-0.1.1.tar.gz", hash = "sha256:cb2eb24f6135d226ad04b0a17288ca2e027160d8af288083ef701bf4b137154e"}, +] + +[package.dependencies] 
+charm-api = ">=0.1.1" + +[[package]] +name = "charm-refresh" +version = "3.0.0.6" +description = "In-place rolling refreshes (upgrades) of stateful charmed applications " +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "charm_refresh-3.0.0.6-py3-none-any.whl", hash = "sha256:36e1736b08358b02833bfa55e21a05d6f9493a447457ebeef82575a599fe7f90"}, + {file = "charm_refresh-3.0.0.6.tar.gz", hash = "sha256:a97fc970f2c1393b749047b5088225acffbd5b4fea1dce493f31e45ebceb4117"}, +] + +[package.dependencies] +charm-api = ">=0.1.1" +charm-json = ">=0.1.1" +httpx = ">=0.28.1" +lightkube = ">=0.15.4" +ops = ">=2.9.0" +packaging = ">=24.1" +pyyaml = ">=6.0.2" +tomli = ">=2.0.1" + +[[package]] +name = "charm-refresh-build-version" +version = "0.2.0" +description = "Write `charm` version in refresh_versions.toml from git tag" +optional = false +python-versions = ">=3.8" +groups = ["build-refresh-version"] +files = [ + {file = "charm_refresh_build_version-0.2.0-py3-none-any.whl", hash = "sha256:5a6965772e74549dddfa91eb6c9114605eb9f437ef98e610d7fb428bbd3a934c"}, + {file = "charm_refresh_build_version-0.2.0.tar.gz", hash = "sha256:1ec97659f669f18fc1ff759b5e535ea4dba716c509116b05d8dc4d1f3e7d49a8"}, +] + +[package.dependencies] +dunamai = ">=1.23.1" +tomlkit = ">=0.13.2" + [[package]] name = "charset-normalizer" version = "3.3.2" @@ -590,14 +655,29 @@ wrapt = ">=1.10,<2" [package.extras] dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] +[[package]] +name = "dunamai" +version = "1.23.2" +description = "Dynamic version generation" +optional = false +python-versions = ">=3.5" +groups = ["build-refresh-version"] +files = [ + {file = "dunamai-1.23.2-py3-none-any.whl", hash = "sha256:ba2e3db8045a5bc5c4b9e2654cac3710a0980ad13e025c80fe640592a8ffab9d"}, + {file = "dunamai-1.23.2.tar.gz", hash = "sha256:df71e6de961f715579252011f94982ca864f2120c195c15122f5fd6ad436682f"}, +] + +[package.dependencies] +packaging = ">=20.9" + [[package]] name 
= "exceptiongroup" version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" -groups = ["charm-libs", "integration", "unit"] -markers = "python_version < \"3.11\"" +groups = ["main", "charm-libs", "integration", "unit"] +markers = "python_version == \"3.10\"" files = [ {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, @@ -684,7 +764,7 @@ version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.8" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, @@ -696,7 +776,7 @@ version = "1.0.9" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, @@ -718,7 +798,7 @@ version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -787,26 +867,6 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link perf = ["ipython"] testing = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\"", "pytest-perf (>=0.9.2)", "pytest-ruff"] -[[package]] -name = "importlib-resources" -version = "6.4.0" -description = "Read resources from Python packages" -optional = false -python-versions = ">=3.8" -groups = ["charm-libs"] -markers = "python_version < \"3.9\"" -files = [ - {file = "importlib_resources-6.4.0-py3-none-any.whl", hash = "sha256:50d10f043df931902d4194ea07ec57960f66a80449ff867bfe782b4c486ba78c"}, - {file = "importlib_resources-6.4.0.tar.gz", hash = "sha256:cdb2b453b8046ca4e3798eb1d84f3cce1446a0e8e7b5ef4efb600f19fc398145"}, -] - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["jaraco.test (>=5.4)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy ; platform_python_implementation != \"PyPy\"", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] - [[package]] name = "iniconfig" version = "2.0.0" @@ -861,7 +921,6 @@ prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" pygments = ">=2.4.0" stack-data = "*" 
traitlets = ">=5" -typing-extensions = {version = "*", markers = "python_version < \"3.10\""} [package.extras] all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] @@ -928,9 +987,7 @@ files = [ [package.dependencies] attrs = ">=22.2.0" -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} jsonschema-specifications = ">=2023.03.6" -pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} referencing = ">=0.28.4" rpds-py = ">=0.7.1" @@ -951,7 +1008,6 @@ files = [ ] [package.dependencies] -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} referencing = ">=0.31.0" [[package]] @@ -1018,7 +1074,7 @@ version = "0.17.1" description = "Lightweight kubernetes client library" optional = false python-versions = "*" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "lightkube-0.17.1-py3-none-any.whl", hash = "sha256:3d046c2c46191b3745471763710ef4ed2df4259a7405f798b577df2ae390358a"}, {file = "lightkube-0.17.1.tar.gz", hash = "sha256:e0d6b71476a4fa7cbda7080da1f0943e43c7e747212db9f2ec7d87415bf8d23e"}, @@ -1038,7 +1094,7 @@ version = "1.32.0.8" description = "Models and Resources for lightkube module" optional = false python-versions = "*" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "lightkube-models-1.32.0.8.tar.gz", hash = "sha256:97f6c2ab554a23a69554dd56ffbd94173fb416af6490c3a21b1e0b8e13a2bafe"}, {file = "lightkube_models-1.32.0.8-py3-none-any.whl", hash = "sha256:73786dac63085521f4c88aa69d86bfdc76a67da997c1770e5bdcef8482e4b2a0"}, @@ -1360,7 +1416,7 @@ version = "24.1" description = "Core utilities for Python packages" optional = 
false python-versions = ">=3.8" -groups = ["integration", "unit"] +groups = ["main", "build-refresh-version", "integration", "unit"] files = [ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, @@ -1432,19 +1488,6 @@ files = [ {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, ] -[[package]] -name = "pkgutil-resolve-name" -version = "1.3.10" -description = "Resolve a name to an object." -optional = false -python-versions = ">=3.6" -groups = ["charm-libs"] -markers = "python_version < \"3.9\"" -files = [ - {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, - {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, -] - [[package]] name = "pluggy" version = "1.5.0" @@ -1852,63 +1895,65 @@ files = [ [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" groups = ["main", "charm-libs", "integration", "unit"] files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = 
"PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = 
"PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -2137,7 +2182,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" -groups = ["charm-libs"] +groups = ["main", "charm-libs"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -2181,16 +2226,69 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.2.1" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" -groups = ["integration", "unit"] +python-versions = ">=3.8" 
+groups = ["main", "integration", "unit"] +files = [ + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = 
"tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] + +[[package]] +name = "tomli-w" +version = "1.2.0" +description = "A lil' TOML writer" +optional = false +python-versions = ">=3.9" +groups = ["integration"] +files = [ + {file = "tomli_w-1.2.0-py3-none-any.whl", hash = "sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90"}, + {file = "tomli_w-1.2.0.tar.gz", hash = "sha256:2dd14fac5a47c27be9cd4c976af5a12d87fb1f0b4512f81d69cce3b35ae25021"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.2" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +groups = ["build-refresh-version"] files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = 
"sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, ] -markers = {integration = "python_version < \"3.11\"", unit = "python_full_version <= \"3.11.0a6\""} [[package]] name = "toposort" @@ -2226,11 +2324,12 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" -groups = ["charm-libs", "integration"] +groups = ["main", "charm-libs", "integration"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +markers = {main = "python_version == \"3.10\""} [[package]] name = "typing-inspect" @@ -2489,5 +2588,5 @@ test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.funct [metadata] lock-version = "2.1" -python-versions = "^3.8.6" -content-hash = "b1b2343a3fd7fde050c0b57530703da1686dbeb169c934f7b3fd7697a1d0c531" +python-versions = "^3.10" +content-hash = "6d27463ae27a9d37d615e3ed41f02bce55b023bdf6882c8a36d45f1d77da2918" diff --git a/pyproject.toml b/pyproject.toml index a35a5c93..3a57f623 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,13 +6,14 @@ package-mode = false requires-poetry = ">=2.0.0" [tool.poetry.dependencies] -python = "^3.8.6" # ^3.8.6 required by juju +python = "^3.10" # breaking change in ops 2.10.0: https://github.com/canonical/operator/pull/1091#issuecomment-1888644075 ops = "^2.9.0, <2.10" tenacity = "^9.0.0" poetry-core = "^1.9.1" jinja2 = "^3.1.4" requests = "^2.32.3" +charm-refresh = "^3.0.0.6" [tool.poetry.group.charm-libs.dependencies] # data_platform_libs/v0/data_interfaces.py @@ -54,6 +55,14 @@ mysql-connector-python = "~8.0.33" tenacity = "^9.0.0" ops = "^2.9.0" allure-pytest-default-results = "^0.1.2" +tomli = "^2.2.1" +tomli-w = "^1.2.0" + +[tool.poetry.group.build-refresh-version] +optional = true 
+ +[tool.poetry.group.build-refresh-version.dependencies] +charm-refresh-build-version = "^0.2.0" [tool.coverage.run] branch = true diff --git a/refresh_versions.toml b/refresh_versions.toml new file mode 100644 index 00000000..c00c8fce --- /dev/null +++ b/refresh_versions.toml @@ -0,0 +1,13 @@ +# https://canonical-charm-refresh.readthedocs-hosted.com/latest/refresh-versions-toml/ + +charm_major = 1 +workload = "8.0.41" + +[snap] +name = "charmed-mysql" + +[snap.revisions] +# amd64 +x86_64 = "139" +# arm64 +aarch64 = "138" diff --git a/src/abstract_charm.py b/src/abstract_charm.py index d7d40011..16469a4e 100644 --- a/src/abstract_charm.py +++ b/src/abstract_charm.py @@ -4,26 +4,69 @@ """MySQL Router charm""" import abc +import dataclasses import logging import typing +import charm_refresh import ops import container import lifecycle import logrotate -import machine_upgrade import relations.cos import relations.database_provides import relations.database_requires import relations.tls import server_exceptions -import upgrade import workload logger = logging.getLogger(__name__) +@dataclasses.dataclass(eq=False) +class RouterRefresh(charm_refresh.CharmSpecificCommon, abc.ABC): + """MySQL Router refresh callbacks & configuration""" + + @staticmethod + def run_pre_refresh_checks_after_1_unit_refreshed() -> None: + pass + + @classmethod + def is_compatible( + cls, + *, + old_charm_version: charm_refresh.CharmVersion, + new_charm_version: charm_refresh.CharmVersion, + old_workload_version: str, + new_workload_version: str, + ) -> bool: + # Check charm version compatibility + if not super().is_compatible( + old_charm_version=old_charm_version, + new_charm_version=new_charm_version, + old_workload_version=old_workload_version, + new_workload_version=new_workload_version, + ): + return False + + # Check workload version compatibility + old_major, old_minor, old_patch = ( + int(component) for component in old_workload_version.split(".") + ) + new_major, new_minor, new_patch = 
( + int(component) for component in new_workload_version.split(".") + ) + if old_major != new_major: + return False + if new_minor > old_minor: + return True + elif new_minor == old_minor: + return new_patch >= old_patch + else: + return False + + class MySQLRouterCharm(ops.CharmBase, abc.ABC): """MySQL Router charm""" @@ -32,6 +75,14 @@ class MySQLRouterCharm(ops.CharmBase, abc.ABC): _READ_WRITE_X_PORT = 6448 _READ_ONLY_X_PORT = 6449 + refresh: charm_refresh.Common + # Whether `reconcile` method is allowed to run + # `False` if `charm_refresh.UnitTearingDown` or `charm_refresh.PeerRelationNotReady` raised + # Most of the charm code should not run if either of those exceptions is raised + # However, some charm libs (i.e. data-platform-libs) will break if they do not receive every + # event they expect (e.g. relation-created) + _reconcile_allowed: bool + def __init__(self, *args) -> None: super().__init__(*args) # Instantiate before registering other event observers @@ -40,33 +91,19 @@ def __init__(self, *args) -> None: ) self._workload_type = workload.Workload - self._authenticated_workload_type = workload.AuthenticatedWorkload + self._running_workload_type = workload.RunningWorkload self._database_requires = relations.database_requires.RelationEndpoint(self) self._database_provides = relations.database_provides.RelationEndpoint(self) self._cos_relation = relations.cos.COSRelation(self, self._container) self._ha_cluster = None - self.framework.observe(self.on.update_status, self.reconcile) - self.framework.observe( - self.on[upgrade.PEER_RELATION_ENDPOINT_NAME].relation_changed, self.reconcile - ) - self.framework.observe( - self.on[upgrade.RESUME_ACTION_NAME].action, self._on_resume_upgrade_action - ) - # (For Kubernetes) Reset partition after scale down - self.framework.observe( - self.on[upgrade.PEER_RELATION_ENDPOINT_NAME].relation_departed, self.reconcile - ) - # Handle upgrade & set status on first start if no relations active - 
self.framework.observe(self.on.start, self.reconcile) - # Update app status - self.framework.observe(self.on.leader_elected, self.reconcile) - # Set versions in upgrade peer relation app databag - self.framework.observe( - self.on[upgrade.PEER_RELATION_ENDPOINT_NAME].relation_created, - self._upgrade_relation_created, - ) self.tls = relations.tls.RelationEndpoint(self) + # Observe all events (except custom events) + for bound_event in self.on.events().values(): + if bound_event.event_type == ops.CollectStatusEvent: + continue + self.framework.observe(bound_event, self.reconcile) + @property @abc.abstractmethod def _subordinate_relation_endpoint_names(self) -> typing.Optional[typing.Iterable[str]]: @@ -80,11 +117,6 @@ def _subordinate_relation_endpoint_names(self) -> typing.Optional[typing.Iterabl def _container(self) -> container.Container: """Workload container (snap or rock)""" - @property - @abc.abstractmethod - def _upgrade(self) -> typing.Optional[upgrade.Upgrade]: - pass - @property @abc.abstractmethod def _logrotate(self) -> logrotate.LogRotate: @@ -162,10 +194,17 @@ def _cos_exporter_config(self, event) -> typing.Optional[relations.cos.ExporterC if cos_relation_exists: return self._cos_relation.exporter_user_config - def get_workload(self, *, event): - """MySQL Router workload""" - if connection_info := self._database_requires.get_connection_info(event=event): - return self._authenticated_workload_type( + def get_workload(self, *, event, refresh: charm_refresh.Common = None): + """MySQL Router workload + + Pass `refresh` if `self.refresh` is not set + """ + if refresh is None: + refresh = self.refresh + if refresh.workload_allowed_to_start and ( + connection_info := self._database_requires.get_connection_info(event=event) + ): + return self._running_workload_type( container_=self._container, logrotate_=self._logrotate, connection_info=connection_info, @@ -198,11 +237,8 @@ def _prioritize_statuses(statuses: typing.List[ops.StatusBase]) -> ops.StatusBas def 
_determine_app_status(self, *, event) -> ops.StatusBase: """Report app status.""" - if self._upgrade and (upgrade_status := self._upgrade.app_status): - # Upgrade status should take priority over relation status—even if the status level is - # normally lower priority. - # (Relations should not be modified during upgrade.) - return upgrade_status + if self.refresh.app_status_higher_priority: + return self.refresh.app_status_higher_priority statuses = [] if self._status: statuses.append(self._status) @@ -213,14 +249,21 @@ def _determine_app_status(self, *, event) -> ops.StatusBase: def _determine_unit_status(self, *, event) -> ops.StatusBase: """Report unit status.""" + if self.refresh.unit_status_higher_priority: + return self.refresh.unit_status_higher_priority statuses = [] - workload_status = self.get_workload(event=event).status - if self._upgrade: - statuses.append(self._upgrade.get_unit_juju_status(workload_status=workload_status)) + workload_ = self.get_workload(event=event) + if status := workload_.status: + statuses.append(status) # only in machine charms if self._ha_cluster: - statuses.append(self._ha_cluster.get_unit_juju_status()) - statuses.append(workload_status) + if status := self._ha_cluster.get_unit_juju_status(): + statuses.append(status) + refresh_lower_priority = self.refresh.unit_status_lower_priority( + workload_is_running=isinstance(workload_, workload.RunningWorkload) + ) + if (not statuses or statuses == [ops.WaitingStatus()]) and refresh_lower_priority: + return refresh_lower_priority return self._prioritize_statuses(statuses) def set_status(self, *, event, app=True, unit=True) -> None: @@ -261,67 +304,30 @@ def _update_endpoints(self) -> None: # Handlers # ======================= - def _upgrade_relation_created(self, _) -> None: - if self._unit_lifecycle.authorized_leader: - # `self._upgrade.is_compatible` should return `True` during first charm - # installation/setup - self._upgrade.set_versions_in_app_databag() - def reconcile(self, 
event=None) -> None: # noqa: C901 """Handle most events.""" - if not self._upgrade: - logger.debug("Peer relation not available") - return - if not self._upgrade.versions_set: - logger.debug("Peer relation not ready") + if not self._reconcile_allowed: + logger.debug("Reconcile not allowed") return workload_ = self.get_workload(event=event) - if self._unit_lifecycle.authorized_leader and not self._upgrade.in_progress: - # Run before checking `self._upgrade.is_compatible` in case incompatible upgrade was - # forced & completed on all units. - # Side effect: on machines, if charm was upgraded to a charm with the same snap - # revision, compatibility checks will be skipped. - # (The only real use case for this would be upgrading the charm code to an incompatible - # version without upgrading the snap. In that situation, the upgrade may appear - # successful and the user will not be notified of the charm incompatibility. This case - # is much less likely than the forced incompatible upgrade & the impact is not as bad - # as the impact if we did not handle the forced incompatible upgrade case.) - self._upgrade.set_versions_in_app_databag() - if self._upgrade.unit_state is upgrade.UnitState.RESTARTING: # Kubernetes only - if not self._upgrade.is_compatible: - logger.info( - "Upgrade incompatible. If you accept potential *data loss* and *downtime*, you can continue with `resume-upgrade force=true`" - ) - self.unit.status = ops.BlockedStatus( - "Upgrade incompatible. 
Rollback to previous revision with `juju refresh`" - ) - self.set_status(event=event, unit=False) - return - elif isinstance(self._upgrade, machine_upgrade.Upgrade): # Machines only - if not self._upgrade.is_compatible: - self.set_status(event=event) - return - if self._upgrade.unit_state is upgrade.UnitState.OUTDATED: - if self._upgrade.authorized: - self._upgrade.upgrade_unit( - event=event, - workload_=workload_, - tls=self._tls_certificate_saved, - exporter_config=self._cos_exporter_config(event), - ) - else: - self.set_status(event=event) - logger.debug("Waiting to upgrade") - return logger.debug( "State of reconcile " f"{self._unit_lifecycle.authorized_leader=}, " - f"{isinstance(workload_, workload.AuthenticatedWorkload)=}, " + f"{isinstance(workload_, workload.RunningWorkload)=}, " f"{workload_.container_ready=}, " + f"{self.refresh.workload_allowed_to_start=}, " f"{self._database_requires.is_relation_breaking(event)=}, " - f"{self._upgrade.in_progress=}, " + f"{self._database_requires.does_relation_exist()=}, " + f"{self.refresh.in_progress=}, " f"{self._cos_relation.is_relation_breaking(event)=}" ) + if isinstance(self.refresh, charm_refresh.Machines): + workload_.install( + unit=self.unit, + model_uuid=self.model.uuid, + snap_revision=self.refresh.pinned_snap_revision, + refresh=self.refresh, + ) # only in machine charms if self._ha_cluster: @@ -330,14 +336,14 @@ def reconcile(self, event=None) -> None: # noqa: C901 try: if self._unit_lifecycle.authorized_leader: if self._database_requires.is_relation_breaking(event): - if self._upgrade.in_progress: + if self.refresh.in_progress: logger.warning( - "Modifying relations during an upgrade is not supported. The charm may be in a broken, unrecoverable state. Re-deploy the charm" + "Modifying relations during a refresh is not supported. The charm may be in a broken, unrecoverable state. 
Re-deploy the charm" ) self._database_provides.delete_all_databags() elif ( - not self._upgrade.in_progress - and isinstance(workload_, workload.AuthenticatedWorkload) + not self.refresh.in_progress + and isinstance(workload_, workload.RunningWorkload) and workload_.container_ready ): self._reconcile_service() @@ -361,33 +367,29 @@ def reconcile(self, event=None) -> None: # noqa: C901 certificate=self._tls_certificate, certificate_authority=self._tls_certificate_authority, ) - if not self._upgrade.in_progress and isinstance( - workload_, workload.AuthenticatedWorkload + if not self.refresh.in_progress and isinstance( + workload_, workload.RunningWorkload ): self._reconcile_ports(event=event) - # Empty waiting status means we're waiting for database requires relation before - # starting workload - if not workload_.status or workload_.status == ops.WaitingStatus(): - self._upgrade.unit_state = upgrade.UnitState.HEALTHY - if self._unit_lifecycle.authorized_leader: - self._upgrade.reconcile_partition() + logger.debug(f"{workload_.status=}") + if not workload_.status: + self.refresh.next_unit_allowed_to_refresh = True + elif ( + self.refresh.workload_allowed_to_start and workload_.status == ops.WaitingStatus() + ): + # During scale up, this code should not be reached before the first + # relation-created event is received on this unit since otherwise + # `charm_refresh.PeerRelationNotReady` would be raised + if self._database_requires.does_relation_exist(): + # Waiting for relation-changed event before starting workload + pass + else: + # Waiting for database requires relation; refresh can continue + self.refresh.next_unit_allowed_to_refresh = True self.set_status(event=event) except server_exceptions.Error as e: # If not for `unit=False`, another `server_exceptions.Error` could be thrown here self.set_status(event=event, unit=False) self.unit.status = e.status logger.debug(f"Set unit status to {self.unit.status}") - - def _on_resume_upgrade_action(self, event: 
ops.ActionEvent) -> None: - if not self._unit_lifecycle.authorized_leader: - message = f"Must run action on leader unit. (e.g. `juju run {self.app.name}/leader {upgrade.RESUME_ACTION_NAME}`)" - logger.debug(f"Resume upgrade event failed: {message}") - event.fail(message) - return - if not self._upgrade or not self._upgrade.in_progress: - message = "No upgrade in progress" - logger.debug(f"Resume upgrade event failed: {message}") - event.fail(message) - return - self._upgrade.reconcile_partition(action_event=event) diff --git a/src/charm.py b/src/charm.py index 4b3aecb3..159ba137 100755 --- a/src/charm.py +++ b/src/charm.py @@ -13,33 +13,57 @@ if is_wrong_architecture() and __name__ == "__main__": ops.main.main(WrongArchitectureWarningCharm) +import dataclasses import logging import socket import typing +import charm_refresh +import ops.log import tenacity from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm import abstract_charm import logrotate import machine_logrotate -import machine_upgrade import machine_workload import relations.database_providers_wrapper import relations.hacluster import snap -import upgrade import workload logger = logging.getLogger(__name__) +logging.getLogger("httpx").setLevel(logging.WARNING) +logging.getLogger("httpcore").setLevel(logging.WARNING) + + +@dataclasses.dataclass(eq=False) +class _MachinesRouterRefresh(abstract_charm.RouterRefresh, charm_refresh.CharmSpecificMachines): + _charm: abstract_charm.MySQLRouterCharm + + def refresh_snap( + self, *, snap_name: str, snap_revision: str, refresh: charm_refresh.Machines + ) -> None: + # TODO: issue on relation-broken event since event not passed? mitigated by regular event handler? 
+ self._charm.get_workload(event=None, refresh=refresh).refresh( + event=None, + unit=self._charm.unit, + model_uuid=self._charm.model.uuid, + snap_revision=snap_revision, + refresh=refresh, + tls=self._charm._tls_certificate_saved, + exporter_config=self._charm._cos_exporter_config(event=None), + ) + # `reconcile()` will run on every event, which will set + # `refresh.next_unit_allowed_to_refresh = True` + # (This method will run in the charm's __init__, before `reconcile()` is called by ops) @trace_charm( tracing_endpoint="tracing_endpoint", extra_types=( logrotate.LogRotate, - machine_upgrade.Upgrade, - machine_workload.AuthenticatedMachineWorkload, + machine_workload.RunningMachineWorkload, relations.cos.COSRelation, relations.database_providers_wrapper.RelationEndpoint, relations.database_requires.RelationEndpoint, @@ -53,19 +77,37 @@ class MachineSubordinateRouterCharm(abstract_charm.MySQLRouterCharm): def __init__(self, *args) -> None: super().__init__(*args) + # Show logger name (module name) in logs + root_logger = logging.getLogger() + for handler in root_logger.handlers: + if isinstance(handler, ops.log.JujuLogHandler): + handler.setFormatter(logging.Formatter("{name}:{message}", style="{")) + # DEPRECATED shared-db: Enable legacy "mysql-shared" interface self._database_provides = relations.database_providers_wrapper.RelationEndpoint( self, self._database_provides ) - self._authenticated_workload_type = machine_workload.AuthenticatedMachineWorkload + self._running_workload_type = machine_workload.RunningMachineWorkload self._ha_cluster = relations.hacluster.HACluster(self) - self.framework.observe(self.on.install, self._on_install) - self.framework.observe(self.on.remove, self._on_remove) - self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm) - self.framework.observe( - self.on[machine_upgrade.FORCE_ACTION_NAME].action, self._on_force_upgrade_action - ) - self.framework.observe(self.on.config_changed, self.reconcile) + try: + 
self.refresh = charm_refresh.Machines( + _MachinesRouterRefresh( + workload_name="Router", charm_name="mysql-router", _charm=self + ) + ) + except charm_refresh.UnitTearingDown: + # MySQL server charm will clean up users & router metadata when the MySQL Router app or + # unit(s) tear down + self.unit.status = ops.MaintenanceStatus("Tearing down") + snap.uninstall() + self._reconcile_allowed = False + except charm_refresh.PeerRelationNotReady: + self.unit.status = ops.MaintenanceStatus("Waiting for peer relation") + if self.unit.is_leader(): + self.app.status = ops.MaintenanceStatus("Waiting for peer relation") + self._reconcile_allowed = False + else: + self._reconcile_allowed = True @property def _subordinate_relation_endpoint_names(self) -> typing.Optional[typing.Iterable[str]]: @@ -78,13 +120,6 @@ def _subordinate_relation_endpoint_names(self) -> typing.Optional[typing.Iterabl def _container(self) -> snap.Snap: return snap.Snap(unit_name=self.unit.name) - @property - def _upgrade(self) -> typing.Optional[machine_upgrade.Upgrade]: - try: - return machine_upgrade.Upgrade(self) - except upgrade.PeerRelationNotReady: - pass - @property def _status(self) -> ops.StatusBase: pass @@ -180,56 +215,6 @@ def wait_until_mysql_router_ready(self, *, event) -> None: else: logger.debug("MySQL Router is ready") - # ======================= - # Handlers - # ======================= - - def _on_install(self, _) -> None: - snap.install(unit=self.unit, model_uuid=self.model.uuid) - self.unit.set_workload_version(self.get_workload(event=None).version) - - def _on_remove(self, _) -> None: - snap.uninstall() - - def _on_upgrade_charm(self, _) -> None: - if self._unit_lifecycle.authorized_leader: - if not self._upgrade.in_progress: - logger.info("Charm upgraded. 
MySQL Router version unchanged") - self._upgrade.upgrade_resumed = False - # Only call `reconcile` on leader unit to avoid race conditions with `upgrade_resumed` - self.reconcile() - - def _on_resume_upgrade_action(self, event: ops.ActionEvent) -> None: - super()._on_resume_upgrade_action(event) - # If next to upgrade, upgrade leader unit - self.reconcile() - - def _on_force_upgrade_action(self, event: ops.ActionEvent) -> None: - if not self._upgrade or not self._upgrade.in_progress: - message = "No upgrade in progress" - logger.debug(f"Force upgrade event failed: {message}") - event.fail(message) - return - if not self._upgrade.upgrade_resumed: - message = f"Run `juju run {self.app.name}/leader resume-upgrade` before trying to force upgrade" - logger.debug(f"Force upgrade event failed: {message}") - event.fail(message) - return - if self._upgrade.unit_state is not upgrade.UnitState.OUTDATED: - message = "Unit already upgraded" - logger.debug(f"Force upgrade event failed: {message}") - event.fail(message) - return - - logger.warning("Forcing upgrade") - event.log(f"Forcefully upgrading {self.unit.name}") - self._upgrade.upgrade_unit( - event=event, workload_=self.get_workload(event=None), tls=self._tls_certificate_saved - ) - self.reconcile() - event.set_results({"result": f"Forcefully upgraded {self.unit.name}"}) - logger.warning("Forced upgrade") - if __name__ == "__main__": ops.main.main(MachineSubordinateRouterCharm) diff --git a/src/container.py b/src/container.py index 8ddd61d1..e72864e6 100644 --- a/src/container.py +++ b/src/container.py @@ -8,6 +8,7 @@ import subprocess import typing +import charm_refresh import ops if typing.TYPE_CHECKING: @@ -61,6 +62,13 @@ def __init__( super().__init__(returncode=returncode, cmd=cmd, output=output, stderr=stderr) +class RefreshFailed(Exception): + """Snap failed to refresh. 
Previous snap revision is still installed + + Only applies to machine charm + """ + + class Container(abc.ABC): """Workload container (snap or rock)""" @@ -163,11 +171,33 @@ def update_mysql_router_exporter_service( "`key`, `certificate` and `certificate_authority` required when tls=True" ) + @staticmethod + @abc.abstractmethod + def install( + *, unit: ops.Unit, model_uuid: str, snap_revision: str, refresh: charm_refresh.Machines + ) -> None: + """Ensure snap is installed by this charm + + Only applies to machine charm + + If snap is not installed, install it + If snap is installed, check that it was installed by this charm & raise an exception otherwise + + Automatically retries if snap installation fails + """ + + @staticmethod @abc.abstractmethod - def upgrade(self, unit: ops.Unit) -> None: - """Upgrade container version + def refresh( + *, unit: ops.Unit, model_uuid: str, snap_revision: str, refresh: charm_refresh.Machines + ) -> None: + """Refresh snap Only applies to machine charm + + If snap refresh fails and previous revision is still installed, raises `RefreshFailed` + + Does not automatically retry if snap installation fails """ @abc.abstractmethod diff --git a/src/machine_upgrade.py b/src/machine_upgrade.py deleted file mode 100644 index a016358f..00000000 --- a/src/machine_upgrade.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2023 Canonical Ltd. -# See LICENSE file for licensing details. 
- -"""In-place upgrades on machines - -Derived from specification: DA058 - In-Place Upgrades - Kubernetes v2 -(https://docs.google.com/document/d/1tLjknwHudjcHs42nzPVBNkHs98XxAOT2BXGGpP7NyEU/) -""" - -import json -import logging -import time -import typing - -import ops - -import snap -import upgrade -import workload - -if typing.TYPE_CHECKING: - import relations.cos - -logger = logging.getLogger(__name__) - -FORCE_ACTION_NAME = "force-upgrade" - - -class Upgrade(upgrade.Upgrade): - """In-place upgrades on machines""" - - @property - def unit_state(self) -> typing.Optional[upgrade.UnitState]: - if ( - self._unit_workload_container_version is not None - and self._unit_workload_container_version != self._app_workload_container_version - ): - logger.debug("Unit upgrade state: outdated") - return upgrade.UnitState.OUTDATED - return super().unit_state - - @unit_state.setter - def unit_state(self, value: upgrade.UnitState) -> None: - if value is upgrade.UnitState.HEALTHY: - # Set snap revision on first install - self._unit_workload_container_version = snap.revision - self._unit_workload_version = self._current_versions["workload"] - logger.debug( - f'Saved {snap.revision=} and {self._current_versions["workload"]=} in unit databag while setting state healthy' - ) - # Super call - upgrade.Upgrade.unit_state.fset(self, value) - - def _get_unit_healthy_status( - self, *, workload_status: typing.Optional[ops.StatusBase] - ) -> ops.StatusBase: - if self._unit_workload_container_version == self._app_workload_container_version: - if isinstance(workload_status, ops.WaitingStatus): - return ops.WaitingStatus( - f'Router {self._unit_workload_version}; Snap rev {self._unit_workload_container_version}; Charmed operator {self._current_versions["charm"]}' - ) - return ops.ActiveStatus( - f'Router {self._unit_workload_version} running; Snap rev {self._unit_workload_container_version}; Charmed operator {self._current_versions["charm"]}' - ) - if isinstance(workload_status, 
ops.WaitingStatus): - return ops.WaitingStatus( - f'Router {self._unit_workload_version}; Snap rev {self._unit_workload_container_version} (outdated); Charmed operator {self._current_versions["charm"]}' - ) - return ops.ActiveStatus( - f'Router {self._unit_workload_version} running; Snap rev {self._unit_workload_container_version} (outdated); Charmed operator {self._current_versions["charm"]}' - ) - - @property - def app_status(self) -> typing.Optional[ops.StatusBase]: - if not self.in_progress: - return - if not self.is_compatible: - logger.info( - "Upgrade incompatible. If you accept potential *data loss* and *downtime*, you can continue by running `force-upgrade` action on each remaining unit" - ) - return ops.BlockedStatus( - "Upgrade incompatible. Rollback to previous revision with `juju refresh`" - ) - return super().app_status - - @property - def _unit_workload_container_versions(self) -> typing.Dict[str, str]: - """{Unit name: installed snap revision}""" - versions = {} - for unit in self._sorted_units: - if version := (self._peer_relation.data[unit].get("snap_revision")): - versions[unit.name] = version - return versions - - @property - def _unit_workload_container_version(self) -> typing.Optional[str]: - """Installed snap revision for this unit""" - return self._unit_databag.get("snap_revision") - - @_unit_workload_container_version.setter - def _unit_workload_container_version(self, value: str): - self._unit_databag["snap_revision"] = value - - @property - def _app_workload_container_version(self) -> str: - """Snap revision for current charm code""" - return snap.revision - - @property - def _unit_workload_version(self) -> typing.Optional[str]: - """Installed MySQL Router version for this unit""" - return self._unit_databag.get("workload_version") - - @_unit_workload_version.setter - def _unit_workload_version(self, value: str): - self._unit_databag["workload_version"] = value - - def reconcile_partition(self, *, action_event: ops.ActionEvent = None) -> 
None: - """Handle Juju action to confirm first upgraded unit is healthy and resume upgrade.""" - if action_event: - unit = self._sorted_units[0] # First unit to upgrade - state = self._peer_relation.data[unit].get("state") - if state: - state = upgrade.UnitState(state) - outdated = ( - self._unit_workload_container_versions.get(unit.name) - != self._app_workload_container_version - ) - unhealthy = state is not upgrade.UnitState.HEALTHY - if outdated or unhealthy: - if outdated: - message = "Highest number unit has not upgraded yet. Upgrade will not resume." - else: - message = "Highest number unit is unhealthy. Upgrade will not resume." - logger.debug(f"Resume upgrade event failed: {message}") - action_event.fail(message) - return - self.upgrade_resumed = True - message = "Upgrade resumed." - action_event.set_results({"result": message}) - logger.debug(f"Resume upgrade event succeeded: {message}") - - @property - def upgrade_resumed(self) -> bool: - """Whether user has resumed upgrade with Juju action - - Reset to `False` after each `juju refresh` - """ - return json.loads(self._app_databag.get("upgrade-resumed", "false")) - - @upgrade_resumed.setter - def upgrade_resumed(self, value: bool): - # Trigger peer relation_changed event even if value does not change - # (Needed when leader sets value to False during `ops.UpgradeCharmEvent`) - self._app_databag["-unused-timestamp-upgrade-resume-last-updated"] = str(time.time()) - - self._app_databag["upgrade-resumed"] = json.dumps(value) - logger.debug(f"Set upgrade-resumed to {value=}") - - @property - def authorized(self) -> bool: - assert self._unit_workload_container_version != self._app_workload_container_version - for index, unit in enumerate(self._sorted_units): - if unit.name == self._unit.name: - # Higher number units have already upgraded - if index == 1: - # User confirmation needed to resume upgrade (i.e. 
upgrade second unit) - logger.debug(f"Second unit authorized to upgrade if {self.upgrade_resumed=}") - return self.upgrade_resumed - return True - state = self._peer_relation.data[unit].get("state") - if state: - state = upgrade.UnitState(state) - if ( - self._unit_workload_container_versions.get(unit.name) - != self._app_workload_container_version - or state is not upgrade.UnitState.HEALTHY - ): - # Waiting for higher number units to upgrade - return False - return False - - def upgrade_unit( - self, - *, - event, - workload_: workload.Workload, - tls: bool, - exporter_config: "relations.cos.ExporterConfig", - ) -> None: - logger.debug(f"Upgrading {self.authorized=}") - self.unit_state = upgrade.UnitState.UPGRADING - workload_.upgrade(event=event, unit=self._unit, tls=tls, exporter_config=exporter_config) - self._unit_workload_container_version = snap.revision - self._unit_workload_version = self._current_versions["workload"] - logger.debug( - f'Saved {snap.revision=} and {self._current_versions["workload"]=} in unit databag after upgrade' - ) diff --git a/src/machine_workload.py b/src/machine_workload.py index ed713e66..59ea234c 100644 --- a/src/machine_workload.py +++ b/src/machine_workload.py @@ -17,7 +17,7 @@ logger = logging.getLogger(__name__) -class AuthenticatedMachineWorkload(workload.AuthenticatedWorkload): +class RunningMachineWorkload(workload.RunningWorkload): """Workload with connection to MySQL cluster and with Unix sockets enabled""" # TODO python3.10 min version: Use `list` instead of `typing.List` diff --git a/src/relations/cos.py b/src/relations/cos.py index d25dfcc9..14f83c68 100644 --- a/src/relations/cos.py +++ b/src/relations/cos.py @@ -7,13 +7,13 @@ import typing from dataclasses import dataclass +import charm_refresh import ops from charms.grafana_agent.v0.cos_agent import COSAgentProvider, charm_tracing_config import container import relations.secrets import utils -from snap import _SNAP_NAME if typing.TYPE_CHECKING: import abstract_charm 
@@ -53,21 +53,12 @@ def __init__(self, charm_: "abstract_charm.MySQLRouterCharm", container_: contai "port": self._EXPORTER_PORT, } ], - log_slots=[f"{_SNAP_NAME}:logs"], + log_slots=[f"{charm_refresh.snap_name()}:logs"], tracing_protocols=[self._TRACING_PROTOCOL], ) self._charm = charm_ self._container = container_ - charm_.framework.observe( - charm_.on[self._NAME].relation_created, - charm_.reconcile, - ) - charm_.framework.observe( - charm_.on[self._NAME].relation_broken, - charm_.reconcile, - ) - self._secrets = relations.secrets.RelationSecrets( charm_, self._PEER_RELATION_NAME, diff --git a/src/relations/database_provides.py b/src/relations/database_provides.py index b1876c94..b00507aa 100644 --- a/src/relations/database_provides.py +++ b/src/relations/database_provides.py @@ -214,10 +214,7 @@ class RelationEndpoint: def __init__(self, charm_: "abstract_charm.MySQLRouterCharm") -> None: self._interface = data_interfaces.DatabaseProvides(charm_, relation_name=self._NAME) - - charm_.framework.observe(charm_.on[self._NAME].relation_created, charm_.reconcile) charm_.framework.observe(self._interface.on.database_requested, charm_.reconcile) - charm_.framework.observe(charm_.on[self._NAME].relation_broken, charm_.reconcile) @property # TODO python3.10 min version: Use `list` instead of `typing.List` diff --git a/src/relations/database_requires.py b/src/relations/database_requires.py index bdb23da3..1e1b3379 100644 --- a/src/relations/database_requires.py +++ b/src/relations/database_requires.py @@ -6,6 +6,7 @@ import logging import typing +import charm_ as charm import charms.data_platform_libs.v0.data_interfaces as data_interfaces import ops @@ -109,10 +110,8 @@ def __init__(self, charm_: "abstract_charm.MySQLRouterCharm") -> None: database_name="mysql_innodb_cluster_metadata", extra_user_roles="mysqlrouter", ) - charm_.framework.observe(charm_.on[self._NAME].relation_created, charm_.reconcile) charm_.framework.observe(self._interface.on.database_created, 
charm_.reconcile) charm_.framework.observe(self._interface.on.endpoints_changed, charm_.reconcile) - charm_.framework.observe(charm_.on[self._NAME].relation_broken, charm_.reconcile) def get_connection_info(self, *, event) -> typing.Optional[CompleteConnectionInformation]: """Information for connection to MySQL cluster""" @@ -137,3 +136,11 @@ def get_status(self, event) -> typing.Optional[ops.StatusBase]: CompleteConnectionInformation(interface=self._interface, event=event) except (_MissingRelation, remote_databag.IncompleteDatabag) as exception: return exception.status + + def does_relation_exist(self) -> bool: + """Whether a relation exists + + From testing: during scale up, this should return `True` as soon as this unit receives the + first relation-created event on any endpoint + """ + return charm.Endpoint(self._NAME).relation is not None diff --git a/src/relations/deprecated_shared_db_database_provides.py b/src/relations/deprecated_shared_db_database_provides.py index 02d596c9..bf4f86ea 100644 --- a/src/relations/deprecated_shared_db_database_provides.py +++ b/src/relations/deprecated_shared_db_database_provides.py @@ -205,8 +205,6 @@ def __init__(self, charm_: "abstract_charm.MySQLRouterCharm") -> None: logger.warning( "'mysql-shared' relation interface is DEPRECATED and will be removed in a future release. Use 'mysql_client' interface instead." 
) - charm_.framework.observe(charm_.on[self._NAME].relation_changed, charm_.reconcile) - charm_.framework.observe(charm_.on[self._NAME].relation_broken, charm_.reconcile) self._charm = charm_ self.framework.observe( self._charm.on[self._CREDENTIALS_PEER_RELATION_ENDPOINT_NAME].relation_changed, diff --git a/src/relations/hacluster.py b/src/relations/hacluster.py index cf1f3906..c8f31a49 100644 --- a/src/relations/hacluster.py +++ b/src/relations/hacluster.py @@ -23,10 +23,6 @@ def __init__(self, charm: ops.CharmBase): self.charm = charm - self.framework.observe( - self.charm.on[HACLUSTER_RELATION_NAME].relation_changed, self.charm.reconcile - ) - @property def relation(self) -> Optional[ops.Relation]: """Returns the relations in this model, or None if hacluster is not initialised.""" diff --git a/src/snap.py b/src/snap.py index 98b27944..b4f50b53 100644 --- a/src/snap.py +++ b/src/snap.py @@ -3,14 +3,13 @@ """Workload snap container & installer""" -import enum import logging import pathlib -import platform import shutil import subprocess import typing +import charm_refresh import charms.operator_libs_linux.v2.snap as snap_lib import ops import tenacity @@ -22,97 +21,72 @@ logger = logging.getLogger(__name__) -_SNAP_NAME = "charmed-mysql" -REVISIONS: typing.Dict[str, str] = { - # Keep in sync with `workload_version` file - "x86_64": "139", - "aarch64": "138", -} -revision = REVISIONS[platform.machine()] -_snap = snap_lib.SnapCache()[_SNAP_NAME] _UNIX_USERNAME = "snap_daemon" -class _RefreshVerb(str, enum.Enum): - INSTALL = "install" - UPGRADE = "upgrade" +def _unique_unit_name(*, unit: ops.Unit, model_uuid: str): + return f"{model_uuid}_{unit.name}" -def _refresh(*, unit: ops.Unit, verb: _RefreshVerb) -> None: - # TODO python3.10 min version: use `removesuffix` instead of `rstrip` - logger.debug(f'{verb.capitalize().rstrip("e")}ing {_SNAP_NAME=}, {revision=}') - unit.status = ops.MaintenanceStatus(f'{verb.capitalize().rstrip("e")}ing snap') +def 
_raise_if_snap_installed_not_by_this_charm(*, unit: ops.Unit, model_uuid: str): + """Raise exception if snap was not installed by this charm - def _set_retry_status(_) -> None: - message = f"Snap {verb} failed. Retrying..." - unit.status = ops.MaintenanceStatus(message) - logger.debug(message) - - for attempt in tenacity.Retrying( - stop=tenacity.stop_after_delay(60 * 5), - wait=tenacity.wait_exponential(multiplier=10), - retry=tenacity.retry_if_exception_type(snap_lib.SnapError), - after=_set_retry_status, - reraise=True, - ): - with attempt: - _snap.ensure(state=snap_lib.SnapState.Present, revision=revision) - _snap.hold() - logger.debug(f'{verb.capitalize().rstrip("e")}ed {_SNAP_NAME=}, {revision=}') - - -def install(*, unit: ops.Unit, model_uuid: str): - """Install snap.""" + Assumes snap is installed + """ + snap_name = charm_refresh.snap_name() installed_by_unit = pathlib.Path( - "/var/snap", _SNAP_NAME, "common", "installed_by_mysql_router_charm_unit" + "/var/snap", snap_name, "common", "installed_by_mysql_router_charm_unit" ) - unique_unit_name = f"{model_uuid}_{unit.name}" - # This charm can override/use an existing snap installation only if the snap was previously - # installed by this charm. - # Otherwise, the snap could be in use by another charm (e.g. MySQL Server charm, a different - # MySQL Router charm). - if _snap.present and not ( - installed_by_unit.exists() and installed_by_unit.read_text() == unique_unit_name + + if not ( + installed_by_unit.exists() + and installed_by_unit.read_text() == _unique_unit_name(unit=unit, model_uuid=model_uuid) ): + # The snap could be in use by another charm (e.g. MySQL Server charm, a different MySQL + # Router charm). logger.debug( - f"{installed_by_unit.exists() and installed_by_unit.read_text()=} {unique_unit_name=}" + f"{installed_by_unit.exists() and installed_by_unit.read_text()=} " + f"{_unique_unit_name(unit=unit, model_uuid=model_uuid)=}" ) - logger.error(f"{_SNAP_NAME} snap already installed on machine. 
Installation aborted") - raise Exception(f"Multiple {_SNAP_NAME} snap installs not supported on one machine") - _refresh(unit=unit, verb=_RefreshVerb.INSTALL) - installed_by_unit.write_text(unique_unit_name) - logger.debug(f"Wrote {unique_unit_name=} to {installed_by_unit.name=}") + logger.error(f"{snap_name} snap already installed on machine. Installation aborted") + raise Exception(f"Multiple {snap_name} snap installs not supported on one machine") def uninstall(): - """Uninstall snap.""" - logger.debug(f"Uninstalling {_SNAP_NAME=}") - _snap.ensure(state=snap_lib.SnapState.Absent) - logger.debug(f"Uninstalled {_SNAP_NAME=}") + """Uninstall snap if installed""" + snap_name = charm_refresh.snap_name() + snap = snap_lib.SnapCache()[snap_name] + + logger.debug(f"Ensuring {snap_name=} is uninstalled") + snap.ensure(state=snap_lib.SnapState.Absent) + logger.debug(f"Ensured {snap_name=} is uninstalled") class _Path(pathlib.PosixPath, container.Path): def __new__(cls, *args, **kwargs): path = super().__new__(cls, *args, **kwargs) + snap_name = charm_refresh.snap_name() + if args and isinstance(args[0], cls) and (parent_ := args[0]._container_parent): path._container_parent = parent_ else: if str(path).startswith("/etc/mysqlrouter") or str(path).startswith( "/var/lib/mysqlrouter" ): - parent = f"/var/snap/{_SNAP_NAME}/current" + parent = f"/var/snap/{snap_name}/current" elif str(path).startswith("/run/mysqlrouter") or str(path).startswith( "/var/log/mysqlrouter" ): - parent = f"/var/snap/{_SNAP_NAME}/common" + parent = f"/var/snap/{snap_name}/common" elif str(path).startswith("/tmp"): - parent = f"/tmp/snap-private-tmp/snap.{_SNAP_NAME}" + parent = f"/tmp/snap-private-tmp/snap.{snap_name}" else: parent = None if parent: assert str(path).startswith("/") path = super().__new__(cls, parent, path.relative_to("/"), **kwargs) path._container_parent = parent + return path def __truediv__(self, other): @@ -161,42 +135,51 @@ class Snap(container.Container): _EXPORTER_SERVICE_NAME 
= "mysqlrouter-exporter" def __init__(self, *, unit_name: str) -> None: + self._snap_name = charm_refresh.snap_name() + self._installed_by_unit = pathlib.Path( + "/var/snap", self._snap_name, "common", "installed_by_mysql_router_charm_unit" + ) + super().__init__( - mysql_router_command=f"{_SNAP_NAME}.mysqlrouter", - mysql_shell_command=f"{_SNAP_NAME}.mysqlsh", - mysql_router_password_command=f"{_SNAP_NAME}.mysqlrouter-passwd", + mysql_router_command=f"{self._snap_name}.mysqlrouter", + mysql_shell_command=f"{self._snap_name}.mysqlsh", + mysql_router_password_command=f"{self._snap_name}.mysqlrouter-passwd", unit_name=unit_name, ) + @property + def _snap(self): + return snap_lib.SnapCache()[self._snap_name] + @property def ready(self) -> bool: return True @property def mysql_router_service_enabled(self) -> bool: - return _snap.services[self._SERVICE_NAME]["active"] + return self._snap.services[self._SERVICE_NAME]["active"] @property def mysql_router_exporter_service_enabled(self) -> bool: - return _snap.services[self._EXPORTER_SERVICE_NAME]["active"] + return self._snap.services[self._EXPORTER_SERVICE_NAME]["active"] def update_mysql_router_service(self, *, enabled: bool, tls: bool = None) -> None: super().update_mysql_router_service(enabled=enabled, tls=tls) if tls: - _snap.set({"mysqlrouter.extra-options": f"--extra-config {self.tls_config_file}"}) + self._snap.set({"mysqlrouter.extra-options": f"--extra-config {self.tls_config_file}"}) else: - _snap.unset("mysqlrouter.extra-options") + self._snap.unset("mysqlrouter.extra-options") - router_is_running = _snap.services[self._SERVICE_NAME]["active"] + router_is_running = self._snap.services[self._SERVICE_NAME]["active"] if enabled: if router_is_running: - _snap.restart([self._SERVICE_NAME]) + self._snap.restart([self._SERVICE_NAME]) else: - _snap.start([self._SERVICE_NAME], enable=True) + self._snap.start([self._SERVICE_NAME], enable=True) else: - _snap.stop([self._SERVICE_NAME], disable=True) + 
self._snap.stop([self._SERVICE_NAME], disable=True) def update_mysql_router_exporter_service( self, @@ -218,7 +201,7 @@ def update_mysql_router_exporter_service( ) if enabled: - _snap.set({ + self._snap.set({ "mysqlrouter-exporter.listen-port": config.listen_port, "mysqlrouter-exporter.user": config.username, "mysqlrouter-exporter.password": config.password, @@ -226,30 +209,109 @@ def update_mysql_router_exporter_service( "mysqlrouter-exporter.service-name": self._unit_name.replace("/", "-"), }) if tls: - _snap.set({ + self._snap.set({ "mysqlrouter.tls-cacert-path": certificate_authority_filename, "mysqlrouter.tls-cert-path": certificate_filename, "mysqlrouter.tls-key-path": key_filename, }) else: - _snap.unset("mysqlrouter.tls-cacert-path") - _snap.unset("mysqlrouter.tls-cert-path") - _snap.unset("mysqlrouter.tls-key-path") - _snap.start([self._EXPORTER_SERVICE_NAME], enable=True) + self._snap.unset("mysqlrouter.tls-cacert-path") + self._snap.unset("mysqlrouter.tls-cert-path") + self._snap.unset("mysqlrouter.tls-key-path") + self._snap.start([self._EXPORTER_SERVICE_NAME], enable=True) + else: + self._snap.stop([self._EXPORTER_SERVICE_NAME], disable=True) + self._snap.unset("mysqlrouter-exporter.listen-port") + self._snap.unset("mysqlrouter-exporter.user") + self._snap.unset("mysqlrouter-exporter.password") + self._snap.unset("mysqlrouter-exporter.url") + self._snap.unset("mysqlrouter-exporter.service-name") + self._snap.unset("mysqlrouter.tls-cacert-path") + self._snap.unset("mysqlrouter.tls-cert-path") + self._snap.unset("mysqlrouter.tls-key-path") + + def install( + self, + *, + unit: ops.Unit, + model_uuid: str, + snap_revision: str, + refresh: charm_refresh.Machines, + ) -> None: + """Ensure snap is installed by this charm + + If snap is not installed, install it + If snap is installed, check that it was installed by this charm & raise an exception otherwise + + Automatically retries if snap installation fails + """ + unique_unit_name = 
f"{model_uuid}_{unit.name}" + if self._snap.present: + _raise_if_snap_installed_not_by_this_charm(unit=unit, model_uuid=model_uuid) + return + # Install snap + logger.info(f"Installing snap revision {repr(snap_revision)}") + unit.status = ops.MaintenanceStatus("Installing snap") + + def _set_retry_status(_) -> None: + message = "Snap install failed. Retrying..." + unit.status = ops.MaintenanceStatus(message) + logger.debug(message) + + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_delay(60 * 5), + wait=tenacity.wait_exponential(multiplier=10), + retry=tenacity.retry_if_exception_type((snap_lib.SnapError, snap_lib.SnapAPIError)), + after=_set_retry_status, + reraise=True, + ): + with attempt: + self._snap.ensure(state=snap_lib.SnapState.Present, revision=snap_revision) + refresh.update_snap_revision() + self._snap.hold() + self._installed_by_unit.write_text(unique_unit_name) + logger.debug(f"Wrote {unique_unit_name=} to {self._installed_by_unit.name=}") + logger.info(f"Installed snap revision {repr(snap_revision)}") + + def refresh( + self, + *, + unit: ops.Unit, + model_uuid: str, + snap_revision: str, + refresh: charm_refresh.Machines, + ) -> None: + """Refresh snap + + If snap refresh fails and previous revision is still installed, raises `RefreshFailed` + + Does not automatically retry if snap installation fails + """ + if not self._snap.present: + self.install( + unit=unit, model_uuid=model_uuid, snap_revision=snap_revision, refresh=refresh + ) + return + _raise_if_snap_installed_not_by_this_charm(unit=unit, model_uuid=model_uuid) + + revision_before_refresh = self._snap.revision + if revision_before_refresh == snap_revision: + raise ValueError(f"Cannot refresh snap; {snap_revision=} is already installed") + + logger.info(f"Refreshing snap to revision {repr(snap_revision)}") + unit.status = ops.MaintenanceStatus("Refreshing snap") + try: + self._snap.ensure(state=snap_lib.SnapState.Present, revision=snap_revision) + except (snap_lib.SnapError, 
snap_lib.SnapAPIError): + logger.exception("Snap refresh failed") + if self._snap.revision == revision_before_refresh: + raise container.RefreshFailed + else: + refresh.update_snap_revision() + raise else: - _snap.stop([self._EXPORTER_SERVICE_NAME], disable=True) - _snap.unset("mysqlrouter-exporter.listen-port") - _snap.unset("mysqlrouter-exporter.user") - _snap.unset("mysqlrouter-exporter.password") - _snap.unset("mysqlrouter-exporter.url") - _snap.unset("mysqlrouter-exporter.service-name") - _snap.unset("mysqlrouter.tls-cacert-path") - _snap.unset("mysqlrouter.tls-cert-path") - _snap.unset("mysqlrouter.tls-key-path") - - def upgrade(self, unit: ops.Unit) -> None: - """Upgrade snap.""" - _refresh(unit=unit, verb=_RefreshVerb.UPGRADE) + refresh.update_snap_revision() + logger.info(f"Refreshed snap to revision {repr(snap_revision)}") # TODO python3.10 min version: Use `list` instead of `typing.List` def _run_command( diff --git a/src/upgrade.py b/src/upgrade.py deleted file mode 100644 index 0535c89f..00000000 --- a/src/upgrade.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2023 Canonical Ltd. -# See LICENSE file for licensing details. 
- -"""In-place upgrades - -Based off specification: DA058 - In-Place Upgrades - Kubernetes v2 -(https://docs.google.com/document/d/1tLjknwHudjcHs42nzPVBNkHs98XxAOT2BXGGpP7NyEU/) -""" - -import abc -import copy -import enum -import json -import logging -import pathlib -import typing - -import ops -import poetry.core.constraints.version as poetry_version - -import workload - -logger = logging.getLogger(__name__) - -PEER_RELATION_ENDPOINT_NAME = "upgrade-version-a" -RESUME_ACTION_NAME = "resume-upgrade" - - -def unit_number(unit_: ops.Unit) -> int: - """Get unit number""" - return int(unit_.name.split("/")[-1]) - - -class PeerRelationNotReady(Exception): - """Upgrade peer relation not available (to this unit)""" - - -class UnitState(str, enum.Enum): - """Unit upgrade state""" - - HEALTHY = "healthy" - RESTARTING = "restarting" # Kubernetes only - UPGRADING = "upgrading" # Machines only - OUTDATED = "outdated" # Machines only - - -class Upgrade(abc.ABC): - """In-place upgrades""" - - def __init__(self, charm_: ops.CharmBase) -> None: - relations = charm_.model.relations[PEER_RELATION_ENDPOINT_NAME] - if not relations: - raise PeerRelationNotReady - assert len(relations) == 1 - self._peer_relation = relations[0] - self._unit: ops.Unit = charm_.unit - self._unit_databag = self._peer_relation.data[self._unit] - self._app_databag = self._peer_relation.data[charm_.app] - self._app_name = charm_.app.name - self._current_versions = {} # For this unit - for version, file_name in { - "charm": "charm_version", - "workload": "workload_version", - }.items(): - self._current_versions[version] = pathlib.Path(file_name).read_text().strip() - - @property - def unit_state(self) -> typing.Optional[UnitState]: - """Unit upgrade state""" - if state := self._unit_databag.get("state"): - return UnitState(state) - - @unit_state.setter - def unit_state(self, value: UnitState) -> None: - self._unit_databag["state"] = value.value - - @property - def is_compatible(self) -> bool: - """Whether 
upgrade is supported from previous versions""" - assert self.versions_set - previous_version_strs: typing.Dict[str, str] = json.loads(self._app_databag["versions"]) - # TODO charm versioning: remove `.split("+")` (which removes git hash before comparing) - previous_version_strs["charm"] = previous_version_strs["charm"].split("+")[0] - previous_versions: typing.Dict[str, poetry_version.Version] = { - key: poetry_version.Version.parse(value) - for key, value in previous_version_strs.items() - } - current_version_strs = copy.copy(self._current_versions) - current_version_strs["charm"] = current_version_strs["charm"].split("+")[0] - current_versions = { - key: poetry_version.Version.parse(value) for key, value in current_version_strs.items() - } - try: - if ( - previous_versions["charm"] > current_versions["charm"] - or previous_versions["charm"].major != current_versions["charm"].major - ): - logger.debug( - f'{previous_versions["charm"]=} incompatible with {current_versions["charm"]=}' - ) - return False - if ( - previous_versions["workload"] > current_versions["workload"] - or previous_versions["workload"].major != current_versions["workload"].major - or previous_versions["workload"].minor != current_versions["workload"].minor - ): - logger.debug( - f'{previous_versions["workload"]=} incompatible with {current_versions["workload"]=}' - ) - return False - logger.debug( - f"Versions before upgrade compatible with versions after upgrade {previous_version_strs=} {self._current_versions=}" - ) - return True - except KeyError as exception: - logger.debug(f"Version missing from {previous_versions=}", exc_info=exception) - return False - - @property - def in_progress(self) -> bool: - logger.debug( - f"{self._app_workload_container_version=} {self._unit_workload_container_versions=}" - ) - return any( - version != self._app_workload_container_version - for version in self._unit_workload_container_versions.values() - ) - - @property - def _sorted_units(self) -> 
typing.List[ops.Unit]: - """Units sorted from highest to lowest unit number""" - return sorted((self._unit, *self._peer_relation.units), key=unit_number, reverse=True) - - @abc.abstractmethod - def _get_unit_healthy_status( - self, *, workload_status: typing.Optional[ops.StatusBase] - ) -> ops.StatusBase: - """Status shown during upgrade if unit is healthy""" - - def get_unit_juju_status( - self, *, workload_status: typing.Optional[ops.StatusBase] - ) -> typing.Optional[ops.StatusBase]: - if self.in_progress: - return self._get_unit_healthy_status(workload_status=workload_status) - - @property - def app_status(self) -> typing.Optional[ops.StatusBase]: - if not self.in_progress: - return - if not self.upgrade_resumed: - # User confirmation needed to resume upgrade (i.e. upgrade second unit) - # Statuses over 120 characters are truncated in `juju status` as of juju 3.1.6 and - # 2.9.45 - return ops.BlockedStatus( - f"Upgrading. Verify highest unit is healthy & run `{RESUME_ACTION_NAME}` action. To rollback, `juju refresh` to last revision" - ) - return ops.MaintenanceStatus( - "Upgrading. To rollback, `juju refresh` to the previous revision" - ) - - @property - def versions_set(self) -> bool: - """Whether versions have been saved in app databag - - Should only be `False` during first charm install - - If a user upgrades from a charm that does not set versions, this charm will get stuck. - """ - return self._app_databag.get("versions") is not None - - def set_versions_in_app_databag(self) -> None: - """Save current versions in app databag - - Used after next upgrade to check compatibility (i.e. 
whether that upgrade should be - allowed) - """ - assert not self.in_progress - logger.debug(f"Setting {self._current_versions=} in upgrade peer relation app databag") - self._app_databag["versions"] = json.dumps(self._current_versions) - logger.debug(f"Set {self._current_versions=} in upgrade peer relation app databag") - - @property - @abc.abstractmethod - def upgrade_resumed(self) -> bool: - """Whether user has resumed upgrade with Juju action""" - - @property - @abc.abstractmethod - def _unit_workload_container_versions(self) -> typing.Dict[str, str]: - """{Unit name: unique identifier for unit's workload container version} - - If and only if this version changes, the workload will restart (during upgrade or - rollback). - - On Kubernetes, the workload & charm are upgraded together - On machines, the charm is upgraded before the workload - - This identifier should be comparable to `_app_workload_container_version` to determine if - the unit & app are the same workload container version. - """ - - @property - @abc.abstractmethod - def _app_workload_container_version(self) -> str: - """Unique identifier for the app's workload container version - - This should match the workload version in the current Juju app charm version. - - This identifier should be comparable to `_unit_workload_container_versions` to determine if - the app & unit are the same workload container version. - """ - - @abc.abstractmethod - def reconcile_partition(self, *, action_event: ops.ActionEvent = None) -> None: - """If ready, allow next unit to upgrade.""" - - @property - @abc.abstractmethod - def authorized(self) -> bool: - """Whether this unit is authorized to upgrade - - Only applies to machine charm - """ - - @abc.abstractmethod - def upgrade_unit(self, *, event, workload_: workload.Workload, tls: bool) -> None: - """Upgrade this unit. 
- - Only applies to machine charm - """ diff --git a/src/workload.py b/src/workload.py index 2643641e..6d55e2b3 100644 --- a/src/workload.py +++ b/src/workload.py @@ -11,6 +11,7 @@ import string import typing +import charm_refresh import ops import requests import tenacity @@ -67,25 +68,45 @@ def container_ready(self) -> bool: """ return self._container.ready - @property - def version(self) -> str: - """MySQL Router version""" - version = self._container.run_mysql_router(["--version"]) - for component in version.split(): - if component.startswith("8"): - return component - return "" - - def upgrade( - self, *, event, unit: ops.Unit, tls: bool, exporter_config: "relations.cos.ExporterConfig" + def install( + self, + *, + unit: ops.Unit, + model_uuid: str, + snap_revision: str, + refresh: charm_refresh.Machines, ) -> None: - """Upgrade MySQL Router. + """Ensure snap is installed by this charm Only applies to machine charm + + If snap is not installed, install it + If snap is installed, check that it was installed by this charm & raise an exception otherwise + + Automatically retries if snap installation fails """ - logger.debug("Upgrading MySQL Router") - self._container.upgrade(unit=unit) - logger.debug("Upgraded MySQL Router") + self._container.install( + unit=unit, model_uuid=model_uuid, snap_revision=snap_revision, refresh=refresh + ) + + def refresh( + self, + *, + event, + unit: ops.Unit, + model_uuid: str, + snap_revision: str, + refresh: charm_refresh.Machines, + tls: bool, + exporter_config: "relations.cos.ExporterConfig", + ) -> None: + """Refresh MySQL Router + + Only applies to machine charm + """ + self._container.refresh( + unit=unit, model_uuid=model_uuid, snap_revision=snap_revision, refresh=refresh + ) @property def _tls_config_file_data(self) -> str: @@ -186,7 +207,7 @@ def status(self) -> typing.Optional[ops.StatusBase]: return ops.WaitingStatus() -class AuthenticatedWorkload(Workload): +class RunningWorkload(Workload): """Workload with connection 
to MySQL cluster""" def __init__( @@ -219,16 +240,16 @@ def _router_id(self) -> str: # MySQL Router is bootstrapped without `--directory`—there is one system-wide instance. return f"{socket.getfqdn()}::system" - def _cleanup_after_upgrade_or_potential_container_restart(self) -> None: - """Remove Router user after upgrade or (potential) container restart. + def _cleanup_after_refresh_or_potential_container_restart(self) -> None: + """Remove Router user after refresh or (potential) container restart. (On Kubernetes, storage is not persisted on container restart—MySQL Router's config file is deleted. Therefore, MySQL Router needs to be bootstrapped again.) """ if user_info := self.shell.get_mysql_router_user_for_unit(self._charm.unit.name): - logger.debug("Cleaning up after upgrade or container restart") + logger.debug("Cleaning up after refresh or container restart") self.shell.delete_user(user_info.username) - logger.debug("Cleaned up after upgrade or container restart") + logger.debug("Cleaned up after refresh or container restart") # TODO python3.10 min version: Use `list` instead of `typing.List` def _get_bootstrap_command( @@ -328,7 +349,7 @@ def _restart(self, *, event, tls: bool) -> None: def _enable_router(self, *, event, tls: bool, unit_name: str) -> None: """Enable router after setting up all the necessary prerequisites.""" logger.info("Enabling MySQL Router service") - self._cleanup_after_upgrade_or_potential_container_restart() + self._cleanup_after_refresh_or_potential_container_restart() # create an empty credentials file, if the file does not exist self._container.create_router_rest_api_credentials_file() self._bootstrap_router(event=event, tls=tls) @@ -420,22 +441,45 @@ def status(self) -> typing.Optional[ops.StatusBase]: "Router was manually removed from MySQL ClusterSet. 
Remove & re-deploy unit" ) - def upgrade( - self, *, event, unit: ops.Unit, tls: bool, exporter_config: "relations.cos.ExporterConfig" + def refresh( + self, + *, + event, + unit: ops.Unit, + model_uuid: str, + snap_revision: str, + refresh: charm_refresh.Machines, + tls: bool, + exporter_config: "relations.cos.ExporterConfig", ) -> None: enabled = self._container.mysql_router_service_enabled exporter_enabled = self._container.mysql_router_exporter_service_enabled if exporter_enabled: self._disable_exporter() if enabled: - logger.debug("Disabling MySQL Router service before upgrade") + logger.debug("Disabling MySQL Router service before refresh") self._disable_router() - super().upgrade(event=event, unit=unit, tls=tls, exporter_config=exporter_config) - if enabled: - logger.debug("Re-enabling MySQL Router service after upgrade") - self._enable_router(event=event, tls=tls, unit_name=unit.name) - if exporter_enabled: - self._enable_exporter(tls=tls, exporter_config=exporter_config) + try: + super().refresh( + event=event, + unit=unit, + model_uuid=model_uuid, + snap_revision=snap_revision, + refresh=refresh, + tls=tls, + exporter_config=exporter_config, + ) + except container.RefreshFailed: + message = "Re-enabling MySQL Router service after failed snap refresh" + raise + else: + message = "Re-enabling MySQL Router service after refresh" + finally: + if enabled: + logger.debug(message) + self._enable_router(event=event, tls=tls, unit_name=unit.name) + if exporter_enabled: + self._enable_exporter(tls=tls, exporter_config=exporter_config) def _wait_until_http_server_authenticates(self) -> None: """Wait until active connection with router HTTP server using monitoring credentials.""" diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 88942f92..6832905b 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -8,7 +8,6 @@ from typing import Dict, List, Optional import tenacity -from juju.model import Model from juju.unit 
import Unit from pytest_operator.plugin import OpsTest @@ -410,41 +409,6 @@ async def ensure_all_units_continuous_writes_incrementing( last_max_written_value = max_written_value -async def get_workload_version(ops_test: OpsTest, unit_name: str) -> str: - """Get the workload version of the deployed router charm.""" - return_code, output, _ = await ops_test.juju( - "ssh", - unit_name, - "sudo", - "cat", - f"/var/lib/juju/agents/unit-{unit_name.replace('/', '-')}/charm/workload_version", - ) - - assert return_code == 0 - return output.strip() - - -async def get_leader_unit( - ops_test: Optional[OpsTest], app_name: str, model: Optional[Model] = None -) -> Optional[Unit]: - """Get the leader unit of a given application. - - Args: - ops_test: The ops test framework instance - app_name: The name of the application - model: The model to use (overrides ops_test.model) - """ - leader_unit = None - if not model: - model = ops_test.model - for unit in model.applications[app_name].units: - if await unit.is_leader_from_status(): - leader_unit = unit - break - - return leader_unit - - def get_juju_status(model_name: str) -> str: """Return the juju status output. 
diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index b932e2be..4b6fc2aa 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -5,26 +5,20 @@ import logging import os import pathlib -import platform -import re import shutil import typing import zipfile import pytest -import tenacity +import tomli +import tomli_w from pytest_operator.plugin import OpsTest -import snap - from .helpers import ( APPLICATION_DEFAULT_APP_NAME, MYSQL_DEFAULT_APP_NAME, MYSQL_ROUTER_DEFAULT_APP_NAME, ensure_all_units_continuous_writes_incrementing, - get_juju_status, - get_leader_unit, - get_workload_version, ) from .juju_ import run_action @@ -52,12 +46,15 @@ async def test_deploy_edge(ops_test: OpsTest, series) -> None: config={"profile": "testing"}, series="jammy", ), - ops_test.model.deploy( + ops_test.juju( + "deploy", MYSQL_ROUTER_APP_NAME, - application_name=MYSQL_ROUTER_APP_NAME, - num_units=1, - channel="dpe/edge", - series=series, + "-n", + 1, + "--channel", + "dpe/edge/test-refresh-v3-8.0.41", # TODO remove after refresh v3 merged + "--series", + series, ), ops_test.model.deploy( TEST_APP_NAME, @@ -91,9 +88,6 @@ async def test_upgrade_from_edge(ops_test: OpsTest, charm, continuous_writes) -> await ensure_all_units_continuous_writes_incrementing(ops_test) mysql_router_application = ops_test.model.applications[MYSQL_ROUTER_APP_NAME] - mysql_router_unit = mysql_router_application.units[0] - - old_workload_version = await get_workload_version(ops_test, mysql_router_unit.name) logger.info("Build charm locally") global temporary_charm @@ -106,28 +100,52 @@ async def test_upgrade_from_edge(ops_test: OpsTest, charm, continuous_writes) -> logger.info("Refresh the charm") await mysql_router_application.refresh(path=temporary_charm) - logger.info("Wait for the first unit to be refreshed and the app to move to blocked status") + # Refresh will always be incompatible since we are downgrading the workload + # Refresh will 
additionally be incompatible on PR CI (not edge CI) since unreleased charm + versions are always marked as incompatible + logger.info("Wait for refresh to block as incompatible") await ops_test.model.block_until( lambda: mysql_router_application.status == "blocked", timeout=TIMEOUT ) assert ( - "resume-upgrade" in mysql_router_application.status_message - ), "mysql router application status not indicating that user should resume upgrade" + "incompatible" in mysql_router_application.status_message + ), "mysql router application status not indicating that refresh incompatible" + + # Highest to lowest unit number + refresh_order = sorted( + mysql_router_application.units, + key=lambda unit: int(unit.name.split("/")[1]), + reverse=True, + ) + + logger.info("Running force-refresh-start action with check-compatibility=false") + await run_action(refresh_order[0], "force-refresh-start", **{"check-compatibility": False}) - for attempt in tenacity.Retrying( - reraise=True, - stop=tenacity.stop_after_delay(SMALL_TIMEOUT), - wait=tenacity.wait_fixed(10), - ): - with attempt: - assert "+testupgrade" in get_juju_status( - ops_test.model.name - ), "None of the units are upgraded" + logger.info("Wait for app status to update") + await ops_test.model.wait_for_idle( + [MYSQL_ROUTER_APP_NAME], + idle_period=30, + timeout=TIMEOUT, + ) - mysql_router_leader_unit = await get_leader_unit(ops_test, MYSQL_ROUTER_APP_NAME) + logger.info("Wait for refresh to start") + await ops_test.model.block_until( + lambda: mysql_router_application.status == "blocked", timeout=3 * 60 + ) + assert ( + "resume-refresh" in mysql_router_application.status_message + ), "mysql router application status not indicating that user should resume refresh" + + logger.info("Wait for first unit to upgrade") + async with ops_test.fast_forward("60s"): + await ops_test.model.wait_for_idle( + [MYSQL_ROUTER_APP_NAME], + idle_period=30, + timeout=TIMEOUT, + ) - logger.info("Running resume-upgrade on the mysql router leader 
unit") - await run_action(mysql_router_leader_unit, "resume-upgrade") + logger.info("Running resume-refresh") + await run_action(refresh_order[1], "resume-refresh") logger.info("Waiting for upgrade to complete on all units") await ops_test.model.wait_for_idle( @@ -137,14 +155,6 @@ async def test_upgrade_from_edge(ops_test: OpsTest, charm, continuous_writes) -> timeout=UPGRADE_TIMEOUT, ) - workload_version_file = pathlib.Path("workload_version") - repo_workload_version = workload_version_file.read_text().strip() - - for unit in mysql_router_application.units: - workload_version = await get_workload_version(ops_test, unit.name) - assert workload_version == f"{repo_workload_version}+testupgrade" - assert old_workload_version != workload_version - await ensure_all_units_continuous_writes_incrementing(ops_test) await ops_test.model.wait_for_idle( @@ -193,17 +203,6 @@ async def test_fail_and_rollback(ops_test: OpsTest, charm, continuous_writes) -> apps=[MYSQL_ROUTER_APP_NAME], status="active", timeout=TIMEOUT, idle_period=30 ) - workload_version_file = pathlib.Path("workload_version") - repo_workload_version = workload_version_file.read_text().strip() - - for unit in mysql_router_application.units: - charm_workload_version = await get_workload_version(ops_test, unit.name) - assert charm_workload_version == f"{repo_workload_version}+testupgrade" - - await ops_test.model.wait_for_idle( - apps=[MYSQL_ROUTER_APP_NAME], status="active", timeout=TIMEOUT - ) - logger.info("Ensure continuous writes after rollback procedure") await ensure_all_units_continuous_writes_incrementing(ops_test) @@ -216,31 +215,26 @@ def create_valid_upgrade_charm(charm_file: typing.Union[str, pathlib.Path]) -> N Upgrades require a new snap revision to avoid no-oping. 
""" - workload_version_file = pathlib.Path("workload_version") - workload_version = workload_version_file.read_text().strip() + with pathlib.Path("refresh_versions.toml").open("rb") as file: + versions = tomli.load(file) + + # charm needs to refresh snap to be able to avoid no-op when upgrading. + # set an old revision of the snap + versions["snap"]["revisions"]["x86_64"] = "121" + versions["snap"]["revisions"]["aarch64"] = "122" + versions["workload"] = "8.0.39" with zipfile.ZipFile(charm_file, mode="a") as charm_zip: - charm_zip.writestr("workload_version", f"{workload_version}+testupgrade\n") - - # charm needs to refresh snap to be able to avoid no-op when upgrading. - # set an old revision of the snap - snap_file = pathlib.Path("src/snap.py") - content = snap_file.read_text() - old_revision = {"x86_64": "121", "aarch64": "122"}[platform.machine()] - new_snap_content = re.sub( - f'"{platform.machine()}": "{snap.revision}"', - f'"{platform.machine()}": "{old_revision}"', - str(content), - ) - charm_zip.writestr("src/snap.py", new_snap_content) + charm_zip.writestr("refresh_versions.toml", tomli_w.dumps(versions)) def create_invalid_upgrade_charm(charm_file: typing.Union[str, pathlib.Path]) -> None: """Create an invalid mysql router charm for upgrade.""" - workload_version_file = pathlib.Path("workload_version") - old_workload_version = workload_version_file.read_text().strip() - [major, minor, patch] = old_workload_version.split(".") + with pathlib.Path("refresh_versions.toml").open("rb") as file: + versions = tomli.load(file) + + versions["charm"] = "8.0/0.0.0" with zipfile.ZipFile(charm_file, mode="a") as charm_zip: # an invalid charm version because the major workload_version is one less than the current workload_version - charm_zip.writestr("workload_version", f"{int(major) - 1}.{minor}.{patch}+testrollback\n") + charm_zip.writestr("refresh_versions.toml", tomli_w.dumps(versions)) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 
c0b402d0..4b48db1d 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -1,13 +1,14 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. +import pathlib +import platform import ops import pytest +import tomli from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing_disabled -import snap - @pytest.fixture(autouse=True) def disable_tenacity_retry(monkeypatch): @@ -29,21 +30,49 @@ def disable_tenacity_retry(monkeypatch): monkeypatch.setattr(f"tenacity.{retry_class}.__call__", lambda *args, **kwargs: False) +class _MockRefresh: + in_progress = False + next_unit_allowed_to_refresh = True + workload_allowed_to_start = True + app_status_higher_priority = None + unit_status_higher_priority = None + + def __init__(self, _, /): + pass + + def update_snap_revision(self): + pass + + @property + def pinned_snap_revision(self): + with pathlib.Path("refresh_versions.toml").open("rb") as file: + return tomli.load(file)["snap"]["revisions"][platform.machine()] + + def unit_status_lower_priority(self, *, workload_is_running=True): + return None + + @pytest.fixture(autouse=True) def patch(monkeypatch): monkeypatch.setattr( "charm.MachineSubordinateRouterCharm.wait_until_mysql_router_ready", lambda *args, **kwargs: None, ) - monkeypatch.setattr("workload.AuthenticatedWorkload._router_username", "") + monkeypatch.setattr("workload.RunningWorkload._router_username", "") monkeypatch.setattr("mysql_shell.Shell._run_code", lambda *args, **kwargs: None) monkeypatch.setattr( "mysql_shell.Shell.get_mysql_router_user_for_unit", lambda *args, **kwargs: None ) monkeypatch.setattr("mysql_shell.Shell.is_router_in_cluster_set", lambda *args, **kwargs: True) - monkeypatch.setattr("upgrade.Upgrade.in_progress", False) - monkeypatch.setattr("upgrade.Upgrade.versions_set", True) - monkeypatch.setattr("upgrade.Upgrade.is_compatible", True) + monkeypatch.setattr("charm_refresh.Machines", _MockRefresh) + monkeypatch.setattr("charm_refresh.snap_name", 
lambda: "charmed-mysql") + monkeypatch.setattr( + "charm_refresh.CharmSpecificCommon.__post_init__", lambda *args, **kwargs: None + ) + monkeypatch.setattr( + "relations.database_requires.RelationEndpoint.does_relation_exist", + lambda *args, **kwargs: True, + ) # flake8: noqa: C901 @@ -96,12 +125,11 @@ def restart(self, services: list[str] = []): if "mysqlrouter-exporter" in services: self.services["mysqlrouter-exporter"]["active"] = True - monkeypatch.setattr(snap, "_snap", Snap()) + monkeypatch.setattr("snap.Snap._snap", Snap()) - monkeypatch.setattr( - "snap.Snap._run_command", - lambda *args, **kwargs: "null", # Use "null" for `json.loads()` - ) + # Use "null" for `json.loads()` + monkeypatch.setattr("snap.Snap._run_command", lambda *args, **kwargs: "null") + monkeypatch.setattr("snap.Snap.install", lambda *args, **kwargs: None) monkeypatch.setattr("snap._Path.read_text", lambda *args, **kwargs: "") monkeypatch.setattr("snap._Path.write_text", lambda *args, **kwargs: None) monkeypatch.setattr("snap._Path.unlink", lambda *args, **kwargs: None) diff --git a/tests/unit/scenario_/database_relations/test_database_relations.py b/tests/unit/scenario_/database_relations/test_database_relations.py index b0387bf5..8255e0b3 100644 --- a/tests/unit/scenario_/database_relations/test_database_relations.py +++ b/tests/unit/scenario_/database_relations/test_database_relations.py @@ -23,7 +23,7 @@ def output_states(*, relations: list[scenario.Relation]) -> typing.Iterable[scen """ context = scenario.Context(charm.MachineSubordinateRouterCharm) input_state = scenario.State( - relations=[*relations, scenario.PeerRelation(endpoint="upgrade-version-a")], + relations=[*relations, scenario.PeerRelation(endpoint="refresh-v-three")], leader=True, ) events = [] diff --git a/tests/unit/scenario_/database_relations/test_database_relations_breaking.py b/tests/unit/scenario_/database_relations/test_database_relations_breaking.py index ef32152e..799cd53d 100644 --- 
a/tests/unit/scenario_/database_relations/test_database_relations_breaking.py +++ b/tests/unit/scenario_/database_relations/test_database_relations_breaking.py @@ -17,7 +17,7 @@ def output_state( ) -> scenario.State: context = scenario.Context(charm.MachineSubordinateRouterCharm) input_state = scenario.State( - relations=[*relations, scenario.PeerRelation(endpoint="upgrade-version-a")], + relations=[*relations, scenario.PeerRelation(endpoint="refresh-v-three")], secrets=secrets, leader=True, ) diff --git a/tests/unit/scenario_/test_start.py b/tests/unit/scenario_/test_start.py index b662c61a..1716ea2b 100644 --- a/tests/unit/scenario_/test_start.py +++ b/tests/unit/scenario_/test_start.py @@ -13,7 +13,7 @@ def test_start_sets_status_if_no_relations(leader): context = scenario.Context(charm.MachineSubordinateRouterCharm) input_state = scenario.State( leader=leader, - relations=[scenario.PeerRelation(endpoint="upgrade-version-a")], + relations=[scenario.PeerRelation(endpoint="refresh-v-three")], ) output_state = context.run("start", input_state) if leader: diff --git a/tests/unit/test_workload.py b/tests/unit/test_workload.py index 64ef369b..13296d6e 100644 --- a/tests/unit/test_workload.py +++ b/tests/unit/test_workload.py @@ -185,4 +185,4 @@ ], ) def test_parse_username_from_config(config_file_text, username): - assert workload.AuthenticatedWorkload._parse_username_from_config(config_file_text) == username + assert workload.RunningWorkload._parse_username_from_config(config_file_text) == username diff --git a/tox.ini b/tox.ini index 53c18624..849829db 100644 --- a/tox.ini +++ b/tox.ini @@ -41,7 +41,7 @@ description = Run unit tests commands_pre = poetry install --only main,charm-libs,unit commands = - poetry run pytest --numprocesses=auto --cov=src --ignore={[vars]tests_path}/integration/ {posargs} + poetry run pytest --numprocesses 120 --cov=src --ignore={[vars]tests_path}/integration/ {posargs} [testenv:integration] description = Run integration tests diff --git 
a/workload_version b/workload_version deleted file mode 100644 index d5c63883..00000000 --- a/workload_version +++ /dev/null @@ -1 +0,0 @@ -8.0.41