From 56b85c95a881f88a5b721ec4537504fe7d2015b2 Mon Sep 17 00:00:00 2001 From: yvonnexyang Date: Mon, 28 Mar 2022 20:27:37 +0800 Subject: [PATCH] feat mysql and mariadb Signed-off-by: yvonnexyang --- .github/workflows/CI-mariadb.yml | 95 ++++ .github/workflows/CI-mysql.yml | 95 ++++ .github/workflows/CI.yml | 6 + make/harbor.yml.tmpl | 2 + .../mysql/0015_1.10.0_schema.up.sql | 3 +- .../migrations/mysql/0030_2.0.0_schema.up.sql | 5 +- .../migrations/mysql/0040_2.1.0_schema.up.sql | 37 +- .../migrations/mysql/0050_2.2.0_schema.up.sql | 37 +- .../migrations/mysql/0070_2.4.0_schema.up.sql | 2 +- make/photon/notary-server/Dockerfile | 2 +- make/photon/notary-signer/Dockerfile | 2 +- make/photon/prepare/templates/core/env.jinja | 12 +- .../prepare/templates/exporter/env.jinja | 1 + .../notary/server-config.mysql.json.jinja | 28 + .../prepare/templates/notary/server_env.jinja | 7 + .../notary/signer-config.mysql.json.jinja | 15 + .../prepare/templates/notary/signer_env.jinja | 7 + make/photon/prepare/utils/configs.py | 3 + make/photon/prepare/utils/notary.py | 22 +- src/cmd/exporter/main.go | 51 +- src/cmd/migrate-patch/main.go | 37 +- src/common/const.go | 7 + src/common/dao/base.go | 17 +- src/common/dao/dao_test.go | 4 +- src/common/dao/mysql.go | 83 ++- src/common/dao/testutils.go | 38 ++ src/common/models/database.go | 13 +- src/lib/config/metadata/metadatalist.go | 8 + src/lib/config/systemconfig.go | 46 +- src/migration/migration.go | 11 +- src/pkg/blob/dao/dao_test.go | 2 +- src/pkg/blob/dao/mysql_dao.go | 171 ++++++ src/pkg/blob/dao/mysql_dao_test.go | 489 +++++++++++++++++ src/pkg/blob/manager.go | 12 + src/pkg/config/db/manager_test.go | 19 + src/pkg/config/manager.go | 25 +- src/pkg/member/dao/mysql_dao.go | 70 +++ src/pkg/member/dao/mysql_dao_test.go | 298 +++++++++++ src/pkg/member/manager.go | 12 + src/pkg/usergroup/dao/mysql_dao.go | 67 +++ src/pkg/usergroup/dao/mysql_dao_test.go | 69 +++ src/pkg/usergroup/manager.go | 12 + src/testing/suite.go | 12 +- 
.../migrate/v4/database/mysql/README.md | 55 ++ .../migrate/v4/database/mysql/mysql.go | 494 ++++++++++++++++++ tests/docker-compose.test.yml | 21 + 46 files changed, 2417 insertions(+), 107 deletions(-) create mode 100644 .github/workflows/CI-mariadb.yml create mode 100644 .github/workflows/CI-mysql.yml create mode 100644 make/photon/prepare/templates/notary/server-config.mysql.json.jinja create mode 100644 make/photon/prepare/templates/notary/signer-config.mysql.json.jinja create mode 100644 src/pkg/blob/dao/mysql_dao.go create mode 100644 src/pkg/blob/dao/mysql_dao_test.go create mode 100644 src/pkg/member/dao/mysql_dao.go create mode 100644 src/pkg/member/dao/mysql_dao_test.go create mode 100644 src/pkg/usergroup/dao/mysql_dao.go create mode 100644 src/pkg/usergroup/dao/mysql_dao_test.go create mode 100644 src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/README.md create mode 100644 src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/mysql.go diff --git a/.github/workflows/CI-mariadb.yml b/.github/workflows/CI-mariadb.yml new file mode 100644 index 000000000000..c5ca0f359032 --- /dev/null +++ b/.github/workflows/CI-mariadb.yml @@ -0,0 +1,95 @@ +name: CI +env: + DATABASE_TYPE: mariadb + MYSQL_HOST: localhost + MYSQL_PORT: 3306 + MYSQL_USERNAME: root + MYSQL_PASSWORD: root123 + MYSQL_DATABASE: registry + POSTGRESQL_HOST: localhost + POSTGRESQL_PORT: 5432 + POSTGRESQL_USR: postgres + POSTGRESQL_PWD: root123 + POSTGRESQL_DATABASE: registry + DOCKER_COMPOSE_VERSION: 1.23.0 + HARBOR_ADMIN: admin + HARBOR_ADMIN_PASSWD: Harbor12345 + CORE_SECRET: tempString + KEY_PATH: "/data/secret/keys/secretkey" + REDIS_HOST: localhost + REG_VERSION: v2.7.1-patch-2819-2553 + UI_BUILDER_VERSION: 1.6.0 + +on: + pull_request: + push: + paths-ignore: + - 'docs/**' + +jobs: + UTTEST4MARIADB: + env: + UTTEST: true + runs-on: + #- self-hosted + - ubuntu-latest + timeout-minutes: 100 + steps: + - name: Set up Go 1.17 + uses: actions/setup-go@v1 + with: + go-version: 
1.17.7 + id: go + - name: setup Docker + uses: docker-practice/actions-setup-docker@0.0.1 + with: + docker_version: 20.04 + docker_channel: stable + - uses: actions/checkout@v2 + with: + path: src/github.com/goharbor/harbor + - name: setup env + run: | + cd src/github.com/goharbor/harbor + pwd + go env + echo "GOPATH=$(go env GOPATH):$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "$(go env GOPATH)/bin" >> $GITHUB_PATH + echo "TOKEN_PRIVATE_KEY_PATH=${GITHUB_WORKSPACE}/src/github.com/goharbor/harbor/tests/private_key.pem" >> $GITHUB_ENV + shell: bash + - name: before_install + run: | + set -x + cd src/github.com/goharbor/harbor + pwd + env + #sudo apt install -y xvfb + #xvfb-run ls + curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose + chmod +x docker-compose + sudo mv docker-compose /usr/local/bin + IP=`hostname -I | awk '{print $1}'` + echo '{"insecure-registries" : ["'$IP':5000"]}' | sudo tee /etc/docker/daemon.json + echo "IP=$IP" >> $GITHUB_ENV + sudo cp ./tests/harbor_ca.crt /usr/local/share/ca-certificates/ + sudo update-ca-certificates + sudo service docker restart + - name: install + run: | + cd src/github.com/goharbor/harbor + env + df -h + bash ./tests/showtime.sh ./tests/ci/ut_install.sh + - name: script + run: | + echo IP: $IP + df -h + cd src/github.com/goharbor/harbor + bash ./tests/showtime.sh ./tests/ci/ut_run.sh $IP + df -h + - name: Codecov For BackEnd + uses: codecov/codecov-action@v1 + with: + file: ./src/github.com/goharbor/harbor/profile.cov + flags: unittests + diff --git a/.github/workflows/CI-mysql.yml b/.github/workflows/CI-mysql.yml new file mode 100644 index 000000000000..2e31a2fd7bda --- /dev/null +++ b/.github/workflows/CI-mysql.yml @@ -0,0 +1,95 @@ +name: CI +env: + DATABASE_TYPE: mysql + MYSQL_HOST: localhost + MYSQL_PORT: 3308 + MYSQL_USERNAME: root + MYSQL_PASSWORD: root123 + MYSQL_DATABASE: registry + POSTGRESQL_HOST: localhost + POSTGRESQL_PORT: 
5432 + POSTGRESQL_USR: postgres + POSTGRESQL_PWD: root123 + POSTGRESQL_DATABASE: registry + DOCKER_COMPOSE_VERSION: 1.23.0 + HARBOR_ADMIN: admin + HARBOR_ADMIN_PASSWD: Harbor12345 + CORE_SECRET: tempString + KEY_PATH: "/data/secret/keys/secretkey" + REDIS_HOST: localhost + REG_VERSION: v2.7.1-patch-2819-2553 + UI_BUILDER_VERSION: 1.6.0 + +on: + pull_request: + push: + paths-ignore: + - 'docs/**' + +jobs: + UTTEST4MYSQL: + env: + UTTEST: true + runs-on: + #- self-hosted + - ubuntu-latest + timeout-minutes: 100 + steps: + - name: Set up Go 1.17 + uses: actions/setup-go@v1 + with: + go-version: 1.17.7 + id: go + - name: setup Docker + uses: docker-practice/actions-setup-docker@0.0.1 + with: + docker_version: 20.04 + docker_channel: stable + - uses: actions/checkout@v2 + with: + path: src/github.com/goharbor/harbor + - name: setup env + run: | + cd src/github.com/goharbor/harbor + pwd + go env + echo "GOPATH=$(go env GOPATH):$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "$(go env GOPATH)/bin" >> $GITHUB_PATH + echo "TOKEN_PRIVATE_KEY_PATH=${GITHUB_WORKSPACE}/src/github.com/goharbor/harbor/tests/private_key.pem" >> $GITHUB_ENV + shell: bash + - name: before_install + run: | + set -x + cd src/github.com/goharbor/harbor + pwd + env + #sudo apt install -y xvfb + #xvfb-run ls + curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose + chmod +x docker-compose + sudo mv docker-compose /usr/local/bin + IP=`hostname -I | awk '{print $1}'` + echo '{"insecure-registries" : ["'$IP':5000"]}' | sudo tee /etc/docker/daemon.json + echo "IP=$IP" >> $GITHUB_ENV + sudo cp ./tests/harbor_ca.crt /usr/local/share/ca-certificates/ + sudo update-ca-certificates + sudo service docker restart + - name: install + run: | + cd src/github.com/goharbor/harbor + env + df -h + bash ./tests/showtime.sh ./tests/ci/ut_install.sh + - name: script + run: | + echo IP: $IP + df -h + cd src/github.com/goharbor/harbor + bash 
./tests/showtime.sh ./tests/ci/ut_run.sh $IP + df -h + - name: Codecov For BackEnd + uses: codecov/codecov-action@v1 + with: + file: ./src/github.com/goharbor/harbor/profile.cov + flags: unittests + diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index a8b055637061..ce7191fe8287 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -1,10 +1,16 @@ name: CI env: + DATABASE_TYPE: postgresql POSTGRESQL_HOST: localhost POSTGRESQL_PORT: 5432 POSTGRESQL_USR: postgres POSTGRESQL_PWD: root123 POSTGRESQL_DATABASE: registry + MYSQL_HOST: localhost + MYSQL_PORT: 3306 + MYSQL_USERNAME: root + MYSQL_PASSWORD: root123 + MYSQL_DATABASE: registry DOCKER_COMPOSE_VERSION: 1.23.0 HARBOR_ADMIN: admin HARBOR_ADMIN_PASSWD: Harbor12345 diff --git a/make/harbor.yml.tmpl b/make/harbor.yml.tmpl index 123d9ff0cec2..2536c4391179 100644 --- a/make/harbor.yml.tmpl +++ b/make/harbor.yml.tmpl @@ -142,6 +142,8 @@ _version: 2.5.0 # Uncomment external_database if using external database. 
# external_database: # harbor: +# # database type, default is postgresql, options include postgresql, mariadb and mysql +# type: harbor_db_type # host: harbor_db_host # port: harbor_db_port # db_name: harbor_db_name diff --git a/make/migrations/mysql/0015_1.10.0_schema.up.sql b/make/migrations/mysql/0015_1.10.0_schema.up.sql index fadd47e9295e..4e2820a254eb 100644 --- a/make/migrations/mysql/0015_1.10.0_schema.up.sql +++ b/make/migrations/mysql/0015_1.10.0_schema.up.sql @@ -34,7 +34,8 @@ CREATE TABLE scan_report report JSON, start_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, end_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - UNIQUE(digest, registration_uuid, mime_type) + UNIQUE(digest, registration_uuid, mime_type), + CHECK (report is null or JSON_VALID (report)) ); /** Add table for immutable tag **/ diff --git a/make/migrations/mysql/0030_2.0.0_schema.up.sql b/make/migrations/mysql/0030_2.0.0_schema.up.sql index 028407a1ef02..4bce84345b02 100644 --- a/make/migrations/mysql/0030_2.0.0_schema.up.sql +++ b/make/migrations/mysql/0030_2.0.0_schema.up.sql @@ -92,6 +92,7 @@ JOIN ( ) AS ordered_art ON art.repository_name=ordered_art.repository_name AND art.digest=ordered_art.digest WHERE ordered_art.seq=1; +ALTER TABLE artifact DROP INDEX unique_artifact; ALTER TABLE artifact DROP COLUMN tag; /*remove the duplicate artifact rows*/ @@ -102,7 +103,6 @@ WHERE id NOT IN ( ); SET sql_mode = ''; -ALTER TABLE artifact DROP INDEX unique_artifact; ALTER TABLE artifact ADD CONSTRAINT unique_artifact UNIQUE (repository_id, digest); /*set artifact size*/ @@ -127,7 +127,8 @@ CREATE TABLE artifact_reference annotations json, FOREIGN KEY (parent_id) REFERENCES artifact(id), FOREIGN KEY (child_id) REFERENCES artifact(id), - CONSTRAINT unique_reference UNIQUE (parent_id, child_id) + CONSTRAINT unique_reference UNIQUE (parent_id, child_id), + CHECK (annotations is null or JSON_VALID (annotations)) ); /* artifact_trash records deleted artifact */ diff --git 
a/make/migrations/mysql/0040_2.1.0_schema.up.sql b/make/migrations/mysql/0040_2.1.0_schema.up.sql index e67d5e6b9ac9..65d339d1be9c 100644 --- a/make/migrations/mysql/0040_2.1.0_schema.up.sql +++ b/make/migrations/mysql/0040_2.1.0_schema.up.sql @@ -1,4 +1,4 @@ -ALTER TABLE project ADD COLUMN registry_id int; +ALTER TABLE project ADD COLUMN IF NOT EXISTS registry_id int; ALTER TABLE cve_whitelist RENAME TO cve_allowlist; UPDATE role SET name='maintainer' WHERE name='master'; UPDATE project_metadata SET name='reuse_sys_cve_allowlist' WHERE name='reuse_sys_cve_whitelist'; @@ -14,7 +14,8 @@ CREATE TABLE IF NOT EXISTS execution ( start_time timestamp DEFAULT CURRENT_TIMESTAMP, end_time timestamp, revision int, - PRIMARY KEY (id) + PRIMARY KEY (id), + CHECK (extra_attrs is null or JSON_VALID (extra_attrs)) ); CREATE TABLE IF NOT EXISTS task ( @@ -31,16 +32,17 @@ CREATE TABLE IF NOT EXISTS task ( start_time timestamp, update_time timestamp, end_time timestamp, - FOREIGN KEY (execution_id) REFERENCES execution(id) + FOREIGN KEY (execution_id) REFERENCES execution(id), + CHECK (extra_attrs is null or JSON_VALID (extra_attrs)) ); ALTER TABLE `blob` ADD COLUMN update_time timestamp default CURRENT_TIMESTAMP; ALTER TABLE `blob` ADD COLUMN status varchar(255) default 'none'; ALTER TABLE `blob` ADD COLUMN version BIGINT default 0; -CREATE INDEX idx_status ON `blob` (status); -CREATE INDEX idx_version ON `blob` (version); +CREATE INDEX IF NOT EXISTS idx_status ON `blob` (status); +CREATE INDEX IF NOT EXISTS idx_version ON `blob` (version); -CREATE TABLE p2p_preheat_instance ( +CREATE TABLE IF NOT EXISTS p2p_preheat_instance ( id SERIAL PRIMARY KEY NOT NULL, name varchar(255) NOT NULL, description varchar(255), @@ -69,22 +71,23 @@ CREATE TABLE IF NOT EXISTS p2p_preheat_policy ( UNIQUE (name, project_id) ); -ALTER TABLE schedule ADD COLUMN vendor_type varchar(16); -ALTER TABLE schedule ADD COLUMN vendor_id int; -ALTER TABLE schedule ADD COLUMN cron varchar(64); -ALTER TABLE schedule 
ADD COLUMN callback_func_name varchar(128); -ALTER TABLE schedule ADD COLUMN callback_func_param text; +ALTER TABLE schedule ADD COLUMN IF NOT EXISTS vendor_type varchar(16); +ALTER TABLE schedule ADD COLUMN IF NOT EXISTS vendor_id int; +ALTER TABLE schedule ADD COLUMN IF NOT EXISTS cron varchar(64); +ALTER TABLE schedule ADD COLUMN IF NOT EXISTS callback_func_name varchar(128); +ALTER TABLE schedule ADD COLUMN IF NOT EXISTS callback_func_param text; /*abstract the cron, callback function parameters from table retention_policy*/ UPDATE schedule, ( - SELECT id, data->>'$.trigger.references.job_id' AS schedule_id, - data->>'$.trigger.settings.cron' AS cron + SELECT id, replace(json_extract(data,'$.trigger.references.job_id'),'"','') AS schedule_id, + replace(json_extract(data,'$.trigger.settings.cron'),'"','') AS cron FROM retention_policy ) AS retention SET vendor_type= 'RETENTION', vendor_id=retention.id, schedule.cron = retention.cron, callback_func_name = 'RETENTION', callback_func_param=concat('{"PolicyID":', retention.id, ',"Trigger":"Schedule"}') WHERE schedule.id=retention.schedule_id; + /*create new execution and task record for each schedule*/ CREATE PROCEDURE PROC_UPDATE_EXECUTION_TASK ( ) BEGIN INSERT INTO execution ( vendor_type, vendor_id, `trigger` ) SELECT @@ -120,13 +123,13 @@ END; CALL PROC_UPDATE_EXECUTION_TASK(); -ALTER TABLE schedule DROP COLUMN job_id; -ALTER TABLE schedule DROP COLUMN status; +ALTER TABLE schedule DROP COLUMN IF EXISTS job_id; +ALTER TABLE schedule DROP COLUMN IF EXISTS status; UPDATE registry SET type = 'quay' WHERE type = 'quay-io'; -ALTER TABLE artifact ADD COLUMN icon varchar(255); +ALTER TABLE artifact ADD COLUMN IF NOT EXISTS icon varchar(255); /*remove the constraint for name in table 'notification_policy'*/ /*ALTER TABLE notification_policy DROP CONSTRAINT notification_policy_name_key;*/ @@ -146,7 +149,7 @@ INSERT INTO data_migrations (version) VALUES ( ELSE 0 END ); -ALTER TABLE schema_migrations DROP COLUMN 
data_version; +ALTER TABLE schema_migrations DROP COLUMN IF EXISTS data_version; UPDATE artifact SET icon=( diff --git a/make/migrations/mysql/0050_2.2.0_schema.up.sql b/make/migrations/mysql/0050_2.2.0_schema.up.sql index 2d3c72d1a4ee..c476257da74f 100644 --- a/make/migrations/mysql/0050_2.2.0_schema.up.sql +++ b/make/migrations/mysql/0050_2.2.0_schema.up.sql @@ -263,7 +263,7 @@ DROP TABLE IF EXISTS replication_execution; INSERT INTO `schedule` ( vendor_type, vendor_id, cron, callback_func_name, callback_func_param, creation_time, update_time ) SELECT 'REPLICATION', schd.policy_id, -( SELECT `trigger` ->> '$.trigger_settings.cron' FROM replication_policy WHERE id = schd.policy_id ), +( SELECT replace(json_extract(`trigger`,'$.trigger_settings.cron'),'"','') FROM replication_policy WHERE id = schd.policy_id ), 'REPLICATION_CALLBACK', schd.policy_id, schd.creation_time, @@ -376,11 +376,11 @@ ALTER TABLE schedule ADD CONSTRAINT unique_schedule UNIQUE (vendor_type, vendor_ INSERT INTO `schedule` ( vendor_type, vendor_id, cron, callback_func_name, callback_func_param, cron_type, extra_attrs, creation_time, update_time ) SELECT 'GARBAGE_COLLECTION', - 1, -schd.cron_str ->> '$.cron', +replace(json_extract(schd.cron_str,'$.cron'),'"',''), 'GARBAGE_COLLECTION', -( SELECT JSON_OBJECT ( 'trigger', NULL, 'deleteuntagged', schd.job_parameters -> '$.delete_untagged', 'dryrun', FALSE, 'extra_attrs', schd.job_parameters ) ), -schd.cron_str ->> '$.type', -( SELECT JSON_OBJECT ( 'delete_untagged', schd.job_parameters -> '$.delete_untagged' ) ), +( SELECT JSON_OBJECT ( 'trigger', NULL, 'deleteuntagged', json_extract(schd.job_parameters,'$.delete_untagged'), 'dryrun', FALSE, 'extra_attrs', schd.job_parameters ) ), +replace(json_extract(schd.cron_str,'$.type'),'"',''), +( SELECT JSON_OBJECT ( 'delete_untagged', json_extract(schd.job_parameters,'$.delete_untagged') ) ), schd.creation_time, schd.update_time FROM @@ -399,9 +399,9 @@ INSERT INTO execution ( vendor_type, vendor_id, STATUS, 
revision, `trigger`, sta WHERE vendor_type = 'GARBAGE_COLLECTION' AND vendor_id =- 1 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'GARBAGE_COLLECTION' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = schd.update_time ), @@ -447,9 +447,9 @@ INSERT INTO task ( vendor_type, execution_id, job_id, STATUS, status_code, statu WHERE vendor_type = 'GARBAGE_COLLECTION' AND vendor_id =- 1 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'GARBAGE_COLLECTION' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = schd.creation_time ) @@ -579,7 +579,7 @@ CASE END, 0, 1, - cast( aj.job_parameters AS json ), + aj.job_parameters, aj.creation_time, aj.creation_time, aj.update_time, @@ -595,9 +595,9 @@ WHERE INSERT INTO `schedule` ( vendor_type, vendor_id, cron, callback_func_name, cron_type, creation_time, update_time ) SELECT 'SCAN_ALL', 0, -schd.cron_str ->> 'cron', +replace(json_extract(schd.cron_str,'cron'),'"',''), 'scanAll', -schd.cron_str ->> 'type', +replace(json_extract(schd.cron_str,'type'),'"',''), schd.creation_time, schd.update_time FROM @@ -616,9 +616,9 @@ INSERT INTO execution ( vendor_type, vendor_id, STATUS, revision, `trigger`, sta WHERE vendor_type = 'SCAN_ALL' AND vendor_id =0 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'scanAll' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = schd.update_time ), @@ -664,9 +664,9 @@ INSERT INTO task ( vendor_type, 
execution_id, job_id, STATUS, status_code, statu WHERE vendor_type = 'SCAN_ALL' AND vendor_id =0 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'scanAll' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = schd.update_time ) @@ -755,7 +755,8 @@ CREATE TABLE IF NOT EXISTS vulnerability_record ( cwe_ids text, vendor_attributes json, UNIQUE (cve_id(64), registration_uuid, package(64), package_version(64)), - CONSTRAINT fk_registration_uuid FOREIGN KEY(registration_uuid) REFERENCES scanner_registration(uuid) ON DELETE CASCADE + CONSTRAINT fk_registration_uuid FOREIGN KEY(registration_uuid) REFERENCES scanner_registration(uuid) ON DELETE CASCADE, + CHECK (vendor_attributes is null or JSON_VALID (vendor_attributes)) ); -- -------------------------------------------------- @@ -782,7 +783,7 @@ DELETE rt FROM retention_task AS rt LEFT JOIN retention_execution re ON rt.execu WHERE re.id IS NULL; /*move the replication execution records into the new execution table*/ -ALTER TABLE retention_execution ADD COLUMN new_execution_id int; +ALTER TABLE retention_execution ADD COLUMN IF NOT EXISTS new_execution_id int; CREATE PROCEDURE PROC_UPDATE_EXECUTION_AND_RETENTION_EXECUTION ( ) BEGIN DECLARE diff --git a/make/migrations/mysql/0070_2.4.0_schema.up.sql b/make/migrations/mysql/0070_2.4.0_schema.up.sql index a8194557a88e..3accb76f4345 100644 --- a/make/migrations/mysql/0070_2.4.0_schema.up.sql +++ b/make/migrations/mysql/0070_2.4.0_schema.up.sql @@ -1,5 +1,5 @@ /* cleanup deleted user project members */ -DELETE FROM project_member pm WHERE pm.entity_type = 'u' AND EXISTS (SELECT NULL FROM harbor_user u WHERE pm.entity_id = u.user_id AND u.deleted = true ); +DELETE FROM project_member WHERE project_member.entity_type = 'u' AND EXISTS (SELECT NULL FROM harbor_user WHERE 
project_member.entity_id = harbor_user.user_id AND harbor_user.deleted = true ); ALTER TABLE replication_policy ADD COLUMN speed_kb int; diff --git a/make/photon/notary-server/Dockerfile b/make/photon/notary-server/Dockerfile index f6a7309d1ba3..f651f6529ff7 100644 --- a/make/photon/notary-server/Dockerfile +++ b/make/photon/notary-server/Dockerfile @@ -10,4 +10,4 @@ COPY ./make/photon/notary/binary/migrations/ /migrations/ RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch ENV SERVICE_NAME=notary_server USER notary -CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt \ No newline at end of file +CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=${CONFIG_FILE} -logf=logfmt \ No newline at end of file diff --git a/make/photon/notary-signer/Dockerfile b/make/photon/notary-signer/Dockerfile index aaa3a8c52089..e9374bd46105 100644 --- a/make/photon/notary-signer/Dockerfile +++ b/make/photon/notary-signer/Dockerfile @@ -10,4 +10,4 @@ COPY ./make/photon/notary/binary/migrations/ /migrations/ RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch ENV SERVICE_NAME=notary_signer USER notary -CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt \ No newline at end of file +CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=${CONFIG_FILE} -logf=logfmt \ No newline at end of file diff --git a/make/photon/prepare/templates/core/env.jinja b/make/photon/prepare/templates/core/env.jinja index c9adc209e383..f9b93627dfa7 100644 --- a/make/photon/prepare/templates/core/env.jinja +++ b/make/photon/prepare/templates/core/env.jinja @@ -7,7 +7,16 @@ _REDIS_URL_REG={{redis_url_reg}} LOG_LEVEL={{log_level}} EXT_ENDPOINT={{public_url}} 
-DATABASE_TYPE=postgresql +DATABASE_TYPE={{harbor_db_type}} +{% if ( harbor_db_type == "mysql" or harbor_db_type == "mariadb" ) %} +MYSQL_HOST={{harbor_db_host}} +MYSQL_PORT={{harbor_db_port}} +MYSQL_USERNAME={{harbor_db_username}} +MYSQL_PASSWORD={{harbor_db_password}} +MYSQL_DATABASE={{harbor_db_name}} +MYSQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}} +MYSQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} +{% else %} POSTGRESQL_HOST={{harbor_db_host}} POSTGRESQL_PORT={{harbor_db_port}} POSTGRESQL_USERNAME={{harbor_db_username}} @@ -16,6 +25,7 @@ POSTGRESQL_DATABASE={{harbor_db_name}} POSTGRESQL_SSLMODE={{harbor_db_sslmode}} POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}} POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} +{% endif %} REGISTRY_URL={{registry_url}} PORTAL_URL={{portal_url}} TOKEN_SERVICE_URL={{token_service_url}} diff --git a/make/photon/prepare/templates/exporter/env.jinja b/make/photon/prepare/templates/exporter/env.jinja index d9e557c3ad8f..c8a5693a98c7 100644 --- a/make/photon/prepare/templates/exporter/env.jinja +++ b/make/photon/prepare/templates/exporter/env.jinja @@ -18,6 +18,7 @@ HARBOR_SERVICE_SCHEME=https HARBOR_SERVICE_PORT=8080 HARBOR_SERVICE_SCHEME=http {% endif %} +HARBOR_DATABASE_TYPE={{harbor_db_type}} HARBOR_DATABASE_HOST={{harbor_db_host}} HARBOR_DATABASE_PORT={{harbor_db_port}} HARBOR_DATABASE_USERNAME={{harbor_db_username}} diff --git a/make/photon/prepare/templates/notary/server-config.mysql.json.jinja b/make/photon/prepare/templates/notary/server-config.mysql.json.jinja new file mode 100644 index 000000000000..2dad995cc028 --- /dev/null +++ b/make/photon/prepare/templates/notary/server-config.mysql.json.jinja @@ -0,0 +1,28 @@ +{ + "server": { + "http_addr": ":4443" + }, + "trust_service": { + "type": "remote", + "hostname": "notarysigner", + "port": "7899", + "tls_ca_file": "./notary-signer-ca.crt", + "key_algorithm": "ecdsa" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "mysql", + "db_url": 
"{{notary_server_db_username}}:{{notary_server_db_password}}@tcp({{notary_server_db_host}}:{{notary_server_db_port}})/{{notary_server_db_name}}?parseTime=True" + }, + "auth": { + "type": "token", + "options": { + "realm": "{{token_endpoint}}/service/token", + "service": "harbor-notary", + "issuer": "harbor-token-issuer", + "rootcertbundle": "/etc/notary/root.crt" + } + } +} \ No newline at end of file diff --git a/make/photon/prepare/templates/notary/server_env.jinja b/make/photon/prepare/templates/notary/server_env.jinja index 7486b2647083..677538dd0fe1 100644 --- a/make/photon/prepare/templates/notary/server_env.jinja +++ b/make/photon/prepare/templates/notary/server_env.jinja @@ -1,2 +1,9 @@ +{% if ( notary_server_db_type == "mysql" or notary_server_db_type == "mariadb" ) %} +MIGRATIONS_PATH=migrations/server/mysql +CONFIG_FILE=/etc/notary/server-config.mysql.json +DB_URL=mysql://{{notary_server_db_username}}:{{notary_server_db_password}}@tcp({{notary_server_db_host}}:{{notary_server_db_port}})/{{notary_server_db_name}} +{% else %} MIGRATIONS_PATH=migrations/server/postgresql +CONFIG_FILE=/etc/notary/server-config.postgres.json DB_URL=postgres://{{notary_server_db_username}}:{{notary_server_db_password}}@{{notary_server_db_host}}:{{notary_server_db_port}}/{{notary_server_db_name}}?sslmode={{notary_server_db_sslmode}} +{% endif %} \ No newline at end of file diff --git a/make/photon/prepare/templates/notary/signer-config.mysql.json.jinja b/make/photon/prepare/templates/notary/signer-config.mysql.json.jinja new file mode 100644 index 000000000000..37018d80ae48 --- /dev/null +++ b/make/photon/prepare/templates/notary/signer-config.mysql.json.jinja @@ -0,0 +1,15 @@ +{ + "server": { + "grpc_addr": ":7899", + "tls_cert_file": "./notary-signer.crt", + "tls_key_file": "./notary-signer.key" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "mysql", + "db_url": 
"{{notary_signer_db_username}}:{{notary_signer_db_password}}@tcp({{notary_signer_db_host}}:{{notary_signer_db_port}})/{{notary_signer_db_name}}?parseTime=True", + "default_alias": "defaultalias" + } +} \ No newline at end of file diff --git a/make/photon/prepare/templates/notary/signer_env.jinja b/make/photon/prepare/templates/notary/signer_env.jinja index 2482b5a0bac6..9928551221a6 100644 --- a/make/photon/prepare/templates/notary/signer_env.jinja +++ b/make/photon/prepare/templates/notary/signer_env.jinja @@ -1,3 +1,10 @@ NOTARY_SIGNER_DEFAULTALIAS={{alias}} +{% if ( notary_signer_db_type == "mysql" or notary_signer_db_type == "mariadb" ) %} +MIGRATIONS_PATH=migrations/signer/mysql +CONFIG_FILE=/etc/notary/signer-config.mysql.json +DB_URL=mysql://{{notary_signer_db_username}}:{{notary_signer_db_password}}@tcp({{notary_signer_db_host}}:{{notary_signer_db_port}})/{{notary_signer_db_name}} +{% else %} MIGRATIONS_PATH=migrations/signer/postgresql +CONFIG_FILE=/etc/notary/signer-config.postgres.json DB_URL=postgres://{{notary_signer_db_username}}:{{notary_signer_db_password}}@{{notary_signer_db_host}}:{{notary_signer_db_port}}/{{notary_signer_db_name}}?sslmode={{notary_signer_db_sslmode}} +{% endif %} \ No newline at end of file diff --git a/make/photon/prepare/utils/configs.py b/make/photon/prepare/utils/configs.py index f1c6b786f3d2..5ea5a64d688e 100644 --- a/make/photon/prepare/utils/configs.py +++ b/make/photon/prepare/utils/configs.py @@ -273,6 +273,7 @@ def parse_yaml_config(config_file_path, with_notary, with_trivy, with_chartmuseu if external_db_configs: config_dict['external_database'] = True # harbor db + config_dict['harbor_db_type'] = external_db_configs['harbor']['type'] config_dict['harbor_db_host'] = external_db_configs['harbor']['host'] config_dict['harbor_db_port'] = external_db_configs['harbor']['port'] config_dict['harbor_db_name'] = external_db_configs['harbor']['db_name'] @@ -284,6 +285,7 @@ def parse_yaml_config(config_file_path, with_notary, 
with_trivy, with_chartmuseu if with_notary: # notary signer + config_dict['notary_signer_db_type'] = external_db_configs['notary_signer']['type'] config_dict['notary_signer_db_host'] = external_db_configs['notary_signer']['host'] config_dict['notary_signer_db_port'] = external_db_configs['notary_signer']['port'] config_dict['notary_signer_db_name'] = external_db_configs['notary_signer']['db_name'] @@ -291,6 +293,7 @@ def parse_yaml_config(config_file_path, with_notary, with_trivy, with_chartmuseu config_dict['notary_signer_db_password'] = external_db_configs['notary_signer']['password'] config_dict['notary_signer_db_sslmode'] = external_db_configs['notary_signer']['ssl_mode'] # notary server + config_dict['notary_server_db_type'] = external_db_configs['notary_server']['type'] config_dict['notary_server_db_host'] = external_db_configs['notary_server']['host'] config_dict['notary_server_db_port'] = external_db_configs['notary_server']['port'] config_dict['notary_server_db_name'] = external_db_configs['notary_server']['db_name'] diff --git a/make/photon/prepare/utils/notary.py b/make/photon/prepare/utils/notary.py index 59f6ee9ee615..bb644cbb93d7 100644 --- a/make/photon/prepare/utils/notary.py +++ b/make/photon/prepare/utils/notary.py @@ -7,6 +7,8 @@ notary_template_dir = os.path.join(templates_dir, "notary") notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja") notary_server_pg_template = os.path.join(notary_template_dir, "server-config.postgres.json.jinja") +notary_signer_mysql_template = os.path.join(notary_template_dir, "signer-config.mysql.json.jinja") +notary_server_mysql_template = os.path.join(notary_template_dir, "server-config.mysql.json.jinja") notary_server_nginx_config_template = os.path.join(templates_dir, "nginx", "notary.server.conf.jinja") notary_signer_env_template = os.path.join(notary_template_dir, "signer_env.jinja") notary_server_env_template = os.path.join(notary_template_dir, "server_env.jinja") @@ 
-14,6 +16,8 @@ notary_config_dir = os.path.join(config_dir, 'notary') notary_signer_pg_config = os.path.join(notary_config_dir, "signer-config.postgres.json") notary_server_pg_config = os.path.join(notary_config_dir, "server-config.postgres.json") +notary_signer_mysql_config = os.path.join(notary_config_dir, "signer-config.mysql.json") +notary_server_mysql_config = os.path.join(notary_config_dir, "server-config.mysql.json") notary_server_config_path = os.path.join(notary_config_dir, 'notary.server.conf') notary_signer_env_path = os.path.join(notary_config_dir, "signer_env") notary_server_env_path = os.path.join(notary_config_dir, "server_env") @@ -53,7 +57,7 @@ def prepare_env_notary(nginx_config_dir): signer_key_secret_path.exists() and signer_ca_cert_secret_path.exists() ): - # If the certs are exist in old localtion, move them to new location + # If the certs are exist in old location, move them to new location if old_signer_ca_cert_secret_path.exists() and old_signer_cert_secret_path.exists() and old_signer_key_secret_path.exists(): print("Copying certs for notary signer") shutil.copy2(old_signer_ca_cert_secret_path, signer_ca_cert_secret_path) @@ -121,6 +125,14 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa token_endpoint=config_dict['public_url'], **config_dict) + render_jinja( + notary_server_mysql_template, + notary_server_mysql_config, + uid=DEFAULT_UID, + gid=DEFAULT_GID, + token_endpoint=config_dict['public_url'], + **config_dict) + render_jinja( notary_server_env_template, notary_server_env_path, @@ -142,3 +154,11 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa gid=DEFAULT_GID, alias=default_alias, **config_dict) + + render_jinja( + notary_signer_mysql_template, + notary_signer_mysql_config, + uid=DEFAULT_UID, + gid=DEFAULT_GID, + alias=default_alias, + **config_dict) diff --git a/src/cmd/exporter/main.go b/src/cmd/exporter/main.go index 29f83fc2d760..28658187f2ed 100644 --- 
a/src/cmd/exporter/main.go +++ b/src/cmd/exporter/main.go @@ -20,19 +20,7 @@ func main() { viper.SetEnvPrefix("harbor") viper.AutomaticEnv() viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - dbCfg := &models.Database{ - Type: "postgresql", - PostGreSQL: &models.PostGreSQL{ - Host: viper.GetString("database.host"), - Port: viper.GetInt("database.port"), - Username: viper.GetString("database.username"), - Password: viper.GetString("database.password"), - Database: viper.GetString("database.dbname"), - SSLMode: viper.GetString("database.sslmode"), - MaxIdleConns: viper.GetInt("database.max_idle_conns"), - MaxOpenConns: viper.GetInt("database.max_open_conns"), - }, - } + dbCfg := Database() if err := dao.InitDatabase(dbCfg); err != nil { log.Fatalf("failed to initialize database: %v", err) } @@ -81,3 +69,40 @@ func main() { os.Exit(1) } } + +// Database returns database settings +func Database() *models.Database { + databaseType := viper.GetString("database.type") + + switch databaseType { + case "", "postgresql": + return &models.Database{ + Type: databaseType, + PostGreSQL: &models.PostGreSQL{ + Host: viper.GetString("database.host"), + Port: viper.GetInt("database.port"), + Username: viper.GetString("database.username"), + Password: viper.GetString("database.password"), + Database: viper.GetString("database.dbname"), + SSLMode: viper.GetString("database.sslmode"), + MaxIdleConns: viper.GetInt("database.max_idle_conns"), + MaxOpenConns: viper.GetInt("database.max_open_conns"), + }, + } + case "mariadb", "mysql": + return &models.Database{ + Type: databaseType, + MySQL: &models.MySQL{ + Host: viper.GetString("database.host"), + Port: viper.GetInt("database.port"), + Username: viper.GetString("database.username"), + Password: viper.GetString("database.password"), + Database: viper.GetString("database.dbname"), + MaxIdleConns: viper.GetInt("database.max_idle_conns"), + MaxOpenConns: viper.GetInt("database.max_open_conns"), + }, + } + } + + return nil +} diff 
--git a/src/cmd/migrate-patch/main.go b/src/cmd/migrate-patch/main.go index 61a41eadf28b..b4b4fd1b4e59 100644 --- a/src/cmd/migrate-patch/main.go +++ b/src/cmd/migrate-patch/main.go @@ -7,49 +7,64 @@ import ( "strings" "time" + _ "github.com/go-sql-driver/mysql" // register mysql driver _ "github.com/jackc/pgx/v4/stdlib" // registry pgx driver ) var dbURL string -const pgSQLAlterStmt string = `ALTER TABLE schema_migrations ADD COLUMN "dirty" boolean NOT NULL DEFAULT false` -const pgSQLCheckColStmt string = `SELECT T1.C1, T2.C2 FROM +const sqlAlterStmt string = `ALTER TABLE schema_migrations ADD COLUMN dirty boolean NOT NULL DEFAULT false` +const sqlCheckColStmt string = `SELECT T1.C1, T2.C2 FROM (SELECT COUNT(*) AS C1 FROM information_schema.tables WHERE table_name='schema_migrations') T1, (SELECT COUNT(*) AS C2 FROM information_schema.columns WHERE table_name='schema_migrations' and column_name='dirty') T2` -const pgSQLDelRows string = `DELETE FROM schema_migrations t WHERE t.version < ( SELECT MAX(version) FROM schema_migrations )` +const sqlDelRows string = `DELETE FROM schema_migrations WHERE version < ( SELECT x.v FROM ( SELECT MAX(version) AS v FROM schema_migrations ) x )` func init() { - urlUsage := `The URL to the target database (driver://url). Currently it only supports postgres` + urlUsage := `The URL to the target database (driver://url).
Currently it only supports postgres/mariadb/mysql` flag.StringVar(&dbURL, "database", "", urlUsage) } func main() { flag.Parse() log.Printf("Updating database.") - if !strings.HasPrefix(dbURL, "postgres://") { + var ( + db *sql.DB + err error + ) + + switch { + case strings.HasPrefix(dbURL, "postgres://"): + log.Printf("DB type is postgres.") + db, err = sql.Open("pgx", dbURL) + case strings.HasPrefix(dbURL, "mysql://"): + log.Printf("DB type is mysql.") + dbURL = strings.TrimPrefix(dbURL, "mysql://") + db, err = sql.Open("mysql", dbURL) + default: log.Fatalf("Invalid URL: '%s'\n", dbURL) } - db, err := sql.Open("pgx", dbURL) if err != nil { log.Fatalf("Failed to connect to Database, error: %v\n", err) } + defer db.Close() + c := make(chan struct{}, 1) go func() { err := db.Ping() for ; err != nil; err = db.Ping() { - log.Println("Failed to Ping DB, sleep for 1 second.") + log.Printf("Failed to Ping DB:%s, sleep for 1 second.", err) time.Sleep(1 * time.Second) } c <- struct{}{} }() + select { case <-c: case <-time.After(30 * time.Second): log.Fatal("Failed to connect DB after 30 seconds, time out.
\n") - } - row := db.QueryRow(pgSQLCheckColStmt) + row := db.QueryRow(sqlCheckColStmt) var tblCount, colCount int if err := row.Scan(&tblCount, &colCount); err != nil { log.Fatalf("Failed to check schema_migrations table, error: %v \n", err) @@ -62,10 +77,10 @@ func main() { log.Println("schema_migrations table does not require update, skip.") return } - if _, err := db.Exec(pgSQLDelRows); err != nil { + if _, err := db.Exec(sqlDelRows); err != nil { log.Fatalf("Failed to clean up table, error: %v", err) } - if _, err := db.Exec(pgSQLAlterStmt); err != nil { + if _, err := db.Exec(sqlAlterStmt); err != nil { log.Fatalf("Failed to update database, error: %v \n", err) } log.Println("Done updating database.") diff --git a/src/common/const.go b/src/common/const.go index eae703992df8..83944d51d985 100755 --- a/src/common/const.go +++ b/src/common/const.go @@ -59,6 +59,13 @@ const ( PostGreSQLSSLMode = "postgresql_sslmode" PostGreSQLMaxIdleConns = "postgresql_max_idle_conns" PostGreSQLMaxOpenConns = "postgresql_max_open_conns" + MySQLHOST = "mysql_host" + MySQLPort = "mysql_port" + MySQLUsername = "mysql_username" + MySQLPassword = "mysql_password" + MySQLDatabase = "mysql_database" + MySQLMaxIdleConns = "mysql_max_idle_conns" + MySQLMaxOpenConns = "mysql_max_open_conns" SelfRegistration = "self_registration" CoreURL = "core_url" CoreLocalURL = "core_local_url" diff --git a/src/common/dao/base.go b/src/common/dao/base.go index bc5535741e63..f23f769ead71 100644 --- a/src/common/dao/base.go +++ b/src/common/dao/base.go @@ -17,14 +17,14 @@ package dao import ( "errors" "fmt" - proModels "github.com/goharbor/harbor/src/pkg/project/models" - userModels "github.com/goharbor/harbor/src/pkg/user/models" "strconv" "sync" "github.com/astaxie/beego/orm" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/lib/log" + proModels "github.com/goharbor/harbor/src/pkg/project/models" + userModels "github.com/goharbor/harbor/src/pkg/user/models" ) const ( @@ -86,6 
+86,16 @@ func getDatabase(database *models.Database) (db Database, err error) { database.PostGreSQL.MaxIdleConns, database.PostGreSQL.MaxOpenConns, ) + case "mariadb", "mysql": + db = NewMySQL( + database.MySQL.Host, + strconv.Itoa(database.MySQL.Port), + database.MySQL.Username, + database.MySQL.Password, + database.MySQL.Database, + database.MySQL.MaxIdleConns, + database.MySQL.MaxOpenConns, + ) default: err = fmt.Errorf("invalid database: %s", database.Type) } @@ -121,6 +131,9 @@ func ClearTable(table string) error { if table == "project_metadata" { // make sure library is public sql = fmt.Sprintf("delete from %s where id > 1", table) } + if table == "blob" && o.Driver().Type() == orm.DRMySQL { + sql = fmt.Sprintf("delete from `%s` where 1=1", table) + } _, err := o.Raw(sql).Exec() return err } diff --git a/src/common/dao/dao_test.go b/src/common/dao/dao_test.go index 12bfb379711a..46c2fcfab6c2 100644 --- a/src/common/dao/dao_test.go +++ b/src/common/dao/dao_test.go @@ -97,13 +97,15 @@ const password string = "Abc12345" const projectName string = "test_project" func TestMain(m *testing.M) { - databases := []string{"postgresql"} + databases := []string{"postgresql", "mysql", "mariadb"} for _, database := range databases { log.Infof("run test cases for database: %s", database) result := 1 switch database { case "postgresql": PrepareTestForPostgresSQL() + case "mysql", "mariadb": + PrepareTestForMySQL() default: log.Fatalf("invalid database: %s", database) } diff --git a/src/common/dao/mysql.go b/src/common/dao/mysql.go index fda9ba28d3ce..6de643e1cd3b 100644 --- a/src/common/dao/mysql.go +++ b/src/common/dao/mysql.go @@ -16,29 +16,41 @@ package dao import ( "fmt" + "os" + "strconv" "time" "github.com/astaxie/beego/orm" _ "github.com/go-sql-driver/mysql" // register mysql driver + "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/log" + "github.com/golang-migrate/migrate/v4" + _ 
"github.com/golang-migrate/migrate/v4/database/mysql" // import mysql driver for migrator ) +const defaultMysqlMigrationPath = "migrations/mysql/" + type mysql struct { - host string - port string - usr string - pwd string - database string + host string + port string + usr string + pwd string + database string + maxIdleConns int + maxOpenConns int } // NewMySQL returns an instance of mysql -func NewMySQL(host, port, usr, pwd, database string) Database { +func NewMySQL(host, port, usr, pwd, database string, maxIdleConns int, maxOpenConns int) Database { return &mysql{ - host: host, - port: port, - usr: usr, - pwd: pwd, - database: database, + host: host, + port: port, + usr: usr, + pwd: pwd, + database: database, + maxIdleConns: maxIdleConns, + maxOpenConns: maxOpenConns, } } @@ -59,10 +71,11 @@ func (m *mysql) Register(alias ...string) error { } conn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", m.usr, m.pwd, m.host, m.port, m.database) - if err := orm.RegisterDataBase(an, "mysql", conn); err != nil { + if err := orm.RegisterDataBase(an, "mysql", conn, m.maxIdleConns, m.maxOpenConns); err != nil { return err } db, _ := orm.GetDB(an) + db.SetMaxOpenConns(m.maxOpenConns) db.SetConnMaxLifetime(5 * time.Minute) return nil } @@ -74,6 +87,34 @@ func (m *mysql) Name() string { -// UpgradeSchema is not supported for MySQL, it assumes the schema is initialized and up to date in the DB instance. +// UpgradeSchema upgrades the DB schema for MySQL to the latest version by running the migration scripts.
func (m *mysql) UpgradeSchema() error { + port, err := strconv.Atoi(m.port) + if err != nil { + return err + } + mg, err := NewMysqlMigrator(&models.MySQL{ + Host: m.host, + Port: int(port), + Username: m.usr, + Password: m.pwd, + Database: m.database, + }) + if err != nil { + return err + } + defer func() { + srcErr, dbErr := mg.Close() + if srcErr != nil || dbErr != nil { + log.Warningf("Failed to close migrator, source error: %v, db error: %v", srcErr, dbErr) + } + }() + log.Infof("Upgrading schema for mysql ...") + err = mg.Up() + if err == migrate.ErrNoChange { + log.Infof("No change in schema, skip.") + } else if err != nil { // migrate.ErrLockTimeout will be thrown when another process is doing migration and timeout. + log.Errorf("Failed to upgrade schema, error: %q", err) + return err + } return nil } @@ -82,3 +123,21 @@ func (m *mysql) String() string { return fmt.Sprintf("type-%s host-%s port-%s user-%s database-%s", m.Name(), m.host, m.port, m.usr, m.database) } + +// NewMysqlMigrator creates a migrator base on the information +func NewMysqlMigrator(database *models.MySQL) (*migrate.Migrate, error) { + dbURL := fmt.Sprintf("mysql://%s:%s@tcp(%s:%d)/%s", database.Username, + database.Password, database.Host, database.Port, database.Database) + // For UT + path := os.Getenv("MYSQL_MIGRATION_SCRIPTS_PATH") + if len(path) == 0 { + path = defaultMysqlMigrationPath + } + srcURL := fmt.Sprintf("file://%s", path) + m, err := migrate.New(srcURL, dbURL) + if err != nil { + return nil, err + } + m.Log = newMigrateLogger() + return m, nil +} diff --git a/src/common/dao/testutils.go b/src/common/dao/testutils.go index 63935c9382d0..a9f28ab4faa0 100644 --- a/src/common/dao/testutils.go +++ b/src/common/dao/testutils.go @@ -27,6 +27,44 @@ var defaultRegistered = false // PrepareTestForMySQL is for test only. 
func PrepareTestForMySQL() { + dbHost := os.Getenv("MYSQL_HOST") + if len(dbHost) == 0 { + log.Fatalf("environment variable MYSQL_HOST is not set") + } + dbUser := os.Getenv("MYSQL_USERNAME") + if len(dbUser) == 0 { + log.Fatalf("environment variable MYSQL_USERNAME is not set") + } + dbPortStr := os.Getenv("MYSQL_PORT") + if len(dbPortStr) == 0 { + log.Fatalf("environment variable MYSQL_PORT is not set") + } + dbPort, err := strconv.Atoi(dbPortStr) + if err != nil { + log.Fatalf("invalid MYSQL_PORT: %v", err) + } + + dbPassword := os.Getenv("MYSQL_PASSWORD") + dbDatabase := os.Getenv("MYSQL_DATABASE") + if len(dbDatabase) == 0 { + log.Fatalf("environment variable MYSQL_DATABASE is not set") + } + + database := &models.Database{ + Type: "mysql", + MySQL: &models.MySQL{ + Host: dbHost, + Port: dbPort, + Username: dbUser, + Password: dbPassword, + Database: dbDatabase, + MaxIdleConns: 50, + MaxOpenConns: 100, + }, + } + + log.Infof("MYSQL_HOST: %s, MYSQL_USERNAME: %s, MYSQL_PORT: %d, MYSQL_PASSWORD: %s\n", dbHost, dbUser, dbPort, dbPassword) + initDatabaseForTest(database) } // PrepareTestForSQLite is for test only. diff --git a/src/common/models/database.go b/src/common/models/database.go index 3fb031240919..c68e515543ac 100644 --- a/src/common/models/database.go +++ b/src/common/models/database.go @@ -18,15 +18,18 @@ package models type Database struct { Type string `json:"type"` PostGreSQL *PostGreSQL `json:"postgresql,omitempty"` + MySQL *MySQL `json:"mysql,omitempty"` } // MySQL ... type MySQL struct { - Host string `json:"host"` - Port int `json:"port"` - Username string `json:"username"` - Password string `json:"password,omitempty"` - Database string `json:"database"` + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + Database string `json:"database"` + MaxIdleConns int `json:"max_idle_conns"` + MaxOpenConns int `json:"max_open_conns"` } // SQLite ...
diff --git a/src/lib/config/metadata/metadatalist.go b/src/lib/config/metadata/metadatalist.go index f17aeeeec49c..9e80e85c726c 100644 --- a/src/lib/config/metadata/metadatalist.go +++ b/src/lib/config/metadata/metadatalist.go @@ -114,6 +114,14 @@ var ( {Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false}, {Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false}, + {Name: common.MySQLDatabase, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "MYSQL_DATABASE", DefaultValue: "registry", ItemType: &StringType{}, Editable: false}, + {Name: common.MySQLHOST, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "MYSQL_HOST", DefaultValue: "mysql", ItemType: &StringType{}, Editable: false}, + {Name: common.MySQLPassword, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "MYSQL_PASSWORD", DefaultValue: "root123", ItemType: &PasswordType{}, Editable: false}, + {Name: common.MySQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "MYSQL_PORT", DefaultValue: "3306", ItemType: &PortType{}, Editable: false}, + {Name: common.MySQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "MYSQL_USERNAME", DefaultValue: "root", ItemType: &StringType{}, Editable: false}, + {Name: common.MySQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "MYSQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false}, + {Name: common.MySQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "MYSQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false}, + {Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false, Description: 
`Indicate who can create projects, it could be ''adminonly'' or ''everyone''.`}, {Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false, Description: `The flag to indicate whether Harbor is in readonly mode.`}, diff --git a/src/lib/config/systemconfig.go b/src/lib/config/systemconfig.go index e7fe5838dc79..c0350b34e88c 100644 --- a/src/lib/config/systemconfig.go +++ b/src/lib/config/systemconfig.go @@ -31,14 +31,15 @@ package config import ( "context" "errors" + "os" + "strconv" + "strings" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/secret" "github.com/goharbor/harbor/src/lib/encrypt" "github.com/goharbor/harbor/src/lib/log" - "os" - "strconv" - "strings" ) var ( @@ -248,17 +249,36 @@ func InitialAdminPassword() (string, error) { func Database() (*models.Database, error) { database := &models.Database{} database.Type = DefaultMgr().Get(backgroundCtx, common.DatabaseType).GetString() - postgresql := &models.PostGreSQL{ - Host: DefaultMgr().Get(backgroundCtx, common.PostGreSQLHOST).GetString(), - Port: DefaultMgr().Get(backgroundCtx, common.PostGreSQLPort).GetInt(), - Username: DefaultMgr().Get(backgroundCtx, common.PostGreSQLUsername).GetString(), - Password: DefaultMgr().Get(backgroundCtx, common.PostGreSQLPassword).GetPassword(), - Database: DefaultMgr().Get(backgroundCtx, common.PostGreSQLDatabase).GetString(), - SSLMode: DefaultMgr().Get(backgroundCtx, common.PostGreSQLSSLMode).GetString(), - MaxIdleConns: DefaultMgr().Get(backgroundCtx, common.PostGreSQLMaxIdleConns).GetInt(), - MaxOpenConns: DefaultMgr().Get(backgroundCtx, common.PostGreSQLMaxOpenConns).GetInt(), + + switch database.Type { + case "", "postgresql": + postgresql := &models.PostGreSQL{ + Host: DefaultMgr().Get(backgroundCtx, common.PostGreSQLHOST).GetString(), + Port: DefaultMgr().Get(backgroundCtx, 
common.PostGreSQLPort).GetInt(), + Username: DefaultMgr().Get(backgroundCtx, common.PostGreSQLUsername).GetString(), + Password: DefaultMgr().Get(backgroundCtx, common.PostGreSQLPassword).GetPassword(), + Database: DefaultMgr().Get(backgroundCtx, common.PostGreSQLDatabase).GetString(), + SSLMode: DefaultMgr().Get(backgroundCtx, common.PostGreSQLSSLMode).GetString(), + MaxIdleConns: DefaultMgr().Get(backgroundCtx, common.PostGreSQLMaxIdleConns).GetInt(), + MaxOpenConns: DefaultMgr().Get(backgroundCtx, common.PostGreSQLMaxOpenConns).GetInt(), + } + database.PostGreSQL = postgresql + case "mariadb", "mysql": + mysql := &models.MySQL{ + Host: DefaultMgr().Get(backgroundCtx, common.MySQLHOST).GetString(), + Port: DefaultMgr().Get(backgroundCtx, common.MySQLPort).GetInt(), + Username: DefaultMgr().Get(backgroundCtx, common.MySQLUsername).GetString(), + Password: DefaultMgr().Get(backgroundCtx, common.MySQLPassword).GetString(), + Database: DefaultMgr().Get(backgroundCtx, common.MySQLDatabase).GetString(), + MaxIdleConns: DefaultMgr().Get(backgroundCtx, common.MySQLMaxIdleConns).GetInt(), + MaxOpenConns: DefaultMgr().Get(backgroundCtx, common.MySQLMaxOpenConns).GetInt(), + } + database.MySQL = mysql } - database.PostGreSQL = postgresql return database, nil } + +func DatabaseType() string { + return os.Getenv("DATABASE_TYPE") +} diff --git a/src/migration/migration.go b/src/migration/migration.go index 230c1c08f1c3..281cb3356409 100644 --- a/src/migration/migration.go +++ b/src/migration/migration.go @@ -34,8 +34,17 @@ const ( // MigrateDB upgrades DB schema and do necessary transformation of the data in DB func MigrateDB(database *models.Database) error { + var migrator *migrate.Migrate + var err error + // check the database schema version - migrator, err := dao.NewMigrator(database.PostGreSQL) + switch database.Type { + case "", "postgresql": + migrator, err = dao.NewMigrator(database.PostGreSQL) + case "mariadb", "mysql": + migrator, err = 
dao.NewMysqlMigrator(database.MySQL) + } + if err != nil { return err } diff --git a/src/pkg/blob/dao/dao_test.go b/src/pkg/blob/dao/dao_test.go index a69ed2ff111d..1e5be1442766 100644 --- a/src/pkg/blob/dao/dao_test.go +++ b/src/pkg/blob/dao/dao_test.go @@ -31,7 +31,7 @@ type DaoTestSuite struct { func (suite *DaoTestSuite) SetupSuite() { suite.Suite.SetupSuite() - suite.Suite.ClearTables = []string{"blob", "artifact_blob", "project_blob"} + suite.Suite.ClearTables = []string{`blob`, "artifact_blob", "project_blob"} suite.dao = New() } diff --git a/src/pkg/blob/dao/mysql_dao.go b/src/pkg/blob/dao/mysql_dao.go new file mode 100644 index 000000000000..fa705cb61a2b --- /dev/null +++ b/src/pkg/blob/dao/mysql_dao.go @@ -0,0 +1,171 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "fmt" + "time" + + "github.com/docker/distribution/manifest/schema2" + + "github.com/goharbor/harbor/src/lib/log" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg/blob/models" +) + +// NewMysqlDao returns an instance of the default DAO +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +type mysqlDao struct { + *dao +} + +func (d *mysqlDao) UpdateBlobStatus(ctx context.Context, blob *models.Blob) (int64, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return -1, err + } + + var sql string + if blob.Status == models.StatusNone { + sql = "UPDATE `blob` SET version = version + 1, update_time = ?, status = ? where id = ? AND version >= ? AND status IN (%s)" + } else { + sql = "UPDATE `blob` SET version = version + 1, update_time = ?, status = ? where id = ? AND version = ? AND status IN (%s)" + } + + var newVersion int64 + params := []interface{}{time.Now(), blob.Status, blob.ID, blob.Version} + stats := models.StatusMap[blob.Status] + for _, stat := range stats { + params = append(params, stat) + } + + if res, err := o.Raw(fmt.Sprintf(sql, orm.ParamPlaceholderForIn(len(models.StatusMap[blob.Status]))), params...).Exec(); err != nil { + return -1, err + } else if row, err := res.RowsAffected(); err == nil && row == 0 { + log.Warningf("no blob is updated according to query condition, id: %d, status_in, %v", blob.ID, models.StatusMap[blob.Status]) + return 0, nil + } + + selectVersionSQL := "SELECT version FROM `blob` WHERE id = ?" 
+ if err := o.Raw(selectVersionSQL, blob.ID).QueryRow(&newVersion); err != nil { + return 0, nil + } + + blob.Version = newVersion + return 1, nil +} + +func (d *mysqlDao) SumBlobsSizeByProject(ctx context.Context, projectID int64, excludeForeignLayer bool) (int64, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + + params := []interface{}{projectID} + sql := "SELECT SUM(size) FROM `blob` JOIN project_blob ON `blob`.id = project_blob.blob_id AND project_id = ?" + if excludeForeignLayer { + foreignLayerTypes := []interface{}{ + schema2.MediaTypeForeignLayer, + } + + sql = fmt.Sprintf("%s AND content_type NOT IN (%s)", sql, orm.ParamPlaceholderForIn(len(foreignLayerTypes))) + params = append(params, foreignLayerTypes...) + } + + var totalSize int64 + if err := o.Raw(sql, params...).QueryRow(&totalSize); err != nil { + return 0, err + } + + return totalSize, nil +} + +// SumBlobsSize returns sum size of all blobs skip foreign blobs when `excludeForeignLayer` is true +func (d *mysqlDao) SumBlobsSize(ctx context.Context, excludeForeignLayer bool) (int64, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + + params := []interface{}{} + + sql := "SELECT SUM(size) FROM `blob`" + if excludeForeignLayer { + foreignLayerTypes := []interface{}{ + schema2.MediaTypeForeignLayer, + } + sql = fmt.Sprintf("%s Where content_type NOT IN (%s)", sql, orm.ParamPlaceholderForIn(len(foreignLayerTypes))) + params = append(params, foreignLayerTypes...) + } + + var totalSize int64 + if err := o.Raw(sql, params...).QueryRow(&totalSize); err != nil { + return 0, err + } + + return totalSize, nil +} + +func (d *mysqlDao) ExistProjectBlob(ctx context.Context, projectID int64, blobDigest string) (bool, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return false, err + } + + sql := "SELECT COUNT(*) FROM project_blob JOIN `blob` ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?" 
+ + var count int64 + if err := o.Raw(sql, projectID, blobDigest).QueryRow(&count); err != nil { + return false, err + } + + return count > 0, nil +} + +func (d *mysqlDao) GetBlobsNotRefedByProjectBlob(ctx context.Context, timeWindowHours int64) ([]*models.Blob, error) { + var noneRefed []*models.Blob + ormer, err := orm.FromContext(ctx) + if err != nil { + return noneRefed, err + } + + sql := fmt.Sprintf("SELECT b.id, b.digest, b.content_type, b.status, b.version, b.size FROM `blob` AS b LEFT JOIN project_blob pb ON b.id = pb.blob_id WHERE pb.id IS NULL AND b.update_time <= date_sub(now(), interval %d hour);", timeWindowHours) + _, err = ormer.Raw(sql).QueryRows(&noneRefed) + if err != nil { + return noneRefed, err + } + + return noneRefed, nil +} + +func (d *mysqlDao) GetBlobsByArtDigest(ctx context.Context, digest string) ([]*models.Blob, error) { + var blobs []*models.Blob + ormer, err := orm.FromContext(ctx) + if err != nil { + return blobs, err + } + + sql := "SELECT b.id, b.digest, b.content_type, b.status, b.version, b.size FROM artifact_blob AS ab LEFT JOIN `blob` b ON ab.digest_blob = b.digest WHERE ab.digest_af = ?" + _, err = ormer.Raw(sql, digest).QueryRows(&blobs) + if err != nil { + return blobs, err + } + + return blobs, nil +} diff --git a/src/pkg/blob/dao/mysql_dao_test.go b/src/pkg/blob/dao/mysql_dao_test.go new file mode 100644 index 000000000000..22597b594355 --- /dev/null +++ b/src/pkg/blob/dao/mysql_dao_test.go @@ -0,0 +1,489 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/blob/models" + htesting "github.com/goharbor/harbor/src/testing" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO +} + +func (suite *MysqlDaoTestSuite) SetupSuite() { + suite.Suite.SetupSuite() + suite.Suite.ClearTables = []string{"blob", "artifact_blob", "project_blob"} + suite.dao = NewMysqlDao() +} + +func (suite *MysqlDaoTestSuite) TestCreateArtifactAndBlob() { + ctx := suite.Context() + + artifactDigest := suite.DigestString() + blobDigest := suite.DigestString() + + _, err := suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.Nil(err) + + _, err = suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestGetArtifactAndBlob() { + ctx := suite.Context() + + artifactDigest := suite.DigestString() + blobDigest := suite.DigestString() + + md, err := suite.dao.GetArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.IsNotFoundErr(err) + suite.Nil(md) + + _, err = suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.Nil(err) + + md, err = suite.dao.GetArtifactAndBlob(ctx, artifactDigest, blobDigest) + if suite.Nil(err) { + suite.Equal(artifactDigest, md.DigestAF) + suite.Equal(blobDigest, md.DigestBlob) + } +} + +func (suite *MysqlDaoTestSuite) TestDeleteArtifactAndBlobByArtifact() { + ctx := suite.Context() + + artifactDigest := suite.DigestString() + blobDigest1 := suite.DigestString() + blobDigest2 := suite.DigestString() + + _, err := suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest1) + suite.Nil(err) + + _, err = suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest2) + suite.Nil(err) + + 
digests, err := suite.dao.GetAssociatedBlobDigestsForArtifact(ctx, artifactDigest) + suite.Nil(err) + suite.Len(digests, 2) + + suite.Nil(suite.dao.DeleteArtifactAndBlobByArtifact(ctx, artifactDigest)) + + digests, err = suite.dao.GetAssociatedBlobDigestsForArtifact(ctx, artifactDigest) + suite.Nil(err) + suite.Len(digests, 0) +} + +func (suite *MysqlDaoTestSuite) TestGetAssociatedBlobDigestsForArtifact() { + +} + +func (suite *MysqlDaoTestSuite) TestCreateBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + + _, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + + _, err = suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestGetBlobByDigest() { + ctx := suite.Context() + + digest := suite.DigestString() + + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + suite.IsNotFoundErr(err) + suite.Nil(blob) + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + + blob, err = suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(digest, blob.Digest) + suite.Equal(models.StatusNone, blob.Status) + } +} + +func (suite *MysqlDaoTestSuite) TestUpdateBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(0), blob.Size) + } + + blob.Size = 100 + if suite.Nil(suite.dao.UpdateBlob(ctx, blob)) { + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(100), blob.Size) + suite.Equal(int64(0), blob.Version) + } + } + + blob.Status = "deleting" + suite.Nil(suite.dao.UpdateBlob(ctx, blob), "cannot be updated.") + blob, err = suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(0), blob.Version) + suite.Equal(models.StatusNone, blob.Status) + } +} + +func (suite *MysqlDaoTestSuite) TestUpdateBlobStatus() { + ctx := 
suite.Context() + + digest := suite.DigestString() + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(0), blob.Size) + } + + // StatusNone cannot be updated to StatusDeleting directly + blob.Status = models.StatusDeleting + count, err := suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(0), count) + + blob.Status = models.StatusDelete + count, err = suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(1), count) + + blob.Status = models.StatusDeleting + count, err = suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(1), count) + + blob.Status = models.StatusDeleteFailed + count, err = suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(1), count) + + blob, err = suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(3), blob.Version) + suite.Equal(models.StatusDeleteFailed, blob.Status) + } +} + +func (suite *MysqlDaoTestSuite) TestListBlobs() { + ctx := suite.Context() + + digest1 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest1}) + + digest2 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest2}) + + ol := q.OrList{ + Values: []interface{}{ + digest1, + }, + } + blobs, err := suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"digest": &ol})) + if suite.Nil(err) { + suite.Len(blobs, 1) + } + + ol = q.OrList{ + Values: []interface{}{ + digest1, + digest2, + }, + } + blobs, err = suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"digest": &ol})) + if suite.Nil(err) { + suite.Len(blobs, 2) + } + + rg := q.Range{ + Max: time.Now().Add(-time.Hour).Format(time.RFC3339), + } + blobs, err = suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"update_time": &rg})) + if suite.Nil(err) { + suite.Len(blobs, 0) + } + + digest3 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest3, UpdateTime: 
time.Now().Add(-time.Hour * 2)}) + blobs, err = suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"update_time": &rg})) + if suite.Nil(err) { + suite.Len(blobs, 1) + } + +} + +func (suite *MysqlDaoTestSuite) TestListBlobsAssociatedWithArtifact() { + +} + +func (suite *MysqlDaoTestSuite) TestSumBlobsSize() { + ctx := suite.Context() + + size1, err := suite.dao.SumBlobsSize(ctx, true) + suite.Nil(err) + + digest1 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest1, Size: 999}) + + size2, err := suite.dao.SumBlobsSize(ctx, true) + suite.Nil(err) + + suite.Equal(int64(999), size2-size1) +} + +func (suite *MysqlDaoTestSuite) TestFindBlobsShouldUnassociatedWithProject() { + ctx := suite.Context() + + suite.WithProject(func(projectID int64, projectName string) { + artifact1 := suite.DigestString() + artifact2 := suite.DigestString() + + sql := "INSERT INTO artifact (`type`, media_type, manifest_media_type, digest, project_id, repository_id, repository_name) VALUES ('image', 'media_type', 'manifest_media_type', ?, ?, ?, 'library/hello-world')" + suite.ExecSQL(sql, artifact1, projectID, 10) + suite.ExecSQL(sql, artifact2, projectID, 10) + + defer suite.ExecSQL(`DELETE FROM artifact WHERE project_id = ?`, projectID) + + digest1 := suite.DigestString() + digest2 := suite.DigestString() + digest3 := suite.DigestString() + digest4 := suite.DigestString() + digest5 := suite.DigestString() + + var ol q.OrList + blobDigests := []string{digest1, digest2, digest3, digest4, digest5} + for _, digest := range blobDigests { + blobID, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + if suite.Nil(err) { + suite.dao.CreateProjectBlob(ctx, projectID, blobID) + } + ol.Values = append(ol.Values, digest) + } + + blobs, err := suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"digest": &ol})) + suite.Nil(err) + suite.Len(blobs, 5) + + for _, digest := range []string{digest1, digest2, digest3} { + suite.dao.CreateArtifactAndBlob(ctx, artifact1, digest) + } + + for _, 
digest := range blobDigests { + suite.dao.CreateArtifactAndBlob(ctx, artifact2, digest) + } + + { + results, err := suite.dao.FindBlobsShouldUnassociatedWithProject(ctx, projectID, blobs) + suite.Nil(err) + suite.Len(results, 0) + } + + suite.ExecSQL(`DELETE FROM artifact WHERE digest = ?`, artifact2) + + { + results, err := suite.dao.FindBlobsShouldUnassociatedWithProject(ctx, projectID, blobs) + suite.Nil(err) + if suite.Len(results, 2) { + suite.Contains([]string{results[0].Digest, results[1].Digest}, digest4) + suite.Contains([]string{results[0].Digest, results[1].Digest}, digest5) + } + + } + }) + +} + +func (suite *MysqlDaoTestSuite) TestCreateProjectBlob() { + ctx := suite.Context() + + projectID := int64(1) + blobID := int64(1000) + + _, err := suite.dao.CreateProjectBlob(ctx, projectID, blobID) + suite.Nil(err) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID, blobID) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestExistProjectBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + + projectID := int64(1) + + exist, err := suite.dao.ExistProjectBlob(ctx, projectID, digest) + suite.Nil(err) + suite.False(exist) + + blobID, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID, blobID) + suite.Nil(err) + + exist, err = suite.dao.ExistProjectBlob(ctx, projectID, digest) + suite.Nil(err) + suite.True(exist) +} + +func (suite *MysqlDaoTestSuite) TestDeleteProjectBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + blobID, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + + projectID1 := int64(1) + projectID2 := int64(2) + projectID3 := int64(3) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID1, blobID) + suite.Nil(err) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID2, blobID) + suite.Nil(err) + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID1, digest) + suite.Nil(err) + 
suite.True(exist) + } + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID2, digest) + suite.Nil(err) + suite.True(exist) + } + + suite.Nil(suite.dao.DeleteProjectBlob(ctx, projectID3, blobID)) + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID1, digest) + suite.Nil(err) + suite.True(exist) + } + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID2, digest) + suite.Nil(err) + suite.True(exist) + } + + suite.Nil(suite.dao.DeleteProjectBlob(ctx, projectID1, blobID)) + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID1, digest) + suite.Nil(err) + suite.False(exist) + } + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID2, digest) + suite.Nil(err) + suite.True(exist) + } +} + +func (suite *MysqlDaoTestSuite) TestDelete() { + ctx := suite.Context() + + err := suite.dao.DeleteBlob(ctx, 100021) + suite.Require().NotNil(err) + suite.True(errors.IsErr(err, errors.NotFoundCode)) + + digest := suite.DigestString() + id, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + err = suite.dao.DeleteBlob(ctx, id) + suite.Require().Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestGetBlobsNotRefedByProjectBlob() { + ctx := suite.Context() + + blobs, err := suite.dao.GetBlobsNotRefedByProjectBlob(ctx, 0) + suite.Require().Nil(err) + beforeAdd := len(blobs) + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: suite.DigestString()}) + suite.dao.CreateBlob(ctx, &models.Blob{Digest: suite.DigestString()}) + digest := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + suite.Nil(err) + + projectID := int64(1) + _, err = suite.dao.CreateProjectBlob(ctx, projectID, blob.ID) + suite.Nil(err) + + blobs, err = suite.dao.GetBlobsNotRefedByProjectBlob(ctx, 0) + suite.Require().Nil(err) + suite.Require().Equal(2+beforeAdd, len(blobs)) + + blobs, err = suite.dao.GetBlobsNotRefedByProjectBlob(ctx, 2) + suite.Require().Nil(err) + 
suite.Require().Equal(0, len(blobs))
+}
+
+func (suite *MysqlDaoTestSuite) TestGetBlobsByArtDigest() {
+	ctx := suite.Context()
+	afDigest := suite.DigestString()
+	blobs, err := suite.dao.GetBlobsByArtDigest(ctx, afDigest)
+	suite.Nil(err)
+	suite.Require().Equal(0, len(blobs))
+
+	suite.dao.CreateBlob(ctx, &models.Blob{Digest: afDigest})
+	blobDigest1 := suite.DigestString()
+	blobDigest2 := suite.DigestString()
+	suite.dao.CreateBlob(ctx, &models.Blob{Digest: blobDigest1})
+	suite.dao.CreateBlob(ctx, &models.Blob{Digest: blobDigest2})
+
+	_, err = suite.dao.CreateArtifactAndBlob(ctx, afDigest, afDigest)
+	suite.Nil(err)
+	_, err = suite.dao.CreateArtifactAndBlob(ctx, afDigest, blobDigest1)
+	suite.Nil(err)
+	_, err = suite.dao.CreateArtifactAndBlob(ctx, afDigest, blobDigest2)
+	suite.Nil(err)
+
+	blobs, err = suite.dao.GetBlobsByArtDigest(ctx, afDigest)
+	suite.Nil(err)
+	suite.Require().Equal(3, len(blobs))
+}
+
+func TestMysqlDaoTestSuite(t *testing.T) {
+	suite.Run(t, &MysqlDaoTestSuite{})
+}
diff --git a/src/pkg/blob/manager.go b/src/pkg/blob/manager.go
index 70ab9c22ddd5..52585f7f07a6 100644
--- a/src/pkg/blob/manager.go
+++ b/src/pkg/blob/manager.go
@@ -16,6 +16,8 @@ package blob
 
 import (
 	"context"
+
+	"github.com/goharbor/harbor/src/lib/config"
 	"github.com/goharbor/harbor/src/lib/errors"
 	"github.com/goharbor/harbor/src/lib/q"
 	"github.com/goharbor/harbor/src/pkg/blob/dao"
@@ -162,5 +164,15 @@ func (m *manager) CalculateTotalSize(ctx context.Context, excludeForeignLayer bo
 
 // NewManager returns blob manager
 func NewManager() Manager {
+
+	dbType := config.DatabaseType()
+
+	switch dbType {
+	case "", "postgresql":
+		return &manager{dao: dao.New()}
+	case "mysql", "mariadb":
+		return &manager{dao: dao.NewMysqlDao()}
+	}
+
 	return &manager{dao: dao.New()}
 }
diff --git a/src/pkg/config/db/manager_test.go b/src/pkg/config/db/manager_test.go
index 00517b8ebd21..e8c856fd9333 100644
--- a/src/pkg/config/db/manager_test.go
+++ b/src/pkg/config/db/manager_test.go
@@ 
-115,6 +115,7 @@ func TestCfgManager_GetDatabaseCfg(t *testing.T) { "postgresql_sslmode": "disable", }) dbCfg := configManager.GetDatabaseCfg() + assert.Equal(t, "postgresql", dbCfg.Type) assert.Equal(t, "localhost", dbCfg.PostGreSQL.Host) assert.Equal(t, "registry", dbCfg.PostGreSQL.Database) assert.Equal(t, "root123", dbCfg.PostGreSQL.Password) @@ -122,6 +123,24 @@ func TestCfgManager_GetDatabaseCfg(t *testing.T) { assert.Equal(t, "disable", dbCfg.PostGreSQL.SSLMode) } +func TestCfgManager_GetDatabaseCfg_Mysql(t *testing.T) { + configManager.UpdateConfig(testCtx, map[string]interface{}{ + "database_type": "mysql", + "mysql_host": "localhost", + "mysql_database": "registry", + "mysql_password": "root123", + "mysql_username": "postgres", + "mysql_port": 3306, + }) + dbCfg := configManager.GetDatabaseCfg() + assert.Equal(t, "mysql", dbCfg.Type) + assert.Equal(t, "localhost", dbCfg.MySQL.Host) + assert.Equal(t, "registry", dbCfg.MySQL.Database) + assert.Equal(t, "root123", dbCfg.MySQL.Password) + assert.Equal(t, "postgres", dbCfg.MySQL.Username) + assert.Equal(t, 3306, dbCfg.MySQL.Port) +} + func TestConfigStore_Save(t *testing.T) { cfgStore := store.NewConfigStore(&Database{cfgDAO: dao.New()}) err := cfgStore.Save(testCtx) diff --git a/src/pkg/config/manager.go b/src/pkg/config/manager.go index 4adbc2911e69..ed199bf5a161 100644 --- a/src/pkg/config/manager.go +++ b/src/pkg/config/manager.go @@ -155,9 +155,12 @@ func (c *CfgManager) Set(ctx context.Context, key string, value interface{}) { // GetDatabaseCfg - Get database configurations func (c *CfgManager) GetDatabaseCfg() *models.Database { ctx := context.Background() - return &models.Database{ - Type: c.Get(ctx, common.DatabaseType).GetString(), - PostGreSQL: &models.PostGreSQL{ + database := &models.Database{} + database.Type = c.Get(ctx, common.DatabaseType).GetString() + + switch database.Type { + case "", "postgresql": + postgresql := &models.PostGreSQL{ Host: c.Get(ctx, common.PostGreSQLHOST).GetString(), 
Port: c.Get(ctx, common.PostGreSQLPort).GetInt(), Username: c.Get(ctx, common.PostGreSQLUsername).GetString(), @@ -166,8 +169,22 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database { SSLMode: c.Get(ctx, common.PostGreSQLSSLMode).GetString(), MaxIdleConns: c.Get(ctx, common.PostGreSQLMaxIdleConns).GetInt(), MaxOpenConns: c.Get(ctx, common.PostGreSQLMaxOpenConns).GetInt(), - }, + } + database.PostGreSQL = postgresql + case "mariadb", "mysql": + mysql := &models.MySQL{ + Host: c.Get(ctx, common.MySQLHOST).GetString(), + Port: c.Get(ctx, common.MySQLPort).GetInt(), + Username: c.Get(ctx, common.MySQLUsername).GetString(), + Password: c.Get(ctx, common.MySQLPassword).GetString(), + Database: c.Get(ctx, common.MySQLDatabase).GetString(), + MaxIdleConns: c.Get(ctx, common.MySQLMaxIdleConns).GetInt(), + MaxOpenConns: c.Get(ctx, common.MySQLMaxOpenConns).GetInt(), + } + database.MySQL = mysql } + + return database } // UpdateConfig - Update config Store with a specified configuration and also save updated configure. diff --git a/src/pkg/member/dao/mysql_dao.go b/src/pkg/member/dao/mysql_dao.go new file mode 100644 index 000000000000..962c88f04c1a --- /dev/null +++ b/src/pkg/member/dao/mysql_dao.go @@ -0,0 +1,70 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "fmt" + + "github.com/goharbor/harbor/src/lib/log" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg/member/models" +) + +type mysqlDao struct { + *dao +} + +// NewMysqlDao ... +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +func (d *mysqlDao) AddProjectMember(ctx context.Context, member models.Member) (int, error) { + log.Debugf("Adding project member %+v", member) + o, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + + if member.EntityID <= 0 { + return 0, fmt.Errorf("invalid entity_id, member: %+v", member) + } + + if member.ProjectID <= 0 { + return 0, fmt.Errorf("invalid project_id, member: %+v", member) + } + + delSQL := "delete from project_member where project_id = ? and entity_id = ? and entity_type = ? " + _, err = o.Raw(delSQL, member.ProjectID, member.EntityID, member.EntityType).Exec() + if err != nil { + return 0, err + } + + var pmid int + + sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?)" + res, err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).Exec() + if err != nil { + return 0, err + } + insertID, err := res.LastInsertId() + if err != nil { + return 0, err + } + pmid = int(insertID) + + return pmid, err +} diff --git a/src/pkg/member/dao/mysql_dao_test.go b/src/pkg/member/dao/mysql_dao_test.go new file mode 100644 index 000000000000..7c716a5dc4f4 --- /dev/null +++ b/src/pkg/member/dao/mysql_dao_test.go @@ -0,0 +1,298 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/goharbor/harbor/src/common" + _ "github.com/goharbor/harbor/src/common/dao" + testDao "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg/member/models" + "github.com/goharbor/harbor/src/pkg/project" + "github.com/goharbor/harbor/src/pkg/user" + userDao "github.com/goharbor/harbor/src/pkg/user/dao" + "github.com/goharbor/harbor/src/pkg/usergroup" + ugModel "github.com/goharbor/harbor/src/pkg/usergroup/model" + htesting "github.com/goharbor/harbor/src/testing" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO + projectMgr project.Manager + projectID int64 + userMgr user.Manager +} + +func (s *MysqlDaoTestSuite) SetupSuite() { + s.Suite.SetupSuite() + s.Suite.ClearTables = []string{"project_member"} + s.dao = NewMysqlDao() + // Extract to test utils + initSqls := []string{ + "insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')", + "insert into project (name, owner_id) values ('member_test_01', 1)", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')", + "update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'", + "insert into project_member (project_id, entity_id, entity_type, role) values ( 
(select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)", + + "insert into harbor_user (username, email, password, realname) values ('member_test_02', 'member_test_02@example.com', '123456', 'member_test_02')", + "insert into project (name, owner_id) values ('member_test_02', 1)", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_02', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')", + "update project set owner_id = (select user_id from harbor_user where username = 'member_test_02') where name = 'member_test_02'", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select user_id from harbor_user where username = 'member_test_02'), 'u', 1)", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select id from user_group where group_name = 'test_group_02'), 'g', 1)", + } + + clearSqls := []string{ + "delete from project where name='member_test_01' or name='member_test_02'", + "delete from harbor_user where username='member_test_01' or username='member_test_02' or username='pm_sample'", + "delete from user_group", + "delete from project_member where id > 1", + } + testDao.PrepareTestData(clearSqls, initSqls) + s.projectMgr = project.Mgr + s.userMgr = user.Mgr + ctx := s.Context() + proj, err := s.projectMgr.Get(ctx, "member_test_01") + s.Nil(err) + s.NotNil(proj) + s.projectID = proj.ProjectID +} +func (s *MysqlDaoTestSuite) TearDownSuite() { +} + +func (s *MysqlDaoTestSuite) TestAddProjectMember() { + ctx := s.Context() + proj, err 
:= s.projectMgr.Get(ctx, "member_test_01") + s.Nil(err) + s.NotNil(proj) + + member := models.Member{ + ProjectID: proj.ProjectID, + EntityID: 1, + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + } + pmid, err := s.dao.AddProjectMember(ctx, member) + s.Nil(err) + s.True(pmid > 0) + + queryMember := models.Member{ + ProjectID: proj.ProjectID, + ID: pmid, + } + memberList, err := s.dao.GetProjectMember(ctx, queryMember, nil) + s.Nil(err) + s.False(len(memberList) == 0) + + _, err = s.dao.AddProjectMember(ctx, models.Member{ + ProjectID: -1, + EntityID: 1, + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + }) + + s.NotNil(err) + + _, err = s.dao.AddProjectMember(ctx, models.Member{ + ProjectID: 1, + EntityID: -1, + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + }) + + s.NotNil(err) +} + +func (s *MysqlDaoTestSuite) TestUpdateProjectMemberRole() { + ctx := s.Context() + proj, err := s.projectMgr.Get(ctx, "member_test_01") + s.Nil(err) + s.NotNil(proj) + user := userDao.User{ + Username: "pm_sample", + Email: sql.NullString{String: "pm_sample@example.com", Valid: true}, + Realname: "pm_sample", + Password: "1234567d", + } + o, err := orm.FromContext(ctx) + s.Nil(err) + userID, err := o.Insert(&user) + s.Nil(err) + member := models.Member{ + ProjectID: proj.ProjectID, + EntityID: int(userID), + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + } + + pmid, err := s.dao.AddProjectMember(ctx, member) + s.Nil(err) + s.dao.UpdateProjectMemberRole(ctx, proj.ProjectID, pmid, common.RoleDeveloper) + + queryMember := models.Member{ + ProjectID: proj.ProjectID, + EntityID: int(userID), + EntityType: common.UserMember, + } + + memberList, err := s.dao.GetProjectMember(ctx, queryMember, nil) + s.Nil(err) + s.True(len(memberList) == 1, "project member should exist") + memberItem := memberList[0] + s.Equal(common.RoleDeveloper, memberItem.Role, "should be developer role") + s.Equal(user.Username, 
memberItem.Entityname) + + memberList2, err := s.dao.SearchMemberByName(ctx, proj.ProjectID, "pm_sample") + s.Nil(err) + s.True(len(memberList2) > 0) + + memberList3, err := s.dao.SearchMemberByName(ctx, proj.ProjectID, "") + s.Nil(err) + s.True(len(memberList3) > 0, "failed to search project member") +} + +func (s *MysqlDaoTestSuite) TestGetProjectMembers() { + ctx := s.Context() + + query1 := models.Member{ProjectID: s.projectID, Entityname: "member_test_01", EntityType: common.UserMember} + member1, err := s.dao.GetProjectMember(ctx, query1, nil) + s.Nil(err) + s.True(len(member1) > 0) + s.Equal(member1[0].Entityname, "member_test_01") + + query2 := models.Member{ProjectID: s.projectID, Entityname: "test_group_01", EntityType: common.GroupMember} + member2, err := s.dao.GetProjectMember(ctx, query2, nil) + s.Nil(err) + s.True(len(member2) > 0) + s.Equal(member2[0].Entityname, "test_group_01") +} + +func (s *MysqlDaoTestSuite) TestGetTotalOfProjectMembers() { + ctx := s.Context() + tot, err := s.dao.GetTotalOfProjectMembers(ctx, s.projectID, nil) + s.Nil(err) + s.Equal(2, int(tot)) +} + +func (s *MysqlDaoTestSuite) TestListRoles() { + ctx := s.Context() + + // nil user + roles, err := s.dao.ListRoles(ctx, nil, 1) + s.Nil(err) + s.Len(roles, 0) + + // user with empty groups + u, err := s.userMgr.GetByName(ctx, "member_test_01") + s.Nil(err) + s.NotNil(u) + user := &models.User{ + UserID: u.UserID, + Username: u.Username, + } + roles, err = s.dao.ListRoles(ctx, user, s.projectID) + s.Nil(err) + s.Len(roles, 1) + + // user with a group whose ID doesn't exist + user.GroupIDs = []int{9999} + roles, err = s.dao.ListRoles(ctx, user, s.projectID) + s.Nil(err) + s.Len(roles, 1) + s.Equal(common.RoleProjectAdmin, roles[0]) + + // user with a valid group + groupID, err := usergroup.Mgr.Create(ctx, ugModel.UserGroup{ + GroupName: "group_for_list_role", + GroupType: 1, + LdapGroupDN: "CN=list_role_users,OU=sample,OU=vmware,DC=harbor,DC=com", + }) + + s.Nil(err) + defer 
usergroup.Mgr.Delete(ctx, groupID) + + memberID, err := s.dao.AddProjectMember(ctx, models.Member{ + ProjectID: s.projectID, + Role: common.RoleDeveloper, + EntityID: groupID, + EntityType: "g", + }) + s.Nil(err) + defer s.dao.DeleteProjectMemberByID(ctx, s.projectID, memberID) + + user.GroupIDs = []int{groupID} + roles, err = s.dao.ListRoles(ctx, user, s.projectID) + s.Nil(err) + s.Len(roles, 2) + s.Equal(common.RoleProjectAdmin, roles[0]) + s.Equal(common.RoleDeveloper, roles[1]) +} + +func (s *MysqlDaoTestSuite) TestDeleteProjectMember() { + ctx := s.Context() + var addMember = models.Member{ + ProjectID: s.projectID, + EntityID: 1, + EntityType: common.UserMember, + Role: common.RoleDeveloper, + } + pmid, err := s.dao.AddProjectMember(ctx, addMember) + s.Nil(err) + s.True(pmid > 0) + + err = s.dao.DeleteProjectMemberByID(ctx, s.projectID, pmid) + s.Nil(err) + + // not exist + err = s.dao.DeleteProjectMemberByID(ctx, s.projectID, -1) + s.Nil(err) + +} + +func (s *MysqlDaoTestSuite) TestDeleteProjectMemberByUserId() { + ctx := s.Context() + userID := 22 + var addMember = models.Member{ + ProjectID: s.projectID, + EntityID: userID, + EntityType: common.UserMember, + Role: common.RoleDeveloper, + } + pmid, err := s.dao.AddProjectMember(ctx, addMember) + s.Nil(err) + s.True(pmid > 0) + + err = s.dao.DeleteProjectMemberByUserID(ctx, userID) + s.Nil(err) + + queryMember := models.Member{ProjectID: s.projectID, EntityID: userID, EntityType: common.UserMember} + + // not exist + members, err := s.dao.GetProjectMember(ctx, queryMember, nil) + s.True(len(members) == 0) + s.Nil(err) +} + +func TestMysqlDaoTestSuite(t *testing.T) { + suite.Run(t, &MysqlDaoTestSuite{}) +} diff --git a/src/pkg/member/manager.go b/src/pkg/member/manager.go index 60fccddfb7aa..0abdb78c1c7f 100644 --- a/src/pkg/member/manager.go +++ b/src/pkg/member/manager.go @@ -16,6 +16,8 @@ package member import ( "context" + + "github.com/goharbor/harbor/src/lib/config" 
"github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/member/dao" @@ -103,5 +105,15 @@ func (m *manager) DeleteMemberByUserID(ctx context.Context, uid int) error { // NewManager ... func NewManager() Manager { + + dbType := config.DatabaseType() + + switch dbType { + case "", "postgresql": + return &manager{dao: dao.New()} + case "mysql", "mariadb": + return &manager{dao: dao.NewMysqlDao()} + } + return &manager{dao: dao.New()} } diff --git a/src/pkg/usergroup/dao/mysql_dao.go b/src/pkg/usergroup/dao/mysql_dao.go new file mode 100644 index 000000000000..278833e363d1 --- /dev/null +++ b/src/pkg/usergroup/dao/mysql_dao.go @@ -0,0 +1,67 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "context" + "time" + + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/usergroup/model" +) + +// NewMysqlDao ... 
+func NewMysqlDao() DAO { + return &mysqlDao{} +} + +type mysqlDao struct { + *dao +} + +// Add - Add User Group +func (d *mysqlDao) Add(ctx context.Context, userGroup model.UserGroup) (int, error) { + query := q.New(q.KeyWords{"GroupName": userGroup.GroupName, "GroupType": common.HTTPGroupType}) + userGroupList, err := d.Query(ctx, query) + if err != nil { + return 0, ErrGroupNameDup + } + if len(userGroupList) > 0 { + return 0, ErrGroupNameDup + } + o, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + + var id int + now := time.Now() + sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?)" + + res, err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).Exec() + if err != nil { + return 0, err + } + insertID, err := res.LastInsertId() + if err != nil { + return 0, err + } + id = int(insertID) + + return id, nil +} diff --git a/src/pkg/usergroup/dao/mysql_dao_test.go b/src/pkg/usergroup/dao/mysql_dao_test.go new file mode 100644 index 000000000000..470edbe8c53e --- /dev/null +++ b/src/pkg/usergroup/dao/mysql_dao_test.go @@ -0,0 +1,69 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/goharbor/harbor/src/pkg/usergroup/model" + htesting "github.com/goharbor/harbor/src/testing" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO +} + +func (s *MysqlDaoTestSuite) SetupSuite() { + s.Suite.SetupSuite() + s.Suite.ClearTables = []string{"user_group"} + s.dao = NewMysqlDao() +} + +func (s *MysqlDaoTestSuite) TestCRUDUsergroup() { + ctx := s.Context() + userGroup := model.UserGroup{ + GroupName: "harbor_dev", + GroupType: 1, + LdapGroupDN: "cn=harbor_dev,ou=groups,dc=example,dc=com", + } + id, err := s.dao.Add(ctx, userGroup) + s.Nil(err) + s.True(id > 0) + + ug, err2 := s.dao.Get(ctx, id) + s.Nil(err2) + s.Equal("harbor_dev", ug.GroupName) + s.Equal("cn=harbor_dev,ou=groups,dc=example,dc=com", ug.LdapGroupDN) + s.Equal(1, ug.GroupType) + + err3 := s.dao.UpdateName(ctx, id, "my_harbor_dev") + s.Nil(err3) + + ug2, err4 := s.dao.Get(ctx, id) + s.Nil(err4) + s.Equal("my_harbor_dev", ug2.GroupName) + s.Equal("cn=harbor_dev,ou=groups,dc=example,dc=com", ug2.LdapGroupDN) + s.Equal(1, ug2.GroupType) + + err5 := s.dao.Delete(ctx, id) + s.Nil(err5) +} + +func TestMysqlDaoTestSuite(t *testing.T) { + suite.Run(t, &MysqlDaoTestSuite{}) +} diff --git a/src/pkg/usergroup/manager.go b/src/pkg/usergroup/manager.go index 10f22aa0e69f..ca1b7a6053cf 100644 --- a/src/pkg/usergroup/manager.go +++ b/src/pkg/usergroup/manager.go @@ -17,8 +17,10 @@ package usergroup import ( "context" "errors" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/config" "github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/usergroup/dao" @@ -57,6 +59,16 @@ type manager struct { } func newManager() Manager { + + dbType := config.DatabaseType() + + switch dbType { + case "", "postgresql": + return &manager{dao: dao.New()} + case "mysql", "mariadb": + return 
&manager{dao: dao.NewMysqlDao()} + } + return &manager{dao: dao.New()} } diff --git a/src/testing/suite.go b/src/testing/suite.go index cf0b0440fbe6..dd1b5e09fbd5 100644 --- a/src/testing/suite.go +++ b/src/testing/suite.go @@ -24,6 +24,7 @@ import ( "math/rand" "net/http" "net/http/httptest" + "os" "sync" "time" @@ -54,7 +55,16 @@ type Suite struct { func (suite *Suite) SetupSuite() { once.Do(func() { config.Init() - dao.PrepareTestForPostgresSQL() + dbType := os.Getenv("DATABASE_TYPE") + + switch dbType { + case "", "postgresql": + dao.PrepareTestForPostgresSQL() + case "mysql", "mariadb": + dao.PrepareTestForMySQL() + default: + dao.PrepareTestForPostgresSQL() + } }) } diff --git a/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/README.md b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/README.md new file mode 100644 index 000000000000..c65c1107df5d --- /dev/null +++ b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/README.md @@ -0,0 +1,55 @@ +# MySQL + +`mysql://user:password@tcp(host:port)/dbname?query` + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `x-no-lock` | `NoLock` | Set to `true` to skip `GET_LOCK`/`RELEASE_LOCK` statements. Useful for [multi-master MySQL flavors](https://www.percona.com/doc/percona-xtradb-cluster/LATEST/features/pxc-strict-mode.html#explicit-table-locking). Only run migrations from one host when this is enabled. | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. | +| `port` | | The port to bind to. | +| `tls` | | TLS / SSL encrypted connection parameter; see [go-sql-driver](https://github.com/go-sql-driver/mysql#tls). Use any name (e.g. `migrate`) if you want to use a custom TLS config (`x-tls-` queries). 
| +| `x-tls-ca` | | The location of the CA (certificate authority) file. | +| `x-tls-cert` | | The location of the client certicicate file. Must be used with `x-tls-key`. | +| `x-tls-key` | | The location of the private key file. Must be used with `x-tls-cert`. | +| `x-tls-insecure-skip-verify` | | Whether or not to use SSL (true\|false) | + +## Use with existing client + +If you use the MySQL driver with existing database client, you must create the client with parameter `multiStatements=true`: + +```go +package main + +import ( + "database/sql" + + _ "github.com/go-sql-driver/mysql" + "github.com/golang-migrate/migrate" + "github.com/golang-migrate/migrate/database/mysql" + _ "github.com/golang-migrate/migrate/source/file" +) + +func main() { + db, _ := sql.Open("mysql", "user:password@tcp(host:port)/dbname?multiStatements=true") + driver, _ := mysql.WithInstance(db, &mysql.Config{}) + m, _ := migrate.NewWithDatabaseInstance( + "file:///migrations", + "mysql", + driver, + ) + + m.Steps(2) +} +``` + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +1. `DROP TABLE schema_migrations` +2. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration. +3. Download and install the latest migrate version. +4. Force the current migration version with `migrate force `. 
diff --git a/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/mysql.go b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/mysql.go new file mode 100644 index 000000000000..14b15390e29f --- /dev/null +++ b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/mysql.go @@ -0,0 +1,494 @@ +//go:build go1.9 +// +build go1.9 + +package mysql + +import ( + "context" + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "go.uber.org/atomic" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "strings" + + "github.com/go-sql-driver/mysql" + "github.com/golang-migrate/migrate/v4/database" + "github.com/hashicorp/go-multierror" +) + +var _ database.Driver = (*Mysql)(nil) // explicit compile time type check + +func init() { + database.Register("mysql", &Mysql{}) +} + +var DefaultMigrationsTable = "schema_migrations" + +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") + ErrTLSCertKeyConfig = fmt.Errorf("To use TLS client authentication, both x-tls-cert and x-tls-key must not be empty") +) + +type Config struct { + MigrationsTable string + DatabaseName string + NoLock bool +} + +type Mysql struct { + // mysql RELEASE_LOCK must be called from the same conn, so + // just do everything over a single conn anyway. 
+ conn *sql.Conn + db *sql.DB + isLocked atomic.Bool + + config *Config +} + +// connection instance must have `multiStatements` set to true +func WithConnection(ctx context.Context, conn *sql.Conn, config *Config) (*Mysql, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := conn.PingContext(ctx); err != nil { + return nil, err + } + + mx := &Mysql{ + conn: conn, + db: nil, + config: config, + } + + if config.DatabaseName == "" { + query := `SELECT DATABASE()` + var databaseName sql.NullString + if err := conn.QueryRowContext(ctx, query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName.String) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName.String + } + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + + return mx, nil +} + +// instance must have `multiStatements` set to true +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + ctx := context.Background() + + if err := instance.Ping(); err != nil { + return nil, err + } + + conn, err := instance.Conn(ctx) + if err != nil { + return nil, err + } + + mx, err := WithConnection(ctx, conn, config) + if err != nil { + return nil, err + } + + mx.db = instance + + return mx, nil +} + +// extractCustomQueryParams extracts the custom query params (ones that start with "x-") from +// mysql.Config.Params (connection parameters) as to not interfere with connecting to MySQL +func extractCustomQueryParams(c *mysql.Config) (map[string]string, error) { + if c == nil { + return nil, ErrNilConfig + } + customQueryParams := map[string]string{} + + for k, v := range c.Params { + if strings.HasPrefix(k, "x-") { + customQueryParams[k] = v + delete(c.Params, k) + } + } + return customQueryParams, nil +} + +func urlToMySQLConfig(url string) (*mysql.Config, 
error) { + // Need to parse out custom TLS parameters and call + // mysql.RegisterTLSConfig() before mysql.ParseDSN() is called + // which consumes the registered tls.Config + // Fixes: https://github.com/golang-migrate/migrate/issues/411 + // + // Can't use url.Parse() since it fails to parse MySQL DSNs + // mysql.ParseDSN() also searches for "?" to find query parameters: + // https://github.com/go-sql-driver/mysql/blob/46351a8/dsn.go#L344 + if idx := strings.LastIndex(url, "?"); idx > 0 { + rawParams := url[idx+1:] + parsedParams, err := nurl.ParseQuery(rawParams) + if err != nil { + return nil, err + } + + ctls := parsedParams.Get("tls") + if len(ctls) > 0 { + if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" { + rootCertPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(parsedParams.Get("x-tls-ca")) + if err != nil { + return nil, err + } + + if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { + return nil, ErrAppendPEM + } + + clientCert := make([]tls.Certificate, 0, 1) + if ccert, ckey := parsedParams.Get("x-tls-cert"), parsedParams.Get("x-tls-key"); ccert != "" || ckey != "" { + if ccert == "" || ckey == "" { + return nil, ErrTLSCertKeyConfig + } + certs, err := tls.LoadX509KeyPair(ccert, ckey) + if err != nil { + return nil, err + } + clientCert = append(clientCert, certs) + } + + insecureSkipVerify := false + insecureSkipVerifyStr := parsedParams.Get("x-tls-insecure-skip-verify") + if len(insecureSkipVerifyStr) > 0 { + x, err := strconv.ParseBool(insecureSkipVerifyStr) + if err != nil { + return nil, err + } + insecureSkipVerify = x + } + + err = mysql.RegisterTLSConfig(ctls, &tls.Config{ + RootCAs: rootCertPool, + Certificates: clientCert, + InsecureSkipVerify: insecureSkipVerify, + }) + if err != nil { + return nil, err + } + } + } + } + + config, err := mysql.ParseDSN(strings.TrimPrefix(url, "mysql://")) + if err != nil { + return nil, err + } + + config.MultiStatements = true + + // Keep backwards compatibility 
from when we used net/url.Parse() to parse the DSN. + // net/url.Parse() would automatically unescape it for us. + // See: https://play.golang.org/p/q9j1io-YICQ + user, err := nurl.QueryUnescape(config.User) + if err != nil { + return nil, err + } + config.User = user + + password, err := nurl.QueryUnescape(config.Passwd) + if err != nil { + return nil, err + } + config.Passwd = password + + return config, nil +} + +func (m *Mysql) Open(url string) (database.Driver, error) { + config, err := urlToMySQLConfig(url) + if err != nil { + return nil, err + } + + customParams, err := extractCustomQueryParams(config) + if err != nil { + return nil, err + } + + noLockParam, noLock := customParams["x-no-lock"], false + if noLockParam != "" { + noLock, err = strconv.ParseBool(noLockParam) + if err != nil { + return nil, fmt.Errorf("could not parse x-no-lock as bool: %w", err) + } + } + + db, err := sql.Open("mysql", config.FormatDSN()) + if err != nil { + return nil, err + } + + mx, err := WithInstance(db, &Config{ + DatabaseName: config.DBName, + MigrationsTable: customParams["x-migrations-table"], + NoLock: noLock, + }) + if err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Close() error { + connErr := m.conn.Close() + var dbErr error + if m.db != nil { + dbErr = m.db.Close() + } + + if connErr != nil || dbErr != nil { + return fmt.Errorf("conn: %v, db: %v", connErr, dbErr) + } + return nil +} + +func (m *Mysql) Lock() error { + return database.CasRestoreOnErr(&m.isLocked, false, true, database.ErrLocked, func() error { + if m.config.NoLock { + return nil + } + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := "SELECT GET_LOCK(?, 10)" + var success bool + if err := m.conn.QueryRowContext(context.Background(), query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + 
} + + if !success { + return database.ErrLocked + } + + return nil + }) +} + +func (m *Mysql) Unlock() error { + return database.CasRestoreOnErr(&m.isLocked, true, false, database.ErrNotLocked, func() error { + if m.config.NoLock { + return nil + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := `SELECT RELEASE_LOCK(?)` + if _, err := m.conn.ExecContext(context.Background(), query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + // NOTE: RELEASE_LOCK could return NULL or (or 0 if the code is changed), + // in which case isLocked should be true until the timeout expires -- synchronizing + // these states is likely not worth trying to do; reconsider the necessity of isLocked. + + return nil + }) +} + +func (m *Mysql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + query := string(migr[:]) + if _, err := m.conn.ExecContext(context.Background(), query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (m *Mysql) SetVersion(version int, dirty bool) error { + tx, err := m.conn.BeginTx(context.Background(), &sql.TxOptions{}) + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE `" + m.config.MigrationsTable + "`" + if _, err := tx.ExecContext(context.Background(), query); err != nil { + if errRollback := tx.Rollback(); errRollback != nil { + err = multierror.Append(err, errRollback) + } + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + // Also re-write the schema version for nil dirty versions to prevent + // empty schema version for failed down migration on the first migration + // See: https://github.com/golang-migrate/migrate/issues/330 + if version >= 0 || (version == database.NilVersion && 
dirty) { + query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)" + if _, err := tx.ExecContext(context.Background(), query, version, dirty); err != nil { + if errRollback := tx.Rollback(); errRollback != nil { + err = multierror.Append(err, errRollback) + } + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Mysql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1" + err = m.conn.QueryRowContext(context.Background(), query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*mysql.MySQLError); ok { + if e.Number == 0 { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (m *Mysql) Drop() (err error) { + // select all tables + query := `SHOW TABLES LIKE '%'` + tables, err := m.conn.QueryContext(context.Background(), query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer func() { + if errClose := tables.Close(); errClose != nil { + err = multierror.Append(err, errClose) + } + }() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + if err := tables.Err(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(tableNames) > 0 { + // disable checking foreign key constraints until finished + query = `SET foreign_key_checks = 0` + if _, err := m.conn.ExecContext(context.Background(), query); err != 
nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + defer func() { + // enable foreign key checks + _, _ = m.conn.ExecContext(context.Background(), `SET foreign_key_checks = 1`) + }() + + // delete one by one ... + for _, t := range tableNames { + query = "DROP TABLE IF EXISTS `" + t + "`" + if _, err := m.conn.ExecContext(context.Background(), query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + } + + return nil +} + +// ensureVersionTable checks if versions table exists and, if not, creates it. +// Note that this function locks the database, which deviates from the usual +// convention of "caller locks" in the Mysql type. +func (m *Mysql) ensureVersionTable() (err error) { + if err = m.Lock(); err != nil { + return err + } + + defer func() { + if e := m.Unlock(); e != nil { + if err == nil { + err = e + } else { + err = multierror.Append(err, e) + } + } + }() + + // check if migration table exists + var result string + query := `SHOW TABLES LIKE '` + m.config.MigrationsTable + `'` + if err := m.conn.QueryRowContext(context.Background(), query).Scan(&result); err != nil { + if err != sql.ErrNoRows { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + + // if not, create the empty migration table + query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)" + if _, err := m.conn.ExecContext(context.Background(), query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71 +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} diff --git a/tests/docker-compose.test.yml b/tests/docker-compose.test.yml index f8303df3c64c..dea01e52462a 100644 --- a/tests/docker-compose.test.yml +++ b/tests/docker-compose.test.yml @@ -27,3 +27,24 @@ services: - /data/redis:/var/lib/redis ports: - 6379:6379 + mariadb: + image: mariadb:10.5.9 + restart: always + environment: + MYSQL_ROOT_PASSWORD: root123 + MYSQL_DATABASE: registry + volumes: + - /data/mariadb/database:/var/lib/mysql:z + ports: + - 3306:3306 + mysql: + image: mysql:8.0 + command: --default-authentication-plugin=mysql_native_password + restart: always + environment: + MYSQL_ROOT_PASSWORD: root123 + MYSQL_DATABASE: registry + volumes: + - /data/mysql/database:/var/lib/mysql:z + ports: + - 3308:3306