diff --git a/.github/workflows/CI-mariadb.yml b/.github/workflows/CI-mariadb.yml new file mode 100644 index 000000000000..22254b1bdecf --- /dev/null +++ b/.github/workflows/CI-mariadb.yml @@ -0,0 +1,91 @@ +name: CI-mariadb +env: + DATABASE_TYPE: mariadb + DB_HOST: localhost + DB_PORT: 3306 + DB_USERNAME: root + DB_PASSWORD: root123 + DB_DATABASE: registry + DB_COLLATION: utf8mb4_general_ci + DOCKER_COMPOSE_VERSION: 1.23.0 + HARBOR_ADMIN: admin + HARBOR_ADMIN_PASSWD: Harbor12345 + CORE_SECRET: tempString + KEY_PATH: "/data/secret/keys/secretkey" + REDIS_HOST: localhost + REG_VERSION: v2.7.1-patch-2819-2553 + UI_BUILDER_VERSION: 1.6.0 + +on: + pull_request: + push: + paths-ignore: + - 'docs/**' + +jobs: + UTTEST4MARIADB: + env: + UTTEST: true + runs-on: + #- self-hosted + - ubuntu-latest + timeout-minutes: 100 + steps: + - name: Set up Go 1.17 + uses: actions/setup-go@v1 + with: + go-version: 1.17.7 + id: go + - name: setup Docker + uses: docker-practice/actions-setup-docker@0.0.1 + with: + docker_version: 20.04 + docker_channel: stable + - uses: actions/checkout@v2 + with: + path: src/github.com/goharbor/harbor + - name: setup env + run: | + cd src/github.com/goharbor/harbor + pwd + go env + echo "GOPATH=$(go env GOPATH):$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "$(go env GOPATH)/bin" >> $GITHUB_PATH + echo "TOKEN_PRIVATE_KEY_PATH=${GITHUB_WORKSPACE}/src/github.com/goharbor/harbor/tests/private_key.pem" >> $GITHUB_ENV + shell: bash + - name: before_install + run: | + set -x + cd src/github.com/goharbor/harbor + pwd + env + #sudo apt install -y xvfb + #xvfb-run ls + curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose + chmod +x docker-compose + sudo mv docker-compose /usr/local/bin + IP=`hostname -I | awk '{print $1}'` + echo '{"insecure-registries" : ["'$IP':5000"]}' | sudo tee /etc/docker/daemon.json + echo "IP=$IP" >> $GITHUB_ENV + sudo cp ./tests/harbor_ca.crt 
/usr/local/share/ca-certificates/ + sudo update-ca-certificates + sudo service docker restart + - name: install + run: | + cd src/github.com/goharbor/harbor + env + df -h + bash ./tests/showtime.sh ./tests/ci/ut_install.sh + - name: script + run: | + echo IP: $IP + df -h + cd src/github.com/goharbor/harbor + bash ./tests/showtime.sh ./tests/ci/ut_run.sh $IP + df -h + - name: Codecov For BackEnd + uses: codecov/codecov-action@v1 + with: + file: ./src/github.com/goharbor/harbor/profile.cov + flags: unittests + diff --git a/.github/workflows/CI-mysql.yml b/.github/workflows/CI-mysql.yml new file mode 100644 index 000000000000..34844ea67f15 --- /dev/null +++ b/.github/workflows/CI-mysql.yml @@ -0,0 +1,91 @@ +name: CI-mysql +env: + DATABASE_TYPE: mysql + DB_HOST: localhost + DB_PORT: 3308 + DB_USERNAME: root + DB_PASSWORD: root123 + DB_DATABASE: registry + DB_COLLATION: utf8mb4_general_ci + DOCKER_COMPOSE_VERSION: 1.23.0 + HARBOR_ADMIN: admin + HARBOR_ADMIN_PASSWD: Harbor12345 + CORE_SECRET: tempString + KEY_PATH: "/data/secret/keys/secretkey" + REDIS_HOST: localhost + REG_VERSION: v2.7.1-patch-2819-2553 + UI_BUILDER_VERSION: 1.6.0 + +on: + pull_request: + push: + paths-ignore: + - 'docs/**' + +jobs: + UTTEST4MYSQL: + env: + UTTEST: true + runs-on: + #- self-hosted + - ubuntu-latest + timeout-minutes: 100 + steps: + - name: Set up Go 1.17 + uses: actions/setup-go@v1 + with: + go-version: 1.17.7 + id: go + - name: setup Docker + uses: docker-practice/actions-setup-docker@0.0.1 + with: + docker_version: 20.04 + docker_channel: stable + - uses: actions/checkout@v2 + with: + path: src/github.com/goharbor/harbor + - name: setup env + run: | + cd src/github.com/goharbor/harbor + pwd + go env + echo "GOPATH=$(go env GOPATH):$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "$(go env GOPATH)/bin" >> $GITHUB_PATH + echo "TOKEN_PRIVATE_KEY_PATH=${GITHUB_WORKSPACE}/src/github.com/goharbor/harbor/tests/private_key.pem" >> $GITHUB_ENV + shell: bash + - name: before_install + run: | + set -x 
+ cd src/github.com/goharbor/harbor + pwd + env + #sudo apt install -y xvfb + #xvfb-run ls + curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose + chmod +x docker-compose + sudo mv docker-compose /usr/local/bin + IP=`hostname -I | awk '{print $1}'` + echo '{"insecure-registries" : ["'$IP':5000"]}' | sudo tee /etc/docker/daemon.json + echo "IP=$IP" >> $GITHUB_ENV + sudo cp ./tests/harbor_ca.crt /usr/local/share/ca-certificates/ + sudo update-ca-certificates + sudo service docker restart + - name: install + run: | + cd src/github.com/goharbor/harbor + env + df -h + bash ./tests/showtime.sh ./tests/ci/ut_install.sh + - name: script + run: | + echo IP: $IP + df -h + cd src/github.com/goharbor/harbor + bash ./tests/showtime.sh ./tests/ci/ut_run.sh $IP + df -h + - name: Codecov For BackEnd + uses: codecov/codecov-action@v1 + with: + file: ./src/github.com/goharbor/harbor/profile.cov + flags: unittests + diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 6bac762fce99..63f949542554 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -1,10 +1,11 @@ name: CI env: - POSTGRESQL_HOST: localhost - POSTGRESQL_PORT: 5432 - POSTGRESQL_USR: postgres - POSTGRESQL_PWD: root123 - POSTGRESQL_DATABASE: registry + DATABASE_TYPE: postgresql + DB_HOST: localhost + DB_PORT: 5432 + DB_USERNAME: postgres + DB_PASSWORD: root123 + DB_DATABASE: registry DOCKER_COMPOSE_VERSION: 1.23.0 HARBOR_ADMIN: admin HARBOR_ADMIN_PASSWD: Harbor12345 diff --git a/make/harbor.yml.tmpl b/make/harbor.yml.tmpl index c0563b74ca36..42971221a434 100644 --- a/make/harbor.yml.tmpl +++ b/make/harbor.yml.tmpl @@ -142,12 +142,16 @@ _version: 2.6.0 # Uncomment external_database if using external database. 
# external_database: # harbor: +# # database type, default is postgresql, options include postgresql, mariadb and mysql +# type: harbor_db_type # host: harbor_db_host # port: harbor_db_port # db_name: harbor_db_name # username: harbor_db_username # password: harbor_db_password # ssl_mode: disable +# # collation setting for mariadb and mysql +# collation: utf8mb4_general_ci # max_idle_conns: 2 # max_open_conns: 0 # notary_signer: diff --git a/make/migrations/mysql/0001_initial_schema.up.sql b/make/migrations/mysql/0001_initial_schema.up.sql index 70c00a8210aa..dc7e16253e78 100644 --- a/make/migrations/mysql/0001_initial_schema.up.sql +++ b/make/migrations/mysql/0001_initial_schema.up.sql @@ -39,8 +39,8 @@ create table harbor_user ( reset_uuid varchar(40) DEFAULT NULL, salt varchar(40) DEFAULT NULL, sysadmin_flag boolean DEFAULT false NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), UNIQUE (username), UNIQUE (email) ); @@ -57,16 +57,17 @@ create table project ( and 11 is reserved for marking the deleted project. 
*/ name varchar (255) NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), deleted boolean DEFAULT false NOT NULL, FOREIGN KEY (owner_id) REFERENCES harbor_user(user_id), UNIQUE (name) ); -insert into project (owner_id, name, creation_time, update_time) values -(1, 'library', NOW(), NOW()); + +insert into project (owner_id, name, creation_time, update_time) +select user_id , 'library', NOW(), NOW() from harbor_user where username = 'admin'; create table project_member ( id SERIAL NOT NULL, @@ -78,8 +79,8 @@ create table project_member ( */ entity_type char(1) NOT NULL, role int NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id), CONSTRAINT unique_project_entity_type UNIQUE (project_id, entity_id, entity_type) ); @@ -92,8 +93,8 @@ create table project_metadata ( project_id bigint unsigned NOT NULL, name varchar(255) NOT NULL, value varchar(255), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), deleted boolean DEFAULT false NOT NULL, PRIMARY KEY (id), CONSTRAINT unique_project_id_and_name UNIQUE (project_id,name), @@ -108,8 +109,8 @@ create table user_group ( group_name varchar(255) NOT NULL, group_type smallint default 0, ldap_group_dn varchar(512) NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP 
ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); @@ -121,7 +122,7 @@ create table access_log ( repo_tag varchar (128), GUID varchar(64), operation varchar(20) NOT NULL, - op_time timestamp default CURRENT_TIMESTAMP, + op_time timestamp(6) default CURRENT_TIMESTAMP(6), primary key (log_id) ); @@ -134,8 +135,8 @@ create table repository ( description text, pull_count int DEFAULT 0 NOT NULL, star_count int DEFAULT 0 NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), primary key (repository_id), UNIQUE (name) ); @@ -151,9 +152,9 @@ create table replication_policy ( cron_str varchar(256), filters varchar(1024), replicate_deletion boolean DEFAULT false NOT NULL, - start_time timestamp NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + start_time timestamp(6) NULL, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); @@ -170,8 +171,8 @@ create table replication_target ( */ target_type SMALLINT NOT NULL DEFAULT 0, insecure boolean NOT NULL DEFAULT false, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); @@ -186,8 +187,8 @@ create table replication_job ( New job service only records uuid, for compatibility in this table both IDs are stored. 
*/ job_uuid varchar(64), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); @@ -201,8 +202,8 @@ create table replication_immediate_trigger ( namespace varchar(256) NOT NULL, on_push boolean NOT NULL DEFAULT false, on_deletion boolean NOT NULL DEFAULT false, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); @@ -216,8 +217,8 @@ create table replication_immediate_trigger ( New job service only records uuid, for compatibility in this table both IDs are stored. */ job_uuid varchar(64), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); @@ -236,8 +237,8 @@ create table img_scan_overview ( components_overview varchar(2048), /* primary key for querying details, in clair it should be the name of the "top layer" */ details_key varchar(128), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY(id), UNIQUE(image_digest) ); @@ -245,7 +246,7 @@ create table img_scan_overview ( create table clair_vuln_timestamp ( id SERIAL NOT NULL, namespace varchar(128) NOT NULL, -last_update timestamp NOT NULL, +last_update 
timestamp(6) NOT NULL, PRIMARY KEY(id), UNIQUE(namespace) ); @@ -274,8 +275,8 @@ create table harbor_label ( */ scope char(1) NOT NULL, project_id int, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), deleted boolean DEFAULT false NOT NULL, PRIMARY KEY(id), CONSTRAINT unique_label UNIQUE (name,scope, project_id) @@ -299,8 +300,8 @@ the resource_name is the name of image when the resource_type is i 'i' for image */ resource_type char(1) NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY(id), CONSTRAINT unique_label_resource UNIQUE (label_id,resource_id, resource_name, resource_type) ); @@ -312,12 +313,13 @@ create table admin_job ( cron_str varchar(256), status varchar(64) NOT NULL, job_uuid varchar(64), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), deleted boolean DEFAULT false NOT NULL, PRIMARY KEY(id) ); + CREATE INDEX admin_job_status ON admin_job (status); CREATE INDEX admin_job_uuid ON admin_job (job_uuid); diff --git a/make/migrations/mysql/0002_1.7.0_schema.up.sql b/make/migrations/mysql/0002_1.7.0_schema.up.sql index 2b8019019465..82d07323c661 100644 --- a/make/migrations/mysql/0002_1.7.0_schema.up.sql +++ b/make/migrations/mysql/0002_1.7.0_schema.up.sql @@ -4,7 +4,7 @@ DELETE FROM properties where k='scan_all_policy'; create table job_log ( log_id SERIAL 
NOT NULL, job_uuid varchar (64) NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), content text, primary key (log_id) ); diff --git a/make/migrations/mysql/0004_1.8.0_schema.up.sql b/make/migrations/mysql/0004_1.8.0_schema.up.sql index f03d492c8f50..ce25170b3fe0 100644 --- a/make/migrations/mysql/0004_1.8.0_schema.up.sql +++ b/make/migrations/mysql/0004_1.8.0_schema.up.sql @@ -6,8 +6,8 @@ CREATE TABLE robot ( project_id int, expiresat bigint, disabled boolean DEFAULT false NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), CONSTRAINT unique_robot UNIQUE (name, project_id) ); @@ -29,8 +29,8 @@ CREATE TABLE oidc_user ( Encoded token */ token text, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), PRIMARY KEY (id), FOREIGN KEY (user_id) REFERENCES harbor_user(user_id), UNIQUE (subiss) @@ -48,7 +48,7 @@ DELETE p FROM replication_policy AS p WHERE p.deleted = TRUE; /*upgrade the replication_target to registry*/ -ALTER TABLE replication_target MODIFY COLUMN update_time timestamp default CURRENT_TIMESTAMP; +ALTER TABLE replication_target MODIFY COLUMN update_time timestamp(6) default CURRENT_TIMESTAMP(6); ALTER TABLE replication_target RENAME registry; ALTER TABLE registry MODIFY COLUMN url varchar(256); ALTER TABLE registry ADD COLUMN credential_type varchar(16); @@ -78,7 +78,7 @@ UPDATE replication_policy SET override=TRUE; ALTER TABLE replication_policy DROP COLUMN project_id; ALTER TABLE replication_policy CHANGE COLUMN cron_str `trigger` 
varchar(256) DEFAULT NULL; -ALTER TABLE replication_immediate_trigger MODIFY COLUMN update_time timestamp default CURRENT_TIMESTAMP; +ALTER TABLE replication_immediate_trigger MODIFY COLUMN update_time timestamp(6) default CURRENT_TIMESTAMP(6); DROP TABLE replication_immediate_trigger; create table replication_execution ( @@ -93,8 +93,8 @@ create table replication_execution ( in_progress int NOT NULL DEFAULT 0, stopped int NOT NULL DEFAULT 0, `trigger` varchar(64), - start_time timestamp default CURRENT_TIMESTAMP, - end_time timestamp NULL, + start_time timestamp(6) default CURRENT_TIMESTAMP(6), + end_time timestamp(6) NULL, PRIMARY KEY (id) ); CREATE INDEX execution_policy ON replication_execution (policy_id); @@ -108,8 +108,8 @@ create table replication_task ( operation varchar(32), job_id varchar(64), status varchar(32), - start_time timestamp default CURRENT_TIMESTAMP, - end_time timestamp NULL, + start_time timestamp(6) default CURRENT_TIMESTAMP(6), + end_time timestamp(6) NULL, PRIMARY KEY (id) ); CREATE INDEX task_execution ON replication_task (execution_id); @@ -166,7 +166,7 @@ ALTER TABLE replication_job DROP COLUMN op_uuid; DROP INDEX policy ON replication_job; DROP INDEX poid_uptime ON replication_job; DROP INDEX poid_status ON replication_job; -ALTER TABLE replication_job MODIFY COLUMN update_time timestamp default CURRENT_TIMESTAMP; +ALTER TABLE replication_job MODIFY COLUMN update_time timestamp(6) default CURRENT_TIMESTAMP(6); ALTER TABLE replication_job RENAME TO replication_schedule_job; /* diff --git a/make/migrations/mysql/0010_1.9.0_schema.up.sql b/make/migrations/mysql/0010_1.9.0_schema.up.sql index 7fa1ec99e8a3..46a42f6b65e0 100644 --- a/make/migrations/mysql/0010_1.9.0_schema.up.sql +++ b/make/migrations/mysql/0010_1.9.0_schema.up.sql @@ -3,8 +3,8 @@ CREATE TABLE cve_whitelist ( id SERIAL PRIMARY KEY NOT NULL, project_id int, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP, + 
creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6), expires_at bigint, items text NOT NULL, UNIQUE (project_id) @@ -19,7 +19,7 @@ CREATE TABLE `blob` digest varchar(255) NOT NULL, content_type varchar(1024) NOT NULL, size bigint NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), UNIQUE (digest) ); @@ -28,7 +28,7 @@ CREATE TABLE project_blob ( id SERIAL PRIMARY KEY NOT NULL, project_id int NOT NULL, blob_id int NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), CONSTRAINT unique_project_blob UNIQUE (project_id, blob_id) ); @@ -47,9 +47,9 @@ CREATE TABLE artifact kind of artifact, image, chart, etc.. */ kind varchar(255) NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - pull_time timestamp, - push_time timestamp, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + pull_time timestamp(6), + push_time timestamp(6), CONSTRAINT unique_artifact UNIQUE (project_id, repo, tag) ); @@ -59,7 +59,7 @@ CREATE TABLE artifact_blob id SERIAL PRIMARY KEY NOT NULL, digest_af varchar(255) NOT NULL, digest_blob varchar(255) NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), CONSTRAINT unique_artifact_blob UNIQUE (digest_af, digest_blob) ); @@ -70,8 +70,8 @@ CREATE TABLE quota reference VARCHAR(255) NOT NULL, reference_id VARCHAR(255) NOT NULL, hard JSON NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6), UNIQUE (reference, reference_id) ); @@ -82,8 +82,8 @@ CREATE TABLE quota_usage reference VARCHAR(255) NOT NULL, reference_id VARCHAR(255) NOT NULL, used JSON NOT NULL, - creation_time timestamp default 
CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6), UNIQUE (reference, reference_id) ); @@ -123,7 +123,7 @@ create table retention_execution policy_id integer, dry_run boolean, `trigger` varchar(20), - start_time timestamp + start_time timestamp(6) ); create table retention_task @@ -135,8 +135,8 @@ create table retention_task status varchar(32), status_code integer, status_revision integer, - start_time timestamp default CURRENT_TIMESTAMP, - end_time timestamp default CURRENT_TIMESTAMP, + start_time timestamp(6) default CURRENT_TIMESTAMP(6), + end_time timestamp(6) default CURRENT_TIMESTAMP(6), total integer, retained integer, PRIMARY KEY (id) @@ -147,8 +147,8 @@ create table schedule id SERIAL NOT NULL, job_id varchar(64), status varchar(64), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); @@ -162,8 +162,8 @@ create table notification_policy ( targets text, event_types text, creator varchar(256), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6), PRIMARY KEY (id), CONSTRAINT unique_project_id UNIQUE (project_id) ); @@ -179,8 +179,8 @@ create table notification_policy ( notify_type varchar(256), job_detail text, job_uuid varchar(64), - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6), PRIMARY KEY (id) ); diff --git a/make/migrations/mysql/0015_1.10.0_schema.up.sql b/make/migrations/mysql/0015_1.10.0_schema.up.sql 
index fadd47e9295e..d08cf1dc287a 100644 --- a/make/migrations/mysql/0015_1.10.0_schema.up.sql +++ b/make/migrations/mysql/0015_1.10.0_schema.up.sql @@ -13,8 +13,8 @@ CREATE TABLE scanner_registration use_internal_addr BOOLEAN NOT NULL DEFAULT FALSE, immutable BOOLEAN NOT NULL DEFAULT FALSE, skip_cert_verify BOOLEAN NOT NULL DEFAULT FALSE, - create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP + create_time TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6), + update_time TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ); /*Table for keeping the scan report. The report details are stored as JSON*/ @@ -32,9 +32,10 @@ CREATE TABLE scan_report status_code INTEGER DEFAULT 0, status_rev BIGINT DEFAULT 0, report JSON, - start_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - end_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - UNIQUE(digest, registration_uuid, mime_type) + start_time TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6), + end_time TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6), + UNIQUE(digest, registration_uuid, mime_type), + CHECK (report is null or JSON_VALID (report)) ); /** Add table for immutable tag **/ @@ -44,7 +45,7 @@ CREATE TABLE immutable_tag_rule project_id int NOT NULL, tag_filter text, disabled BOOLEAN NOT NULL DEFAULT FALSE, - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), UNIQUE(project_id, tag_filter(255)) ); @@ -55,7 +56,7 @@ DROP INDEX idx_status ON img_scan_job; DROP INDEX idx_digest ON img_scan_job; DROP INDEX idx_uuid ON img_scan_job; DROP INDEX idx_repository_tag ON img_scan_job; -ALTER TABLE img_scan_job MODIFY COLUMN update_time timestamp default CURRENT_TIMESTAMP; +ALTER TABLE img_scan_job MODIFY COLUMN update_time timestamp(6) default CURRENT_TIMESTAMP(6); DROP TABLE IF EXISTS img_scan_job; DROP TABLE IF EXISTS img_scan_overview; diff --git a/make/migrations/mysql/0030_2.0.0_schema.up.sql b/make/migrations/mysql/0030_2.0.0_schema.up.sql index 
028407a1ef02..9960fc82a18c 100644 --- a/make/migrations/mysql/0030_2.0.0_schema.up.sql +++ b/make/migrations/mysql/0030_2.0.0_schema.up.sql @@ -9,8 +9,8 @@ table artifact: repository_name varchar(255) NOT NULL, digest varchar(255) NOT NULL, size bigint, - push_time timestamp default CURRENT_TIMESTAMP, - pull_time timestamp, + push_time timestamp(6) default CURRENT_TIMESTAMP(6), + pull_time timestamp(6), extra_attrs text, annotations jsonb, CONSTRAINT unique_artifact UNIQUE (repository_id, digest) @@ -67,8 +67,8 @@ CREATE TABLE tag repository_id int NOT NULL, artifact_id bigint unsigned NOT NULL, name varchar(255) NOT NULL, - push_time timestamp default CURRENT_TIMESTAMP, - pull_time timestamp, + push_time timestamp(6) default CURRENT_TIMESTAMP(6), + pull_time timestamp(6), FOREIGN KEY (artifact_id) REFERENCES artifact(id), CONSTRAINT unique_tag UNIQUE (repository_id, name) ); @@ -92,6 +92,7 @@ JOIN ( ) AS ordered_art ON art.repository_name=ordered_art.repository_name AND art.digest=ordered_art.digest WHERE ordered_art.seq=1; +ALTER TABLE artifact DROP INDEX unique_artifact; ALTER TABLE artifact DROP COLUMN tag; /*remove the duplicate artifact rows*/ @@ -102,7 +103,6 @@ WHERE id NOT IN ( ); SET sql_mode = ''; -ALTER TABLE artifact DROP INDEX unique_artifact; ALTER TABLE artifact ADD CONSTRAINT unique_artifact UNIQUE (repository_id, digest); /*set artifact size*/ @@ -127,7 +127,8 @@ CREATE TABLE artifact_reference annotations json, FOREIGN KEY (parent_id) REFERENCES artifact(id), FOREIGN KEY (child_id) REFERENCES artifact(id), - CONSTRAINT unique_reference UNIQUE (parent_id, child_id) + CONSTRAINT unique_reference UNIQUE (parent_id, child_id), + CHECK (annotations is null or JSON_VALID (annotations)) ); /* artifact_trash records deleted artifact */ @@ -138,7 +139,7 @@ CREATE TABLE artifact_trash manifest_media_type varchar(255) NOT NULL, repository_name varchar(255) NOT NULL, digest varchar(255) NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, + 
creation_time timestamp(6) default CURRENT_TIMESTAMP(6), CONSTRAINT unique_artifact_trash UNIQUE (repository_name, digest) ); @@ -147,8 +148,8 @@ CREATE TABLE label_reference ( id SERIAL PRIMARY KEY NOT NULL, label_id bigint unsigned NOT NULL, artifact_id bigint unsigned NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6), FOREIGN KEY (label_id) REFERENCES harbor_label(id), FOREIGN KEY (artifact_id) REFERENCES artifact(id), CONSTRAINT unique_label_reference UNIQUE (label_id,artifact_id) @@ -179,7 +180,7 @@ CREATE TABLE audit_log resource_type varchar(255) NOT NULL, resource varchar(1024) NOT NULL, username varchar(255) NOT NULL, - op_time timestamp default CURRENT_TIMESTAMP + op_time timestamp(6) default CURRENT_TIMESTAMP(6) ); /*migrate access log to audit log*/ diff --git a/make/migrations/mysql/0040_2.1.0_schema.up.sql b/make/migrations/mysql/0040_2.1.0_schema.up.sql index e67d5e6b9ac9..4da0b6f71bfc 100644 --- a/make/migrations/mysql/0040_2.1.0_schema.up.sql +++ b/make/migrations/mysql/0040_2.1.0_schema.up.sql @@ -1,4 +1,19 @@ -ALTER TABLE project ADD COLUMN registry_id int; +CREATE PROCEDURE PROC_ADD_COLUMN_IF_NOT_EXISTS (in TB_NAME varchar(64), in CL_NAME varchar(64), in CL_TYPE varchar(64)) BEGIN +SELECT count(*) INTO @EXIST_CL +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_SCHEMA = database() + AND TABLE_NAME = TB_NAME + AND COLUMN_NAME = CL_NAME LIMIT 1; + +SET @sql_cl = IF (@EXIST_CL <= 0, CONCAT('ALTER TABLE `', TB_NAME, '` ADD COLUMN `',CL_NAME, '` ', CL_TYPE), + 'select \' COLUMN EXISTS\' status'); +PREPARE stmt_cl FROM @sql_cl; +EXECUTE stmt_cl; +DEALLOCATE PREPARE stmt_cl; +END; + +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('project', 'registry_id', 'int'); + ALTER TABLE cve_whitelist RENAME TO cve_allowlist; UPDATE role SET name='maintainer' WHERE name='master'; UPDATE project_metadata 
SET name='reuse_sys_cve_allowlist' WHERE name='reuse_sys_cve_whitelist'; @@ -11,10 +26,11 @@ CREATE TABLE IF NOT EXISTS execution ( status_message text, `trigger` varchar(16) NOT NULL, extra_attrs JSON, - start_time timestamp DEFAULT CURRENT_TIMESTAMP, - end_time timestamp, + start_time timestamp(6) DEFAULT CURRENT_TIMESTAMP(6), + end_time timestamp(6) NULL DEFAULT NULL, revision int, - PRIMARY KEY (id) + PRIMARY KEY (id), + CHECK (extra_attrs is null or JSON_VALID (extra_attrs)) ); CREATE TABLE IF NOT EXISTS task ( @@ -27,20 +43,36 @@ CREATE TABLE IF NOT EXISTS task ( status_message text, run_count int, extra_attrs JSON, - creation_time timestamp DEFAULT CURRENT_TIMESTAMP, - start_time timestamp, - update_time timestamp, - end_time timestamp, - FOREIGN KEY (execution_id) REFERENCES execution(id) + creation_time timestamp(6) DEFAULT CURRENT_TIMESTAMP(6), + start_time timestamp(6), + update_time timestamp(6), + end_time timestamp(6) NULL DEFAULT NULL, + FOREIGN KEY (execution_id) REFERENCES execution(id), + CHECK (extra_attrs is null or JSON_VALID (extra_attrs)) ); -ALTER TABLE `blob` ADD COLUMN update_time timestamp default CURRENT_TIMESTAMP; -ALTER TABLE `blob` ADD COLUMN status varchar(255) default 'none'; -ALTER TABLE `blob` ADD COLUMN version BIGINT default 0; -CREATE INDEX idx_status ON `blob` (status); -CREATE INDEX idx_version ON `blob` (version); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('blob', 'update_time', 'timestamp(6) default CURRENT_TIMESTAMP(6)'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('blob', 'status', 'varchar(255) default \'none\''); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('blob', 'version', 'BIGINT default 0'); + +CREATE PROCEDURE PROC_CREATE_INDEX_IF_NOT_EXISTS (in TB_NAME varchar(64), in CL_NAME varchar(64), in IND_NAME varchar(64)) BEGIN +SELECT count(*) INTO @EXIST_IND +FROM INFORMATION_SCHEMA.STATISTICS +WHERE TABLE_SCHEMA = database() + AND TABLE_NAME = TB_NAME + AND INDEX_NAME = IND_NAME LIMIT 1; + +SET @sql_ind = IF (@EXIST_IND <= 0, CONCAT('CREATE 
INDEX `', IND_NAME, '` ON `', TB_NAME, '` (', CL_NAME, ')'), + 'select \' INDEX EXISTS\' status'); +PREPARE stmt_ind FROM @sql_ind; +EXECUTE stmt_ind; +DEALLOCATE PREPARE stmt_ind; +END; -CREATE TABLE p2p_preheat_instance ( +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('blob', 'status', 'idx_status'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('blob', 'version', 'idx_version'); + +CREATE TABLE IF NOT EXISTS p2p_preheat_instance ( id SERIAL PRIMARY KEY NOT NULL, name varchar(255) NOT NULL, description varchar(255), @@ -64,27 +96,28 @@ CREATE TABLE IF NOT EXISTS p2p_preheat_policy ( filters varchar(1024), `trigger` varchar(255), enabled boolean, - creation_time timestamp, - update_time timestamp, + creation_time timestamp(6), + update_time timestamp(6), UNIQUE (name, project_id) ); -ALTER TABLE schedule ADD COLUMN vendor_type varchar(16); -ALTER TABLE schedule ADD COLUMN vendor_id int; -ALTER TABLE schedule ADD COLUMN cron varchar(64); -ALTER TABLE schedule ADD COLUMN callback_func_name varchar(128); -ALTER TABLE schedule ADD COLUMN callback_func_param text; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'vendor_type', 'varchar(16)'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'vendor_id', 'int'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'cron', 'varchar(64)'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'callback_func_name', 'varchar(128)'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'callback_func_param', 'text'); /*abstract the cron, callback function parameters from table retention_policy*/ UPDATE schedule, ( - SELECT id, data->>'$.trigger.references.job_id' AS schedule_id, - data->>'$.trigger.settings.cron' AS cron + SELECT id, replace(json_extract(data,'$.trigger.references.job_id'),'"','') AS schedule_id, + replace(json_extract(data,'$.trigger.settings.cron'),'"','') AS cron FROM retention_policy ) AS retention SET vendor_type= 'RETENTION', vendor_id=retention.id, schedule.cron = retention.cron, callback_func_name = 'RETENTION', 
callback_func_param=concat('{"PolicyID":', retention.id, ',"Trigger":"Schedule"}') WHERE schedule.id=retention.schedule_id; + /*create new execution and task record for each schedule*/ CREATE PROCEDURE PROC_UPDATE_EXECUTION_TASK ( ) BEGIN INSERT INTO execution ( vendor_type, vendor_id, `trigger` ) SELECT @@ -125,8 +158,7 @@ ALTER TABLE schedule DROP COLUMN status; UPDATE registry SET type = 'quay' WHERE type = 'quay-io'; - -ALTER TABLE artifact ADD COLUMN icon varchar(255); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('artifact', 'icon', 'varchar(255)'); /*remove the constraint for name in table 'notification_policy'*/ /*ALTER TABLE notification_policy DROP CONSTRAINT notification_policy_name_key;*/ @@ -136,8 +168,8 @@ ALTER TABLE notification_policy ADD UNIQUE(name,project_id); CREATE TABLE IF NOT EXISTS data_migrations ( id SERIAL PRIMARY KEY NOT NULL, version int, - creation_time timestamp default CURRENT_TIMESTAMP, - update_time timestamp default CURRENT_TIMESTAMP + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), + update_time timestamp(6) default CURRENT_TIMESTAMP(6) ); INSERT INTO data_migrations (version) VALUES ( CASE diff --git a/make/migrations/mysql/0050_2.2.0_schema.up.sql b/make/migrations/mysql/0050_2.2.0_schema.up.sql index 2d3c72d1a4ee..d8318f225fec 100644 --- a/make/migrations/mysql/0050_2.2.0_schema.up.sql +++ b/make/migrations/mysql/0050_2.2.0_schema.up.sql @@ -18,28 +18,28 @@ Clean the dirty data in quota/quota_usage DELETE FROM quota WHERE reference='project' AND reference_id NOT IN (SELECT project_id FROM project WHERE deleted=FALSE); DELETE FROM quota_usage WHERE reference='project' AND reference_id NOT IN (SELECT project_id FROM project WHERE deleted=FALSE); -ALTER TABLE schedule ADD COLUMN cron_type varchar(64); -ALTER TABLE robot ADD COLUMN secret varchar(2048); -ALTER TABLE robot ADD COLUMN salt varchar(64); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'cron_type', 'varchar(64)'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('robot', 'secret', 
'varchar(2048)'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('robot', 'salt', 'varchar(64)'); SET sql_mode = ''; -ALTER TABLE task ADD COLUMN vendor_type varchar(16); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('task', 'vendor_type', 'varchar(16)'); UPDATE task, execution SET task.vendor_type = execution.vendor_type WHERE task.execution_id = execution.id; ALTER TABLE task MODIFY COLUMN vendor_type varchar(16) NOT NULL; -ALTER TABLE execution ADD COLUMN update_time timestamp; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('execution', 'update_time', 'timestamp(6)'); UPDATE artifact AS art SET size = ( SELECT sum( size ) FROM `blob` WHERE digest IN ( SELECT digest_blob FROM artifact_blob WHERE digest_af = art.digest ) ); -ALTER TABLE robot ADD COLUMN duration int; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('robot', 'duration', 'int'); CREATE TABLE IF NOT EXISTS role_permission ( id SERIAL PRIMARY KEY NOT NULL, role_type varchar(255) NOT NULL, role_id int NOT NULL, permission_policy_id int NOT NULL, - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), CONSTRAINT unique_role_permission UNIQUE (role_type, role_id, permission_policy_id) ); @@ -55,7 +55,7 @@ CREATE TABLE IF NOT EXISTS permission_policy ( resource varchar(255), action varchar(255), effect varchar(255), - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), CONSTRAINT unique_rbac_policy UNIQUE (scope(64), resource(64), action(64), effect(64)) ); @@ -188,7 +188,7 @@ END; CALL PROC_UPDATE_REPLICATION_EXECUTION(); /*move the replication execution records into the new execution table*/ -ALTER TABLE replication_execution ADD COLUMN new_execution_id int; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('replication_execution', 'new_execution_id', 'int'); INSERT INTO execution ( vendor_type, vendor_id, status, status_message, revision, `trigger`, start_time, end_time ) SELECT 'REPLICATION', rep_exec.policy_id, @@ -263,7 +263,7 @@ DROP TABLE IF 
EXISTS replication_execution; INSERT INTO `schedule` ( vendor_type, vendor_id, cron, callback_func_name, callback_func_param, creation_time, update_time ) SELECT 'REPLICATION', schd.policy_id, -( SELECT `trigger` ->> '$.trigger_settings.cron' FROM replication_policy WHERE id = schd.policy_id ), +( SELECT replace(json_extract(`trigger`,'$.trigger_settings.cron'),'"','') FROM replication_policy WHERE id = schd.policy_id ), 'REPLICATION_CALLBACK', schd.policy_id, schd.creation_time, @@ -357,6 +357,7 @@ SET sql_mode = ''; ALTER TABLE execution MODIFY COLUMN vendor_type varchar(64) NOT NULL; ALTER TABLE `schedule` MODIFY COLUMN vendor_type varchar(64) DEFAULT NULL; ALTER TABLE `schedule` ADD COLUMN extra_attrs JSON; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'extra_attrs', 'JSON'); ALTER TABLE task MODIFY COLUMN vendor_type varchar(64) NOT NULL; /* Remove these columns in scan_report because execution-task pattern will handle them */ @@ -376,11 +377,11 @@ ALTER TABLE schedule ADD CONSTRAINT unique_schedule UNIQUE (vendor_type, vendor_ INSERT INTO `schedule` ( vendor_type, vendor_id, cron, callback_func_name, callback_func_param, cron_type, extra_attrs, creation_time, update_time ) SELECT 'GARBAGE_COLLECTION', - 1, -schd.cron_str ->> '$.cron', +replace(json_extract(schd.cron_str,'$.cron'),'"',''), 'GARBAGE_COLLECTION', -( SELECT JSON_OBJECT ( 'trigger', NULL, 'deleteuntagged', schd.job_parameters -> '$.delete_untagged', 'dryrun', FALSE, 'extra_attrs', schd.job_parameters ) ), -schd.cron_str ->> '$.type', -( SELECT JSON_OBJECT ( 'delete_untagged', schd.job_parameters -> '$.delete_untagged' ) ), +( SELECT JSON_OBJECT ( 'trigger', NULL, 'deleteuntagged', json_extract(schd.job_parameters,'$.delete_untagged'), 'dryrun', FALSE, 'extra_attrs', schd.job_parameters ) ), +replace(json_extract(schd.cron_str,'$.type'),'"',''), +( SELECT JSON_OBJECT ( 'delete_untagged', json_extract(schd.job_parameters,'$.delete_untagged') ) ), schd.creation_time, schd.update_time FROM @@ -399,9 
+400,9 @@ INSERT INTO execution ( vendor_type, vendor_id, STATUS, revision, `trigger`, sta WHERE vendor_type = 'GARBAGE_COLLECTION' AND vendor_id =- 1 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'GARBAGE_COLLECTION' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = schd.update_time ), @@ -447,9 +448,9 @@ INSERT INTO task ( vendor_type, execution_id, job_id, STATUS, status_code, statu WHERE vendor_type = 'GARBAGE_COLLECTION' AND vendor_id =- 1 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'GARBAGE_COLLECTION' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = schd.creation_time ) @@ -579,7 +580,7 @@ CASE END, 0, 1, - cast( aj.job_parameters AS json ), + aj.job_parameters, aj.creation_time, aj.creation_time, aj.update_time, @@ -595,9 +596,9 @@ WHERE INSERT INTO `schedule` ( vendor_type, vendor_id, cron, callback_func_name, cron_type, creation_time, update_time ) SELECT 'SCAN_ALL', 0, -schd.cron_str ->> 'cron', +replace(json_extract(schd.cron_str,'cron'),'"',''), 'scanAll', -schd.cron_str ->> 'type', +replace(json_extract(schd.cron_str,'type'),'"',''), schd.creation_time, schd.update_time FROM @@ -616,9 +617,9 @@ INSERT INTO execution ( vendor_type, vendor_id, STATUS, revision, `trigger`, sta WHERE vendor_type = 'SCAN_ALL' AND vendor_id =0 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'scanAll' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = 
schd.update_time ), @@ -664,9 +665,9 @@ INSERT INTO task ( vendor_type, execution_id, job_id, STATUS, status_code, statu WHERE vendor_type = 'SCAN_ALL' AND vendor_id =0 - AND cron = schd.cron_str ->> '$.cron' + AND cron = replace(json_extract(schd.cron_str,'$.cron'),'"','') AND callback_func_name = 'scanAll' - AND cron_type = schd.cron_str ->> '$.type' + AND cron_type = replace(json_extract(schd.cron_str,'$.type'),'"','') AND creation_time = schd.creation_time AND update_time = schd.update_time ) @@ -755,7 +756,8 @@ CREATE TABLE IF NOT EXISTS vulnerability_record ( cwe_ids text, vendor_attributes json, UNIQUE (cve_id(64), registration_uuid, package(64), package_version(64)), - CONSTRAINT fk_registration_uuid FOREIGN KEY(registration_uuid) REFERENCES scanner_registration(uuid) ON DELETE CASCADE + CONSTRAINT fk_registration_uuid FOREIGN KEY(registration_uuid) REFERENCES scanner_registration(uuid) ON DELETE CASCADE, + CHECK (vendor_attributes is null or JSON_VALID (vendor_attributes)) ); -- -------------------------------------------------- @@ -782,7 +784,7 @@ DELETE rt FROM retention_task AS rt LEFT JOIN retention_execution re ON rt.execu WHERE re.id IS NULL; /*move the replication execution records into the new execution table*/ -ALTER TABLE retention_execution ADD COLUMN new_execution_id int; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('retention_execution', 'new_execution_id', 'int'); CREATE PROCEDURE PROC_UPDATE_EXECUTION_AND_RETENTION_EXECUTION ( ) BEGIN DECLARE @@ -794,11 +796,11 @@ CREATE PROCEDURE PROC_UPDATE_EXECUTION_AND_RETENTION_EXECUTION ( ) BEGIN DECLARE rep_exec_trigger VARCHAR ( 20 ); DECLARE - rep_exec_start_time TIMESTAMP; + rep_exec_start_time TIMESTAMP(6); DECLARE rep_status VARCHAR ( 32 ); DECLARE - rep_end_time TIMESTAMP; + rep_end_time TIMESTAMP(6); DECLARE new_exec_id INTEGER; DECLARE diff --git a/make/migrations/mysql/0052_2.2.2_schema.up.sql b/make/migrations/mysql/0052_2.2.2_schema.up.sql index 6d5b5201c42a..e716f4264d7d 100644 --- 
a/make/migrations/mysql/0052_2.2.2_schema.up.sql +++ b/make/migrations/mysql/0052_2.2.2_schema.up.sql @@ -1,2 +1,2 @@ -ALTER TABLE schedule ADD COLUMN revision integer; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('schedule', 'revision', 'integer'); UPDATE schedule set revision = 0; \ No newline at end of file diff --git a/make/migrations/mysql/0053_2.2.3_schema.up.sql b/make/migrations/mysql/0053_2.2.3_schema.up.sql index 7642404710c5..8b704db67e27 100644 --- a/make/migrations/mysql/0053_2.2.3_schema.up.sql +++ b/make/migrations/mysql/0053_2.2.3_schema.up.sql @@ -1,4 +1,4 @@ -CREATE INDEX idx_artifact_push_time ON artifact (push_time); -CREATE INDEX idx_tag_push_time ON tag (push_time); -CREATE INDEX idx_tag_artifact_id ON tag (artifact_id); -CREATE INDEX idx_artifact_reference_child_id ON artifact_reference (child_id); \ No newline at end of file +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('artifact', 'push_time', 'idx_artifact_push_time'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('tag', 'push_time', 'idx_tag_push_time'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('tag', 'artifact_id', 'idx_tag_artifact_id'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('artifact_reference', 'child_id', 'idx_artifact_reference_child_id'); \ No newline at end of file diff --git a/make/migrations/mysql/0060_2.3.0_schema.up.sql b/make/migrations/mysql/0060_2.3.0_schema.up.sql index 54ebddfa6c5c..707aece5fd6e 100644 --- a/make/migrations/mysql/0060_2.3.0_schema.up.sql +++ b/make/migrations/mysql/0060_2.3.0_schema.up.sql @@ -1,8 +1,8 @@ -ALTER TABLE replication_policy ADD COLUMN dest_namespace_replace_count int; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('replication_policy', 'dest_namespace_replace_count', 'int'); UPDATE replication_policy SET dest_namespace_replace_count=-1 WHERE dest_namespace IS NOT NULL; -CREATE INDEX idx_artifact_push_time ON artifact (push_time); -CREATE INDEX idx_tag_push_time ON tag (push_time); -CREATE INDEX idx_tag_artifact_id ON tag (artifact_id); -CREATE INDEX idx_artifact_reference_child_id 
ON artifact_reference (child_id); -CREATE INDEX idx_audit_log_op_time ON audit_log (op_time); \ No newline at end of file +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('artifact', 'push_time', 'idx_artifact_push_time'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('tag', 'push_time', 'idx_tag_push_time'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('tag', 'artifact_id', 'idx_tag_artifact_id'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('artifact_reference', 'child_id', 'idx_artifact_reference_child_id'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('audit_log', 'op_time', 'idx_audit_log_op_time'); \ No newline at end of file diff --git a/make/migrations/mysql/0070_2.4.0_schema.up.sql b/make/migrations/mysql/0070_2.4.0_schema.up.sql index a8194557a88e..5abf986b1512 100644 --- a/make/migrations/mysql/0070_2.4.0_schema.up.sql +++ b/make/migrations/mysql/0070_2.4.0_schema.up.sql @@ -1,11 +1,13 @@ /* cleanup deleted user project members */ -DELETE FROM project_member pm WHERE pm.entity_type = 'u' AND EXISTS (SELECT NULL FROM harbor_user u WHERE pm.entity_id = u.user_id AND u.deleted = true ); +DELETE FROM project_member WHERE project_member.entity_type = 'u' AND EXISTS (SELECT NULL FROM harbor_user WHERE project_member.entity_id = harbor_user.user_id AND harbor_user.deleted = true ); -ALTER TABLE replication_policy ADD COLUMN speed_kb int; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('replication_policy', 'speed_kb', 'int'); /* add version fields for lock free quota */ ALTER TABLE quota ADD COLUMN version bigint DEFAULT 0; ALTER TABLE quota_usage ADD COLUMN version bigint DEFAULT 0; +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('quota', 'version', 'bigint DEFAULT 0'); +CALL PROC_ADD_COLUMN_IF_NOT_EXISTS('quota_usage', 'version', 'bigint DEFAULT 0'); /* convert Negligible to None for the severity of the vulnerability record */ UPDATE vulnerability_record SET severity='None' WHERE severity='Negligible'; diff --git a/make/migrations/mysql/0080_2.5.0_schema.up.sql b/make/migrations/mysql/0080_2.5.0_schema.up.sql index 
8deb2ac077cd..1e53b5b1a726 100644 --- a/make/migrations/mysql/0080_2.5.0_schema.up.sql +++ b/make/migrations/mysql/0080_2.5.0_schema.up.sql @@ -15,7 +15,7 @@ CREATE TABLE IF NOT EXISTS artifact_accessory ( type varchar(256), size bigint, digest varchar(1024), - creation_time timestamp default CURRENT_TIMESTAMP, + creation_time timestamp(6) default CURRENT_TIMESTAMP(6), FOREIGN KEY (artifact_id) REFERENCES artifact(id), FOREIGN KEY (subject_artifact_id) REFERENCES artifact(id), CONSTRAINT unique_artifact_accessory UNIQUE (artifact_id, subject_artifact_id) diff --git a/make/migrations/mysql/0081_2.5.2_schema.up.sql b/make/migrations/mysql/0081_2.5.2_schema.up.sql new file mode 100644 index 000000000000..05f7c30c1f8b --- /dev/null +++ b/make/migrations/mysql/0081_2.5.2_schema.up.sql @@ -0,0 +1 @@ +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('artifact', 'repository_name', 'idx_artifact_repository_name'); \ No newline at end of file diff --git a/make/migrations/mysql/0090_2.6.0_schema.up.sql b/make/migrations/mysql/0090_2.6.0_schema.up.sql new file mode 100644 index 000000000000..171dfd694100 --- /dev/null +++ b/make/migrations/mysql/0090_2.6.0_schema.up.sql @@ -0,0 +1,24 @@ +/* Correct project_metadata.public value, should only be true or false, other invaild value will be rewrite to false */ +UPDATE project_metadata SET value='false' WHERE name='public' AND value NOT IN('true', 'false'); + +/* +System Artifact Manager +Github proposal link : https://github.com/goharbor/community/pull/181 +*/ + CREATE TABLE IF NOT EXISTS system_artifact ( + id SERIAL NOT NULL PRIMARY KEY, + repository varchar(256) NOT NULL, + digest varchar(255) NOT NULL DEFAULT '' , + size bigint NOT NULL DEFAULT 0 , + vendor varchar(255) NOT NULL DEFAULT '' , + type varchar(255) NOT NULL DEFAULT '' , + create_time timestamp default CURRENT_TIMESTAMP, + extra_attrs text NOT NULL DEFAULT '' , + UNIQUE (repository, digest, vendor) +); + +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('artifact', 'repository_name', 
'idx_artifact_repository_name'); + +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('execution', 'vendor_type', 'idx_execution_vendor_type_vendor_id'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('execution', 'start_time', 'idx_execution_start_time'); +CALL PROC_CREATE_INDEX_IF_NOT_EXISTS('audit_log', 'project_id, op_time', 'idx_audit_log_project_id_optime'); diff --git a/make/photon/notary-server/Dockerfile b/make/photon/notary-server/Dockerfile index f6a7309d1ba3..f651f6529ff7 100644 --- a/make/photon/notary-server/Dockerfile +++ b/make/photon/notary-server/Dockerfile @@ -10,4 +10,4 @@ COPY ./make/photon/notary/binary/migrations/ /migrations/ RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch ENV SERVICE_NAME=notary_server USER notary -CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt \ No newline at end of file +CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=${CONFIG_FILE} -logf=logfmt \ No newline at end of file diff --git a/make/photon/notary-signer/Dockerfile b/make/photon/notary-signer/Dockerfile index aaa3a8c52089..e9374bd46105 100644 --- a/make/photon/notary-signer/Dockerfile +++ b/make/photon/notary-signer/Dockerfile @@ -10,4 +10,4 @@ COPY ./make/photon/notary/binary/migrations/ /migrations/ RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch ENV SERVICE_NAME=notary_signer USER notary -CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt \ No newline at end of file +CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=${CONFIG_FILE} -logf=logfmt \ No newline at end of file diff --git a/make/photon/prepare/templates/core/env.jinja b/make/photon/prepare/templates/core/env.jinja index 4824318c6278..b9c5cbe43289 100644 --- 
a/make/photon/prepare/templates/core/env.jinja +++ b/make/photon/prepare/templates/core/env.jinja @@ -7,15 +7,16 @@ _REDIS_URL_REG={{redis_url_reg}} LOG_LEVEL={{log_level}} EXT_ENDPOINT={{public_url}} -DATABASE_TYPE=postgresql -POSTGRESQL_HOST={{harbor_db_host}} -POSTGRESQL_PORT={{harbor_db_port}} -POSTGRESQL_USERNAME={{harbor_db_username}} -POSTGRESQL_PASSWORD={{harbor_db_password}} -POSTGRESQL_DATABASE={{harbor_db_name}} -POSTGRESQL_SSLMODE={{harbor_db_sslmode}} -POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}} -POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} +DATABASE_TYPE={{harbor_db_type}} +DB_HOST={{harbor_db_host}} +DB_PORT={{harbor_db_port}} +DB_USERNAME={{harbor_db_username}} +DB_PASSWORD={{harbor_db_password}} +DB_DATABASE={{harbor_db_name}} +DB_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}} +DB_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} +DB_SSLMODE={{harbor_db_sslmode}} +DB_COLLATION={{harbor_db_collation}} REGISTRY_URL={{registry_url}} PORTAL_URL={{portal_url}} TOKEN_SERVICE_URL={{token_service_url}} diff --git a/make/photon/prepare/templates/exporter/env.jinja b/make/photon/prepare/templates/exporter/env.jinja index d9e557c3ad8f..59057ff41455 100644 --- a/make/photon/prepare/templates/exporter/env.jinja +++ b/make/photon/prepare/templates/exporter/env.jinja @@ -18,6 +18,7 @@ HARBOR_SERVICE_SCHEME=https HARBOR_SERVICE_PORT=8080 HARBOR_SERVICE_SCHEME=http {% endif %} +HARBOR_DATABASE_TYPE={{harbor_db_type}} HARBOR_DATABASE_HOST={{harbor_db_host}} HARBOR_DATABASE_PORT={{harbor_db_port}} HARBOR_DATABASE_USERNAME={{harbor_db_username}} @@ -25,4 +26,5 @@ HARBOR_DATABASE_PASSWORD={{harbor_db_password}} HARBOR_DATABASE_DBNAME={{harbor_db_name}} HARBOR_DATABASE_SSLMODE={{harbor_db_sslmode}} HARBOR_DATABASE_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}} -HARBOR_DATABASE_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} \ No newline at end of file +HARBOR_DATABASE_MAX_OPEN_CONNS={{harbor_db_max_open_conns}} +HARBOR_DATABASE_COLLATION={{harbor_db_collation}} \ 
No newline at end of file diff --git a/make/photon/prepare/templates/notary/server-config.mysql.json.jinja b/make/photon/prepare/templates/notary/server-config.mysql.json.jinja new file mode 100644 index 000000000000..2dad995cc028 --- /dev/null +++ b/make/photon/prepare/templates/notary/server-config.mysql.json.jinja @@ -0,0 +1,28 @@ +{ + "server": { + "http_addr": ":4443" + }, + "trust_service": { + "type": "remote", + "hostname": "notarysigner", + "port": "7899", + "tls_ca_file": "./notary-signer-ca.crt", + "key_algorithm": "ecdsa" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "mysql", + "db_url": "{{notary_server_db_username}}:{{notary_server_db_password}}@tcp({{notary_server_db_host}}:{{notary_server_db_port}})/{{notary_server_db_name}}?parseTime=True" + }, + "auth": { + "type": "token", + "options": { + "realm": "{{token_endpoint}}/service/token", + "service": "harbor-notary", + "issuer": "harbor-token-issuer", + "rootcertbundle": "/etc/notary/root.crt" + } + } +} \ No newline at end of file diff --git a/make/photon/prepare/templates/notary/server_env.jinja b/make/photon/prepare/templates/notary/server_env.jinja index 7486b2647083..677538dd0fe1 100644 --- a/make/photon/prepare/templates/notary/server_env.jinja +++ b/make/photon/prepare/templates/notary/server_env.jinja @@ -1,2 +1,9 @@ +{% if ( notary_server_db_type == "mysql" or notary_server_db_type == "mariadb" ) %} +MIGRATIONS_PATH=migrations/server/mysql +CONFIG_FILE=/etc/notary/server-config.mysql.json +DB_URL=mysql://{{notary_server_db_username}}:{{notary_server_db_password}}@tcp({{notary_server_db_host}}:{{notary_server_db_port}})/{{notary_server_db_name}} +{% else %} MIGRATIONS_PATH=migrations/server/postgresql +CONFIG_FILE=/etc/notary/server-config.postgres.json DB_URL=postgres://{{notary_server_db_username}}:{{notary_server_db_password}}@{{notary_server_db_host}}:{{notary_server_db_port}}/{{notary_server_db_name}}?sslmode={{notary_server_db_sslmode}} +{% endif %} \ No 
newline at end of file diff --git a/make/photon/prepare/templates/notary/signer-config.mysql.json.jinja b/make/photon/prepare/templates/notary/signer-config.mysql.json.jinja new file mode 100644 index 000000000000..37018d80ae48 --- /dev/null +++ b/make/photon/prepare/templates/notary/signer-config.mysql.json.jinja @@ -0,0 +1,15 @@ +{ + "server": { + "grpc_addr": ":7899", + "tls_cert_file": "./notary-signer.crt", + "tls_key_file": "./notary-signer.key" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "mysql", + "db_url": "{{notary_signer_db_username}}:{{notary_signer_db_password}}@tcp({{notary_signer_db_host}}:{{notary_signer_db_port}})/{{notary_signer_db_name}}?parseTime=True", + "default_alias": "defaultalias" + } +} \ No newline at end of file diff --git a/make/photon/prepare/templates/notary/signer_env.jinja b/make/photon/prepare/templates/notary/signer_env.jinja index 2482b5a0bac6..9928551221a6 100644 --- a/make/photon/prepare/templates/notary/signer_env.jinja +++ b/make/photon/prepare/templates/notary/signer_env.jinja @@ -1,3 +1,10 @@ NOTARY_SIGNER_DEFAULTALIAS={{alias}} +{% if ( notary_signer_db_type == "mysql" or notary_signer_db_type == "mariadb" ) %} +MIGRATIONS_PATH=migrations/signer/mysql +CONFIG_FILE=/etc/notary/signer-config.mysql.json +DB_URL=mysql://{{notary_server_db_username}}:{{notary_server_db_password}}@tcp({{notary_server_db_host}}:{{notary_server_db_port}})/{{notary_server_db_name}} +{% else %} MIGRATIONS_PATH=migrations/signer/postgresql +CONFIG_FILE=/etc/notary/signer-config.postgres.json DB_URL=postgres://{{notary_signer_db_username}}:{{notary_signer_db_password}}@{{notary_signer_db_host}}:{{notary_signer_db_port}}/{{notary_signer_db_name}}?sslmode={{notary_signer_db_sslmode}} +{% endif %} \ No newline at end of file diff --git a/make/photon/prepare/utils/configs.py b/make/photon/prepare/utils/configs.py index 99f67df375ad..66115e7b5538 100644 --- a/make/photon/prepare/utils/configs.py +++ 
b/make/photon/prepare/utils/configs.py @@ -12,6 +12,7 @@ default_db_max_open_conns = 0 default_https_cert_path = '/your/certificate/path' default_https_key_path = '/your/certificate/path' +default_db_collation = 'utf8mb4_general_ci' REGISTRY_USER_NAME = 'harbor_registry_user' @@ -279,6 +280,7 @@ def parse_yaml_config(config_file_path, with_notary, with_trivy, with_chartmuseu if external_db_configs: config_dict['external_database'] = True # harbor db + config_dict['harbor_db_type'] = external_db_configs['harbor']['type'] config_dict['harbor_db_host'] = external_db_configs['harbor']['host'] config_dict['harbor_db_port'] = external_db_configs['harbor']['port'] config_dict['harbor_db_name'] = external_db_configs['harbor']['db_name'] @@ -287,9 +289,11 @@ def parse_yaml_config(config_file_path, with_notary, with_trivy, with_chartmuseu config_dict['harbor_db_sslmode'] = external_db_configs['harbor']['ssl_mode'] config_dict['harbor_db_max_idle_conns'] = external_db_configs['harbor'].get("max_idle_conns") or default_db_max_idle_conns config_dict['harbor_db_max_open_conns'] = external_db_configs['harbor'].get("max_open_conns") or default_db_max_open_conns + config_dict['harbor_db_collation'] = external_db_configs['harbor'].get("collation") or default_db_collation if with_notary: # notary signer + config_dict['notary_signer_db_type'] = external_db_configs['notary_signer']['type'] config_dict['notary_signer_db_host'] = external_db_configs['notary_signer']['host'] config_dict['notary_signer_db_port'] = external_db_configs['notary_signer']['port'] config_dict['notary_signer_db_name'] = external_db_configs['notary_signer']['db_name'] @@ -297,6 +301,7 @@ def parse_yaml_config(config_file_path, with_notary, with_trivy, with_chartmuseu config_dict['notary_signer_db_password'] = external_db_configs['notary_signer']['password'] config_dict['notary_signer_db_sslmode'] = external_db_configs['notary_signer']['ssl_mode'] # notary server + config_dict['notary_server_db_type'] = 
external_db_configs['notary_server']['type'] config_dict['notary_server_db_host'] = external_db_configs['notary_server']['host'] config_dict['notary_server_db_port'] = external_db_configs['notary_server']['port'] config_dict['notary_server_db_name'] = external_db_configs['notary_server']['db_name'] diff --git a/make/photon/prepare/utils/notary.py b/make/photon/prepare/utils/notary.py index 59f6ee9ee615..bb644cbb93d7 100644 --- a/make/photon/prepare/utils/notary.py +++ b/make/photon/prepare/utils/notary.py @@ -7,6 +7,8 @@ notary_template_dir = os.path.join(templates_dir, "notary") notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja") notary_server_pg_template = os.path.join(notary_template_dir, "server-config.postgres.json.jinja") +notary_signer_mysql_template = os.path.join(notary_template_dir, "signer-config.mysql.json.jinja") +notary_server_mysql_template = os.path.join(notary_template_dir, "server-config.mysql.json.jinja") notary_server_nginx_config_template = os.path.join(templates_dir, "nginx", "notary.server.conf.jinja") notary_signer_env_template = os.path.join(notary_template_dir, "signer_env.jinja") notary_server_env_template = os.path.join(notary_template_dir, "server_env.jinja") @@ -14,6 +16,8 @@ notary_config_dir = os.path.join(config_dir, 'notary') notary_signer_pg_config = os.path.join(notary_config_dir, "signer-config.postgres.json") notary_server_pg_config = os.path.join(notary_config_dir, "server-config.postgres.json") +notary_signer_mysql_config = os.path.join(notary_config_dir, "signer-config.mysql.json") +notary_server_mysql_config = os.path.join(notary_config_dir, "server-config.mysql.json") notary_server_config_path = os.path.join(notary_config_dir, 'notary.server.conf') notary_signer_env_path = os.path.join(notary_config_dir, "signer_env") notary_server_env_path = os.path.join(notary_config_dir, "server_env") @@ -53,7 +57,7 @@ def prepare_env_notary(nginx_config_dir): 
signer_key_secret_path.exists() and signer_ca_cert_secret_path.exists() ): - # If the certs are exist in old localtion, move them to new location + # If the certs are exist in old location, move them to new location if old_signer_ca_cert_secret_path.exists() and old_signer_cert_secret_path.exists() and old_signer_key_secret_path.exists(): print("Copying certs for notary signer") shutil.copy2(old_signer_ca_cert_secret_path, signer_ca_cert_secret_path) @@ -121,6 +125,14 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa token_endpoint=config_dict['public_url'], **config_dict) + render_jinja( + notary_server_mysql_template, + notary_server_mysql_config, + uid=DEFAULT_UID, + gid=DEFAULT_GID, + token_endpoint=config_dict['public_url'], + **config_dict) + render_jinja( notary_server_env_template, notary_server_env_path, @@ -142,3 +154,11 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa gid=DEFAULT_GID, alias=default_alias, **config_dict) + + render_jinja( + notary_signer_mysql_template, + notary_signer_mysql_config, + uid=DEFAULT_UID, + gid=DEFAULT_GID, + alias=default_alias, + **config_dict) diff --git a/src/cmd/exporter/main.go b/src/cmd/exporter/main.go index 536636c52191..da04b9e0afd5 100644 --- a/src/cmd/exporter/main.go +++ b/src/cmd/exporter/main.go @@ -12,6 +12,7 @@ import ( "github.com/goharbor/harbor/src/common/dao" commonthttp "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/pkg/exporter" ) @@ -21,19 +22,7 @@ func main() { viper.SetEnvPrefix("harbor") viper.AutomaticEnv() viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - dbCfg := &models.Database{ - Type: "postgresql", - PostGreSQL: &models.PostGreSQL{ - Host: viper.GetString("database.host"), - Port: viper.GetInt("database.port"), - Username: 
viper.GetString("database.username"), - Password: viper.GetString("database.password"), - Database: viper.GetString("database.dbname"), - SSLMode: viper.GetString("database.sslmode"), - MaxIdleConns: viper.GetInt("database.max_idle_conns"), - MaxOpenConns: viper.GetInt("database.max_open_conns"), - }, - } + dbCfg := Database() if err := dao.InitDatabase(dbCfg); err != nil { log.Fatalf("failed to initialize database: %v", err) } @@ -82,3 +71,40 @@ func main() { os.Exit(1) } } + +// Database returns database settings +func Database() *models.Database { + databaseType := viper.GetString("database.type") + + switch { + case utils.IsDBPostgresql(databaseType): + return &models.Database{ + Type: databaseType, + PostGreSQL: &models.PostGreSQL{ + Host: viper.GetString("database.host"), + Port: viper.GetInt("database.port"), + Username: viper.GetString("database.username"), + Password: viper.GetString("database.password"), + Database: viper.GetString("database.dbname"), + SSLMode: viper.GetString("database.sslmode"), + MaxIdleConns: viper.GetInt("database.max_idle_conns"), + MaxOpenConns: viper.GetInt("database.max_open_conns"), + }, + } + case utils.IsDBMysql(databaseType): + return &models.Database{ + Type: databaseType, + MySQL: &models.MySQL{ + Host: viper.GetString("database.host"), + Port: viper.GetInt("database.port"), + Username: viper.GetString("database.username"), + Password: viper.GetString("database.password"), + Database: viper.GetString("database.dbname"), + MaxIdleConns: viper.GetInt("database.max_idle_conns"), + MaxOpenConns: viper.GetInt("database.max_open_conns"), + }, + } + } + + return nil +} diff --git a/src/cmd/migrate-patch/main.go b/src/cmd/migrate-patch/main.go index a95eac205eb0..b4b4fd1b4e59 100644 --- a/src/cmd/migrate-patch/main.go +++ b/src/cmd/migrate-patch/main.go @@ -7,48 +7,64 @@ import ( "strings" "time" + _ "github.com/go-sql-driver/mysql" // registry mysql driver _ "github.com/jackc/pgx/v4/stdlib" // registry pgx driver ) var dbURL 
string -const pgSQLAlterStmt string = `ALTER TABLE schema_migrations ADD COLUMN "dirty" boolean NOT NULL DEFAULT false` -const pgSQLCheckColStmt string = `SELECT T1.C1, T2.C2 FROM +const sqlAlterStmt string = `ALTER TABLE schema_migrations ADD COLUMN "dirty" boolean NOT NULL DEFAULT false` +const sqlCheckColStmt string = `SELECT T1.C1, T2.C2 FROM (SELECT COUNT(*) AS C1 FROM information_schema.tables WHERE table_name='schema_migrations') T1, (SELECT COUNT(*) AS C2 FROM information_schema.columns WHERE table_name='schema_migrations' and column_name='dirty') T2` -const pgSQLDelRows string = `DELETE FROM schema_migrations t WHERE t.version < ( SELECT MAX(version) FROM schema_migrations )` +const sqlDelRows string = `DELETE FROM schema_migrations t WHERE t.version < ( SELECT MAX(version) FROM schema_migrations )` func init() { - urlUsage := `The URL to the target database (driver://url). Currently it only supports postgres` + urlUsage := `The URL to the target database (driver://url). Currently it only supports postgres/mariadb/mysql` flag.StringVar(&dbURL, "database", "", urlUsage) } func main() { flag.Parse() log.Printf("Updating database.") - if !strings.HasPrefix(dbURL, "postgres://") { + var ( + db *sql.DB + err error + ) + + switch { + case strings.HasPrefix(dbURL, "postgres://"): + log.Printf("DB type is postgres.") + db, err = sql.Open("pgx", dbURL) + case strings.HasPrefix(dbURL, "mysql://"): + log.Printf("DB type is mysql.") + dbURL = strings.TrimLeft(dbURL, "mysql://") + db, err = sql.Open("mysql", dbURL) + default: log.Fatalf("Invalid URL: '%s'\n", dbURL) } - db, err := sql.Open("pgx", dbURL) if err != nil { log.Fatalf("Failed to connect to Database, error: %v\n", err) } + defer db.Close() + c := make(chan struct{}, 1) go func() { err := db.Ping() for ; err != nil; err = db.Ping() { - log.Println("Failed to Ping DB, sleep for 1 second.") + log.Printf("Failed to Ping DB:%s, sleep for 1 second.", err) time.Sleep(1 * time.Second) } c <- struct{}{} }() + select 
{ case <-c: case <-time.After(30 * time.Second): log.Fatal("Failed to connect DB after 30 seconds, time out. \n") } - row := db.QueryRow(pgSQLCheckColStmt) + row := db.QueryRow(sqlCheckColStmt) var tblCount, colCount int if err := row.Scan(&tblCount, &colCount); err != nil { log.Fatalf("Failed to check schema_migrations table, error: %v \n", err) @@ -61,10 +77,10 @@ func main() { log.Println("schema_migrations table does not require update, skip.") return } - if _, err := db.Exec(pgSQLDelRows); err != nil { + if _, err := db.Exec(sqlDelRows); err != nil { log.Fatalf("Failed to clean up table, error: %v", err) } - if _, err := db.Exec(pgSQLAlterStmt); err != nil { + if _, err := db.Exec(sqlAlterStmt); err != nil { log.Fatalf("Failed to update database, error: %v \n", err) } log.Println("Done updating database.") diff --git a/src/common/const.go b/src/common/const.go index a99d8ddaebba..521d5dbbdf3c 100755 --- a/src/common/const.go +++ b/src/common/const.go @@ -51,14 +51,15 @@ const ( ExtEndpoint = "ext_endpoint" AUTHMode = "auth_mode" DatabaseType = "database_type" - PostGreSQLHOST = "postgresql_host" - PostGreSQLPort = "postgresql_port" - PostGreSQLUsername = "postgresql_username" - PostGreSQLPassword = "postgresql_password" - PostGreSQLDatabase = "postgresql_database" - PostGreSQLSSLMode = "postgresql_sslmode" - PostGreSQLMaxIdleConns = "postgresql_max_idle_conns" - PostGreSQLMaxOpenConns = "postgresql_max_open_conns" + DBHOST = "db_host" + DBPort = "db_port" + DBUsername = "db_username" + DBPassword = "db_password" + DBDatabase = "db_database" + DBSSLMode = "db_sslmode" + DBCollation = "db_collation" + DBMaxIdleConns = "db_max_idle_conns" + DBMaxOpenConns = "db_max_open_conns" SelfRegistration = "self_registration" CoreURL = "core_url" CoreLocalURL = "core_local_url" diff --git a/src/common/dao/base.go b/src/common/dao/base.go index eeccb25259cd..ff0643d210f0 100644 --- a/src/common/dao/base.go +++ b/src/common/dao/base.go @@ -23,6 +23,7 @@ import ( 
"github.com/beego/beego/orm" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/log" proModels "github.com/goharbor/harbor/src/pkg/project/models" userModels "github.com/goharbor/harbor/src/pkg/user/models" @@ -74,8 +75,8 @@ func InitDatabase(database *models.Database) error { } func getDatabase(database *models.Database) (db Database, err error) { - switch database.Type { - case "", "postgresql": + switch { + case utils.IsDBPostgresql(database.Type): db = NewPGSQL( database.PostGreSQL.Host, strconv.Itoa(database.PostGreSQL.Port), @@ -86,6 +87,17 @@ func getDatabase(database *models.Database) (db Database, err error) { database.PostGreSQL.MaxIdleConns, database.PostGreSQL.MaxOpenConns, ) + case utils.IsDBMysql(database.Type): + db = NewMySQL( + database.MySQL.Host, + strconv.Itoa(database.MySQL.Port), + database.MySQL.Username, + database.MySQL.Password, + database.MySQL.Database, + database.MySQL.Collation, + database.MySQL.MaxIdleConns, + database.MySQL.MaxOpenConns, + ) default: err = fmt.Errorf("invalid database: %s", database.Type) } @@ -121,6 +133,9 @@ func ClearTable(table string) error { if table == "project_metadata" { // make sure library is public sql = fmt.Sprintf("delete from %s where id > 1", table) } + if table == "blob" && o.Driver().Type() == orm.DRMySQL { + sql = fmt.Sprintf("delete from `%s` where 1=1", table) + } _, err := o.Raw(sql).Exec() return err } diff --git a/src/common/dao/dao_test.go b/src/common/dao/dao_test.go index a1248dbf82a5..53d25c4a4560 100644 --- a/src/common/dao/dao_test.go +++ b/src/common/dao/dao_test.go @@ -22,6 +22,7 @@ import ( "github.com/beego/beego/orm" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/log" libOrm "github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/pkg/user" @@ -99,22 +100,22 @@ const password string = "Abc12345" const 
projectName string = "test_project" func TestMain(m *testing.M) { - databases := []string{"postgresql"} - for _, database := range databases { - log.Infof("run test cases for database: %s", database) - result := 1 - switch database { - case "postgresql": - PrepareTestForPostgresSQL() - default: - log.Fatalf("invalid database: %s", database) - } - testCtx = libOrm.Context() - result = testForAll(m) + database := os.Getenv("DATABASE_TYPE") + log.Infof("run test cases for database: %s", database) + result := 1 + switch { + case utils.IsDBPostgresql(): + PrepareTestForPostgresSQL() + case utils.IsDBMysql(): + PrepareTestForMySQL() + default: + log.Fatalf("invalid database: %s", database) + } + testCtx = libOrm.Context() + result = testForAll(m) - if result != 0 { - os.Exit(result) - } + if result != 0 { + os.Exit(result) } } diff --git a/src/common/dao/mysql.go b/src/common/dao/mysql.go index 5222272cb678..9534ff4a6a15 100644 --- a/src/common/dao/mysql.go +++ b/src/common/dao/mysql.go @@ -16,30 +16,44 @@ package dao import ( "fmt" + "os" + "strconv" "time" "github.com/beego/beego/orm" _ "github.com/go-sql-driver/mysql" // register mysql driver + "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/mysql" // import mysql driver for migrator + "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/log" ) +const defaultMysqlMigrationPath = "migrations/mysql/" + type mysql struct { - host string - port string - usr string - pwd string - database string + host string + port string + usr string + pwd string + database string + collation string + maxIdleConns int + maxOpenConns int } // NewMySQL returns an instance of mysql -func NewMySQL(host, port, usr, pwd, database string) Database { +func NewMySQL(host, port, usr, pwd, database, collation string, maxIdleConns int, maxOpenConns int) Database { return &mysql{ - host: host, - port: port, - usr: usr, - pwd: pwd, - 
database: database, + host: host, + port: port, + usr: usr, + pwd: pwd, + database: database, + collation: collation, + maxIdleConns: maxIdleConns, + maxOpenConns: maxOpenConns, } } @@ -57,12 +71,13 @@ func (m *mysql) Register(alias ...string) error { if len(alias) != 0 { an = alias[0] } - conn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", m.usr, - m.pwd, m.host, m.port, m.database) - if err := orm.RegisterDataBase(an, "mysql", conn); err != nil { + conn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?collation=%s&parseTime=true&loc=Local", m.usr, + m.pwd, m.host, m.port, m.database, m.collation) + if err := orm.RegisterDataBase(an, "mysql", conn, m.maxIdleConns, m.maxOpenConns); err != nil { return err } db, _ := orm.GetDB(an) + db.SetMaxOpenConns(m.maxOpenConns) db.SetConnMaxLifetime(5 * time.Minute) return nil } @@ -74,6 +89,34 @@ func (m *mysql) Name() string { // UpgradeSchema is not supported for MySQL, it assumes the schema is initialized and up to date in the DB instance. func (m *mysql) UpgradeSchema() error { + port, err := strconv.Atoi(m.port) + if err != nil { + return err + } + mg, err := NewMysqlMigrator(&models.MySQL{ + Host: m.host, + Port: port, + Username: m.usr, + Password: m.pwd, + Database: m.database, + }) + if err != nil { + return err + } + defer func() { + srcErr, dbErr := mg.Close() + if srcErr != nil || dbErr != nil { + log.Warningf("Failed to close migrator, source error: %v, db error: %v", srcErr, dbErr) + } + }() + log.Infof("Upgrading schema for mysql ...") + err = mg.Up() + if err == migrate.ErrNoChange { + log.Infof("No change in schema, skip.") + } else if err != nil { // migrate.ErrLockTimeout will be thrown when another process is doing migration and timeout. 
+ log.Errorf("Failed to upgrade schema, error: %q", err) + return err + } return nil } @@ -82,3 +125,21 @@ func (m *mysql) String() string { return fmt.Sprintf("type-%s host-%s port-%s user-%s database-%s", m.Name(), m.host, m.port, m.usr, m.database) } + +// NewMysqlMigrator creates a migrator base on the information +func NewMysqlMigrator(database *models.MySQL) (*migrate.Migrate, error) { + dbURL := fmt.Sprintf("mysql://%s:%s@tcp(%s:%d)/%s", database.Username, + database.Password, database.Host, database.Port, database.Database) + // For UT + path := os.Getenv("MYSQL_MIGRATION_SCRIPTS_PATH") + if len(path) == 0 { + path = defaultMysqlMigrationPath + } + srcURL := fmt.Sprintf("file://%s", path) + m, err := migrate.New(srcURL, dbURL) + if err != nil { + return nil, err + } + m.Log = newMigrateLogger() + return m, nil +} diff --git a/src/common/dao/mysql_test.go b/src/common/dao/mysql_test.go new file mode 100644 index 000000000000..9b612af1c60c --- /dev/null +++ b/src/common/dao/mysql_test.go @@ -0,0 +1,57 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "fmt" + "sync" + "testing" + + "github.com/beego/beego/orm" + + "github.com/goharbor/harbor/src/common/utils" +) + +func TestMysqlMaxOpenConns(t *testing.T) { + if !utils.IsDBMysql() { + return + } + var wg sync.WaitGroup + + queryNum := 200 + results := make([]bool, queryNum) + for i := 0; i < queryNum; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + o := orm.NewOrm() + if _, err := o.Raw("SELECT sleep(10)").Exec(); err != nil { + fmt.Printf("failed to get the count of the projects, error: %v\n", err) + results[i] = false + } else { + results[i] = true + } + }(i) + } + + wg.Wait() + + for _, success := range results { + if !success { + t.Fatal("max open conns not work") + } + } +} diff --git a/src/common/dao/pgsql_test.go b/src/common/dao/pgsql_test.go index 2a2aa635c18b..21bf20edf30e 100644 --- a/src/common/dao/pgsql_test.go +++ b/src/common/dao/pgsql_test.go @@ -20,9 +20,14 @@ import ( "testing" "github.com/beego/beego/orm" + + "github.com/goharbor/harbor/src/common/utils" ) -func TestMaxOpenConns(t *testing.T) { +func TestPgsqlMaxOpenConns(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } var wg sync.WaitGroup queryNum := 200 diff --git a/src/common/dao/testutils.go b/src/common/dao/testutils.go index a0d7101b0a57..0e9b981da18e 100644 --- a/src/common/dao/testutils.go +++ b/src/common/dao/testutils.go @@ -20,13 +20,64 @@ import ( "strconv" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/log" ) var defaultRegistered = false +func PrepareTestForDB() { + switch { + case utils.IsDBPostgresql(): + PrepareTestForPostgresSQL() + case utils.IsDBMysql(): + PrepareTestForMySQL() + default: + PrepareTestForPostgresSQL() + } +} + // PrepareTestForMySQL is for test only. 
func PrepareTestForMySQL() { + dbHost := os.Getenv("DB_HOST") + if len(dbHost) == 0 { + log.Fatalf("environment variable DB_HOST is not set") + } + dbUser := os.Getenv("DB_USERNAME") + if len(dbUser) == 0 { + log.Fatalf("environment variable DB_USERNAME is not set") + } + dbPortStr := os.Getenv("DB_PORT") + if len(dbPortStr) == 0 { + log.Fatalf("environment variable DB_PORT is not set") + } + dbPort, err := strconv.Atoi(dbPortStr) + if err != nil { + log.Fatalf("invalid POSTGRESQL_PORT: %v", err) + } + + dbPassword := os.Getenv("DB_PASSWORD") + dbDatabase := os.Getenv("DB_DATABASE") + if len(dbDatabase) == 0 { + log.Fatalf("environment variable DB_DATABASE is not set") + } + dbCollation := os.Getenv("DB_COLLATION") + database := &models.Database{ + Type: "mysql", + MySQL: &models.MySQL{ + Host: dbHost, + Port: dbPort, + Username: dbUser, + Password: dbPassword, + Database: dbDatabase, + Collation: dbCollation, + MaxIdleConns: 50, + MaxOpenConns: 100, + }, + } + + log.Infof("DB_HOST: %s, DB_USERNAME: %s, DB_PORT: %d, DB_PASSWORD: %s, DB_COLLATION: %s\n", dbHost, dbUser, dbPort, dbPassword, dbCollation) + initDatabaseForTest(database) } // PrepareTestForSQLite is for test only. @@ -35,27 +86,27 @@ func PrepareTestForSQLite() { // PrepareTestForPostgresSQL is for test only. 
func PrepareTestForPostgresSQL() { - dbHost := os.Getenv("POSTGRESQL_HOST") + dbHost := os.Getenv("DB_HOST") if len(dbHost) == 0 { - log.Fatalf("environment variable POSTGRESQL_HOST is not set") + log.Fatalf("environment variable DB_HOST is not set") } - dbUser := os.Getenv("POSTGRESQL_USR") + dbUser := os.Getenv("DB_USERNAME") if len(dbUser) == 0 { - log.Fatalf("environment variable POSTGRESQL_USR is not set") + log.Fatalf("environment variable DB_USERNAME is not set") } - dbPortStr := os.Getenv("POSTGRESQL_PORT") + dbPortStr := os.Getenv("DB_PORT") if len(dbPortStr) == 0 { - log.Fatalf("environment variable POSTGRESQL_PORT is not set") + log.Fatalf("environment variable DB_PORT is not set") } dbPort, err := strconv.Atoi(dbPortStr) if err != nil { log.Fatalf("invalid POSTGRESQL_PORT: %v", err) } - dbPassword := os.Getenv("POSTGRESQL_PWD") - dbDatabase := os.Getenv("POSTGRESQL_DATABASE") + dbPassword := os.Getenv("DB_PASSWORD") + dbDatabase := os.Getenv("DB_DATABASE") if len(dbDatabase) == 0 { - log.Fatalf("environment variable POSTGRESQL_DATABASE is not set") + log.Fatalf("environment variable DB_DATABASE is not set") } database := &models.Database{ @@ -71,7 +122,7 @@ func PrepareTestForPostgresSQL() { }, } - log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword) + log.Infof("DB_HOST: %s, DB_USERNAME: %s, DB_PORT: %d, DB_PASSWORD: %s\n", dbHost, dbUser, dbPort, dbPassword) initDatabaseForTest(database) } diff --git a/src/common/models/database.go b/src/common/models/database.go index 3fb031240919..9289d73a017c 100644 --- a/src/common/models/database.go +++ b/src/common/models/database.go @@ -18,15 +18,19 @@ package models type Database struct { Type string `json:"type"` PostGreSQL *PostGreSQL `json:"postgresql,omitempty"` + MySQL *MySQL `json:"mysql,omitempty"` } // MySQL ... 
type MySQL struct { - Host string `json:"host"` - Port int `json:"port"` - Username string `json:"username"` - Password string `json:"password,omitempty"` - Database string `json:"database"` + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + Database string `json:"database"` + Collation string `json:"collation"` + MaxIdleConns int `json:"max_idle_conns"` + MaxOpenConns int `json:"max_open_conns"` } // SQLite ... diff --git a/src/common/utils/test/config.go b/src/common/utils/test/config.go index c9637a077571..b79d8238d7b3 100644 --- a/src/common/utils/test/config.go +++ b/src/common/utils/test/config.go @@ -22,11 +22,12 @@ var defaultConfig = map[string]interface{}{ common.ExtEndpoint: "https://host01.com", common.AUTHMode: common.DBAuth, common.DatabaseType: "postgresql", - common.PostGreSQLHOST: "127.0.0.1", - common.PostGreSQLPort: 5432, - common.PostGreSQLUsername: "postgres", - common.PostGreSQLPassword: "root123", - common.PostGreSQLDatabase: "registry", + common.DBHOST: "127.0.0.1", + common.DBPort: 5432, + common.DBUsername: "postgres", + common.DBPassword: "root123", + common.DBDatabase: "registry", + common.DBCollation: "utf8mb4_general_ci", common.SelfRegistration: true, common.LDAPURL: "ldap://127.0.0.1", common.LDAPSearchDN: "uid=searchuser,ou=people,dc=mydomain,dc=com", diff --git a/src/common/utils/test/database.go b/src/common/utils/test/database.go index 01e8588d63f0..7869c82ae5f2 100644 --- a/src/common/utils/test/database.go +++ b/src/common/utils/test/database.go @@ -21,6 +21,7 @@ import ( "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/lib/orm" pkguser "github.com/goharbor/harbor/src/pkg/user" @@ -28,42 +29,61 @@ import ( // InitDatabaseFromEnv is used to initialize database for testing func 
InitDatabaseFromEnv() { - dbHost := os.Getenv("POSTGRESQL_HOST") + dbHost := os.Getenv("DB_HOST") if len(dbHost) == 0 { - log.Fatalf("environment variable POSTGRESQL_HOST is not set") + log.Fatalf("environment variable DB_HOST is not set") } - dbUser := os.Getenv("POSTGRESQL_USR") + dbUser := os.Getenv("DB_USERNAME") if len(dbUser) == 0 { - log.Fatalf("environment variable POSTGRESQL_USR is not set") + log.Fatalf("environment variable DB_USERNAME is not set") } - dbPortStr := os.Getenv("POSTGRESQL_PORT") + dbPortStr := os.Getenv("DB_PORT") if len(dbPortStr) == 0 { - log.Fatalf("environment variable POSTGRESQL_PORT is not set") + log.Fatalf("environment variable DB_PORT is not set") } dbPort, err := strconv.Atoi(dbPortStr) if err != nil { - log.Fatalf("invalid POSTGRESQL_PORT: %v", err) + log.Fatalf("invalid DB_PORT: %v", err) } - dbPassword := os.Getenv("POSTGRESQL_PWD") - dbDatabase := os.Getenv("POSTGRESQL_DATABASE") + dbPassword := os.Getenv("DB_PASSWORD") + dbDatabase := os.Getenv("DB_DATABASE") + dbCollation := os.Getenv("DB_COLLATION") adminPwd := os.Getenv("HARBOR_ADMIN_PASSWD") if len(dbDatabase) == 0 { - log.Fatalf("environment variable POSTGRESQL_DATABASE is not set") + log.Fatalf("environment variable DB_DATABASE is not set") } - database := &models.Database{ - Type: "postgresql", - PostGreSQL: &models.PostGreSQL{ - Host: dbHost, - Port: dbPort, - Username: dbUser, - Password: dbPassword, - Database: dbDatabase, - }, + database := &models.Database{} + switch { + case utils.IsDBPostgresql(): + database = &models.Database{ + Type: "postgresql", + PostGreSQL: &models.PostGreSQL{ + Host: dbHost, + Port: dbPort, + Username: dbUser, + Password: dbPassword, + Database: dbDatabase, + }, + } + case utils.IsDBMysql(): + database = &models.Database{ + Type: "mysql", + MySQL: &models.MySQL{ + Host: dbHost, + Port: dbPort, + Username: dbUser, + Password: dbPassword, + Database: dbDatabase, + Collation: dbCollation, + }, + } + default: + log.Fatalf("invalid db type 
%s", os.Getenv("DATABASE_TYPE")) } - log.Infof("POSTGRES_HOST: %s, POSTGRES_USR: %s, POSTGRES_PORT: %d, POSTGRES_PWD: %s\n", dbHost, dbUser, dbPort, dbPassword) + log.Infof("DB_HOST: %s, DB_USERNAME: %s, DB_PORT: %d, DB_PASSWORD: %s\n", dbHost, dbUser, dbPort, dbPassword) if err := dao.InitDatabase(database); err != nil { log.Fatalf("failed to init database : %v", err) diff --git a/src/common/utils/test/test.go b/src/common/utils/test/test.go index 635ff96211a2..33d7bc6cef84 100644 --- a/src/common/utils/test/test.go +++ b/src/common/utils/test/test.go @@ -100,11 +100,11 @@ func GetUnitTestConfig() map[string]interface{} { common.ExtEndpoint: fmt.Sprintf("https://%s", ipAddress), common.AUTHMode: "db_auth", common.DatabaseType: "postgresql", - common.PostGreSQLHOST: ipAddress, - common.PostGreSQLPort: 5432, - common.PostGreSQLUsername: "postgres", - common.PostGreSQLPassword: "root123", - common.PostGreSQLDatabase: "registry", + common.DBHOST: ipAddress, + common.DBPort: 5432, + common.DBUsername: "postgres", + common.DBPassword: "root123", + common.DBDatabase: "registry", common.LDAPURL: "ldap://ldap.vmware.com", common.LDAPSearchDN: "cn=admin,dc=example,dc=com", common.LDAPSearchPwd: "admin", diff --git a/src/common/utils/utils.go b/src/common/utils/utils.go index afb97a7abae0..19fa7bec0d3d 100644 --- a/src/common/utils/utils.go +++ b/src/common/utils/utils.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "net/url" + "os" "reflect" "regexp" "strconv" @@ -300,3 +301,39 @@ func NextSchedule(cron string, curTime time.Time) time.Time { func CronParser() cronlib.Parser { return cronlib.NewParser(cronlib.Second | cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow) } + +// IsDBMysql returns is harbor start with mysql. If parameters are passed, the first non-null parameter will be used as the database type. 
+func IsDBMysql(dbTypeOptions ...string) bool { + dbType := os.Getenv("DATABASE_TYPE") + for _, db := range dbTypeOptions { + if db != "" { + dbType = db + break + } + } + return dbType == "mysql" || dbType == "mariadb" +} + +// IsDBMariaDB returns is harbor start with MariaDB. If parameters are passed, the first non-null parameter will be used as the database type. +func IsDBMariaDB(dbTypeOptions ...string) bool { + dbType := os.Getenv("DATABASE_TYPE") + for _, db := range dbTypeOptions { + if db != "" { + dbType = db + break + } + } + return dbType == "mariadb" +} + +// IsDBPostgresql returns is harbor start with postgresql. If parameters are passed, the first non-null parameter will be used as the database type. +func IsDBPostgresql(dbTypeOptions ...string) bool { + dbType := os.Getenv("DATABASE_TYPE") + for _, db := range dbTypeOptions { + if db != "" { + dbType = db + break + } + } + return dbType == "" || dbType == "postgresql" +} diff --git a/src/controller/event/handler/auditlog/auditlog_test.go b/src/controller/event/handler/auditlog/auditlog_test.go index d8648415aa43..73bd364b1a08 100644 --- a/src/controller/event/handler/auditlog/auditlog_test.go +++ b/src/controller/event/handler/auditlog/auditlog_test.go @@ -67,7 +67,7 @@ type AuditLogHandlerTestSuite struct { } func (suite *AuditLogHandlerTestSuite) SetupSuite() { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() suite.logMgr = &MockAuditLogManager{} suite.auditLogHandler = &Handler{} } diff --git a/src/controller/event/handler/internal/artifact_test.go b/src/controller/event/handler/internal/artifact_test.go index eb6fa7925ae0..315cb6058329 100644 --- a/src/controller/event/handler/internal/artifact_test.go +++ b/src/controller/event/handler/internal/artifact_test.go @@ -49,7 +49,7 @@ func TestArtifactHandler(t *testing.T) { // SetupSuite prepares for running ArtifactHandlerTestSuite. 
func (suite *ArtifactHandlerTestSuite) SetupSuite() { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() config.Init() suite.handler = &Handler{} suite.ctx = orm.NewContext(context.TODO(), beegoorm.NewOrm()) @@ -135,7 +135,7 @@ func (suite *ArtifactHandlerTestSuite) TestOnPull() { art, err = pkg.ArtifactMgr.Get(suite.ctx, 1) suite.Nil(err) return art.PullTime.After(lastPullTime) - }, 3*asyncFlushDuration, asyncFlushDuration/2, "wait for pull_time async update") + }, 4*asyncFlushDuration, asyncFlushDuration/2, "wait for pull_time async update") suite.Eventually(func() bool { repository, err = pkg.RepositoryMgr.Get(suite.ctx, 1) diff --git a/src/controller/event/handler/webhook/artifact/replication_test.go b/src/controller/event/handler/webhook/artifact/replication_test.go index 1dd31134790f..38dfe88371a9 100644 --- a/src/controller/event/handler/webhook/artifact/replication_test.go +++ b/src/controller/event/handler/webhook/artifact/replication_test.go @@ -41,7 +41,7 @@ import ( ) func TestReplicationHandler_Handle(t *testing.T) { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() config.Init() PolicyMgr := notification.PolicyMgr diff --git a/src/controller/event/handler/webhook/artifact/retention_test.go b/src/controller/event/handler/webhook/artifact/retention_test.go index fa5f71c79b00..9e06ea7ccf7d 100644 --- a/src/controller/event/handler/webhook/artifact/retention_test.go +++ b/src/controller/event/handler/webhook/artifact/retention_test.go @@ -114,6 +114,6 @@ func TestRetentionHandler_IsStateful(t *testing.T) { } func TestMain(m *testing.M) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() os.Exit(m.Run()) } diff --git a/src/controller/event/handler/webhook/quota/quota_test.go b/src/controller/event/handler/webhook/quota/quota_test.go index 2b01052f2a48..6642f0cf4ba1 100644 --- a/src/controller/event/handler/webhook/quota/quota_test.go +++ b/src/controller/event/handler/webhook/quota/quota_test.go @@ 
-51,7 +51,7 @@ func TestQuotaPreprocessHandler(t *testing.T) { // SetupSuite prepares env for test suite. func (suite *QuotaPreprocessHandlerSuite) SetupSuite() { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() cfg := map[string]interface{}{ common.NotificationEnable: true, } diff --git a/src/controller/event/handler/webhook/scan/scan_test.go b/src/controller/event/handler/webhook/scan/scan_test.go index f6b783a7571f..18062987194f 100644 --- a/src/controller/event/handler/webhook/scan/scan_test.go +++ b/src/controller/event/handler/webhook/scan/scan_test.go @@ -61,7 +61,7 @@ func TestScanImagePreprocessHandler(t *testing.T) { // SetupSuite prepares env for test suite. func (suite *ScanImagePreprocessHandlerSuite) SetupSuite() { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() cfg := map[string]interface{}{ common.NotificationEnable: true, } diff --git a/src/controller/ldap/controller_test.go b/src/controller/ldap/controller_test.go index 1f4b76331dc2..c0ecbfab948b 100644 --- a/src/controller/ldap/controller_test.go +++ b/src/controller/ldap/controller_test.go @@ -33,11 +33,11 @@ var defaultConfigWithVerifyCert = map[string]interface{}{ common.ExtEndpoint: "https://host01.com", common.AUTHMode: common.LDAPAuth, common.DatabaseType: "postgresql", - common.PostGreSQLHOST: "127.0.0.1", - common.PostGreSQLPort: 5432, - common.PostGreSQLUsername: "postgres", - common.PostGreSQLPassword: "root123", - common.PostGreSQLDatabase: "registry", + common.DBHOST: "127.0.0.1", + common.DBPort: 5432, + common.DBUsername: "postgres", + common.DBPassword: "root123", + common.DBDatabase: "registry", common.SelfRegistration: true, common.LDAPURL: "ldap://127.0.0.1:389", common.LDAPSearchDN: "cn=admin,dc=example,dc=com", diff --git a/src/controller/retention/controller_test.go b/src/controller/retention/controller_test.go index a9421b5050f9..244d23840fe7 100644 --- a/src/controller/retention/controller_test.go +++ 
b/src/controller/retention/controller_test.go @@ -50,7 +50,7 @@ func (s *ControllerTestSuite) SetupSuite() { } func TestMain(m *testing.M) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() os.Exit(m.Run()) } diff --git a/src/controller/usergroup/test/controller_test.go b/src/controller/usergroup/test/controller_test.go index 85e4bb560d7e..007cf70314bc 100644 --- a/src/controller/usergroup/test/controller_test.go +++ b/src/controller/usergroup/test/controller_test.go @@ -42,11 +42,11 @@ var defaultConfigWithVerifyCert = map[string]interface{}{ common.ExtEndpoint: "https://host01.com", common.AUTHMode: common.LDAPAuth, common.DatabaseType: "postgresql", - common.PostGreSQLHOST: "127.0.0.1", - common.PostGreSQLPort: 5432, - common.PostGreSQLUsername: "postgres", - common.PostGreSQLPassword: "root123", - common.PostGreSQLDatabase: "registry", + common.DBHOST: "127.0.0.1", + common.DBPort: 5432, + common.DBUsername: "postgres", + common.DBPassword: "root123", + common.DBDatabase: "registry", common.SelfRegistration: true, common.LDAPURL: "ldap://127.0.0.1:389", common.LDAPSearchDN: "cn=admin,dc=example,dc=com", diff --git a/src/core/auth/authproxy/auth_test.go b/src/core/auth/authproxy/auth_test.go index 76045a559f6a..f3eeaf112532 100644 --- a/src/core/auth/authproxy/auth_test.go +++ b/src/core/auth/authproxy/auth_test.go @@ -58,12 +58,12 @@ func TestMain(m *testing.M) { common.HTTPAuthProxyEndpoint: a.Endpoint, common.HTTPAuthProxyTokenReviewEndpoint: a.TokenReviewEndpoint, common.HTTPAuthProxyVerifyCert: false, - common.PostGreSQLSSLMode: cfgMap[common.PostGreSQLSSLMode], - common.PostGreSQLUsername: cfgMap[common.PostGreSQLUsername], - common.PostGreSQLPort: cfgMap[common.PostGreSQLPort], - common.PostGreSQLHOST: cfgMap[common.PostGreSQLHOST], - common.PostGreSQLPassword: cfgMap[common.PostGreSQLPassword], - common.PostGreSQLDatabase: cfgMap[common.PostGreSQLDatabase], + common.DBSSLMode: cfgMap[common.DBSSLMode], + common.DBUsername: 
cfgMap[common.DBUsername], + common.DBPort: cfgMap[common.DBPort], + common.DBHOST: cfgMap[common.DBHOST], + common.DBPassword: cfgMap[common.DBPassword], + common.DBDatabase: cfgMap[common.DBDatabase], } config.InitWithSettings(conf) diff --git a/src/core/auth/ldap/ldap_test.go b/src/core/auth/ldap/ldap_test.go index f1fa37f3e9cf..43d19fad7100 100644 --- a/src/core/auth/ldap/ldap_test.go +++ b/src/core/auth/ldap/ldap_test.go @@ -39,14 +39,14 @@ import ( ) var ldapTestConfig = map[string]interface{}{ - common.ExtEndpoint: "host01.com", - common.AUTHMode: "ldap_auth", - common.DatabaseType: "postgresql", - common.PostGreSQLHOST: "127.0.0.1", - common.PostGreSQLPort: 5432, - common.PostGreSQLUsername: "postgres", - common.PostGreSQLPassword: "root123", - common.PostGreSQLDatabase: "registry", + common.ExtEndpoint: "host01.com", + common.AUTHMode: "ldap_auth", + common.DatabaseType: "postgresql", + common.DBHOST: "127.0.0.1", + common.DBPort: 5432, + common.DBUsername: "postgres", + common.DBPassword: "root123", + common.DBDatabase: "registry", // config.SelfRegistration: true, common.LDAPURL: "ldap://127.0.0.1", common.LDAPSearchDN: "cn=admin,dc=example,dc=com", diff --git a/src/jobservice/job/impl/context_test.go b/src/jobservice/job/impl/context_test.go index 97d1f6b52763..cad438c328ff 100644 --- a/src/jobservice/job/impl/context_test.go +++ b/src/jobservice/job/impl/context_test.go @@ -48,7 +48,7 @@ type ContextImplTestSuite struct { // TestContextImplTestSuite is entry of go test func TestContextImplTestSuite(t *testing.T) { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() suite.Run(t, new(ContextImplTestSuite)) } diff --git a/src/jobservice/logger/backend/db_logger_test.go b/src/jobservice/logger/backend/db_logger_test.go index d9eb5fba4565..944b51b1a625 100644 --- a/src/jobservice/logger/backend/db_logger_test.go +++ b/src/jobservice/logger/backend/db_logger_test.go @@ -7,33 +7,31 @@ import ( "github.com/stretchr/testify/require" 
"github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/logger/getter" "github.com/goharbor/harbor/src/jobservice/logger/sweeper" "github.com/goharbor/harbor/src/lib/log" ) func TestMain(m *testing.M) { + database := os.Getenv("DATABASE_TYPE") + log.Infof("run test cases for database: %s", database) - // databases := []string{"mysql", "sqlite"} - databases := []string{"postgresql"} - for _, database := range databases { - log.Infof("run test cases for database: %s", database) - - result := 1 - switch database { - case "postgresql": - dao.PrepareTestForPostgresSQL() - default: - log.Fatalf("invalid database: %s", database) - } + result := 1 + switch { + case utils.IsDBPostgresql(): + dao.PrepareTestForPostgresSQL() + case utils.IsDBMysql(): + dao.PrepareTestForMySQL() + default: + log.Fatalf("invalid database: %s", database) + } - result = m.Run() + result = m.Run() - if result != 0 { - os.Exit(result) - } + if result != 0 { + os.Exit(result) } - } // Test DB logger diff --git a/src/jobservice/logger/entry_test.go b/src/jobservice/logger/entry_test.go index 66301d30f88e..146b58da5313 100644 --- a/src/jobservice/logger/entry_test.go +++ b/src/jobservice/logger/entry_test.go @@ -9,6 +9,7 @@ import ( "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/logger/backend" "github.com/goharbor/harbor/src/lib/config" "github.com/goharbor/harbor/src/lib/log" @@ -18,24 +19,23 @@ import ( func TestMain(m *testing.M) { config.DefaultCfgManager = common.InMemoryCfgManager - // databases := []string{"mysql", "sqlite"} - databases := []string{"postgresql"} - for _, database := range databases { - log.Infof("run test cases for database: %s", database) + database := os.Getenv("DATABASE_TYPE") + log.Infof("run test cases for database: %s", database) - result := 1 - switch database { - 
case "postgresql": - dao.PrepareTestForPostgresSQL() - default: - log.Fatalf("invalid database: %s", database) - } + result := 1 + switch { + case utils.IsDBPostgresql(): + dao.PrepareTestForPostgresSQL() + case utils.IsDBMysql(): + dao.PrepareTestForMySQL() + default: + log.Fatalf("invalid database: %s", database) + } - result = m.Run() + result = m.Run() - if result != 0 { - os.Exit(result) - } + if result != 0 { + os.Exit(result) } } diff --git a/src/jobservice/logger/getter/db_getter_test.go b/src/jobservice/logger/getter/db_getter_test.go index ef6963031728..ee00694adec0 100644 --- a/src/jobservice/logger/getter/db_getter_test.go +++ b/src/jobservice/logger/getter/db_getter_test.go @@ -7,31 +7,31 @@ import ( "github.com/stretchr/testify/require" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/logger/backend" "github.com/goharbor/harbor/src/jobservice/logger/sweeper" "github.com/goharbor/harbor/src/lib/log" ) func TestMain(m *testing.M) { - databases := []string{"postgresql"} - for _, database := range databases { - log.Infof("run test cases for database: %s", database) - - result := 1 - switch database { - case "postgresql": - dao.PrepareTestForPostgresSQL() - default: - log.Fatalf("invalid database: %s", database) - } + database := os.Getenv("DATABASE_TYPE") + log.Infof("run test cases for database: %s", database) + + result := 1 + switch { + case utils.IsDBPostgresql(): + dao.PrepareTestForPostgresSQL() + case utils.IsDBMysql(): + dao.PrepareTestForMySQL() + default: + log.Fatalf("invalid database: %s", database) + } - result = m.Run() + result = m.Run() - if result != 0 { - os.Exit(result) - } + if result != 0 { + os.Exit(result) } - } // TestDBGetter diff --git a/src/jobservice/logger/sweeper/db_sweeper_test.go b/src/jobservice/logger/sweeper/db_sweeper_test.go index a5d77ea88364..d4833b9fb693 100644 --- a/src/jobservice/logger/sweeper/db_sweeper_test.go +++ 
b/src/jobservice/logger/sweeper/db_sweeper_test.go @@ -7,30 +7,30 @@ import ( "github.com/stretchr/testify/require" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/logger/backend" "github.com/goharbor/harbor/src/lib/log" ) func TestMain(m *testing.M) { - databases := []string{"postgresql"} - for _, database := range databases { - log.Infof("run test cases for database: %s", database) - - result := 1 - switch database { - case "postgresql": - dao.PrepareTestForPostgresSQL() - default: - log.Fatalf("invalid database: %s", database) - } - - result = m.Run() - - if result != 0 { - os.Exit(result) - } + database := os.Getenv("DATABASE_TYPE") + log.Infof("run test cases for database: %s", database) + + result := 1 + switch { + case utils.IsDBPostgresql(): + dao.PrepareTestForPostgresSQL() + case utils.IsDBMysql(): + dao.PrepareTestForMySQL() + default: + log.Fatalf("invalid database: %s", database) } + result = m.Run() + + if result != 0 { + os.Exit(result) + } } // TestDBGetter diff --git a/src/jobservice/runner/redis_test.go b/src/jobservice/runner/redis_test.go index 21e79538a1b3..9aa5ed76ee91 100644 --- a/src/jobservice/runner/redis_test.go +++ b/src/jobservice/runner/redis_test.go @@ -51,7 +51,7 @@ type RedisRunnerTestSuite struct { // TestRedisRunnerTestSuite is entry of go test func TestRedisRunnerTestSuite(t *testing.T) { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() suite.Run(t, new(RedisRunnerTestSuite)) } diff --git a/src/jobservice/runtime/bootstrap_test.go b/src/jobservice/runtime/bootstrap_test.go index 643fd5451a71..1684e47d9da9 100644 --- a/src/jobservice/runtime/bootstrap_test.go +++ b/src/jobservice/runtime/bootstrap_test.go @@ -44,7 +44,7 @@ type BootStrapTestSuite struct { // SetupSuite prepares test suite func (suite *BootStrapTestSuite) SetupSuite() { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() libcfg.DefaultCfgManager = 
common.InMemoryCfgManager diff --git a/src/jobservice/sync/schedule_test.go b/src/jobservice/sync/schedule_test.go index b534955b5c86..a288ec31007b 100644 --- a/src/jobservice/sync/schedule_test.go +++ b/src/jobservice/sync/schedule_test.go @@ -49,7 +49,7 @@ func TestWorker(t *testing.T) { func (suite *WorkerTestSuite) SetupSuite() { sysContext := context.TODO() - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() getPolicies := func() ([]*period.Policy, error) { return []*period.Policy{ diff --git a/src/jobservice/worker/cworker/c_worker_test.go b/src/jobservice/worker/cworker/c_worker_test.go index c1faa7acd23d..76a74ac2d713 100644 --- a/src/jobservice/worker/cworker/c_worker_test.go +++ b/src/jobservice/worker/cworker/c_worker_test.go @@ -54,7 +54,7 @@ type CWorkerTestSuite struct { func (suite *CWorkerTestSuite) SetupSuite() { suite.namespace = tests.GiveMeTestNamespace() suite.pool = tests.GiveMeRedisPool() - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() // Append node ID vCtx := context.WithValue(context.Background(), utils.NodeID, utils.GenerateNodeID()) diff --git a/src/lib/config/metadata/metadatalist.go b/src/lib/config/metadata/metadatalist.go index 1c2e7c34036a..050138fc586a 100644 --- a/src/lib/config/metadata/metadatalist.go +++ b/src/lib/config/metadata/metadatalist.go @@ -22,7 +22,7 @@ type Item struct { Scope string `json:"scope,omitempty"` // email, ldapbasic, ldapgroup, uaa settings, used to retieve configure items by group Group string `json:"group,omitempty"` - // environment key to retrieves this value when initialize, for example: POSTGRESQL_HOST, only used for system settings, for user settings no EnvKey + // environment key to retrieves this value when initialize, for example: DB_HOST, only used for system settings, for user settings no EnvKey EnvKey string `json:"environment_key,omitempty"` // The default string value for this key DefaultValue string `json:"default_value,omitempty"` @@ -106,14 +106,15 @@ var 
( {Name: common.NotaryURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "NOTARY_URL", DefaultValue: "http://notary-server:4443", ItemType: &StringType{}, Editable: false}, {Name: common.ScanAllPolicy, Scope: UserScope, Group: BasicGroup, EnvKey: "", DefaultValue: "", ItemType: &MapType{}, Editable: false, Description: `The policy to scan images`}, - {Name: common.PostGreSQLDatabase, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_DATABASE", DefaultValue: "registry", ItemType: &StringType{}, Editable: false}, - {Name: common.PostGreSQLHOST, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_HOST", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false}, - {Name: common.PostGreSQLPassword, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PASSWORD", DefaultValue: "root123", ItemType: &PasswordType{}, Editable: false}, - {Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false}, - {Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false}, - {Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false}, - {Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false}, - {Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false}, + {Name: common.DBDatabase, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_DATABASE", DefaultValue: "registry", ItemType: &StringType{}, Editable: false}, + {Name: common.DBHOST, Scope: SystemScope, Group: DatabaseGroup, EnvKey: 
"DB_HOST", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false}, + {Name: common.DBPassword, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_PASSWORD", DefaultValue: "root123", ItemType: &PasswordType{}, Editable: false}, + {Name: common.DBPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false}, + {Name: common.DBSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false}, + {Name: common.DBCollation, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_COLLATION", DefaultValue: "utf8mb4_general_ci", ItemType: &StringType{}, Editable: false}, + {Name: common.DBUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false}, + {Name: common.DBMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false}, + {Name: common.DBMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "DB_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false}, {Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false, Description: `Indicate who can create projects, it could be ''adminonly'' or ''everyone''.`}, {Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false, Description: `The flag to indicate whether Harbor is in readonly mode.`}, diff --git a/src/lib/config/systemconfig.go b/src/lib/config/systemconfig.go index b8d8eb7206a2..0e92a97f5451 100644 --- a/src/lib/config/systemconfig.go +++ b/src/lib/config/systemconfig.go @@ -38,6 +38,7 @@ import ( 
"github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/secret" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/encrypt" "github.com/goharbor/harbor/src/lib/log" ) @@ -273,17 +274,33 @@ func CacheExpireHours() int { func Database() (*models.Database, error) { database := &models.Database{} database.Type = DefaultMgr().Get(backgroundCtx, common.DatabaseType).GetString() - postgresql := &models.PostGreSQL{ - Host: DefaultMgr().Get(backgroundCtx, common.PostGreSQLHOST).GetString(), - Port: DefaultMgr().Get(backgroundCtx, common.PostGreSQLPort).GetInt(), - Username: DefaultMgr().Get(backgroundCtx, common.PostGreSQLUsername).GetString(), - Password: DefaultMgr().Get(backgroundCtx, common.PostGreSQLPassword).GetPassword(), - Database: DefaultMgr().Get(backgroundCtx, common.PostGreSQLDatabase).GetString(), - SSLMode: DefaultMgr().Get(backgroundCtx, common.PostGreSQLSSLMode).GetString(), - MaxIdleConns: DefaultMgr().Get(backgroundCtx, common.PostGreSQLMaxIdleConns).GetInt(), - MaxOpenConns: DefaultMgr().Get(backgroundCtx, common.PostGreSQLMaxOpenConns).GetInt(), + + switch { + case utils.IsDBPostgresql(database.Type): + postgresql := &models.PostGreSQL{ + Host: DefaultMgr().Get(backgroundCtx, common.DBHOST).GetString(), + Port: DefaultMgr().Get(backgroundCtx, common.DBPort).GetInt(), + Username: DefaultMgr().Get(backgroundCtx, common.DBUsername).GetString(), + Password: DefaultMgr().Get(backgroundCtx, common.DBPassword).GetPassword(), + Database: DefaultMgr().Get(backgroundCtx, common.DBDatabase).GetString(), + SSLMode: DefaultMgr().Get(backgroundCtx, common.DBSSLMode).GetString(), + MaxIdleConns: DefaultMgr().Get(backgroundCtx, common.DBMaxIdleConns).GetInt(), + MaxOpenConns: DefaultMgr().Get(backgroundCtx, common.DBMaxOpenConns).GetInt(), + } + database.PostGreSQL = postgresql + case utils.IsDBMysql(database.Type): + mysql := &models.MySQL{ + Host: 
DefaultMgr().Get(backgroundCtx, common.DBHOST).GetString(), + Port: DefaultMgr().Get(backgroundCtx, common.DBPort).GetInt(), + Username: DefaultMgr().Get(backgroundCtx, common.DBUsername).GetString(), + Password: DefaultMgr().Get(backgroundCtx, common.DBPassword).GetString(), + Database: DefaultMgr().Get(backgroundCtx, common.DBDatabase).GetString(), + Collation: DefaultMgr().Get(backgroundCtx, common.DBCollation).GetString(), + MaxIdleConns: DefaultMgr().Get(backgroundCtx, common.DBMaxIdleConns).GetInt(), + MaxOpenConns: DefaultMgr().Get(backgroundCtx, common.DBMaxOpenConns).GetInt(), + } + database.MySQL = mysql } - database.PostGreSQL = postgresql return database, nil } diff --git a/src/lib/orm/error.go b/src/lib/orm/error.go index 1c5cd8aa19c1..8141ba649943 100644 --- a/src/lib/orm/error.go +++ b/src/lib/orm/error.go @@ -16,6 +16,7 @@ package orm import ( "github.com/beego/beego/orm" + "github.com/go-sql-driver/mysql" "github.com/jackc/pgconn" "github.com/goharbor/harbor/src/lib/errors" @@ -91,6 +92,11 @@ func IsDuplicateKeyError(err error) bool { return true } + var mysqlErr *mysql.MySQLError + if errors.As(err, &mysqlErr) && mysqlErr.Number == 1062 { + return true + } + return false } @@ -100,5 +106,10 @@ func isViolatingForeignKeyConstraintError(err error) bool { return true } + var mysqlErr *mysql.MySQLError + if errors.As(err, &mysqlErr) && (mysqlErr.Number == 1451 || mysqlErr.Number == 1452) { + return true + } + return false } diff --git a/src/lib/orm/test/orm_test.go b/src/lib/orm/test/orm_test.go index ce36d5ed305a..9dafa6b1ca82 100644 --- a/src/lib/orm/test/orm_test.go +++ b/src/lib/orm/test/orm_test.go @@ -105,7 +105,7 @@ type OrmSuite struct { // SetupSuite ... 
func (suite *OrmSuite) SetupSuite() { RegisterModel(&Foo{}) - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() o, err := FromContext(Context()) if err != nil { diff --git a/src/migration/migration.go b/src/migration/migration.go index 396dcf5435ec..d2197fb5700d 100644 --- a/src/migration/migration.go +++ b/src/migration/migration.go @@ -24,6 +24,7 @@ import ( "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/lib/orm" ) @@ -36,8 +37,17 @@ const ( // MigrateDB upgrades DB schema and do necessary transformation of the data in DB func MigrateDB(database *models.Database) error { + var migrator *migrate.Migrate + var err error + // check the database schema version - migrator, err := dao.NewMigrator(database.PostGreSQL) + switch { + case utils.IsDBPostgresql(database.Type): + migrator, err = dao.NewMigrator(database.PostGreSQL) + case utils.IsDBMysql(database.Type): + migrator, err = dao.NewMysqlMigrator(database.MySQL) + } + if err != nil { return err } diff --git a/src/pkg/accessory/dao/dao_test.go b/src/pkg/accessory/dao/dao_test.go index aa1b02750f49..628085c31ec9 100644 --- a/src/pkg/accessory/dao/dao_test.go +++ b/src/pkg/accessory/dao/dao_test.go @@ -42,7 +42,7 @@ type daoTestSuite struct { func (d *daoTestSuite) SetupSuite() { d.dao = New() - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) d.ClearTables = []string{"artifact", "artifact_accessory"} diff --git a/src/pkg/artifact/dao/dao_test.go b/src/pkg/artifact/dao/dao_test.go index e92f5bd3a5c3..04060e50f943 100644 --- a/src/pkg/artifact/dao/dao_test.go +++ b/src/pkg/artifact/dao/dao_test.go @@ -47,7 +47,7 @@ type daoTestSuite struct { func (d *daoTestSuite) SetupSuite() { d.dao = New() d.tagDAO = tagdao.New() - common_dao.PrepareTestForPostgresSQL() + 
common_dao.PrepareTestForDB() d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) } diff --git a/src/pkg/artifactrash/dao/dao_test.go b/src/pkg/artifactrash/dao/dao_test.go index 6e53ebb2d213..c46082f6bfa8 100644 --- a/src/pkg/artifactrash/dao/dao_test.go +++ b/src/pkg/artifactrash/dao/dao_test.go @@ -9,6 +9,7 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/suite" + "github.com/goharbor/harbor/src/common/utils" errors "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/orm" artdao "github.com/goharbor/harbor/src/pkg/artifact/dao" @@ -189,5 +190,8 @@ func (d *daoTestSuite) TestFlush() { } func TestDaoTestSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &daoTestSuite{}) } diff --git a/src/pkg/artifactrash/dao/myql_dao_test.go b/src/pkg/artifactrash/dao/myql_dao_test.go new file mode 100644 index 000000000000..ae4fd790b114 --- /dev/null +++ b/src/pkg/artifactrash/dao/myql_dao_test.go @@ -0,0 +1,196 @@ +package dao + +import ( + "context" + beegoorm "github.com/beego/beego/orm" + "github.com/goharbor/harbor/src/common/utils" + errors "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/orm" + artdao "github.com/goharbor/harbor/src/pkg/artifact/dao" + "github.com/goharbor/harbor/src/pkg/artifactrash/model" + htesting "github.com/goharbor/harbor/src/testing" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +type mysqlDaoTestSuite struct { + dao DAO + afDao artdao.DAO + id int64 + ctx context.Context + digest string + htesting.Suite +} + +func (d *mysqlDaoTestSuite) SetupSuite() { + d.Suite.SetupSuite() + d.Suite.ClearTables = []string{"artifact", "artifact_trash"} + d.dao = NewMysqlDao() + d.afDao = artdao.New() + d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) + + d.digest = d.Suite.DigestString() + art1 := &artdao.Artifact{ + Type: "image", + ManifestMediaType: 
v1.MediaTypeImageManifest, + ProjectID: 10, + RepositoryID: 10, + Digest: d.digest, + } + id, err := d.afDao.Create(d.ctx, art1) + d.Require().Nil(err) + err = d.afDao.Delete(d.ctx, id) + d.Require().Nil(err) + art2 := &artdao.Artifact{ + Type: "image", + ManifestMediaType: v1.MediaTypeImageManifest, + ProjectID: 10, + RepositoryID: 10, + Digest: d.Suite.DigestString(), + } + _, err = d.afDao.Create(d.ctx, art2) + d.Require().Nil(err) + + aft := &model.ArtifactTrash{ + ManifestMediaType: v1.MediaTypeImageManifest, + RepositoryName: "test/hello-world", + Digest: d.digest, + } + id, err = d.dao.Create(d.ctx, aft) + d.Require().Nil(err) + d.id = art2.ID +} + +func (d *mysqlDaoTestSuite) TearDownSuite() { + d.afDao.Delete(d.ctx, d.id) + d.afDao.Delete(d.ctx, 12) +} + +func (d *mysqlDaoTestSuite) TestCreate() { + // conflict + aft := &model.ArtifactTrash{ + ManifestMediaType: v1.MediaTypeImageManifest, + RepositoryName: "test/hello-world", + Digest: d.digest, + } + + _, err := d.dao.Create(d.ctx, aft) + d.Require().NotNil(err) + d.True(errors.IsErr(err, errors.ConflictCode)) +} + +func (d *mysqlDaoTestSuite) TestDelete() { + err := d.dao.Delete(d.ctx, 100021) + d.Require().NotNil(err) + var e *errors.Error + d.Require().True(errors.As(err, &e)) + d.Equal(errors.NotFoundCode, e.Code) +} + +func (d *mysqlDaoTestSuite) TestFilter() { + afs, err := d.dao.Filter(d.ctx, time.Now().Add(time.Second*10)) + d.Require().Nil(err) + d.Require().Equal(afs[0].Digest, d.digest) + + // clean it in GC + err = d.dao.Flush(d.ctx, time.Now().Add(time.Second*10)) + d.Require().Nil(err) + + // push hello-world to projecta + digest := d.Suite.DigestString() + art1 := &artdao.Artifact{ + Type: "image", + ManifestMediaType: v1.MediaTypeImageManifest, + ProjectID: 11, + RepositoryID: 11, + RepositoryName: "projectA/hello-world", + Digest: digest, + } + _, err = d.afDao.Create(d.ctx, art1) + d.Require().Nil(err) + + // push hello-world to projectb + art2 := &artdao.Artifact{ + Type: "image", + 
ManifestMediaType: v1.MediaTypeImageManifest, + ProjectID: 12, + RepositoryID: 12, + RepositoryName: "projectB/hello-world", + Digest: digest, + } + _, err = d.afDao.Create(d.ctx, art2) + d.Require().Nil(err) + + // remove hello-world to projectA + err = d.afDao.Delete(d.ctx, art1.ID) + d.Require().Nil(err) + + aft2 := &model.ArtifactTrash{ + ManifestMediaType: v1.MediaTypeImageManifest, + RepositoryName: "projectA/hello-world", + Digest: digest, + } + _, err = d.dao.Create(d.ctx, aft2) + d.Require().Nil(err) + + // filter results should contain projectA hello-world + afs1, err := d.dao.Filter(d.ctx, time.Now().Add(time.Second*10)) + d.Require().Nil(err) + d.Require().Equal(afs1[0].Digest, digest) + d.Require().Equal(afs1[0].RepositoryName, "projectA/hello-world") + + afs1, err = d.dao.Filter(d.ctx, time.Now().Add(-1*time.Hour)) + d.Require().Nil(err) + d.Require().Equal(0, len(afs1)) + + // push hello-world again to projecta + art3 := &artdao.Artifact{ + Type: "image", + ManifestMediaType: v1.MediaTypeImageManifest, + ProjectID: 11, + RepositoryID: 13, + RepositoryName: "projectA/hello-world", + Digest: digest, + } + _, err = d.afDao.Create(d.ctx, art3) + d.Require().Nil(err) + + // filter results should contain nothing + afs2, err := d.dao.Filter(d.ctx, time.Now()) + d.Require().Nil(err) + d.Require().Equal(0, len(afs2)) + +} + +func (d *mysqlDaoTestSuite) TestFlush() { + _, err := d.dao.Create(d.ctx, &model.ArtifactTrash{ + ManifestMediaType: v1.MediaTypeImageManifest, + RepositoryName: "hello-world", + Digest: d.Suite.DigestString(), + }) + d.Require().Nil(err) + _, err = d.dao.Create(d.ctx, &model.ArtifactTrash{ + ManifestMediaType: v1.MediaTypeImageManifest, + RepositoryName: "hello-world2", + Digest: d.Suite.DigestString(), + }) + d.Require().Nil(err) + _, err = d.dao.Create(d.ctx, &model.ArtifactTrash{ + ManifestMediaType: v1.MediaTypeImageManifest, + RepositoryName: "hello-world3", + Digest: d.Suite.DigestString(), + }) + d.Require().Nil(err) + + err = 
d.dao.Flush(d.ctx, time.Now()) + d.Require().Nil(err) +} + +func TestMysqlDaoTestSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, &mysqlDaoTestSuite{}) +} diff --git a/src/pkg/artifactrash/dao/mysql_dao.go b/src/pkg/artifactrash/dao/mysql_dao.go new file mode 100644 index 000000000000..ec97446aca8d --- /dev/null +++ b/src/pkg/artifactrash/dao/mysql_dao.go @@ -0,0 +1,54 @@ +package dao + +import ( + "context" + "fmt" + "time" + + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg/artifactrash/model" +) + +// New returns an instance of the default DAO +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +type mysqlDao struct { + *dao +} + +// Filter the results are: all of records in artifact_trash excludes the records in artifact with same repo and digest. +func (d *mysqlDao) Filter(ctx context.Context, cutOff time.Time) (arts []model.ArtifactTrash, err error) { + var deletedAfs []model.ArtifactTrash + ormer, err := orm.FromContext(ctx) + if err != nil { + return deletedAfs, err + } + + sql := fmt.Sprintf(`SELECT aft.* FROM artifact_trash AS aft LEFT JOIN artifact af ON (aft.repository_name=af.repository_name AND aft.digest=af.digest) WHERE (af.digest IS NULL AND af.repository_name IS NULL) AND aft.creation_time <= FROM_UNIXTIME('%f')`, float64(cutOff.UnixNano())/float64((time.Second))) + + _, err = ormer.Raw(sql).QueryRows(&deletedAfs) + if err != nil { + return deletedAfs, err + } + + return deletedAfs, nil +} + +// Flush delete all of items beside the one in the time window. 
+func (d *mysqlDao) Flush(ctx context.Context, cutOff time.Time) (err error) { + ormer, err := orm.FromContext(ctx) + if err != nil { + return err + } + sql := fmt.Sprintf(`DELETE FROM artifact_trash where creation_time <= FROM_UNIXTIME('%f')`, float64(cutOff.UnixNano())/float64((time.Second))) + if err != nil { + return err + } + _, err = ormer.Raw(sql).Exec() + if err != nil { + return err + } + return nil +} diff --git a/src/pkg/artifactrash/manager.go b/src/pkg/artifactrash/manager.go index 7707b5ba9a66..9829f1202919 100644 --- a/src/pkg/artifactrash/manager.go +++ b/src/pkg/artifactrash/manager.go @@ -18,6 +18,7 @@ import ( "context" "time" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/pkg/artifactrash/dao" "github.com/goharbor/harbor/src/pkg/artifactrash/model" ) @@ -43,9 +44,14 @@ type Manager interface { // NewManager returns an instance of the default manager func NewManager() Manager { - return &manager{ - dao.New(), + switch { + case utils.IsDBPostgresql(): + return &manager{dao: dao.New()} + case utils.IsDBMysql(): + return &manager{dao: dao.NewMysqlDao()} } + + return &manager{dao: dao.New()} } var _ Manager = &manager{} diff --git a/src/pkg/audit/dao/dao_test.go b/src/pkg/audit/dao/dao_test.go index 95d8f3641e78..4fbea9cf39d4 100644 --- a/src/pkg/audit/dao/dao_test.go +++ b/src/pkg/audit/dao/dao_test.go @@ -16,6 +16,7 @@ package dao import ( "context" + "github.com/goharbor/harbor/src/common/utils" "reflect" "testing" "time" @@ -39,7 +40,7 @@ type daoTestSuite struct { func (d *daoTestSuite) SetupSuite() { d.dao = New() - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) artifactID, err := d.dao.Create(d.ctx, &model.AuditLog{ Operation: "Create", @@ -176,6 +177,9 @@ func (d *daoTestSuite) TestPurge() { } func TestDaoTestSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &daoTestSuite{}) } diff --git 
a/src/pkg/audit/dao/mysql_dao.go b/src/pkg/audit/dao/mysql_dao.go new file mode 100644 index 000000000000..b170526e0b22 --- /dev/null +++ b/src/pkg/audit/dao/mysql_dao.go @@ -0,0 +1,73 @@ +package dao + +import ( + "context" + "strings" + + beegorm "github.com/beego/beego/orm" + + "github.com/goharbor/harbor/src/lib/log" + "github.com/goharbor/harbor/src/lib/orm" +) + +// NewMysqlDao ... +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +type mysqlDao struct { + *dao +} + +// Purge delete expired audit log +func (*mysqlDao) Purge(ctx context.Context, retentionHour int, includeOperations []string, dryRun bool) (int64, error) { + ormer, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + if dryRun { + return dryRunPurgeForMysql(ormer, retentionHour, includeOperations) + } + sql := "DELETE FROM audit_log WHERE op_time < date_sub(CURRENT_TIMESTAMP(6), interval ? * 1 hour) " + filterOps := permitOps(includeOperations) + if len(filterOps) == 0 { + log.Infof("no operation selected, skip to purge audit log") + return 0, nil + } + sql = sql + "AND lower(operation) IN ('" + strings.Join(filterOps, "','") + "')" + log.Debugf("the sql is %v", sql) + + r, err := ormer.Raw(sql, retentionHour).Exec() + if err != nil { + log.Errorf("failed to purge audit log, error %v", err) + return 0, err + } + delRows, rErr := r.RowsAffected() + if rErr != nil { + log.Errorf("failed to purge audit log, error %v", rErr) + return 0, rErr + } + log.Infof("purged %d audit logs in the database", delRows) + + return delRows, err +} + +func dryRunPurgeForMysql(ormer beegorm.Ormer, retentionHour int, includeOperations []string) (int64, error) { + sql := "SELECT count(1) cnt FROM audit_log WHERE op_time < date_sub(CURRENT_TIMESTAMP(6), interval ? 
* 1 hour) " + filterOps := permitOps(includeOperations) + if len(filterOps) == 0 { + log.Infof("[DRYRUN]no operation selected, skip to purge audit log") + return 0, nil + } + sql = sql + "AND lower(operation) IN ('" + strings.Join(filterOps, "','") + "')" + log.Debugf("the sql is %v", sql) + + var cnt int64 + err := ormer.Raw(sql, retentionHour).QueryRow(&cnt) + if err != nil { + log.Errorf("failed to dry run purge audit log, error %v", err) + return 0, err + } + log.Infof("[DRYRUN]purged %d audit logs in the database", cnt) + return cnt, nil +} diff --git a/src/pkg/audit/dao/mysql_dao_test.go b/src/pkg/audit/dao/mysql_dao_test.go new file mode 100644 index 000000000000..8fe86f5880dc --- /dev/null +++ b/src/pkg/audit/dao/mysql_dao_test.go @@ -0,0 +1,225 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "testing" + "time" + + beegoorm "github.com/beego/beego/orm" + "github.com/stretchr/testify/suite" + + common_dao "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/audit/model" +) + +type mysqlDaoTestSuite struct { + suite.Suite + dao DAO + auditID int64 + ctx context.Context +} + +func (d *mysqlDaoTestSuite) SetupSuite() { + d.dao = NewMysqlDao() + common_dao.PrepareTestForDB() + d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) + artifactID, err := d.dao.Create(d.ctx, &model.AuditLog{ + Operation: "Create", + ResourceType: "artifact", + Resource: "library/test-audit", + Username: "admin", + OpTime: time.Now().AddDate(0, 0, -8), + }) + d.Require().Nil(err) + d.auditID = artifactID +} + +func (d *mysqlDaoTestSuite) TearDownSuite() { + ormer, err := orm.FromContext(d.ctx) + d.Require().Nil(err) + _, err = ormer.Raw("delete from audit_log").Exec() + d.Require().Nil(err) + +} + +func (d *mysqlDaoTestSuite) TestCount() { + total, err := d.dao.Count(d.ctx, nil) + d.Require().Nil(err) + d.True(total > 0) + total, err = d.dao.Count(d.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "Resource": "library/test-audit", + }, + }) + d.Require().Nil(err) + d.Equal(int64(1), total) +} + +func (d *mysqlDaoTestSuite) TestList() { + // nil query + audits, err := d.dao.List(d.ctx, nil) + d.Require().Nil(err) + + // query by repository ID and name + audits, err = d.dao.List(d.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "Resource": "library/test-audit", + }, + }) + d.Require().Nil(err) + d.Require().Equal(1, len(audits)) + d.Equal("admin", audits[0].Username) +} + +func (d *mysqlDaoTestSuite) TestGet() { + // get the non-exist tag + _, err := d.dao.Get(d.ctx, 10000) + d.Require().NotNil(err) + d.True(errors.IsErr(err, 
errors.NotFoundCode)) + + audit, err := d.dao.Get(d.ctx, d.auditID) + d.Require().Nil(err) + d.Require().NotNil(audit) + d.Equal(d.auditID, audit.ID) +} + +func (d *mysqlDaoTestSuite) TestListPIDs() { + // get the non-exist tag + id1, err := d.dao.Create(d.ctx, &model.AuditLog{ + Operation: "Create", + ResourceType: "artifact", + Resource: "library/hello-world", + Username: "admin", + ProjectID: 11, + }) + d.Require().Nil(err) + id2, err := d.dao.Create(d.ctx, &model.AuditLog{ + Operation: "Create", + ResourceType: "artifact", + Resource: "library/hello-world", + Username: "admin", + ProjectID: 12, + }) + d.Require().Nil(err) + id3, err := d.dao.Create(d.ctx, &model.AuditLog{ + Operation: "Delete", + ResourceType: "artifact", + Resource: "library/hello-world", + Username: "admin", + ProjectID: 13, + }) + d.Require().Nil(err) + + // query by repository ID and name + ol := &q.OrList{} + for _, item := range []int64{11, 12, 13} { + ol.Values = append(ol.Values, item) + } + audits, err := d.dao.List(d.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "ProjectID": ol, + }, + }) + d.Require().Nil(err) + d.Require().Equal(3, len(audits)) + d.dao.Delete(d.ctx, id1) + d.dao.Delete(d.ctx, id2) + d.dao.Delete(d.ctx, id3) +} + +func (d *mysqlDaoTestSuite) TestCreate() { + // conflict + audit := &model.AuditLog{ + Operation: "Create", + ResourceType: "tag", + Resource: "library/hello-world", + Username: "admin", + } + _, err := d.dao.Create(d.ctx, audit) + d.Require().Nil(err) +} + +func (d *mysqlDaoTestSuite) TestDelete() { + err := d.dao.Delete(d.ctx, 10000) + d.Require().NotNil(err) + var e *errors.Error + d.Require().True(errors.As(err, &e)) + d.Equal(errors.NotFoundCode, e.Code) +} + +func (d *mysqlDaoTestSuite) TestPurge() { + result, err := d.dao.Purge(d.ctx, 24*30, []string{"Create"}, true) + d.Require().Nil(err) + d.Require().Equal(int64(0), result) + result1, err := d.dao.Purge(d.ctx, 24*7, []string{"Create"}, true) + d.Require().Nil(err) + 
d.Require().Equal(int64(1), result1) + +} + +func TestMysqlDaoTestSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, &mysqlDaoTestSuite{}) +} + +func (d *mysqlDaoTestSuite) Test_dao_Purge() { + + d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) + _, err := d.dao.Create(d.ctx, &model.AuditLog{ + Operation: "Delete", + ResourceType: "artifact", + Resource: "library/test-audit", + Username: "admin", + OpTime: time.Now().AddDate(0, 0, -8), + }) + d.Require().Nil(err) + + type args struct { + ctx context.Context + retentionHour int + includeOperations []string + dryRun bool + } + tests := []struct { + name string + args args + want int64 + wantErr bool + }{ + {"dry run 1 month", args{d.ctx, 24 * 30, []string{"create", "delete", "pull"}, true}, int64(0), false}, + {"dry run 1 week", args{d.ctx, 24 * 7, []string{"create", "delete", "pull"}, true}, int64(2), false}, + {"dry run delete run 1 week", args{d.ctx, 24 * 7, []string{"Delete"}, true}, int64(1), false}, + {"delete run 1 week", args{d.ctx, 24 * 7, []string{"Delete"}, false}, int64(1), false}, + } + for _, tt := range tests { + d.Run(tt.name, func() { + got, err := d.dao.Purge(tt.args.ctx, tt.args.retentionHour, tt.args.includeOperations, tt.args.dryRun) + if tt.wantErr { + d.Require().NotNil(err) + } else { + d.Require().Nil(err) + } + d.Require().Equal(tt.want, got) + }) + } +} diff --git a/src/pkg/audit/manager.go b/src/pkg/audit/manager.go index b81a89bfe094..09b14db3bae9 100644 --- a/src/pkg/audit/manager.go +++ b/src/pkg/audit/manager.go @@ -17,6 +17,7 @@ package audit import ( "context" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/config" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/audit/dao" @@ -44,9 +45,13 @@ type Manager interface { // New returns a default implementation of Manager func New() Manager { - return &manager{ - dao: dao.New(), + switch { + case utils.IsDBPostgresql(): + return &manager{dao: dao.New()} + 
case utils.IsDBMysql(): + return &manager{dao: dao.NewMysqlDao()} } + return &manager{dao: dao.New()} } type manager struct { diff --git a/src/pkg/authproxy/http_test.go b/src/pkg/authproxy/http_test.go index f307edddee49..67b19a585e06 100644 --- a/src/pkg/authproxy/http_test.go +++ b/src/pkg/authproxy/http_test.go @@ -15,7 +15,7 @@ import ( ) func TestMain(m *testing.M) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() result := m.Run() if result != 0 { os.Exit(result) diff --git a/src/pkg/blob/dao/dao_test.go b/src/pkg/blob/dao/dao_test.go index 00c63e94c605..7b6471267a8f 100644 --- a/src/pkg/blob/dao/dao_test.go +++ b/src/pkg/blob/dao/dao_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/suite" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/blob/models" @@ -32,8 +33,11 @@ type DaoTestSuite struct { } func (suite *DaoTestSuite) SetupSuite() { + if !utils.IsDBPostgresql() { + return + } suite.Suite.SetupSuite() - suite.Suite.ClearTables = []string{"blob", "artifact_blob", "project_blob"} + suite.Suite.ClearTables = []string{`blob`, "artifact_blob", "project_blob"} suite.dao = New() } @@ -485,5 +489,8 @@ func (suite *DaoTestSuite) GetBlobsByArtDigest() { } func TestDaoTestSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &DaoTestSuite{}) } diff --git a/src/pkg/blob/dao/mysql_dao.go b/src/pkg/blob/dao/mysql_dao.go new file mode 100644 index 000000000000..2c734fbce90f --- /dev/null +++ b/src/pkg/blob/dao/mysql_dao.go @@ -0,0 +1,171 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "context" + "fmt" + "time" + + "github.com/docker/distribution/manifest/schema2" + + "github.com/goharbor/harbor/src/lib/log" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg/blob/models" +) + +// NewMysqlDao returns an instance of the mysql DAO +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +type mysqlDao struct { + *dao +} + +func (d *mysqlDao) UpdateBlobStatus(ctx context.Context, blob *models.Blob) (int64, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return -1, err + } + + var sql string + if blob.Status == models.StatusNone { + sql = "UPDATE `blob` SET version = version + 1, update_time = ?, status = ? where id = ? AND version >= ? AND status IN (%s)" + } else { + sql = "UPDATE `blob` SET version = version + 1, update_time = ?, status = ? where id = ? AND version = ? AND status IN (%s)" + } + + var newVersion int64 + params := []interface{}{time.Now(), blob.Status, blob.ID, blob.Version} + stats := models.StatusMap[blob.Status] + for _, stat := range stats { + params = append(params, stat) + } + + if res, err := o.Raw(fmt.Sprintf(sql, orm.ParamPlaceholderForIn(len(models.StatusMap[blob.Status]))), params...).Exec(); err != nil { + return -1, err + } else if row, err := res.RowsAffected(); err == nil && row == 0 { + log.Warningf("no blob is updated according to query condition, id: %d, status_in, %v", blob.ID, models.StatusMap[blob.Status]) + return 0, nil + } + + selectVersionSQL := "SELECT version FROM `blob` WHERE id = ?" 
+ if err := o.Raw(selectVersionSQL, blob.ID).QueryRow(&newVersion); err != nil { + return 0, nil + } + + blob.Version = newVersion + return 1, nil +} + +func (d *mysqlDao) SumBlobsSizeByProject(ctx context.Context, projectID int64, excludeForeignLayer bool) (int64, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + + params := []interface{}{projectID} + sql := "SELECT SUM(size) FROM `blob` JOIN project_blob ON `blob`.id = project_blob.blob_id AND project_id = ?" + if excludeForeignLayer { + foreignLayerTypes := []interface{}{ + schema2.MediaTypeForeignLayer, + } + + sql = fmt.Sprintf("%s AND content_type NOT IN (%s)", sql, orm.ParamPlaceholderForIn(len(foreignLayerTypes))) + params = append(params, foreignLayerTypes...) + } + + var totalSize int64 + if err := o.Raw(sql, params...).QueryRow(&totalSize); err != nil { + return 0, err + } + + return totalSize, nil +} + +// SumBlobsSize returns sum size of all blobs skip foreign blobs when `excludeForeignLayer` is true +func (d *mysqlDao) SumBlobsSize(ctx context.Context, excludeForeignLayer bool) (int64, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + + params := []interface{}{} + + sql := "SELECT SUM(size) FROM `blob`" + if excludeForeignLayer { + foreignLayerTypes := []interface{}{ + schema2.MediaTypeForeignLayer, + } + sql = fmt.Sprintf("%s Where content_type NOT IN (%s)", sql, orm.ParamPlaceholderForIn(len(foreignLayerTypes))) + params = append(params, foreignLayerTypes...) + } + + var totalSize int64 + if err := o.Raw(sql, params...).QueryRow(&totalSize); err != nil { + return 0, err + } + + return totalSize, nil +} + +func (d *mysqlDao) ExistProjectBlob(ctx context.Context, projectID int64, blobDigest string) (bool, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return false, err + } + + sql := "SELECT COUNT(*) FROM project_blob JOIN `blob` ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?" 
+ + var count int64 + if err := o.Raw(sql, projectID, blobDigest).QueryRow(&count); err != nil { + return false, err + } + + return count > 0, nil +} + +func (d *mysqlDao) GetBlobsNotRefedByProjectBlob(ctx context.Context, timeWindowHours int64) ([]*models.Blob, error) { + var noneRefed []*models.Blob + ormer, err := orm.FromContext(ctx) + if err != nil { + return noneRefed, err + } + + sql := fmt.Sprintf("SELECT b.id, b.digest, b.content_type, b.status, b.version, b.size FROM `blob` AS b LEFT JOIN project_blob pb ON b.id = pb.blob_id WHERE pb.id IS NULL AND b.update_time <= date_sub(CURRENT_TIMESTAMP(6), interval %d hour);", timeWindowHours) + _, err = ormer.Raw(sql).QueryRows(&noneRefed) + if err != nil { + return noneRefed, err + } + + return noneRefed, nil +} + +func (d *mysqlDao) GetBlobsByArtDigest(ctx context.Context, digest string) ([]*models.Blob, error) { + var blobs []*models.Blob + ormer, err := orm.FromContext(ctx) + if err != nil { + return blobs, err + } + + sql := "SELECT b.id, b.digest, b.content_type, b.status, b.version, b.size FROM artifact_blob AS ab LEFT JOIN `blob` b ON ab.digest_blob = b.digest WHERE ab.digest_af = ?" + _, err = ormer.Raw(sql, digest).QueryRows(&blobs) + if err != nil { + return blobs, err + } + + return blobs, nil +} diff --git a/src/pkg/blob/dao/mysql_dao_test.go b/src/pkg/blob/dao/mysql_dao_test.go new file mode 100644 index 000000000000..314629c7703c --- /dev/null +++ b/src/pkg/blob/dao/mysql_dao_test.go @@ -0,0 +1,493 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/blob/models" + htesting "github.com/goharbor/harbor/src/testing" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO +} + +func (suite *MysqlDaoTestSuite) SetupSuite() { + suite.Suite.SetupSuite() + suite.Suite.ClearTables = []string{"blob", "artifact_blob", "project_blob"} + suite.dao = NewMysqlDao() +} + +func (suite *MysqlDaoTestSuite) TestCreateArtifactAndBlob() { + ctx := suite.Context() + + artifactDigest := suite.DigestString() + blobDigest := suite.DigestString() + + _, err := suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.Nil(err) + + _, err = suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestGetArtifactAndBlob() { + ctx := suite.Context() + + artifactDigest := suite.DigestString() + blobDigest := suite.DigestString() + + md, err := suite.dao.GetArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.IsNotFoundErr(err) + suite.Nil(md) + + _, err = suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest) + suite.Nil(err) + + md, err = suite.dao.GetArtifactAndBlob(ctx, artifactDigest, blobDigest) + if suite.Nil(err) { + suite.Equal(artifactDigest, md.DigestAF) + suite.Equal(blobDigest, md.DigestBlob) + } +} + +func (suite *MysqlDaoTestSuite) TestDeleteArtifactAndBlobByArtifact() { + ctx := suite.Context() + + artifactDigest := suite.DigestString() + blobDigest1 := suite.DigestString() + blobDigest2 := suite.DigestString() + + _, err := suite.dao.CreateArtifactAndBlob(ctx, artifactDigest, blobDigest1) + suite.Nil(err) + + _, err = suite.dao.CreateArtifactAndBlob(ctx, 
artifactDigest, blobDigest2) + suite.Nil(err) + + digests, err := suite.dao.GetAssociatedBlobDigestsForArtifact(ctx, artifactDigest) + suite.Nil(err) + suite.Len(digests, 2) + + suite.Nil(suite.dao.DeleteArtifactAndBlobByArtifact(ctx, artifactDigest)) + + digests, err = suite.dao.GetAssociatedBlobDigestsForArtifact(ctx, artifactDigest) + suite.Nil(err) + suite.Len(digests, 0) +} + +func (suite *MysqlDaoTestSuite) TestGetAssociatedBlobDigestsForArtifact() { + +} + +func (suite *MysqlDaoTestSuite) TestCreateBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + + _, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + + _, err = suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestGetBlobByDigest() { + ctx := suite.Context() + + digest := suite.DigestString() + + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + suite.IsNotFoundErr(err) + suite.Nil(blob) + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + + blob, err = suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(digest, blob.Digest) + suite.Equal(models.StatusNone, blob.Status) + } +} + +func (suite *MysqlDaoTestSuite) TestUpdateBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(0), blob.Size) + } + + blob.Size = 100 + if suite.Nil(suite.dao.UpdateBlob(ctx, blob)) { + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(100), blob.Size) + suite.Equal(int64(0), blob.Version) + } + } + + blob.Status = "deleting" + suite.Nil(suite.dao.UpdateBlob(ctx, blob), "cannot be updated.") + blob, err = suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(0), blob.Version) + suite.Equal(models.StatusNone, blob.Status) + } +} + +func (suite 
*MysqlDaoTestSuite) TestUpdateBlobStatus() { + ctx := suite.Context() + + digest := suite.DigestString() + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(0), blob.Size) + } + + // StatusNone cannot be updated to StatusDeleting directly + blob.Status = models.StatusDeleting + count, err := suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(0), count) + + blob.Status = models.StatusDelete + count, err = suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(1), count) + + blob.Status = models.StatusDeleting + count, err = suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(1), count) + + blob.Status = models.StatusDeleteFailed + count, err = suite.dao.UpdateBlobStatus(ctx, blob) + suite.Nil(err) + suite.Equal(int64(1), count) + + blob, err = suite.dao.GetBlobByDigest(ctx, digest) + if suite.Nil(err) { + suite.Equal(int64(3), blob.Version) + suite.Equal(models.StatusDeleteFailed, blob.Status) + } +} + +func (suite *MysqlDaoTestSuite) TestListBlobs() { + ctx := suite.Context() + + digest1 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest1}) + + digest2 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest2}) + + ol := q.OrList{ + Values: []interface{}{ + digest1, + }, + } + blobs, err := suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"digest": &ol})) + if suite.Nil(err) { + suite.Len(blobs, 1) + } + + ol = q.OrList{ + Values: []interface{}{ + digest1, + digest2, + }, + } + blobs, err = suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"digest": &ol})) + if suite.Nil(err) { + suite.Len(blobs, 2) + } + + rg := q.Range{ + Max: time.Now().Add(-time.Hour).Format(time.RFC3339), + } + blobs, err = suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"update_time": &rg})) + if suite.Nil(err) { + suite.Len(blobs, 0) + } + + digest3 := suite.DigestString() + 
suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest3, UpdateTime: time.Now().Add(-time.Hour * 2)}) + blobs, err = suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"update_time": &rg})) + if suite.Nil(err) { + suite.Len(blobs, 1) + } + +} + +func (suite *MysqlDaoTestSuite) TestListBlobsAssociatedWithArtifact() { + +} + +func (suite *MysqlDaoTestSuite) TestSumBlobsSize() { + ctx := suite.Context() + + size1, err := suite.dao.SumBlobsSize(ctx, true) + suite.Nil(err) + + digest1 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest1, Size: 999}) + + size2, err := suite.dao.SumBlobsSize(ctx, true) + suite.Nil(err) + + suite.Equal(int64(999), size2-size1) +} + +func (suite *MysqlDaoTestSuite) TestFindBlobsShouldUnassociatedWithProject() { + ctx := suite.Context() + + suite.WithProject(func(projectID int64, projectName string) { + artifact1 := suite.DigestString() + artifact2 := suite.DigestString() + + sql := "INSERT INTO artifact (`type`, media_type, manifest_media_type, digest, project_id, repository_id, repository_name) VALUES ('image', 'media_type', 'manifest_media_type', ?, ?, ?, 'library/hello-world')" + suite.ExecSQL(sql, artifact1, projectID, 10) + suite.ExecSQL(sql, artifact2, projectID, 10) + + defer suite.ExecSQL(`DELETE FROM artifact WHERE project_id = ?`, projectID) + + digest1 := suite.DigestString() + digest2 := suite.DigestString() + digest3 := suite.DigestString() + digest4 := suite.DigestString() + digest5 := suite.DigestString() + + var ol q.OrList + blobDigests := []string{digest1, digest2, digest3, digest4, digest5} + for _, digest := range blobDigests { + blobID, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + if suite.Nil(err) { + suite.dao.CreateProjectBlob(ctx, projectID, blobID) + } + ol.Values = append(ol.Values, digest) + } + + blobs, err := suite.dao.ListBlobs(ctx, q.New(q.KeyWords{"digest": &ol})) + suite.Nil(err) + suite.Len(blobs, 5) + + for _, digest := range []string{digest1, digest2, digest3} { + 
suite.dao.CreateArtifactAndBlob(ctx, artifact1, digest) + } + + for _, digest := range blobDigests { + suite.dao.CreateArtifactAndBlob(ctx, artifact2, digest) + } + + { + results, err := suite.dao.FindBlobsShouldUnassociatedWithProject(ctx, projectID, blobs) + suite.Nil(err) + suite.Len(results, 0) + } + + suite.ExecSQL(`DELETE FROM artifact WHERE digest = ?`, artifact2) + + { + results, err := suite.dao.FindBlobsShouldUnassociatedWithProject(ctx, projectID, blobs) + suite.Nil(err) + if suite.Len(results, 2) { + suite.Contains([]string{results[0].Digest, results[1].Digest}, digest4) + suite.Contains([]string{results[0].Digest, results[1].Digest}, digest5) + } + + } + }) + +} + +func (suite *MysqlDaoTestSuite) TestCreateProjectBlob() { + ctx := suite.Context() + + projectID := int64(1) + blobID := int64(1000) + + _, err := suite.dao.CreateProjectBlob(ctx, projectID, blobID) + suite.Nil(err) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID, blobID) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestExistProjectBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + + projectID := int64(1) + + exist, err := suite.dao.ExistProjectBlob(ctx, projectID, digest) + suite.Nil(err) + suite.False(exist) + + blobID, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID, blobID) + suite.Nil(err) + + exist, err = suite.dao.ExistProjectBlob(ctx, projectID, digest) + suite.Nil(err) + suite.True(exist) +} + +func (suite *MysqlDaoTestSuite) TestDeleteProjectBlob() { + ctx := suite.Context() + + digest := suite.DigestString() + blobID, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + + projectID1 := int64(1) + projectID2 := int64(2) + projectID3 := int64(3) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID1, blobID) + suite.Nil(err) + + _, err = suite.dao.CreateProjectBlob(ctx, projectID2, blobID) + suite.Nil(err) + + { + exist, err := 
suite.dao.ExistProjectBlob(ctx, projectID1, digest) + suite.Nil(err) + suite.True(exist) + } + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID2, digest) + suite.Nil(err) + suite.True(exist) + } + + suite.Nil(suite.dao.DeleteProjectBlob(ctx, projectID3, blobID)) + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID1, digest) + suite.Nil(err) + suite.True(exist) + } + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID2, digest) + suite.Nil(err) + suite.True(exist) + } + + suite.Nil(suite.dao.DeleteProjectBlob(ctx, projectID1, blobID)) + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID1, digest) + suite.Nil(err) + suite.False(exist) + } + + { + exist, err := suite.dao.ExistProjectBlob(ctx, projectID2, digest) + suite.Nil(err) + suite.True(exist) + } +} + +func (suite *MysqlDaoTestSuite) TestDelete() { + ctx := suite.Context() + + err := suite.dao.DeleteBlob(ctx, 100021) + suite.Require().NotNil(err) + suite.True(errors.IsErr(err, errors.NotFoundCode)) + + digest := suite.DigestString() + id, err := suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + suite.Nil(err) + err = suite.dao.DeleteBlob(ctx, id) + suite.Require().Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestGetBlobsNotRefedByProjectBlob() { + ctx := suite.Context() + + blobs, err := suite.dao.GetBlobsNotRefedByProjectBlob(ctx, 0) + suite.Require().Nil(err) + beforeAdd := len(blobs) + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: suite.DigestString()}) + suite.dao.CreateBlob(ctx, &models.Blob{Digest: suite.DigestString()}) + digest := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: digest}) + + blob, err := suite.dao.GetBlobByDigest(ctx, digest) + suite.Nil(err) + + projectID := int64(1) + _, err = suite.dao.CreateProjectBlob(ctx, projectID, blob.ID) + suite.Nil(err) + + blobs, err = suite.dao.GetBlobsNotRefedByProjectBlob(ctx, 0) + suite.Require().Nil(err) + suite.Require().Equal(2+beforeAdd, len(blobs)) + + blobs, err = 
suite.dao.GetBlobsNotRefedByProjectBlob(ctx, 2) + suite.Require().Nil(err) + suite.Require().Equal(0, len(blobs)) +} + +func (suite *MysqlDaoTestSuite) GetBlobsByArtDigest() { + ctx := suite.Context() + afDigest := suite.DigestString() + blobs, err := suite.dao.GetBlobsByArtDigest(ctx, afDigest) + suite.Nil(err) + suite.Require().Equal(0, len(blobs)) + + suite.dao.CreateBlob(ctx, &models.Blob{Digest: afDigest}) + blobDigest1 := suite.DigestString() + blobDigest2 := suite.DigestString() + suite.dao.CreateBlob(ctx, &models.Blob{Digest: blobDigest1}) + suite.dao.CreateBlob(ctx, &models.Blob{Digest: blobDigest2}) + + _, err = suite.dao.CreateArtifactAndBlob(ctx, afDigest, afDigest) + suite.Nil(err) + _, err = suite.dao.CreateArtifactAndBlob(ctx, afDigest, blobDigest1) + suite.Nil(err) + _, err = suite.dao.CreateArtifactAndBlob(ctx, afDigest, blobDigest2) + suite.Nil(err) + + blobs, err = suite.dao.GetBlobsByArtDigest(ctx, afDigest) + suite.Nil(err) + suite.Require().Equal(3, len(blobs)) +} + +func TestMysqlDaoTestSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, &MysqlDaoTestSuite{}) +} diff --git a/src/pkg/blob/manager.go b/src/pkg/blob/manager.go index 54cc371dfb5f..7858eeef0900 100644 --- a/src/pkg/blob/manager.go +++ b/src/pkg/blob/manager.go @@ -17,6 +17,7 @@ package blob import ( "context" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/blob/dao" @@ -163,5 +164,12 @@ func (m *manager) CalculateTotalSize(ctx context.Context, excludeForeignLayer bo // NewManager returns blob manager func NewManager() Manager { + switch { + case utils.IsDBPostgresql(): + return &manager{dao: dao.New()} + case utils.IsDBMysql(): + return &manager{dao: dao.NewMysqlDao()} + } + return &manager{dao: dao.New()} } diff --git a/src/pkg/blob/manager_test.go b/src/pkg/blob/manager_test.go index c688bb34e6b5..f20d7c359f1a 100644 --- 
a/src/pkg/blob/manager_test.go +++ b/src/pkg/blob/manager_test.go @@ -22,6 +22,7 @@ import ( "github.com/docker/distribution/manifest/schema2" "github.com/stretchr/testify/suite" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/blob/models" htesting "github.com/goharbor/harbor/src/testing" @@ -125,12 +126,28 @@ func (suite *ManagerTestSuite) TestCleanupAssociationsForArtifact() { } } +func prepareSQLForInsertArtifact() string { + var sql string + defaultSQL := `INSERT INTO artifact ("type", media_type, manifest_media_type, digest, project_id, repository_id, repository_name) VALUES ('image', 'media_type', 'manifest_media_type', ?, ?, ?, 'library/hello-world')` + + switch { + case utils.IsDBPostgresql(): + sql = defaultSQL + case utils.IsDBMysql(): + sql = "INSERT INTO artifact (`type`, media_type, manifest_media_type, digest, project_id, repository_id, repository_name) VALUES ('image', 'media_type', 'manifest_media_type', ?, ?, ?, 'library/hello-world')" + default: + sql = defaultSQL + } + + return sql +} + func (suite *ManagerTestSuite) TestCleanupAssociationsForProject() { suite.WithProject(func(projectID int64, projectName string) { artifact1 := suite.DigestString() artifact2 := suite.DigestString() - sql := `INSERT INTO artifact ("type", media_type, manifest_media_type, digest, project_id, repository_id, repository_name) VALUES ('image', 'media_type', 'manifest_media_type', ?, ?, ?, 'library/hello-world')` + sql := prepareSQLForInsertArtifact() suite.ExecSQL(sql, artifact1, projectID, 10) suite.ExecSQL(sql, artifact2, projectID, 10) @@ -200,7 +217,7 @@ func (suite *ManagerTestSuite) TestFindBlobsShouldUnassociatedWithProject() { artifact1 := suite.DigestString() artifact2 := suite.DigestString() - sql := `INSERT INTO artifact ("type", media_type, manifest_media_type, digest, project_id, repository_id, repository_name) VALUES ('image', 'media_type', 'manifest_media_type', ?, ?, ?, 
'library/hello-world')` + sql := prepareSQLForInsertArtifact() suite.ExecSQL(sql, artifact1, projectID, 11) suite.ExecSQL(sql, artifact2, projectID, 11) diff --git a/src/pkg/config/db/manager_test.go b/src/pkg/config/db/manager_test.go index 806a0eccab2b..4d8095766949 100644 --- a/src/pkg/config/db/manager_test.go +++ b/src/pkg/config/db/manager_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils/test" "github.com/goharbor/harbor/src/lib/config/metadata" "github.com/goharbor/harbor/src/lib/orm" @@ -31,13 +32,14 @@ import ( ) var TestDBConfig = map[string]interface{}{ - "postgresql_host": "localhost", - "postgresql_database": "registry", - "postgresql_password": "root123", - "postgresql_username": "postgres", - "postgresql_sslmode": "disable", - "email_host": "127.0.0.1", - "scan_all_policy": `{"parameter":{"daily_time":0},"type":"daily"}`, + "database_type": "postgresql", + "db_host": "localhost", + "db_database": "registry", + "db_password": "root123", + "db_username": "postgres", + "db_sslmode": "disable", + "email_host": "127.0.0.1", + "scan_all_policy": `{"parameter":{"daily_time":0},"type":"daily"}`, } var configManager *cfgPkg.CfgManager @@ -102,21 +104,26 @@ func TestCfgManger_loadSystemValues(t *testing.T) { configManager.LoadDefault() configManager.LoadSystemConfigFromEnv() configManager.UpdateConfig(testCtx, map[string]interface{}{ - "postgresql_host": "127.0.0.1", + "db_host": "127.0.0.1", }) - if configManager.Get(testCtx, "postgresql_host").GetString() != "127.0.0.1" { - t.Errorf("Failed to set system value postgresql_host, expected %v, actual %v", "127.0.0.1", configManager.Get(nil, "postgresql_host").GetString()) + if configManager.Get(testCtx, "db_host").GetString() != "127.0.0.1" { + t.Errorf("Failed to set system value db_host, expected %v, actual %v", "127.0.0.1", configManager.Get(nil, "db_host").GetString()) } } func 
TestCfgManager_GetDatabaseCfg(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } configManager.UpdateConfig(testCtx, map[string]interface{}{ - "postgresql_host": "localhost", - "postgresql_database": "registry", - "postgresql_password": "root123", - "postgresql_username": "postgres", - "postgresql_sslmode": "disable", + "database_type": "postgresql", + "db_host": "localhost", + "db_database": "registry", + "db_password": "root123", + "db_username": "postgres", + "db_sslmode": "disable", }) dbCfg := configManager.GetDatabaseCfg() + assert.Equal(t, "postgresql", dbCfg.Type) assert.Equal(t, "localhost", dbCfg.PostGreSQL.Host) assert.Equal(t, "registry", dbCfg.PostGreSQL.Database) assert.Equal(t, "root123", dbCfg.PostGreSQL.Password) @@ -124,6 +131,27 @@ func TestCfgManager_GetDatabaseCfg(t *testing.T) { assert.Equal(t, "disable", dbCfg.PostGreSQL.SSLMode) } +func TestCfgManager_GetDatabaseCfg_Mysql(t *testing.T) { + if !utils.IsDBMysql() { + return + } + configManager.UpdateConfig(testCtx, map[string]interface{}{ + "database_type": "mysql", + "db_host": "localhost", + "db_database": "registry", + "db_password": "root123", + "db_username": "root", + "db_port": 3306, + }) + dbCfg := configManager.GetDatabaseCfg() + assert.Equal(t, "mysql", dbCfg.Type) + assert.Equal(t, "localhost", dbCfg.MySQL.Host) + assert.Equal(t, "registry", dbCfg.MySQL.Database) + assert.Equal(t, "root123", dbCfg.MySQL.Password) + assert.Equal(t, "root", dbCfg.MySQL.Username) + assert.Equal(t, 3306, dbCfg.MySQL.Port) +} + func TestConfigStore_Save(t *testing.T) { cfgStore := store.NewConfigStore(&Database{cfgDAO: dao.New()}) err := cfgStore.Save(testCtx) diff --git a/src/pkg/config/manager.go b/src/pkg/config/manager.go index 4ecb253f24a8..5a74f7d8fdbe 100644 --- a/src/pkg/config/manager.go +++ b/src/pkg/config/manager.go @@ -156,19 +156,37 @@ func (c *CfgManager) Set(ctx context.Context, key string, value interface{}) { // GetDatabaseCfg - Get database configurations func (c *CfgManager) 
GetDatabaseCfg() *models.Database { ctx := context.Background() - return &models.Database{ - Type: c.Get(ctx, common.DatabaseType).GetString(), - PostGreSQL: &models.PostGreSQL{ - Host: c.Get(ctx, common.PostGreSQLHOST).GetString(), - Port: c.Get(ctx, common.PostGreSQLPort).GetInt(), - Username: c.Get(ctx, common.PostGreSQLUsername).GetString(), - Password: c.Get(ctx, common.PostGreSQLPassword).GetString(), - Database: c.Get(ctx, common.PostGreSQLDatabase).GetString(), - SSLMode: c.Get(ctx, common.PostGreSQLSSLMode).GetString(), - MaxIdleConns: c.Get(ctx, common.PostGreSQLMaxIdleConns).GetInt(), - MaxOpenConns: c.Get(ctx, common.PostGreSQLMaxOpenConns).GetInt(), - }, + database := &models.Database{} + database.Type = c.Get(ctx, common.DatabaseType).GetString() + + switch { + case utils.IsDBPostgresql(database.Type): + postgresql := &models.PostGreSQL{ + Host: c.Get(ctx, common.DBHOST).GetString(), + Port: c.Get(ctx, common.DBPort).GetInt(), + Username: c.Get(ctx, common.DBUsername).GetString(), + Password: c.Get(ctx, common.DBPassword).GetString(), + Database: c.Get(ctx, common.DBDatabase).GetString(), + SSLMode: c.Get(ctx, common.DBSSLMode).GetString(), + MaxIdleConns: c.Get(ctx, common.DBMaxIdleConns).GetInt(), + MaxOpenConns: c.Get(ctx, common.DBMaxOpenConns).GetInt(), + } + database.PostGreSQL = postgresql + case utils.IsDBMysql(database.Type): + mysql := &models.MySQL{ + Host: c.Get(ctx, common.DBHOST).GetString(), + Port: c.Get(ctx, common.DBPort).GetInt(), + Username: c.Get(ctx, common.DBUsername).GetString(), + Password: c.Get(ctx, common.DBPassword).GetString(), + Database: c.Get(ctx, common.DBDatabase).GetString(), + Collation: c.Get(ctx, common.DBCollation).GetString(), + MaxIdleConns: c.Get(ctx, common.DBMaxIdleConns).GetInt(), + MaxOpenConns: c.Get(ctx, common.DBMaxOpenConns).GetInt(), + } + database.MySQL = mysql } + + return database } // UpdateConfig - Update config Store with a specified configuration and also save updated configure. 
diff --git a/src/pkg/exporter/project_collector.go b/src/pkg/exporter/project_collector.go index 1e85073d11e4..10a4a66d982e 100644 --- a/src/pkg/exporter/project_collector.go +++ b/src/pkg/exporter/project_collector.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" + "github.com/beego/beego/orm" "github.com/prometheus/client_golang/prometheus" "github.com/goharbor/harbor/src/common/dao" @@ -23,7 +24,8 @@ var ( INNER JOIN quota ON project.project_id = CAST(quota.reference_id AS Integer) INNER JOIN quota_usage ON project.project_id = CAST(quota_usage.reference_id AS Integer) WHERE quota.reference='project' AND quota_usage.reference='project' AND project.deleted=FALSE AND project_metadata.name='public';` - projectMemberSQL = `SELECT project.project_id, COUNT(project.project_id) AS member_total + projectBasicMySQL = "SELECT project.project_id, project.name, project_metadata.value AS public, quota.hard AS quota, quota_usage.used AS `usage` FROM project INNER JOIN project_metadata ON (project.project_id = project_metadata.project_id) INNER JOIN quota ON project.project_id = CAST(quota.reference_id AS SIGNED) INNER JOIN quota_usage ON project.project_id = CAST(quota_usage.reference_id AS SIGNED) WHERE quota.reference='project' AND quota_usage.reference='project' AND project.deleted=FALSE AND project_metadata.name='public';" + projectMemberSQL = `SELECT project.project_id, COUNT(project.project_id) AS member_total FROM project INNER JOIN project_member ON project.project_id=project_member.project_id WHERE project.deleted=FALSE AND project_member.entity_type='u' GROUP BY project.project_id, project_member.entity_type;` @@ -35,6 +37,10 @@ var ( FROM project INNER JOIN artifact ON project.project_id=artifact.project_id WHERE project.deleted=FALSE GROUP BY artifact.project_id, type;` + projectArtifactsMySQL = `SELECT artifact.project_id, artifact.type AS artifact_type, COUNT(artifact.type) AS artifact_total + FROM project INNER JOIN artifact ON 
project.project_id=artifact.project_id + WHERE project.deleted=FALSE + GROUP BY artifact.project_id, type, BINARY type;` ) var ( projectTotal = typedDesc{ @@ -186,8 +192,9 @@ func getProjectInfo() *projectOverviewInfo { } func updateProjectBasicInfo(projectMap map[int64]*projectInfo) { + sql := getProjectBasicSQL() pList := make([]*projectInfo, 0) - _, err := dao.GetOrmer().Raw(projectBasicSQL).QueryRows(&pList) + _, err := dao.GetOrmer().Raw(sql).QueryRows(&pList) checkErr(err, "get project from DB failure") for _, p := range pList { p.Artifact = make(map[string]artifactInfo) @@ -195,6 +202,14 @@ func updateProjectBasicInfo(projectMap map[int64]*projectInfo) { } } +func getProjectBasicSQL() string { + sql := projectBasicSQL + if dao.GetOrmer().Driver().Type() == orm.DRMySQL { + sql = projectBasicMySQL + } + return sql +} + func updateProjectMemberInfo(projectMap map[int64]*projectInfo) { pList := make([]projectInfo, 0) _, err := dao.GetOrmer().Raw(projectMemberSQL).QueryRows(&pList) @@ -226,7 +241,8 @@ func updateProjectRepoInfo(projectMap map[int64]*projectInfo) { func updateProjectArtifactInfo(projectMap map[int64]*projectInfo) { aList := make([]artifactInfo, 0) - _, err := dao.GetOrmer().Raw(projectArtifactsSQL).QueryRows(&aList) + sql := getProjectArtifactsSQL() + _, err := dao.GetOrmer().Raw(sql).QueryRows(&aList) checkErr(err, "get data from DB failure") for _, a := range aList { if _, ok := projectMap[a.ProjectID]; ok { @@ -236,3 +252,11 @@ func updateProjectArtifactInfo(projectMap map[int64]*projectInfo) { } } } + +func getProjectArtifactsSQL() string { + sql := projectArtifactsSQL + if dao.GetOrmer().Driver().Type() == orm.DRMySQL { + sql = projectArtifactsMySQL + } + return sql +} diff --git a/src/pkg/exporter/project_collector_test.go b/src/pkg/exporter/project_collector_test.go index e4912a3b59c5..7b81498b6412 100644 --- a/src/pkg/exporter/project_collector_test.go +++ b/src/pkg/exporter/project_collector_test.go @@ -1,6 +1,7 @@ package exporter import 
( + "fmt" "strconv" "testing" "time" @@ -10,6 +11,7 @@ import ( "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils/test" proctl "github.com/goharbor/harbor/src/controller/project" quotactl "github.com/goharbor/harbor/src/controller/quota" @@ -142,12 +144,17 @@ type PorjectCollectorTestSuite struct { } func (c *PorjectCollectorTestSuite) TestProjectCollector() { + if utils.IsDBMariaDB() { + return + } pMap := make(map[int64]*projectInfo) updateProjectBasicInfo(pMap) updateProjectMemberInfo(pMap) updateProjectRepoInfo(pMap) updateProjectArtifactInfo(pMap) + fmt.Printf("pMap1: %+v\n", pMap[testPro1.ProjectID]) + fmt.Printf("pMap2: %+v\n", pMap[testPro2.ProjectID]) c.Equalf(testPro1.ProjectID, pMap[testPro1.ProjectID].ProjectID, "pMap %v", pMap) c.Equalf(pMap[testPro1.ProjectID].ProjectID, testPro1.ProjectID, "pMap %v", pMap) c.Equalf(pMap[testPro1.ProjectID].Name, testPro1.Name, "pMap %v", pMap) @@ -170,6 +177,42 @@ func (c *PorjectCollectorTestSuite) TestProjectCollector() { } +func (c *PorjectCollectorTestSuite) TestProjectCollectorForMariaDB() { + if !utils.IsDBMariaDB() { + return + } + pMap := make(map[int64]*projectInfo) + updateProjectBasicInfo(pMap) + updateProjectMemberInfo(pMap) + updateProjectRepoInfo(pMap) + + fmt.Printf("pMap1: %+v\n", pMap[testPro1.ProjectID]) + fmt.Printf("pMap2: %+v\n", pMap[testPro2.ProjectID]) + updateProjectArtifactInfo(pMap) + + fmt.Printf("pMap1: %+v\n", pMap[testPro1.ProjectID]) + fmt.Printf("pMap2: %+v\n", pMap[testPro2.ProjectID]) + c.Equalf(testPro1.ProjectID, pMap[testPro1.ProjectID].ProjectID, "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].ProjectID, testPro1.ProjectID, "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].Name, testPro1.Name, "pMap %v", pMap) + c.Equalf(strconv.FormatBool(pMap[testPro1.ProjectID].Public), 
testPro1.Metadata["public"], "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].Quota, "{\"storage\":100}", "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].Usage, "{\"storage\":0}", "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].MemberTotal, float64(2), "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].PullTotal, float64(0), "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].Artifact["IMAGE"].ArtifactTotal, float64(1), "pMap %v", pMap) + c.Equalf(pMap[testPro1.ProjectID].Artifact["IMAGE"].ArtifactType, "IMAGE", "pMap %v", pMap) + + c.Equalf(pMap[testPro2.ProjectID].ProjectID, testPro2.ProjectID, "pMap %v", pMap) + c.Equalf(pMap[testPro2.ProjectID].Name, testPro2.Name, "pMap %v", pMap) + c.Equalf(strconv.FormatBool(pMap[testPro2.ProjectID].Public), testPro2.Metadata["public"], "pMap %v", pMap) + c.Equalf(pMap[testPro2.ProjectID].Quota, "{\"storage\":200}", "pMap %v", pMap) + c.Equalf(pMap[testPro2.ProjectID].Usage, "{\"storage\":0}", "pMap %v", pMap) + c.Equalf(pMap[testPro2.ProjectID].MemberTotal, float64(3), "pMap %v", pMap) + c.Equalf(pMap[testPro2.ProjectID].PullTotal, float64(0), "pMap %v", pMap) + c.Equalf(pMap[testPro2.ProjectID].Artifact["IMAGE"].ArtifactTotal, float64(1), "pMap %v", pMap) +} + func TestPorjectCollectorTestSuite(t *testing.T) { setupTest(t) defer tearDownTest(t) diff --git a/src/pkg/immutable/dao/dao_test.go b/src/pkg/immutable/dao/dao_test.go index 0e361c39b8c6..89fed8459ff5 100644 --- a/src/pkg/immutable/dao/dao_test.go +++ b/src/pkg/immutable/dao/dao_test.go @@ -25,7 +25,7 @@ type immutableRuleDaoTestSuite struct { func (t *immutableRuleDaoTestSuite) SetupSuite() { t.require = require.New(t.T()) t.assert = assert.New(t.T()) - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() t.dao = New() } diff --git a/src/pkg/immutable/match/rule/match_test.go b/src/pkg/immutable/match/rule/match_test.go index 56116534f609..a8ffe7f9b7db 100644 --- a/src/pkg/immutable/match/rule/match_test.go +++ 
b/src/pkg/immutable/match/rule/match_test.go @@ -157,7 +157,7 @@ func (s *MatchTestSuite) TearDownSuite() { } func TestMain(m *testing.M) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() if result := m.Run(); result != 0 { os.Exit(result) diff --git a/src/pkg/label/dao/dao_test.go b/src/pkg/label/dao/dao_test.go index 371aa43ccb4b..abee5d72d181 100644 --- a/src/pkg/label/dao/dao_test.go +++ b/src/pkg/label/dao/dao_test.go @@ -41,7 +41,7 @@ type labelDaoTestSuite struct { } func (l *labelDaoTestSuite) SetupSuite() { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() l.dao = &defaultDAO{} l.artDAO = artdao.New() l.ctx = orm.NewContext(nil, beegoorm.NewOrm()) diff --git a/src/pkg/member/dao/dao_test.go b/src/pkg/member/dao/dao_test.go index 61d27a7ba47f..9266b2fb02d7 100644 --- a/src/pkg/member/dao/dao_test.go +++ b/src/pkg/member/dao/dao_test.go @@ -21,8 +21,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/goharbor/harbor/src/common" - testDao "github.com/goharbor/harbor/src/common/dao" _ "github.com/goharbor/harbor/src/common/dao" + testDao "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/pkg" "github.com/goharbor/harbor/src/pkg/member/models" @@ -43,6 +44,9 @@ type DaoTestSuite struct { } func (s *DaoTestSuite) SetupSuite() { + if !utils.IsDBPostgresql() { + return + } s.Suite.SetupSuite() s.Suite.ClearTables = []string{"project_member"} s.dao = New() @@ -295,5 +299,8 @@ func (s *DaoTestSuite) TestDeleteProjectMemberByUserId() { } func TestDaoTestSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &DaoTestSuite{}) } diff --git a/src/pkg/member/dao/mysql_dao.go b/src/pkg/member/dao/mysql_dao.go new file mode 100644 index 000000000000..962c88f04c1a --- /dev/null +++ b/src/pkg/member/dao/mysql_dao.go @@ -0,0 +1,70 @@ +// Copyright Project Harbor Authors +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "context" + "fmt" + + "github.com/goharbor/harbor/src/lib/log" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg/member/models" +) + +type mysqlDao struct { + *dao +} + +// NewMysqlDao ... +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +func (d *mysqlDao) AddProjectMember(ctx context.Context, member models.Member) (int, error) { + log.Debugf("Adding project member %+v", member) + o, err := orm.FromContext(ctx) + if err != nil { + return 0, err + } + + if member.EntityID <= 0 { + return 0, fmt.Errorf("invalid entity_id, member: %+v", member) + } + + if member.ProjectID <= 0 { + return 0, fmt.Errorf("invalid project_id, member: %+v", member) + } + + delSQL := "delete from project_member where project_id = ? and entity_id = ? and entity_type = ? 
" + _, err = o.Raw(delSQL, member.ProjectID, member.EntityID, member.EntityType).Exec() + if err != nil { + return 0, err + } + + var pmid int + + sql := "insert into project_member (project_id, entity_id , role, entity_type) values (?, ?, ?, ?)" + res, err := o.Raw(sql, member.ProjectID, member.EntityID, member.Role, member.EntityType).Exec() + if err != nil { + return 0, err + } + insertID, err := res.LastInsertId() + if err != nil { + return 0, err + } + pmid = int(insertID) + + return pmid, err +} diff --git a/src/pkg/member/dao/mysql_dao_test.go b/src/pkg/member/dao/mysql_dao_test.go new file mode 100644 index 000000000000..dd37026813da --- /dev/null +++ b/src/pkg/member/dao/mysql_dao_test.go @@ -0,0 +1,304 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/goharbor/harbor/src/common" + _ "github.com/goharbor/harbor/src/common/dao" + testDao "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg" + "github.com/goharbor/harbor/src/pkg/member/models" + "github.com/goharbor/harbor/src/pkg/project" + "github.com/goharbor/harbor/src/pkg/user" + userDao "github.com/goharbor/harbor/src/pkg/user/dao" + "github.com/goharbor/harbor/src/pkg/usergroup" + ugModel "github.com/goharbor/harbor/src/pkg/usergroup/model" + htesting "github.com/goharbor/harbor/src/testing" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO + projectMgr project.Manager + projectID int64 + userMgr user.Manager +} + +func (s *MysqlDaoTestSuite) SetupSuite() { + s.Suite.SetupSuite() + s.Suite.ClearTables = []string{"project_member"} + s.dao = NewMysqlDao() + // Extract to test utils + initSqls := []string{ + "insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')", + "insert into project (name, owner_id) values ('member_test_01', 1)", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')", + "update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 
'test_group_01'), 'g', 1)", + + "insert into harbor_user (username, email, password, realname) values ('member_test_02', 'member_test_02@example.com', '123456', 'member_test_02')", + "insert into project (name, owner_id) values ('member_test_02', 1)", + "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_02', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')", + "update project set owner_id = (select user_id from harbor_user where username = 'member_test_02') where name = 'member_test_02'", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select user_id from harbor_user where username = 'member_test_02'), 'u', 1)", + "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select id from user_group where group_name = 'test_group_02'), 'g', 1)", + } + + clearSqls := []string{ + "delete from project where name='member_test_01' or name='member_test_02'", + "delete from harbor_user where username='member_test_01' or username='member_test_02' or username='pm_sample'", + "delete from user_group", + "delete from project_member where id > 1", + } + testDao.PrepareTestData(clearSqls, initSqls) + s.projectMgr = pkg.ProjectMgr + s.userMgr = user.Mgr + ctx := s.Context() + proj, err := s.projectMgr.Get(ctx, "member_test_01") + s.Nil(err) + s.NotNil(proj) + s.projectID = proj.ProjectID +} +func (s *MysqlDaoTestSuite) TearDownSuite() { +} + +func (s *MysqlDaoTestSuite) TestAddProjectMember() { + ctx := s.Context() + proj, err := s.projectMgr.Get(ctx, "member_test_01") + s.Nil(err) + s.NotNil(proj) + + member := models.Member{ + ProjectID: proj.ProjectID, + EntityID: 1, + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + } + pmid, err := s.dao.AddProjectMember(ctx, member) + s.Nil(err) + s.True(pmid > 0) + + queryMember := models.Member{ + 
ProjectID: proj.ProjectID, + ID: pmid, + } + memberList, err := s.dao.GetProjectMember(ctx, queryMember, nil) + s.Nil(err) + s.False(len(memberList) == 0) + + _, err = s.dao.AddProjectMember(ctx, models.Member{ + ProjectID: -1, + EntityID: 1, + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + }) + + s.NotNil(err) + + _, err = s.dao.AddProjectMember(ctx, models.Member{ + ProjectID: 1, + EntityID: -1, + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + }) + + s.NotNil(err) +} + +func (s *MysqlDaoTestSuite) TestUpdateProjectMemberRole() { + ctx := s.Context() + proj, err := s.projectMgr.Get(ctx, "member_test_01") + s.Nil(err) + s.NotNil(proj) + user := userDao.User{ + Username: "pm_sample", + Email: sql.NullString{String: "pm_sample@example.com", Valid: true}, + Realname: "pm_sample", + Password: "1234567d", + } + o, err := orm.FromContext(ctx) + s.Nil(err) + userID, err := o.Insert(&user) + s.Nil(err) + member := models.Member{ + ProjectID: proj.ProjectID, + EntityID: int(userID), + EntityType: common.UserMember, + Role: common.RoleProjectAdmin, + } + + pmid, err := s.dao.AddProjectMember(ctx, member) + s.Nil(err) + err = s.dao.UpdateProjectMemberRole(ctx, proj.ProjectID, pmid, common.RoleDeveloper) + s.Nil(err) + + queryMember := models.Member{ + ProjectID: proj.ProjectID, + EntityID: int(userID), + EntityType: common.UserMember, + } + + memberList, err := s.dao.GetProjectMember(ctx, queryMember, nil) + s.Nil(err) + s.True(len(memberList) == 1, "project member should exist") + memberItem := memberList[0] + s.Equal(common.RoleDeveloper, memberItem.Role, "should be developer role") + s.Equal(user.Username, memberItem.Entityname) + + memberList2, err := s.dao.SearchMemberByName(ctx, proj.ProjectID, "pm_sample") + s.Nil(err) + s.True(len(memberList2) > 0) + + memberList3, err := s.dao.SearchMemberByName(ctx, proj.ProjectID, "") + s.Nil(err) + s.True(len(memberList3) > 0, "failed to search project member") +} + +func (s 
*MysqlDaoTestSuite) TestGetProjectMembers() { + ctx := s.Context() + + query1 := models.Member{ProjectID: s.projectID, Entityname: "member_test_01", EntityType: common.UserMember} + member1, err := s.dao.GetProjectMember(ctx, query1, nil) + s.Nil(err) + s.True(len(member1) > 0) + s.Equal(member1[0].Entityname, "member_test_01") + + query2 := models.Member{ProjectID: s.projectID, Entityname: "test_group_01", EntityType: common.GroupMember} + member2, err := s.dao.GetProjectMember(ctx, query2, nil) + s.Nil(err) + s.True(len(member2) > 0) + s.Equal(member2[0].Entityname, "test_group_01") +} + +func (s *MysqlDaoTestSuite) TestGetTotalOfProjectMembers() { + ctx := s.Context() + tot, err := s.dao.GetTotalOfProjectMembers(ctx, s.projectID, nil) + s.Nil(err) + s.Equal(2, int(tot)) +} + +func (s *MysqlDaoTestSuite) TestListRoles() { + ctx := s.Context() + + // nil user + roles, err := s.dao.ListRoles(ctx, nil, 1) + s.Nil(err) + s.Len(roles, 0) + + // user with empty groups + u, err := s.userMgr.GetByName(ctx, "member_test_01") + s.Nil(err) + s.NotNil(u) + user := &models.User{ + UserID: u.UserID, + Username: u.Username, + } + roles, err = s.dao.ListRoles(ctx, user, s.projectID) + s.Nil(err) + s.Len(roles, 1) + + // user with a group whose ID doesn't exist + user.GroupIDs = []int{9999} + roles, err = s.dao.ListRoles(ctx, user, s.projectID) + s.Nil(err) + s.Len(roles, 1) + s.Equal(common.RoleProjectAdmin, roles[0]) + + // user with a valid group + groupID, err := usergroup.Mgr.Create(ctx, ugModel.UserGroup{ + GroupName: "group_for_list_role", + GroupType: 1, + LdapGroupDN: "CN=list_role_users,OU=sample,OU=vmware,DC=harbor,DC=com", + }) + + s.Nil(err) + defer usergroup.Mgr.Delete(ctx, groupID) + + memberID, err := s.dao.AddProjectMember(ctx, models.Member{ + ProjectID: s.projectID, + Role: common.RoleDeveloper, + EntityID: groupID, + EntityType: "g", + }) + s.Nil(err) + defer s.dao.DeleteProjectMemberByID(ctx, s.projectID, memberID) + + user.GroupIDs = []int{groupID} + roles, 
err = s.dao.ListRoles(ctx, user, s.projectID) + s.Nil(err) + s.Len(roles, 2) + s.Equal(common.RoleProjectAdmin, roles[0]) + s.Equal(common.RoleDeveloper, roles[1]) +} + +func (s *MysqlDaoTestSuite) TestDeleteProjectMember() { + ctx := s.Context() + var addMember = models.Member{ + ProjectID: s.projectID, + EntityID: 1, + EntityType: common.UserMember, + Role: common.RoleDeveloper, + } + pmid, err := s.dao.AddProjectMember(ctx, addMember) + s.Nil(err) + s.True(pmid > 0) + + err = s.dao.DeleteProjectMemberByID(ctx, s.projectID, pmid) + s.Nil(err) + + // not exist + err = s.dao.DeleteProjectMemberByID(ctx, s.projectID, -1) + s.Nil(err) + +} + +func (s *MysqlDaoTestSuite) TestDeleteProjectMemberByUserId() { + ctx := s.Context() + userID := 22 + var addMember = models.Member{ + ProjectID: s.projectID, + EntityID: userID, + EntityType: common.UserMember, + Role: common.RoleDeveloper, + } + pmid, err := s.dao.AddProjectMember(ctx, addMember) + s.Nil(err) + s.True(pmid > 0) + + err = s.dao.DeleteProjectMemberByUserID(ctx, userID) + s.Nil(err) + + queryMember := models.Member{ProjectID: s.projectID, EntityID: userID, EntityType: common.UserMember} + + // not exist + members, err := s.dao.GetProjectMember(ctx, queryMember, nil) + s.True(len(members) == 0) + s.Nil(err) +} + +func TestMysqlDaoTestSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, &MysqlDaoTestSuite{}) +} diff --git a/src/pkg/member/manager.go b/src/pkg/member/manager.go index 573fb288f708..b61577f43d15 100644 --- a/src/pkg/member/manager.go +++ b/src/pkg/member/manager.go @@ -17,6 +17,7 @@ package member import ( "context" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/member/dao" @@ -104,5 +105,12 @@ func (m *manager) DeleteMemberByUserID(ctx context.Context, uid int) error { // NewManager ... 
func NewManager() Manager { + switch { + case utils.IsDBPostgresql(): + return &manager{dao: dao.New()} + case utils.IsDBMysql(): + return &manager{dao: dao.NewMysqlDao()} + } + return &manager{dao: dao.New()} } diff --git a/src/pkg/notification/job/dao/dao_test.go b/src/pkg/notification/job/dao/dao_test.go index 2841f4f4747c..f0433d9a3bd9 100644 --- a/src/pkg/notification/job/dao/dao_test.go +++ b/src/pkg/notification/job/dao/dao_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/suite" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/lib/q" @@ -185,5 +186,8 @@ func (suite *DaoTestSuite) TestGetLastTriggerJobsGroupByEventType() { } func TestDaoTestSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &DaoTestSuite{}) } diff --git a/src/pkg/notification/job/dao/mysql_dao.go b/src/pkg/notification/job/dao/mysql_dao.go new file mode 100644 index 000000000000..188c6cc2a588 --- /dev/null +++ b/src/pkg/notification/job/dao/mysql_dao.go @@ -0,0 +1,45 @@ +package dao + +import ( + "context" + + "github.com/goharbor/harbor/src/lib/log" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/pkg/notification/job/model" +) + +// New creates a default implementation for Dao +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +type mysqlDao struct { + *dao +} + +// GetLastTriggerJobsGroupByEventType get notification jobs info of policy, including event type and last trigger time +func (d *mysqlDao) GetLastTriggerJobsGroupByEventType(ctx context.Context, policyID int64) ([]*model.Job, error) { + ormer, err := orm.FromContext(ctx) + if err != nil { + return nil, err + } + // todo Yvonne more beauty + setSQLMode := `set sql_mode="STRICT_TRANS_TABLES"` + _, err = ormer.Raw(setSQLMode).Exec() + if err != nil { + log.Errorf("query last trigger info group by event type failed: %v", err) + return nil, err + } + // 
get jobs last triggered(created) group by event_type. + sql := `select event_type, id, creation_time, status, notify_type, job_uuid, update_time, + creation_time, job_detail from notification_job where policy_id = ? + group by event_type order by event_type, id desc, creation_time, status, notify_type, job_uuid, update_time, creation_time, job_detail` + jobs := []*model.Job{} + _, err = ormer.Raw(sql, policyID).QueryRows(&jobs) + if err != nil { + log.Errorf("query last trigger info group by event type failed: %v", err) + return nil, err + } + + return jobs, nil +} diff --git a/src/pkg/notification/job/dao/mysql_dao_test.go b/src/pkg/notification/job/dao/mysql_dao_test.go new file mode 100644 index 000000000000..18d15dc50a02 --- /dev/null +++ b/src/pkg/notification/job/dao/mysql_dao_test.go @@ -0,0 +1,164 @@ +package dao + +import ( + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/notification/job/model" + htesting "github.com/goharbor/harbor/src/testing" + "github.com/stretchr/testify/suite" + "testing" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO + + jobID1 int64 + jobID2 int64 + jobID3 int64 +} + +func (suite *MysqlDaoTestSuite) SetupSuite() { + suite.Suite.SetupSuite() + suite.dao = NewMysqlDao() + suite.Suite.ClearTables = []string{"notification_job"} + suite.jobs() +} + +func (suite *MysqlDaoTestSuite) jobs() { + var err error + suite.jobID1, err = suite.dao.Create(orm.Context(), testJob1) + suite.Nil(err) + + suite.jobID2, err = suite.dao.Create(orm.Context(), testJob2) + suite.Nil(err) + + suite.jobID3, err = suite.dao.Create(orm.Context(), testJob3) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestCreate() { + _, err := suite.dao.Create(orm.Context(), nil) + suite.NotNil(err) +} + +func (suite *MysqlDaoTestSuite) TestDelete() { + err := 
suite.dao.Delete(orm.Context(), 1234) + suite.Require().NotNil(err) + suite.True(errors.IsErr(err, errors.NotFoundCode)) + + err = suite.dao.Delete(orm.Context(), suite.jobID2) + suite.Nil(err) +} + +func (suite *MysqlDaoTestSuite) TestList() { + jobs, err := suite.dao.List(orm.Context(), &q.Query{ + Keywords: map[string]interface{}{ + "EventType": "pushImage", + }, + }) + suite.Require().Nil(err) + suite.Equal(len(jobs), 1) + suite.Equal(suite.jobID1, jobs[0].ID) +} + +func (suite *MysqlDaoTestSuite) TestGet() { + _, err := suite.dao.Get(orm.Context(), 1234) + suite.Require().NotNil(err) + suite.True(errors.IsErr(err, errors.NotFoundCode)) + + id, err := suite.dao.Create(orm.Context(), &model.Job{ + PolicyID: 2222, + EventType: "pushChart", + NotifyType: "http", + Status: "pending", + JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563536782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}", + UUID: "00000000", + }) + suite.Nil(err) + + r, err := suite.dao.Get(orm.Context(), id) + suite.Nil(err) + suite.Equal("pushChart", r.EventType) +} + +func (suite *MysqlDaoTestSuite) TestUpdate() { + j := &model.Job{ + ID: suite.jobID1, + Status: "success", + } + + err := suite.dao.Update(orm.Context(), j) + suite.Nil(err) + + r1, err := suite.dao.Get(orm.Context(), j.ID) + suite.Equal("success", r1.Status) +} + +func (suite *MysqlDaoTestSuite) TestCount() { + // nil query + total, err := suite.dao.Count(orm.Context(), nil) + suite.Nil(err) + suite.True(total > 0) + + // query by name + total, err = suite.dao.Count(orm.Context(), &q.Query{ + Keywords: map[string]interface{}{ + "EventType": "deleteImage", + }, + }) + suite.Nil(err) + suite.Equal(int64(1), 
total) +} + +func (suite *MysqlDaoTestSuite) TestDeleteByPolicyID() { + jobs, err := suite.dao.List(orm.Context(), &q.Query{ + Keywords: map[string]interface{}{ + "PolicyID": 111, + }, + }) + suite.True(len(jobs) > 0) + + err = suite.dao.DeleteByPolicyID(orm.Context(), 111) + suite.Nil(err) + + jobs, err = suite.dao.List(orm.Context(), &q.Query{ + Keywords: map[string]interface{}{ + "PolicyID": 111, + }, + }) + suite.Equal(0, len(jobs)) +} + +func (suite *MysqlDaoTestSuite) TestGetLastTriggerJobsGroupByEventType() { + _, err := suite.dao.Create(orm.Context(), &model.Job{ + PolicyID: 3333, + EventType: "pushChart", + NotifyType: "http", + Status: "pending", + JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563536782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}", + UUID: "00000000", + }) + suite.Nil(err) + _, err = suite.dao.Create(orm.Context(), &model.Job{ + PolicyID: 3333, + EventType: "pullChart", + NotifyType: "http", + Status: "pending", + JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563536782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}", + UUID: "00000000", + }) + suite.Nil(err) + jobs, err := suite.dao.GetLastTriggerJobsGroupByEventType(orm.Context(), 3333) + suite.Nil(err) + suite.Equal(2, len(jobs)) +} + +func TestMysqlDaoTestSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, 
&MysqlDaoTestSuite{}) +} diff --git a/src/pkg/notification/job/manager.go b/src/pkg/notification/job/manager.go index f60ed95ebc70..a4d476acb9ea 100644 --- a/src/pkg/notification/job/manager.go +++ b/src/pkg/notification/job/manager.go @@ -3,6 +3,7 @@ package job import ( "context" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/notification/job/dao" "github.com/goharbor/harbor/src/pkg/notification/job/model" @@ -39,9 +40,14 @@ type manager struct { // NewManager ... func NewManager() Manager { - return &manager{ - dao: dao.New(), + switch { + case utils.IsDBPostgresql(): + return &manager{dao: dao.New()} + case utils.IsDBMysql(): + return &manager{dao: dao.NewMysqlDao()} } + + return &manager{dao: dao.New()} } // Create ... diff --git a/src/pkg/notifier/handler/notification/slack_handler_test.go b/src/pkg/notifier/handler/notification/slack_handler_test.go index 2ed68c00c2cd..137437512326 100644 --- a/src/pkg/notifier/handler/notification/slack_handler_test.go +++ b/src/pkg/notifier/handler/notification/slack_handler_test.go @@ -16,7 +16,7 @@ import ( ) func TestSlackHandler_Handle(t *testing.T) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() hookMgr := notification.HookManager defer func() { notification.HookManager = hookMgr diff --git a/src/pkg/notifier/notifier_test.go b/src/pkg/notifier/notifier_test.go index 9bd738e5dc14..344ec95e1efe 100644 --- a/src/pkg/notifier/notifier_test.go +++ b/src/pkg/notifier/notifier_test.go @@ -119,7 +119,7 @@ func TestSubscribeAndUnSubscribe(t *testing.T) { } func TestPublish(t *testing.T) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() count := len(notificationWatcher.handlers) err := Subscribe("topic1", &fakeStatefulHandler{0}) if err != nil { @@ -161,7 +161,7 @@ func TestPublish(t *testing.T) { } func TestConcurrentPublish(t *testing.T) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() count := 
len(notificationWatcher.handlers) err := Subscribe("topic1", &fakeStatefulHandler{0}) if err != nil { diff --git a/src/pkg/p2p/preheat/dao/instance/dao_test.go b/src/pkg/p2p/preheat/dao/instance/dao_test.go index 1ce525232ff9..a89002bdb147 100644 --- a/src/pkg/p2p/preheat/dao/instance/dao_test.go +++ b/src/pkg/p2p/preheat/dao/instance/dao_test.go @@ -37,7 +37,7 @@ type instanceSuite struct { } func (is *instanceSuite) SetupSuite() { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() is.ctx = orm.NewContext(nil, beego_orm.NewOrm()) is.dao = New() } diff --git a/src/pkg/p2p/preheat/dao/policy/dao_test.go b/src/pkg/p2p/preheat/dao/policy/dao_test.go index cf9bc542a285..e648cfa831c1 100644 --- a/src/pkg/p2p/preheat/dao/policy/dao_test.go +++ b/src/pkg/p2p/preheat/dao/policy/dao_test.go @@ -44,7 +44,7 @@ func TestDaoTestSuite(t *testing.T) { // SetupSuite setups testing env. func (d *daoTestSuite) SetupSuite() { - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.dao = New() d.ctx = orm.NewContext(nil, beego_orm.NewOrm()) d.defaultPolicy = &policy.Schema{ diff --git a/src/pkg/project/dao/dao_test.go b/src/pkg/project/dao/dao_test.go index f3baf96221e9..78f22dcd8501 100644 --- a/src/pkg/project/dao/dao_test.go +++ b/src/pkg/project/dao/dao_test.go @@ -394,5 +394,8 @@ func (suite *DaoTestSuite) TestListRoles() { } func TestDaoTestSuite(t *testing.T) { + if utils.IsDBMysql() { + return + } suite.Run(t, &DaoTestSuite{}) } diff --git a/src/pkg/project/dao/mysql_dao_test.go b/src/pkg/project/dao/mysql_dao_test.go new file mode 100644 index 000000000000..c43e13149e1b --- /dev/null +++ b/src/pkg/project/dao/mysql_dao_test.go @@ -0,0 +1,423 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "fmt" + "testing" + + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/project/models" + htesting "github.com/goharbor/harbor/src/testing" + "github.com/stretchr/testify/suite" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO +} + +func (suite *MysqlDaoTestSuite) SetupSuite() { + suite.Suite.SetupSuite() + suite.dao = New() +} + +func (suite *MysqlDaoTestSuite) WithUser(f func(int64, string), usernames ...string) { + var username string + if len(usernames) > 0 { + username = usernames[0] + } else { + username = suite.RandString(5) + } + + o, err := orm.FromContext(orm.Context()) + if err != nil { + suite.Fail("got error %v", err) + } + + var userID int64 + + email := fmt.Sprintf("%s@example.com", username) + sql := "INSERT INTO harbor_user (username, realname, email, password) VALUES (?, ?, ?, 'Harbor12345')" + _, err = o.Raw(sql, username, username, email).Exec() + suite.Nil(err) + selectVersionSQL := "SELECT user_id FROM harbor_user WHERE username = ?" 
+ suite.Nil(o.Raw(selectVersionSQL, username).QueryRow(&userID)) + + defer func() { + o.Raw("UPDATE harbor_user SET deleted=True, username=concat_ws('#', username, user_id), email=concat_ws('#', email, user_id) WHERE user_id = ?", userID).Exec() + }() + + f(userID, username) +} + +func (suite *MysqlDaoTestSuite) WithUserGroup(f func(int64, string), groupNames ...string) { + var groupName string + if len(groupNames) > 0 { + groupName = groupNames[0] + } else { + groupName = suite.RandString(5) + } + + o, err := orm.FromContext(orm.Context()) + if err != nil { + suite.Fail("got error %v", err) + } + + var groupID int64 + + groupDN := fmt.Sprintf("cn=%s,dc=goharbor,dc=io", groupName) + sql := "INSERT INTO user_group (group_name, ldap_group_dn) VALUES (?, ?)" + res, err := o.Raw(sql, groupName, groupDN).Exec() + suite.Nil(err) + selectSQL := "SELECT id FROM user_group WHERE group_name = ?" + suite.Nil(o.Raw(selectSQL, groupName).QueryRow(&groupID)) + insertID, err := res.LastInsertId() + suite.Equal(groupID, insertID) + + defer func() { + o.Raw("DELETE FROM user_group WHERE id = ?", groupID).Exec() + }() + + f(groupID, groupName) +} + +func (suite *MysqlDaoTestSuite) TestCreate() { + { + project := &models.Project{ + Name: "foobar", + OwnerID: 1, + } + + projectID, err := suite.dao.Create(orm.Context(), project) + suite.Nil(err) + suite.dao.Delete(orm.Context(), projectID) + } + + { + // project name duplicated + project := &models.Project{ + Name: "library", + OwnerID: 1, + } + + projectID, err := suite.dao.Create(orm.Context(), project) + suite.Error(err) + suite.True(errors.IsConflictErr(err)) + suite.Equal(int64(0), projectID) + } +} + +func (suite *MysqlDaoTestSuite) TestCount() { + { + count, err := suite.dao.Count(orm.Context(), q.New(q.KeyWords{"project_id": 1})) + suite.Nil(err) + suite.Equal(int64(1), count) + } +} + +func (suite *MysqlDaoTestSuite) TestDelete() { + project := &models.Project{ + Name: "foobar", + OwnerID: 1, + } + + projectID, err := 
suite.dao.Create(orm.Context(), project) + suite.Nil(err) + + p1, err := suite.dao.Get(orm.Context(), projectID) + suite.Nil(err) + suite.Equal("foobar", p1.Name) + + suite.dao.Delete(orm.Context(), projectID) + + p2, err := suite.dao.Get(orm.Context(), projectID) + suite.Error(err) + suite.True(errors.IsNotFoundErr(err)) + suite.Nil(p2) +} + +func (suite *MysqlDaoTestSuite) TestGet() { + { + project, err := suite.dao.Get(orm.Context(), 1) + suite.Nil(err) + suite.Equal("library", project.Name) + } + + { + // not found + project, err := suite.dao.Get(orm.Context(), 10000) + suite.Error(err) + suite.True(errors.IsNotFoundErr(err)) + suite.Nil(project) + } +} + +func (suite *MysqlDaoTestSuite) TestGetByName() { + { + project, err := suite.dao.GetByName(orm.Context(), "library") + suite.Nil(err) + suite.Equal("library", project.Name) + } + + { + // not found + project, err := suite.dao.GetByName(orm.Context(), "project10000") + suite.Error(err) + suite.True(errors.IsNotFoundErr(err)) + suite.Nil(project) + } +} + +func (suite *MysqlDaoTestSuite) TestList() { + projectNames := []string{"foo1", "foo2", "foo3"} + + var projectIDs []int64 + for _, projectName := range projectNames { + project := &models.Project{ + Name: projectName, + OwnerID: 1, + } + projectID, err := suite.dao.Create(orm.Context(), project) + if suite.Nil(err) { + projectIDs = append(projectIDs, projectID) + } + } + + defer func() { + for _, projectID := range projectIDs { + suite.dao.Delete(orm.Context(), projectID) + } + }() + + { + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"project_id__in": projectIDs})) + suite.Nil(err) + suite.Len(projects, len(projectNames)) + } +} + +func (suite *MysqlDaoTestSuite) TestListByPublic() { + { + // default library project + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"public": true})) + suite.Nil(err) + suite.Len(projects, 1) + } + + { + // default library project + projects, err := suite.dao.List(orm.Context(), 
q.New(q.KeyWords{"public": "true"})) + suite.Nil(err) + suite.Len(projects, 1) + } + + { + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"public": false})) + suite.Nil(err) + suite.Len(projects, 0) + } + + { + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"public": "false"})) + suite.Nil(err) + suite.Len(projects, 0) + } +} + +func (suite *MysqlDaoTestSuite) TestListByOwner() { + { + // default library project + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"owner": "admin"})) + suite.Nil(err) + suite.Len(projects, 1) + } + + { + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"owner": "owner-not-found"})) + suite.Nil(err) + suite.Len(projects, 0) + } + + { + // single quotes in owner + suite.WithUser(func(userID int64, username string) { + project := &models.Project{ + Name: "project-owner-name-include-single-quotes", + OwnerID: int(userID), + } + projectID, err := suite.dao.Create(orm.Context(), project) + suite.Nil(err) + + defer suite.dao.Delete(orm.Context(), projectID) + + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"owner": username})) + suite.Nil(err) + suite.Len(projects, 1) + }, "owner include single quotes ' in it") + } + + { + // sql inject + suite.WithUser(func(userID int64, username string) { + project := &models.Project{ + Name: "project-sql-inject", + OwnerID: int(userID), + } + projectID, err := suite.dao.Create(orm.Context(), project) + suite.Nil(err) + + defer suite.dao.Delete(orm.Context(), projectID) + + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"owner": username})) + suite.Nil(err) + suite.Len(projects, 1) + }, "'owner' OR 1=1") + } +} + +func (suite *MysqlDaoTestSuite) TestListByMember() { + { + // project admin + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"member": &models.MemberQuery{UserID: 1, Role: common.RoleProjectAdmin}})) + suite.Nil(err) + suite.Len(projects, 1) + } + + { + // guest + projects, err := 
suite.dao.List(orm.Context(), q.New(q.KeyWords{"member": &models.MemberQuery{UserID: 1, Role: common.RoleGuest}})) + suite.Nil(err) + suite.Len(projects, 0) + } + + { + // guest with public projects + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"member": &models.MemberQuery{UserID: 1, Role: common.RoleGuest, WithPublic: true}})) + suite.Nil(err) + suite.Len(projects, 1) + } + + { + suite.WithUser(func(userID int64, username string) { + project := &models.Project{ + Name: "project-with-user-group", + OwnerID: int(userID), + } + projectID, err := suite.dao.Create(orm.Context(), project) + suite.Nil(err) + + defer suite.dao.Delete(orm.Context(), projectID) + + suite.WithUserGroup(func(groupID int64, groupName string) { + + o, err := orm.FromContext(orm.Context()) + if err != nil { + suite.Fail("got error %v", err) + } + + var pid int64 + sql := "INSERT INTO project_member (project_id, entity_id, role, entity_type) values (?, ?, ?, ?)" + res, err := o.Raw(sql, projectID, groupID, common.RoleGuest, "g").Exec() + suite.Nil(err) + insertID, err := res.LastInsertId() + selectPidSQL := "SELECT project_id FROM project_member WHERE id = ?" 
+ suite.Nil(o.Raw(selectPidSQL, insertID).QueryRow(&pid)) + suite.Equal(projectID, pid) + + defer o.Raw("DELETE FROM project_member WHERE id = ?", pid) + + memberQuery := &models.MemberQuery{ + UserID: 1, + Role: common.RoleProjectAdmin, + GroupIDs: []int{int(groupID)}, + } + projects, err := suite.dao.List(orm.Context(), q.New(q.KeyWords{"member": memberQuery})) + suite.Nil(err) + suite.Len(projects, 2) + }) + }) + } +} + +func (suite *MysqlDaoTestSuite) TestListRoles() { + { + // only projectAdmin + suite.WithUser(func(userID int64, username string) { + project := &models.Project{ + Name: utils.GenerateRandomString(), + OwnerID: int(userID), + } + projectID, err := suite.dao.Create(orm.Context(), project) + suite.Nil(err) + defer suite.dao.Delete(orm.Context(), projectID) + + roles, err := suite.dao.ListRoles(orm.Context(), projectID, int(userID)) + suite.Nil(err) + suite.Len(roles, 1) + suite.Contains(roles, common.RoleProjectAdmin) + }) + } + + { + // projectAdmin and user groups + suite.WithUser(func(userID int64, username string) { + project := &models.Project{ + Name: utils.GenerateRandomString(), + OwnerID: int(userID), + } + projectID, err := suite.dao.Create(orm.Context(), project) + suite.Nil(err) + + defer suite.dao.Delete(orm.Context(), projectID) + + suite.WithUserGroup(func(groupID int64, groupName string) { + + o, err := orm.FromContext(orm.Context()) + if err != nil { + suite.Fail("got error %v", err) + } + + var pid int64 + + sql := "INSERT INTO project_member (project_id, entity_id, role, entity_type) values (?, ?, ?, ?)" + res, err := o.Raw(sql, projectID, groupID, common.RoleGuest, "g").Exec() + suite.Nil(err) + insertID, err := res.LastInsertId() + selectPidSQL := "SELECT project_id FROM project_member WHERE id = ?" 
+ suite.Nil(o.Raw(selectPidSQL, insertID).QueryRow(&pid)) + + defer o.Raw("DELETE FROM project_member WHERE id = ?", pid) + + roles, err := suite.dao.ListRoles(orm.Context(), projectID, int(userID), int(groupID)) + suite.Nil(err) + suite.Len(roles, 2) + suite.Contains(roles, common.RoleProjectAdmin) + suite.Contains(roles, common.RoleGuest) + }) + }) + } +} + +func TestMysqlDaoTestSuite(t *testing.T) { + if utils.IsDBPostgresql() { + return + } + suite.Run(t, &MysqlDaoTestSuite{}) +} diff --git a/src/pkg/quota/dao/dao_test.go b/src/pkg/quota/dao/dao_test.go index eddd66b3b367..94845462bf14 100644 --- a/src/pkg/quota/dao/dao_test.go +++ b/src/pkg/quota/dao/dao_test.go @@ -15,6 +15,7 @@ package dao import ( + "github.com/goharbor/harbor/src/common/utils" "testing" "github.com/google/uuid" @@ -229,5 +230,8 @@ func (suite *DaoTestSuite) TestList() { } func TestDaoTestSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &DaoTestSuite{}) } diff --git a/src/pkg/quota/dao/mysql_dao.go b/src/pkg/quota/dao/mysql_dao.go new file mode 100644 index 000000000000..7dcc69d442f7 --- /dev/null +++ b/src/pkg/quota/dao/mysql_dao.go @@ -0,0 +1,81 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "fmt" + + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/quota/models" +) + +// NewMysqlDao returns an instance of the mysql DAO +func NewMysqlDao() DAO { + return &mysqlDao{} +} + +type mysqlDao struct { + *dao +} + +func (d *mysqlDao) List(ctx context.Context, query *q.Query) ([]*models.Quota, error) { + o, err := orm.FromContext(ctx) + if err != nil { + return nil, err + } + + condition, params := listConditions(query) + + sql := fmt.Sprintf(` +SELECT + a.id, + a.reference, + a.reference_id, + a.hard, + a.version as hard_version, + b.used, + b.version as used_version, + b.creation_time, + b.update_time +FROM + quota AS a + JOIN quota_usage AS b ON a.id = b.id %s`, condition) + + orderBy := listOrderByForMysql(query) + if orderBy != "" { + sql += ` order by ` + orderBy + } + + if query != nil { + page, size := query.PageNumber, query.PageSize + if size > 0 { + sql += ` limit ?` + params = append(params, size) + if page > 0 { + sql += ` offset ?` + params = append(params, size*(page-1)) + } + } + } + + var quotas []*models.Quota + if _, err := o.Raw(sql, params).QueryRows(&quotas); err != nil { + return nil, err + } + + return quotas, nil +} diff --git a/src/pkg/quota/dao/mysql_dao_test.go b/src/pkg/quota/dao/mysql_dao_test.go new file mode 100644 index 000000000000..9c703bb20fca --- /dev/null +++ b/src/pkg/quota/dao/mysql_dao_test.go @@ -0,0 +1,236 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "github.com/goharbor/harbor/src/common/utils" + "testing" + + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/quota/types" + htesting "github.com/goharbor/harbor/src/testing" + "github.com/google/uuid" + "github.com/stretchr/testify/suite" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO +} + +func (suite *MysqlDaoTestSuite) SetupSuite() { + suite.Suite.SetupSuite() + suite.Suite.ClearSQLs = []string{ + "DELETE FROM quota WHERE id > 1", + "DELETE FROM quota_usage WHERE id > 1", + } + suite.dao = NewMysqlDao() +} + +func (suite *MysqlDaoTestSuite) TestCount() { + suite.Suite.TearDownSuite() // Clean other quotas + + reference := uuid.New().String() + hardLimits := types.ResourceList{types.ResourceStorage: 100} + usage := types.ResourceList{types.ResourceStorage: 0} + + ctx := suite.Context() + + suite.dao.Create(ctx, reference, "1", types.ResourceList{types.ResourceStorage: 200}, usage) + suite.dao.Create(ctx, reference, "2", hardLimits, usage) + suite.dao.Create(ctx, reference, "3", hardLimits, usage) + suite.dao.Create(ctx, uuid.New().String(), "4", types.ResourceList{types.ResourceStorage: 10}, usage) + + { + // Count all the quotas + count, err := suite.dao.Count(ctx, nil) + suite.Nil(err) + suite.Equal(int64(5), count) // 4 + library project quota + } + + { + // Count quotas filter by reference + count, err := suite.dao.Count(ctx, q.New(q.KeyWords{"reference": reference})) + suite.Nil(err) + suite.Equal(int64(3), count) + } + + { + // Count quotas filter by reference ids + count, err := suite.dao.Count(ctx, q.New(q.KeyWords{"reference": reference, "reference_ids": []string{"1", "2"}})) + suite.Nil(err) + suite.Equal(int64(2), count) + } +} + +func (suite *MysqlDaoTestSuite) TestCreate() { + hardLimits := types.ResourceList{types.ResourceStorage: 100} + usage := 
types.ResourceList{types.ResourceStorage: 0} + id, err := suite.dao.Create(suite.Context(), "project", "2", hardLimits, usage) + suite.Nil(err) + + q, err := suite.dao.Get(suite.Context(), id) + if suite.Nil(err) { + hard, _ := q.GetHard() + used, _ := q.GetUsed() + + suite.Equal(hardLimits, hard) + suite.Equal(usage, used) + } +} + +func (suite *MysqlDaoTestSuite) TestDelete() { + hardLimits := types.ResourceList{types.ResourceStorage: 100} + usage := types.ResourceList{types.ResourceStorage: 0} + + id, err := suite.dao.Create(suite.Context(), "project", "3", hardLimits, usage) + suite.Nil(err) + + { + q, err := suite.dao.Get(suite.Context(), id) + suite.Nil(err) + suite.NotNil(q) + } + + suite.Nil(suite.dao.Delete(suite.Context(), id)) + + { + _, err := suite.dao.Get(suite.Context(), id) + suite.Error(err) + } +} + +func (suite *MysqlDaoTestSuite) TestGetByRef() { + hardLimits := types.ResourceList{types.ResourceStorage: 100} + usage := types.ResourceList{types.ResourceStorage: 0} + + reference, referenceID := "project", "4" + id, err := suite.dao.Create(suite.Context(), reference, referenceID, hardLimits, usage) + suite.Nil(err) + + { + q, err := suite.dao.GetByRef(suite.Context(), reference, referenceID) + suite.Nil(err) + suite.NotNil(q) + } + + suite.Nil(suite.dao.Delete(suite.Context(), id)) + + { + _, err := suite.dao.GetByRef(suite.Context(), reference, referenceID) + suite.Error(err) + } +} + +func (suite *MysqlDaoTestSuite) TestUpdate() { + hardLimits := types.ResourceList{types.ResourceStorage: 100} + usage := types.ResourceList{types.ResourceStorage: 0} + + id, err := suite.dao.Create(suite.Context(), "project", "6", hardLimits, usage) + suite.Nil(err) + + newHardLimits := types.ResourceList{types.ResourceStorage: 200} + newUsage := types.ResourceList{types.ResourceStorage: 1} + + { + q, err := suite.dao.Get(suite.Context(), id) + if suite.Nil(err) { + q.SetHard(newHardLimits) + + suite.Nil(suite.dao.Update(suite.Context(), q)) + } + } + + { + q, err 
:= suite.dao.Get(suite.Context(), id) + if suite.Nil(err) { + q.SetUsed(newUsage) + + suite.Nil(suite.dao.Update(suite.Context(), q)) + } + } + + { + q, err := suite.dao.Get(suite.Context(), id) + if suite.Nil(err) { + hard, _ := q.GetHard() + used, _ := q.GetUsed() + + suite.Equal(newHardLimits, hard) + suite.Equal(newUsage, used) + } + } +} + +func (suite *MysqlDaoTestSuite) TestList() { + suite.Suite.TearDownSuite() // Clean other quotas + + reference := uuid.New().String() + hardLimits := types.ResourceList{types.ResourceStorage: 100} + usage := types.ResourceList{types.ResourceStorage: 0} + + ctx := suite.Context() + + suite.dao.Create(ctx, reference, "1", types.ResourceList{types.ResourceStorage: 200}, usage) + suite.dao.Create(ctx, reference, "2", hardLimits, usage) + suite.dao.Create(ctx, reference, "3", hardLimits, usage) + suite.dao.Create(ctx, uuid.New().String(), "4", types.ResourceList{types.ResourceStorage: 10}, usage) + + { + // List all the quotas + quotas, err := suite.dao.List(ctx, nil) + suite.Nil(err) + suite.Equal(5, len(quotas)) // 4 + library project quota + suite.NotEqual(reference, quotas[0].Reference) + suite.Equal("4", quotas[0].ReferenceID) + } + + { + // List quotas filter by reference + quotas, err := suite.dao.List(ctx, q.New(q.KeyWords{"reference": reference})) + suite.Nil(err) + suite.Equal(3, len(quotas)) + } + + { + // List quotas filter by reference ids + quotas, err := suite.dao.List(ctx, q.New(q.KeyWords{"reference": reference, "reference_ids": []string{"1", "2"}})) + suite.Nil(err) + suite.Equal(2, len(quotas)) + } + + { + // List quotas by pagination + quotas, err := suite.dao.List(ctx, &q.Query{PageSize: 2}) + suite.Nil(err) + suite.Equal(2, len(quotas)) + } + + { + // List quotas by sorting + quotas, err := suite.dao.List(ctx, &q.Query{Keywords: q.KeyWords{"reference": reference}, Sorting: "-hard.storage"}) + suite.Nil(err) + suite.Equal(reference, quotas[0].Reference) + suite.Equal("1", quotas[0].ReferenceID) + } + +} + 
+func TestMysqlDaoTestSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, &MysqlDaoTestSuite{}) +} diff --git a/src/pkg/quota/dao/util.go b/src/pkg/quota/dao/util.go index 467c939136c0..bf94399f4d22 100644 --- a/src/pkg/quota/dao/util.go +++ b/src/pkg/quota/dao/util.go @@ -84,6 +84,11 @@ func castQuantity(field string) string { return fmt.Sprintf("CAST( (CASE WHEN (%[1]s) IS NULL THEN '0' WHEN (%[1]s) = '-1' THEN '9223372036854775807' ELSE (%[1]s) END) AS BIGINT )", field) } +func castQuantityFormysql(field string) string { + // cast -1 to max int64 when order by field + return fmt.Sprintf("CAST( (CASE WHEN %[1]s IS NULL THEN '0' WHEN %[1]s = '-1' THEN '9223372036854775807' ELSE %[1]s END) AS UNSIGNED )", field) +} + func listOrderBy(query *q.Query) string { orderBy := "b.creation_time DESC" @@ -115,3 +120,35 @@ func listOrderBy(query *q.Query) string { return orderBy } + +func listOrderByForMysql(query *q.Query) string { + orderBy := "b.creation_time DESC" + + if query != nil && query.Sorting != "" { + if val, ok := quotaOrderMap[query.Sorting]; ok { + orderBy = val + } else { + sort := query.Sorting + + order := "ASC" + if sort[0] == '-' { + order = "DESC" + sort = sort[1:] + } + + prefixes := []string{"hard.", "used."} + for _, prefix := range prefixes { + if strings.HasPrefix(sort, prefix) { + resource := strings.TrimPrefix(sort, prefix) + if types.IsValidResource(types.ResourceName(resource)) { + field := fmt.Sprintf("json_extract(%s, '$.%s')", strings.TrimSuffix(prefix, "."), resource) + orderBy = fmt.Sprintf("(%s) %s", castQuantityFormysql(field), order) + break + } + } + } + } + } + + return orderBy +} diff --git a/src/pkg/quota/dao/util_test.go b/src/pkg/quota/dao/util_test.go index 7a26df194bde..de9d8a42ef86 100644 --- a/src/pkg/quota/dao/util_test.go +++ b/src/pkg/quota/dao/util_test.go @@ -50,3 +50,34 @@ func Test_listOrderBy(t *testing.T) { }) } } + +func Test_listOrderByForMysql(t *testing.T) { + query := func(sort string) 
*q.Query { + return &q.Query{ + Sorting: sort, + } + } + + type args struct { + query *q.Query + } + tests := []struct { + name string + args args + want string + }{ + {"no query", args{nil}, "b.creation_time DESC"}, + {"order by unsupported field", args{query("unknown")}, "b.creation_time DESC"}, + {"order by storage of hard", args{query("hard.storage")}, "(CAST( (CASE WHEN json_extract(hard, '$.storage') IS NULL THEN '0' WHEN json_extract(hard, '$.storage') = '-1' THEN '9223372036854775807' ELSE json_extract(hard, '$.storage') END) AS UNSIGNED )) ASC"}, + {"order by unsupported hard resource", args{query("hard.unknown")}, "b.creation_time DESC"}, + {"order by storage of used", args{query("used.storage")}, "(CAST( (CASE WHEN json_extract(used, '$.storage') IS NULL THEN '0' WHEN json_extract(used, '$.storage') = '-1' THEN '9223372036854775807' ELSE json_extract(used, '$.storage') END) AS UNSIGNED )) ASC"}, + {"order by unsupported used resource", args{query("used.unknown")}, "b.creation_time DESC"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := listOrderByForMysql(tt.args.query); got != tt.want { + t.Errorf("listOrderBy() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/pkg/quota/manager.go b/src/pkg/quota/manager.go index e2a3ca901579..78625b407975 100644 --- a/src/pkg/quota/manager.go +++ b/src/pkg/quota/manager.go @@ -17,6 +17,7 @@ package quota import ( "context" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/quota/dao" @@ -109,5 +110,12 @@ func (m *manager) List(ctx context.Context, query *q.Query) ([]*Quota, error) { // NewManager returns quota manager func NewManager() Manager { + switch { + case utils.IsDBPostgresql(): + return &manager{dao: dao.New()} + case utils.IsDBMysql(): + return &manager{dao: dao.NewMysqlDao()} + } + return &manager{dao: dao.New()} } diff --git 
a/src/pkg/reg/dao/dao_test.go b/src/pkg/reg/dao/dao_test.go index 890a4a26c8dd..fca08a420d4c 100644 --- a/src/pkg/reg/dao/dao_test.go +++ b/src/pkg/reg/dao/dao_test.go @@ -36,7 +36,7 @@ type daoTestSuite struct { func (d *daoTestSuite) SetupSuite() { d.dao = NewDAO() - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) } diff --git a/src/pkg/replication/dao/dao_test.go b/src/pkg/replication/dao/dao_test.go index cf6b87a39557..5a613acc1087 100644 --- a/src/pkg/replication/dao/dao_test.go +++ b/src/pkg/replication/dao/dao_test.go @@ -37,7 +37,7 @@ type daoTestSuite struct { func (d *daoTestSuite) SetupSuite() { d.dao = NewDAO() - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) } diff --git a/src/pkg/repository/dao/dao_test.go b/src/pkg/repository/dao/dao_test.go index f00a48c8da1a..332e98063316 100644 --- a/src/pkg/repository/dao/dao_test.go +++ b/src/pkg/repository/dao/dao_test.go @@ -51,7 +51,7 @@ func (d *daoTestSuite) SetupSuite() { d.dao = New() d.tagDao = tag_dao.New() d.afDao = af_dao.New() - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) } diff --git a/src/pkg/retention/dao/retention_test.go b/src/pkg/retention/dao/retention_test.go index 6ed92c8e6c12..58549f0f073b 100644 --- a/src/pkg/retention/dao/retention_test.go +++ b/src/pkg/retention/dao/retention_test.go @@ -17,7 +17,7 @@ import ( ) func TestMain(m *testing.M) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() os.Exit(m.Run()) } diff --git a/src/pkg/retention/manager_test.go b/src/pkg/retention/manager_test.go index 26edc51439de..22a1060216db 100644 --- a/src/pkg/retention/manager_test.go +++ b/src/pkg/retention/manager_test.go @@ -14,7 +14,7 @@ import ( ) func TestMain(m *testing.M) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() os.Exit(m.Run()) } diff --git 
a/src/pkg/retention/policy/action/performer_test.go b/src/pkg/retention/policy/action/performer_test.go index 8023eb3b1006..f36daa5e7275 100644 --- a/src/pkg/retention/policy/action/performer_test.go +++ b/src/pkg/retention/policy/action/performer_test.go @@ -69,7 +69,7 @@ func (suite *TestPerformerSuite) SetupSuite() { suite.oldClient = dep.DefaultClient dep.DefaultClient = &fakeRetentionClient{} - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() } // TearDownSuite ... diff --git a/src/pkg/retention/policy/alg/or/processor_test.go b/src/pkg/retention/policy/alg/or/processor_test.go index 265a24804b37..e6335fbb2aa4 100644 --- a/src/pkg/retention/policy/alg/or/processor_test.go +++ b/src/pkg/retention/policy/alg/or/processor_test.go @@ -53,7 +53,7 @@ func TestProcessor(t *testing.T) { // SetupSuite ... func (suite *ProcessorTestSuite) SetupSuite() { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() suite.all = []*selector.Candidate{ { Namespace: "library", diff --git a/src/pkg/retention/policy/builder_test.go b/src/pkg/retention/policy/builder_test.go index 8748d2ad7224..19ee5f680244 100644 --- a/src/pkg/retention/policy/builder_test.go +++ b/src/pkg/retention/policy/builder_test.go @@ -54,7 +54,7 @@ func TestBuilder(t *testing.T) { // SetupSuite prepares the testing content if needed func (suite *TestBuilderSuite) SetupSuite() { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() suite.all = []*selector.Candidate{ { NamespaceID: 1, diff --git a/src/pkg/scan/export/manager.go b/src/pkg/scan/export/manager.go index 5b74abc13c5f..775159d8c4b9 100644 --- a/src/pkg/scan/export/manager.go +++ b/src/pkg/scan/export/manager.go @@ -8,6 +8,7 @@ import ( beego_orm "github.com/beego/beego/orm" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/lib/orm" q2 "github.com/goharbor/harbor/src/lib/q" ) @@ -52,6 +53,41 @@ group by vulnerability_record.package_version, vulnerability_record.fixed_version, 
to_jsonb(vulnerability_record.vendor_attributes), + scanner_registration.id + ` + VulnScanReportQueryTemplateMysql = ` +select + artifact.digest as artifact_digest, + artifact.repository_id, + artifact.repository_name, + vulnerability_record.cve_id, + vulnerability_record.package, + vulnerability_record.severity, + vulnerability_record.cwe_ids, + vulnerability_record.package_version, + vulnerability_record.fixed_version, + json_extract(vulnerability_record.vendor_attributes, '$') as vendor_attributes, + scanner_registration.name as scanner_name +from + report_vulnerability_record + inner join scan_report on report_vulnerability_record.report_uuid = scan_report.uuid + inner join artifact on scan_report.digest = artifact.digest + left outer join artifact_reference on artifact.id = artifact_reference.child_id + inner join vulnerability_record on report_vulnerability_record.vuln_record_id = vulnerability_record.id + inner join scanner_registration on scan_report.registration_uuid = scanner_registration.uuid +and artifact.id in (%s) + +group by + package, + vulnerability_record.severity, + vulnerability_record.cve_id, + artifact.digest, + artifact.repository_id, + artifact.repository_name, + vulnerability_record.cwe_ids, + vulnerability_record.package_version, + vulnerability_record.fixed_version, + json_extract(vulnerability_record.vendor_attributes, '$'), scanner_registration.id ` JobModeExport = "export" @@ -134,7 +170,7 @@ func (em *exportManager) buildQuery(ctx context.Context, params Params) (beego_o } } - sql := fmt.Sprintf(VulnScanReportQueryTemplate, artIDs) + sql := getVulnScanReportQueryTemplate(artIDs) ormer, err := orm.FromContext(ctx) if err != nil { return nil, err @@ -153,3 +189,11 @@ func (em *exportManager) buildQuery(ctx context.Context, params Params) (beego_o // user can open ORM_DEBUG for log the sql return ormer.Raw(query, pageLimits), nil } + +func getVulnScanReportQueryTemplate(artIDs string) string { + sql := 
fmt.Sprintf(VulnScanReportQueryTemplate, artIDs) + if utils.IsDBMysql() { + sql = fmt.Sprintf(VulnScanReportQueryTemplateMysql, artIDs) + } + return sql +} diff --git a/src/pkg/scan/export/manager_test.go b/src/pkg/scan/export/manager_test.go index 45c288a11679..fa852147af16 100644 --- a/src/pkg/scan/export/manager_test.go +++ b/src/pkg/scan/export/manager_test.go @@ -204,6 +204,7 @@ func (suite *ExportManagerSuite) cleanUpAdditionalData(reportID string, scannerI } func (suite *ExportManagerSuite) setupTestData() { + timeNow := time.Now() // create repositories repoRecord := &model.RepoRecord{ Name: "library/ubuntu", @@ -211,8 +212,8 @@ func (suite *ExportManagerSuite) setupTestData() { Description: "", PullCount: 1, StarCount: 0, - CreationTime: time.Time{}, - UpdateTime: time.Time{}, + CreationTime: timeNow, + UpdateTime: timeNow, } repoId, err := suite.repositoryDao.Create(suite.Context(), repoRecord) suite.NoError(err) @@ -230,8 +231,8 @@ func (suite *ExportManagerSuite) setupTestData() { Digest: "sha256:e3d7ff9efd8431d9ef39a144c45992df5502c995b9ba3c53ff70c5b52a848d9c", Size: 28573056, Icon: "", - PushTime: time.Time{}, - PullTime: time.Time{}.Add(-10 * time.Minute), + PushTime: timeNow, + PullTime: timeNow.Add(-10 * time.Minute), ExtraAttrs: `{"architecture":"amd64","author":"","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/bash"]},"created":"2021-03-04T02:24:42.927713926Z","os":"linux"}`, Annotations: "", } @@ -244,8 +245,8 @@ func (suite *ExportManagerSuite) setupTestData() { RepositoryID: repoId, ArtifactID: artId, Name: "latest", - PushTime: time.Time{}, - PullTime: time.Time{}, + PushTime: timeNow, + PullTime: timeNow, } tagId, err := suite.tagDao.Create(suite.Context(), t) suite.NoError(err) @@ -272,8 +273,8 @@ func (suite *ExportManagerSuite) setupTestData() { Level: "", Scope: "", ProjectID: 1, - CreationTime: time.Time{}, - UpdateTime: time.Time{}, + CreationTime: timeNow, + UpdateTime: timeNow, 
Deleted: false, } labelId, err := suite.labelDao.Create(suite.Context(), &l) @@ -284,8 +285,8 @@ func (suite *ExportManagerSuite) setupTestData() { ID: 0, LabelID: labelId, ArtifactID: artId, - CreationTime: time.Time{}, - UpdateTime: time.Time{}, + CreationTime: timeNow, + UpdateTime: timeNow, } lRefId, err := suite.labelDao.CreateReference(suite.Context(), &lRef) suite.NoError(err) diff --git a/src/pkg/scan/report/manager_test.go b/src/pkg/scan/report/manager_test.go index f1dd6e800e62..3f2f9b02500d 100644 --- a/src/pkg/scan/report/manager_test.go +++ b/src/pkg/scan/report/manager_test.go @@ -15,6 +15,7 @@ package report import ( + "strings" "testing" "github.com/stretchr/testify/suite" @@ -41,7 +42,7 @@ func TestManager(t *testing.T) { // SetupSuite prepares test env for suite TestManagerSuite. func (suite *TestManagerSuite) SetupSuite() { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() suite.m = NewManager() } @@ -92,7 +93,7 @@ func (suite *TestManagerSuite) TestManagerUpdateReportData() { suite.Require().NoError(err) suite.Require().Equal(1, len(l)) - suite.Equal("{\"a\":1000}", l[0].Report) + suite.Equal("{\"a\":1000}", strings.ReplaceAll(l[0].Report, " ", "")) } // TestManagerDeleteByDigests ... 
diff --git a/src/pkg/scheduler/dao_test.go b/src/pkg/scheduler/dao_test.go index e2503e8a74ad..e9dfa30a1b79 100644 --- a/src/pkg/scheduler/dao_test.go +++ b/src/pkg/scheduler/dao_test.go @@ -16,6 +16,7 @@ package scheduler import ( "context" + "strings" "testing" "github.com/stretchr/testify/suite" @@ -35,7 +36,7 @@ type daoTestSuite struct { func (d *daoTestSuite) SetupSuite() { d.dao = &dao{} - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.ctx = orm.Context() } @@ -94,7 +95,7 @@ func (d *daoTestSuite) TestGet() { schedule, err = d.dao.Get(d.ctx, d.id) d.Require().Nil(err) d.Equal(d.id, schedule.ID) - d.Equal("{\"key\":\"value\"}", schedule.ExtraAttrs) + d.Equal("{\"key\":\"value\"}", strings.ReplaceAll(schedule.ExtraAttrs, " ", "")) } func (d *daoTestSuite) TestDelete() { diff --git a/src/pkg/systemartifact/cleanupcriteria_test.go b/src/pkg/systemartifact/cleanupcriteria_test.go index 23b22823542f..1ba610293123 100644 --- a/src/pkg/systemartifact/cleanupcriteria_test.go +++ b/src/pkg/systemartifact/cleanupcriteria_test.go @@ -26,7 +26,7 @@ func (suite *defaultCleanupCriteriaTestSuite) SetupSuite() { suite.Suite.SetupSuite() suite.dao = dao.NewSystemArtifactDao() suite.cleanupCriteria = DefaultSelector - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() suite.ctx = orm.Context() sa := model.SystemArtifact{} suite.ClearTables = append(suite.ClearTables, sa.TableName()) diff --git a/src/pkg/systemartifact/dao/dao_test.go b/src/pkg/systemartifact/dao/dao_test.go index b0b9ae2a13a5..5b32ba1e3c00 100644 --- a/src/pkg/systemartifact/dao/dao_test.go +++ b/src/pkg/systemartifact/dao/dao_test.go @@ -25,7 +25,7 @@ type daoTestSuite struct { func (suite *daoTestSuite) SetupSuite() { suite.Suite.SetupSuite() suite.dao = &systemArtifactDAO{} - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() suite.ctx = orm.Context() sa := model.SystemArtifact{} suite.ClearTables = append(suite.ClearTables, sa.TableName()) 
diff --git a/src/pkg/tag/dao/dao_test.go b/src/pkg/tag/dao/dao_test.go index 1ab0a03ff51a..b4fb7b6e4f9a 100644 --- a/src/pkg/tag/dao/dao_test.go +++ b/src/pkg/tag/dao/dao_test.go @@ -41,7 +41,7 @@ type daoTestSuite struct { func (d *daoTestSuite) SetupSuite() { d.dao = New() - common_dao.PrepareTestForPostgresSQL() + common_dao.PrepareTestForDB() d.ctx = orm.NewContext(nil, beegoorm.NewOrm()) d.artDAO = artdao.New() artifactID, err := d.artDAO.Create(d.ctx, &artdao.Artifact{ diff --git a/src/pkg/task/dao/execution_mysql.go b/src/pkg/task/dao/execution_mysql.go new file mode 100644 index 000000000000..8971712f0694 --- /dev/null +++ b/src/pkg/task/dao/execution_mysql.go @@ -0,0 +1,237 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/lib/log" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" +) + +// NewExecutionDAO returns an instance of ExecutionDAO +func NewExecutionMysqlDAO() ExecutionDAO { + return &executionMysqlDAO{ + taskDAO: NewTaskMysqlDAO(), + } +} + +type executionMysqlDAO struct { + executionDAO + taskDAO TaskDAO +} + +func (e *executionMysqlDAO) Count(ctx context.Context, query *q.Query) (int64, error) { + if query != nil { + // ignore the page number and size + query = &q.Query{ + Keywords: query.Keywords, + } + } + qs, err := e.querySetter(ctx, query) + if err != nil { + return 0, err + } + return qs.Count() +} + +func (e *executionMysqlDAO) List(ctx context.Context, query *q.Query) ([]*Execution, error) { + executions := []*Execution{} + qs, err := e.querySetter(ctx, query) + if err != nil { + return nil, err + } + if _, err = qs.All(&executions); err != nil { + return nil, err + } + return executions, nil +} + +func (e *executionMysqlDAO) GetMetrics(ctx context.Context, id int64) (*Metrics, error) { + scs, err := e.taskDAO.ListStatusCount(ctx, id) + if err != nil { + return nil, err + } + metrics := &Metrics{} + if len(scs) == 0 { + return metrics, nil + } + + for _, sc := range scs { + switch sc.Status { + case job.SuccessStatus.String(): + metrics.SuccessTaskCount = sc.Count + case job.ErrorStatus.String(): + metrics.ErrorTaskCount = sc.Count + case job.PendingStatus.String(): + metrics.PendingTaskCount = sc.Count + case job.RunningStatus.String(): + metrics.RunningTaskCount = sc.Count + case job.ScheduledStatus.String(): + metrics.ScheduledTaskCount = sc.Count + case job.StoppedStatus.String(): + metrics.StoppedTaskCount = sc.Count + default: + log.Errorf("unknown task status: %s", sc.Status) + } + } + metrics.TaskCount = metrics.SuccessTaskCount + metrics.ErrorTaskCount + + 
metrics.PendingTaskCount + metrics.RunningTaskCount + + metrics.ScheduledTaskCount + metrics.StoppedTaskCount + return metrics, nil +} + +func (e *executionMysqlDAO) RefreshStatus(ctx context.Context, id int64) (bool, string, error) { + // as the status of the execution can be refreshed by multiple operators concurrently + // we use the optimistic locking to avoid the conflict and retry 5 times at most + for i := 0; i < 5; i++ { + statusChanged, currentStatus, retry, err := e.refreshStatus(ctx, id) + if err != nil { + return false, "", err + } + if !retry { + return statusChanged, currentStatus, nil + } + } + return false, "", fmt.Errorf("failed to refresh the status of the execution %d after %d retries", id, 5) +} + +// the returning values: +// 1. bool: is the status changed +// 2. string: the current status if changed +// 3. bool: whether a retry is needed +// 4. error: the error +func (e *executionMysqlDAO) refreshStatus(ctx context.Context, id int64) (bool, string, bool, error) { + execution, err := e.Get(ctx, id) + if err != nil { + return false, "", false, err + } + metrics, err := e.GetMetrics(ctx, id) + if err != nil { + return false, "", false, err + } + // no task, return directly + if metrics.TaskCount == 0 { + return false, "", false, nil + } + + var status string + if metrics.PendingTaskCount > 0 || metrics.RunningTaskCount > 0 || metrics.ScheduledTaskCount > 0 { + status = job.RunningStatus.String() + } else if metrics.ErrorTaskCount > 0 { + status = job.ErrorStatus.String() + } else if metrics.StoppedTaskCount > 0 { + status = job.StoppedStatus.String() + } else if metrics.SuccessTaskCount > 0 { + status = job.SuccessStatus.String() + } + + ormer, err := orm.FromContext(ctx) + if err != nil { + return false, "", false, err + } + + sql := `update execution set status = ?, revision = revision+1, update_time = ? where id = ? 
and revision = ?` + result, err := ormer.Raw(sql, status, time.Now(), id, execution.Revision).Exec() + if err != nil { + return false, "", false, err + } + n, err := result.RowsAffected() + if err != nil { + return false, "", false, err + } + + // if the count of affected rows is 0, that means the execution is updating by others, retry + if n == 0 { + return false, "", true, nil + } + + // update the end time if the status is final, otherwise set the end time as NULL, this is useful + // for retrying jobs + sql = `update execution + set end_time = ( + case + when status='%s' or status='%s' or status='%s' then ( + select max(end_time) + from task + where execution_id=?) + else NULL + end) + where id=?` + sql = fmt.Sprintf(sql, job.ErrorStatus.String(), job.StoppedStatus.String(), job.SuccessStatus.String()) + _, err = ormer.Raw(sql, id, id).Exec() + return status != execution.Status, status, false, err +} + +func (e *executionMysqlDAO) querySetter(ctx context.Context, query *q.Query) (orm.QuerySeter, error) { + qs, err := orm.QuerySetter(ctx, &Execution{}, query) + if err != nil { + return nil, err + } + + // append the filter for "extra attrs" + if query != nil && len(query.Keywords) > 0 { + var ( + key string + keyPrefix string + value interface{} + ) + for key, value = range query.Keywords { + if strings.HasPrefix(key, "ExtraAttrs.") { + keyPrefix = "ExtraAttrs." + break + } + if strings.HasPrefix(key, "extra_attrs.") { + keyPrefix = "extra_attrs." + break + } + } + if len(keyPrefix) == 0 || keyPrefix == key { + return qs, nil + } + + keys := strings.Split(strings.TrimPrefix(key, keyPrefix), ".") + var args []interface{} + + args = append(args, value) + inClause, err := orm.CreateInClause(ctx, buildInClauseSQLForExtraAttrsForMysql(keys), args...) + if err != nil { + return nil, err + } + qs = qs.FilterRaw("id", inClause) + } + + return qs, nil +} + +// Param keys is strings.Split() after trim "extra_attrs."/"ExtraAttrs." 
prefix +func buildInClauseSQLForExtraAttrsForMysql(keys []string) string { + switch len(keys) { + case 0: + // won't fall into this case, as the if condition on "keyPrefix == key" + // act as a place holder to ensure "default" is equivalent to "len(keys) >= 2" + return "" + case 1: + return fmt.Sprintf("select id from execution where json_extract(extra_attrs, '$.%s')=?", keys[0]) + default: + s := strings.Join(keys, ".") + return fmt.Sprintf("select id from execution where json_extract(extra_attrs, '$.%s')=?", s) + } +} diff --git a/src/pkg/task/dao/execution_mysql_test.go b/src/pkg/task/dao/execution_mysql_test.go new file mode 100644 index 000000000000..b27dc134f040 --- /dev/null +++ b/src/pkg/task/dao/execution_mysql_test.go @@ -0,0 +1,358 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "github.com/goharbor/harbor/src/common/utils" + "testing" + "time" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/stretchr/testify/suite" +) + +type executionMysqlDAOTestSuite struct { + suite.Suite + ctx context.Context + executionDAO *executionMysqlDAO + taskDao *taskMysqlDAO + executionID int64 +} + +func (e *executionMysqlDAOTestSuite) SetupSuite() { + dao.PrepareTestForDB() + e.ctx = orm.Context() + e.taskDao = &taskMysqlDAO{} + e.executionDAO = &executionMysqlDAO{ + taskDAO: e.taskDao, + } +} + +func (e *executionMysqlDAOTestSuite) SetupTest() { + id, err := e.executionDAO.Create(e.ctx, &Execution{ + VendorType: "test", + Trigger: "test", + ExtraAttrs: `{"key":"value"}`, + }) + e.Require().Nil(err) + e.executionID = id +} + +func (e *executionMysqlDAOTestSuite) TearDownTest() { + err := e.executionDAO.Delete(e.ctx, e.executionID) + e.Nil(err) +} + +func (e *executionMysqlDAOTestSuite) TestCount() { + count, err := e.executionDAO.Count(e.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "VendorType": "test", + "ExtraAttrs.key": "value", + }, + }) + e.Require().Nil(err) + e.Equal(int64(1), count) + + count, err = e.executionDAO.Count(e.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "VendorType": "test", + "ExtraAttrs.key": "incorrect-value", + }, + }) + e.Require().Nil(err) + e.Equal(int64(0), count) +} + +func (e *executionMysqlDAOTestSuite) TestList() { + executions, err := e.executionDAO.List(e.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "VendorType": "test", + "ExtraAttrs.key": "value", + }, + }) + e.Require().Nil(err) + e.Require().Len(executions, 1) + e.Equal(e.executionID, executions[0].ID) + + executions, err = e.executionDAO.List(e.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "VendorType": 
"test", + "ExtraAttrs.key": "incorrect-value", + }, + }) + e.Require().Nil(err) + e.Require().Len(executions, 0) +} + +func (e *executionMysqlDAOTestSuite) TestGet() { + // not exist + _, err := e.executionDAO.Get(e.ctx, 10000) + e.Require().NotNil(err) + e.True(errors.IsNotFoundErr(err)) + + // exist + execution, err := e.executionDAO.Get(e.ctx, e.executionID) + e.Require().Nil(err) + e.NotNil(execution) +} + +func (e *executionMysqlDAOTestSuite) TestCreate() { + // happy pass is covered by SetupTest +} + +func (e *executionMysqlDAOTestSuite) TestUpdate() { + // not exist + err := e.executionDAO.Update(e.ctx, &Execution{ID: 10000}, "Status") + e.Require().NotNil(err) + e.True(errors.IsNotFoundErr(err)) + + // exist + err = e.executionDAO.Update(e.ctx, &Execution{ + ID: e.executionID, + Status: "failed", + }, "Status") + e.Require().Nil(err) + execution, err := e.executionDAO.Get(e.ctx, e.executionID) + e.Require().Nil(err) + e.Equal("failed", execution.Status) +} + +func (e *executionMysqlDAOTestSuite) TestDelete() { + // not exist + err := e.executionDAO.Delete(e.ctx, 10000) + e.Require().NotNil(err) + e.True(errors.IsNotFoundErr(err)) + + // happy pass is covered by TearDownTest +} + +func (e *executionMysqlDAOTestSuite) TestGetMetrics() { + taskID01, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.SuccessStatus.String(), + StatusCode: job.SuccessStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID01) + + taskID02, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.StoppedStatus.String(), + StatusCode: job.StoppedStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID02) + + taskID03, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.ErrorStatus.String(), + StatusCode: job.ErrorStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, 
taskID03) + + taskID04, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.PendingStatus.String(), + StatusCode: job.PendingStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID04) + + taskID05, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.RunningStatus.String(), + StatusCode: job.RunningStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID05) + + taskID06, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.ScheduledStatus.String(), + StatusCode: job.ScheduledStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID06) + + metrics, err := e.executionDAO.GetMetrics(e.ctx, e.executionID) + e.Require().Nil(err) + e.Equal(int64(6), metrics.TaskCount) + e.Equal(int64(1), metrics.SuccessTaskCount) + e.Equal(int64(1), metrics.StoppedTaskCount) + e.Equal(int64(1), metrics.ErrorTaskCount) + e.Equal(int64(1), metrics.PendingTaskCount) + e.Equal(int64(1), metrics.RunningTaskCount) + e.Equal(int64(1), metrics.ScheduledTaskCount) +} + +func (e *executionMysqlDAOTestSuite) TestRefreshStatus() { + // contains tasks with status: success + taskID01, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.SuccessStatus.String(), + StatusCode: job.SuccessStatus.Code(), + ExtraAttrs: "{}", + EndTime: time.Now(), + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID01) + + statusChanged, currentStatus, err := e.executionDAO.RefreshStatus(e.ctx, e.executionID) + e.Require().Nil(err) + e.True(statusChanged) + e.Equal(job.SuccessStatus.String(), currentStatus) + execution, err := e.executionDAO.Get(e.ctx, e.executionID) + e.Require().Nil(err) + e.Equal(job.SuccessStatus.String(), execution.Status) + e.NotEmpty(execution.EndTime) + + // contains tasks with status: stopped + taskID02, err := e.taskDao.Create(e.ctx, 
&Task{ + ExecutionID: e.executionID, + Status: job.StoppedStatus.String(), + StatusCode: job.StoppedStatus.Code(), + ExtraAttrs: "{}", + EndTime: time.Now(), + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID02) + + statusChanged, currentStatus, err = e.executionDAO.RefreshStatus(e.ctx, e.executionID) + e.Require().Nil(err) + e.True(statusChanged) + e.Equal(job.StoppedStatus.String(), currentStatus) + execution, err = e.executionDAO.Get(e.ctx, e.executionID) + e.Require().Nil(err) + e.Equal(job.StoppedStatus.String(), execution.Status) + e.NotEmpty(execution.EndTime) + + // contains tasks with status: error + taskID03, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.ErrorStatus.String(), + StatusCode: job.ErrorStatus.Code(), + ExtraAttrs: "{}", + EndTime: time.Now(), + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID03) + + statusChanged, currentStatus, err = e.executionDAO.RefreshStatus(e.ctx, e.executionID) + e.Require().Nil(err) + e.True(statusChanged) + e.Equal(job.ErrorStatus.String(), currentStatus) + execution, err = e.executionDAO.Get(e.ctx, e.executionID) + e.Require().Nil(err) + e.Equal(job.ErrorStatus.String(), execution.Status) + e.NotEmpty(execution.EndTime) + + // contains tasks with status: pending, running, scheduled + taskID04, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.PendingStatus.String(), + StatusCode: job.PendingStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID04) + + taskID05, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.RunningStatus.String(), + StatusCode: job.RunningStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID05) + + taskID06, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.ScheduledStatus.String(), + StatusCode: job.ScheduledStatus.Code(), + ExtraAttrs: 
"{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID06) + + statusChanged, currentStatus, err = e.executionDAO.RefreshStatus(e.ctx, e.executionID) + e.Require().Nil(err) + e.True(statusChanged) + e.Equal(job.RunningStatus.String(), currentStatus) + execution, err = e.executionDAO.Get(e.ctx, e.executionID) + e.Require().Nil(err) + e.Equal(job.RunningStatus.String(), execution.Status) + e.Empty(execution.EndTime) + + // add another running task, the status shouldn't be changed + taskID07, err := e.taskDao.Create(e.ctx, &Task{ + ExecutionID: e.executionID, + Status: job.RunningStatus.String(), + StatusCode: job.RunningStatus.Code(), + ExtraAttrs: "{}", + }) + e.Require().Nil(err) + defer e.taskDao.Delete(e.ctx, taskID07) + + statusChanged, currentStatus, err = e.executionDAO.RefreshStatus(e.ctx, e.executionID) + e.Require().Nil(err) + e.False(statusChanged) + e.Equal(job.RunningStatus.String(), currentStatus) + execution, err = e.executionDAO.Get(e.ctx, e.executionID) + e.Require().Nil(err) + e.Equal(job.RunningStatus.String(), execution.Status) + e.Empty(execution.EndTime) +} + +func TestExecutionMysqlDAOSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, &executionMysqlDAOTestSuite{}) +} + +func Test_buildInClauseSqlForExtraAttrsForMysql(t *testing.T) { + type args struct { + keys []string + } + tests := []struct { + name string + args args + want string + }{ + {"extra_attrs.", args{[]string{}}, ""}, + {"extra_attrs.id", args{[]string{"id"}}, "select id from execution where json_extract(extra_attrs, '$.id')=?"}, + {"extra_attrs.artifact.digest", args{[]string{"artifact", "digest"}}, "select id from execution where json_extract(extra_attrs, '$.artifact.digest')=?"}, + {"extra_attrs.a.b.c", args{[]string{"a", "b", "c"}}, "select id from execution where json_extract(extra_attrs, '$.a.b.c')=?"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
buildInClauseSQLForExtraAttrsForMysql(tt.args.keys); got != tt.want { + t.Errorf("buildInClauseSQLForExtraAttrsForMysql() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/src/pkg/task/dao/execution_test.go b/src/pkg/task/dao/execution_test.go index 5e3fd4282773..f62fc8587678 100644 --- a/src/pkg/task/dao/execution_test.go +++ b/src/pkg/task/dao/execution_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/orm" @@ -37,7 +38,7 @@ type executionDAOTestSuite struct { } func (e *executionDAOTestSuite) SetupSuite() { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() e.ctx = orm.Context() e.taskDao = &taskDAO{} e.executionDAO = &executionDAO{ @@ -328,6 +329,9 @@ func (e *executionDAOTestSuite) TestRefreshStatus() { } func TestExecutionDAOSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &executionDAOTestSuite{}) } diff --git a/src/pkg/task/dao/task_mysql.go b/src/pkg/task/dao/task_mysql.go new file mode 100644 index 000000000000..248874fa16fc --- /dev/null +++ b/src/pkg/task/dao/task_mysql.go @@ -0,0 +1,138 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" +) + +// NewTaskDAO returns an instance of TaskDAO +func NewTaskMysqlDAO() TaskDAO { + return &taskMysqlDAO{} +} + +type taskMysqlDAO struct { + *taskDAO +} + +func (t *taskMysqlDAO) Count(ctx context.Context, query *q.Query) (int64, error) { + if query != nil { + // ignore the page number and size + query = &q.Query{ + Keywords: query.Keywords, + } + } + qs, err := t.querySetter(ctx, query) + if err != nil { + return 0, err + } + return qs.Count() +} + +func (t *taskMysqlDAO) List(ctx context.Context, query *q.Query) ([]*Task, error) { + tasks := []*Task{} + qs, err := t.querySetter(ctx, query) + if err != nil { + return nil, err + } + if _, err = qs.All(&tasks); err != nil { + return nil, err + } + return tasks, nil +} + +func (t *taskMysqlDAO) UpdateStatus(ctx context.Context, id int64, status string, statusRevision int64) error { + ormer, err := orm.FromContext(ctx) + if err != nil { + return err + } + + // status revision is the unix timestamp of job starting time, it's changing means a retrying of the job + startTime := time.Unix(statusRevision, 0) + // update run count and start time when status revision changes + sql := `update task set run_count = run_count +1, start_time = ? + where id = ? 
and status_revision < ?` + if _, err = ormer.Raw(sql, startTime, id, statusRevision).Exec(); err != nil { + return err + } + + jobStatus := job.Status(status) + statusCode := jobStatus.Code() + var args []interface{} + now := time.Now() + // when the task is in final status, update the end time + // when the task re-runs again, the end time should be cleared, so set the end time + // to null if the task isn't in final status + if jobStatus.Final() { + endTime := now + args = []interface{}{status, statusCode, statusRevision, now, endTime, + id, statusRevision, statusCode, statusRevision} + sql = `update task set status = ?, status_code = ?, status_revision = ?, update_time = ?, end_time = ? + where id = ? and (status_revision = ? and status_code < ? or status_revision < ?) ` + } else { + args = []interface{}{status, statusCode, statusRevision, now, + id, statusRevision, statusCode, statusRevision} + sql = `update task set status = ?, status_code = ?, status_revision = ?, update_time = ?, end_time = NULL + where id = ? and (status_revision = ? and status_code < ? or status_revision < ?) ` + } + // use raw sql rather than the ORM as the sql generated by ORM isn't a "single" statement + // which means the operation isn't atomic, this will cause issues when running in concurrency + _, err = ormer.Raw(sql, args).Exec() + return err +} + +func (t *taskMysqlDAO) querySetter(ctx context.Context, query *q.Query) (orm.QuerySeter, error) { + qs, err := orm.QuerySetter(ctx, &Task{}, query) + if err != nil { + return nil, err + } + + // append the filter for "extra attrs" + if query != nil && len(query.Keywords) > 0 { + var ( + key string + keyPrefix string + value interface{} + ) + for key, value = range query.Keywords { + if strings.HasPrefix(key, "ExtraAttrs.") { + keyPrefix = "ExtraAttrs." + break + } + if strings.HasPrefix(key, "extra_attrs.") { + keyPrefix = "extra_attrs." 
+ break + } + } + if len(keyPrefix) == 0 { + return qs, nil + } + inClause, err := orm.CreateInClause(ctx, fmt.Sprintf("select id from task where json_extract(extra_attrs, '$.%s') = ?", + strings.TrimPrefix(key, keyPrefix)), value) + if err != nil { + return nil, err + } + qs = qs.FilterRaw("id", inClause) + } + + return qs, nil +} diff --git a/src/pkg/task/dao/task_mysql_test.go b/src/pkg/task/dao/task_mysql_test.go new file mode 100644 index 000000000000..b937a7b16a7d --- /dev/null +++ b/src/pkg/task/dao/task_mysql_test.go @@ -0,0 +1,238 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dao + +import ( + "context" + "testing" + "time" + + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/stretchr/testify/suite" +) + +type taskMysqlDAOTestSuite struct { + suite.Suite + ctx context.Context + taskDAO *taskMysqlDAO + executionDAO *executionMysqlDAO + executionID int64 + taskID int64 +} + +func (t *taskMysqlDAOTestSuite) SetupSuite() { + dao.PrepareTestForDB() + t.ctx = orm.Context() + t.taskDAO = &taskMysqlDAO{} + t.executionDAO = &executionMysqlDAO{} +} + +func (t *taskMysqlDAOTestSuite) SetupTest() { + id, err := t.executionDAO.Create(t.ctx, &Execution{ + VendorType: "test", + Trigger: "test", + ExtraAttrs: "{}", + }) + t.Require().Nil(err) + t.executionID = id + id, err = t.taskDAO.Create(t.ctx, &Task{ + ExecutionID: t.executionID, + Status: "success", + StatusCode: 1, + ExtraAttrs: `{"key":"value"}`, + }) + t.Require().Nil(err) + t.taskID = id +} + +func (t *taskMysqlDAOTestSuite) TearDownTest() { + err := t.taskDAO.Delete(t.ctx, t.taskID) + t.Nil(err) + + err = t.executionDAO.Delete(t.ctx, t.executionID) + t.Nil(err) +} + +func (t *taskMysqlDAOTestSuite) TestCount() { + count, err := t.taskDAO.Count(t.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "ExecutionID": t.executionID, + "ExtraAttrs.key": "value", + }, + }) + t.Require().Nil(err) + t.Equal(int64(1), count) + + count, err = t.taskDAO.Count(t.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "ExecutionID": t.executionID, + "ExtraAttrs.key": "incorrect-value", + }, + }) + t.Require().Nil(err) + t.Equal(int64(0), count) +} + +func (t *taskMysqlDAOTestSuite) TestList() { + tasks, err := t.taskDAO.List(t.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "ExecutionID": t.executionID, + "ExtraAttrs.key": "value", + }, + }) + 
t.Require().Nil(err) + t.Require().Len(tasks, 1) + t.Equal(t.taskID, tasks[0].ID) + + tasks, err = t.taskDAO.List(t.ctx, &q.Query{ + Keywords: map[string]interface{}{ + "ExecutionID": t.executionID, + "ExtraAttrs.key": "incorrect-value", + }, + }) + t.Require().Nil(err) + t.Require().Len(tasks, 0) +} + +func (t *taskMysqlDAOTestSuite) TestGet() { + // not exist + _, err := t.taskDAO.Get(t.ctx, 10000) + t.Require().NotNil(err) + t.True(errors.IsNotFoundErr(err)) + + // exist + task, err := t.taskDAO.Get(t.ctx, t.taskID) + t.Require().Nil(err) + t.NotNil(task) +} + +func (t *taskMysqlDAOTestSuite) TestCreate() { + // reference the non-existing execution + _, err := t.taskDAO.Create(t.ctx, &Task{ + ExecutionID: 10000, + Status: "success", + StatusCode: 1, + ExtraAttrs: "{}", + }) + t.Require().NotNil(err) + t.True(errors.IsErr(err, errors.ViolateForeignKeyConstraintCode)) + + // reference the existing execution is covered by SetupTest +} + +func (t *taskMysqlDAOTestSuite) TestUpdate() { + // not exist + err := t.taskDAO.Update(t.ctx, &Task{ID: 10000}, "Status") + t.Require().NotNil(err) + t.True(errors.IsNotFoundErr(err)) + + // exist + err = t.taskDAO.Update(t.ctx, &Task{ + ID: t.taskID, + Status: "failed", + }, "Status") + t.Require().Nil(err) + task, err := t.taskDAO.Get(t.ctx, t.taskID) + t.Require().Nil(err) + t.Equal("failed", task.Status) +} + +func (t *taskMysqlDAOTestSuite) TestUpdateStatus() { + // update status to running + status := job.RunningStatus.String() + statusRevision := time.Now().Unix() + err := t.taskDAO.UpdateStatus(t.ctx, t.taskID, status, statusRevision) + t.Require().Nil(err) + + task, err := t.taskDAO.Get(t.ctx, t.taskID) + t.Require().Nil(err) + t.Equal(int32(1), task.RunCount) + t.True(time.Unix(statusRevision, 0).Equal(task.StartTime)) + t.Equal(status, task.Status) + t.Equal(job.RunningStatus.Code(), task.StatusCode) + t.Equal(statusRevision, task.StatusRevision) + t.NotEqual(time.Time{}, task.UpdateTime) + t.Equal(time.Time{}, 
task.EndTime) + + // update status to success + status = job.SuccessStatus.String() + err = t.taskDAO.UpdateStatus(t.ctx, t.taskID, status, statusRevision) + t.Require().Nil(err) + + task, err = t.taskDAO.Get(t.ctx, t.taskID) + t.Require().Nil(err) + t.Equal(int32(1), task.RunCount) + t.True(time.Unix(statusRevision, 0).Equal(task.StartTime)) + t.Equal(status, task.Status) + t.Equal(job.SuccessStatus.Code(), task.StatusCode) + t.Equal(statusRevision, task.StatusRevision) + t.NotEqual(time.Time{}, task.EndTime) + + // update status to running again with different revision + status = job.RunningStatus.String() + statusRevision = time.Now().Add(1 * time.Second).Unix() + err = t.taskDAO.UpdateStatus(t.ctx, t.taskID, status, statusRevision) + t.Require().Nil(err) + + task, err = t.taskDAO.Get(t.ctx, t.taskID) + t.Require().Nil(err) + t.Equal(int32(2), task.RunCount) + t.True(time.Unix(statusRevision, 0).Equal(task.StartTime)) + t.Equal(status, task.Status) + t.Equal(job.RunningStatus.Code(), task.StatusCode) + t.Equal(statusRevision, task.StatusRevision) + t.Equal(time.Time{}, task.EndTime) +} + +func (t *taskMysqlDAOTestSuite) TestDelete() { + // not exist + err := t.taskDAO.Delete(t.ctx, 10000) + t.Require().NotNil(err) + t.True(errors.IsNotFoundErr(err)) + + // happy pass is covered by TearDownTest +} + +func (t *taskMysqlDAOTestSuite) TestListStatusCount() { + scs, err := t.taskDAO.ListStatusCount(t.ctx, t.executionID) + t.Require().Nil(err) + t.Require().Len(scs, 1) + t.Equal("success", scs[0].Status) + t.Equal(int64(1), scs[0].Count) +} + +func (t *taskMysqlDAOTestSuite) TestGetMaxEndTime() { + now := time.Now() + err := t.taskDAO.Update(t.ctx, &Task{ + ID: t.taskID, + EndTime: now, + }, "EndTime") + t.Require().Nil(err) + endTime, err := t.taskDAO.GetMaxEndTime(t.ctx, t.executionID) + t.Require().Nil(err) + t.Equal(now.Unix(), endTime.Unix()) +} + +func TestTaskMysqlDAOSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, 
&taskMysqlDAOTestSuite{}) +} diff --git a/src/pkg/task/dao/task_test.go b/src/pkg/task/dao/task_test.go index a6a52e60610c..99a72e1d96ed 100644 --- a/src/pkg/task/dao/task_test.go +++ b/src/pkg/task/dao/task_test.go @@ -21,6 +21,8 @@ import ( "github.com/stretchr/testify/suite" + "github.com/goharbor/harbor/src/common/dao" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/orm" @@ -37,6 +39,7 @@ type taskDAOTestSuite struct { } func (t *taskDAOTestSuite) SetupSuite() { + dao.PrepareTestForDB() t.ctx = orm.Context() t.taskDAO = &taskDAO{} t.executionDAO = &executionDAO{} @@ -229,5 +232,8 @@ func (t *taskDAOTestSuite) TestGetMaxEndTime() { } func TestTaskDAOSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &taskDAOTestSuite{}) } diff --git a/src/pkg/task/execution.go b/src/pkg/task/execution.go index 1f4d2f8a03cd..9bd32a1cfdb5 100644 --- a/src/pkg/task/execution.go +++ b/src/pkg/task/execution.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/lib" "github.com/goharbor/harbor/src/lib/errors" @@ -79,6 +80,25 @@ type ExecutionManager interface { // NewExecutionManager return an instance of the default execution manager func NewExecutionManager() ExecutionManager { + switch { + case utils.IsDBPostgresql(): + return &executionManager{ + executionDAO: dao.NewExecutionDAO(), + taskMgr: Mgr, + taskDAO: dao.NewTaskDAO(), + ormCreator: orm.Crt, + wp: lib.NewWorkerPool(10), + } + case utils.IsDBMysql(): + return &executionManager{ + executionDAO: dao.NewExecutionMysqlDAO(), + taskMgr: Mgr, + taskDAO: dao.NewTaskMysqlDAO(), + ormCreator: orm.Crt, + wp: lib.NewWorkerPool(10), + } + } + return &executionManager{ executionDAO: dao.NewExecutionDAO(), taskMgr: Mgr, diff --git a/src/pkg/task/hook.go 
b/src/pkg/task/hook.go index 4f752e47ad71..979d65363b96 100644 --- a/src/pkg/task/hook.go +++ b/src/pkg/task/hook.go @@ -18,6 +18,7 @@ import ( "context" "fmt" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/log" @@ -32,6 +33,19 @@ var ( // NewHookHandler creates a hook handler instance func NewHookHandler() *HookHandler { + switch { + case utils.IsDBPostgresql(): + return &HookHandler{ + taskDAO: dao.NewTaskDAO(), + executionDAO: dao.NewExecutionDAO(), + } + case utils.IsDBMysql(): + return &HookHandler{ + taskDAO: dao.NewTaskMysqlDAO(), + executionDAO: dao.NewExecutionMysqlDAO(), + } + } + return &HookHandler{ taskDAO: dao.NewTaskDAO(), executionDAO: dao.NewExecutionDAO(), diff --git a/src/pkg/task/task.go b/src/pkg/task/task.go index 6640d77dd4e0..d6d4ffa4c25e 100644 --- a/src/pkg/task/task.go +++ b/src/pkg/task/task.go @@ -18,10 +18,12 @@ import ( "context" "encoding/json" "fmt" + "time" cjob "github.com/goharbor/harbor/src/common/job" "github.com/goharbor/harbor/src/common/job/models" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/lib/config" "github.com/goharbor/harbor/src/lib/log" @@ -60,6 +62,22 @@ type Manager interface { // NewManager creates an instance of the default task manager func NewManager() Manager { + switch { + case utils.IsDBPostgresql(): + return &manager{ + dao: dao.NewTaskDAO(), + execDAO: dao.NewExecutionDAO(), + jsClient: cjob.GlobalClient, + coreURL: config.GetCoreURL(), + } + case utils.IsDBMysql(): + return &manager{ + dao: dao.NewTaskMysqlDAO(), + execDAO: dao.NewExecutionMysqlDAO(), + jsClient: cjob.GlobalClient, + coreURL: config.GetCoreURL(), + } + } return &manager{ dao: dao.NewTaskDAO(), execDAO: dao.NewExecutionDAO(), diff --git a/src/pkg/usergroup/dao/dao_test.go b/src/pkg/usergroup/dao/dao_test.go index 
56f318b9f079..7198b935c21f 100644 --- a/src/pkg/usergroup/dao/dao_test.go +++ b/src/pkg/usergroup/dao/dao_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/suite" + "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/pkg/usergroup/model" htesting "github.com/goharbor/harbor/src/testing" ) @@ -29,6 +30,9 @@ type DaoTestSuite struct { } func (s *DaoTestSuite) SetupSuite() { + if !utils.IsDBPostgresql() { + return + } s.Suite.SetupSuite() s.Suite.ClearTables = []string{"user_group"} s.dao = New() @@ -65,5 +69,8 @@ func (s *DaoTestSuite) TestCRUDUsergroup() { } func TestDaoTestSuite(t *testing.T) { + if !utils.IsDBPostgresql() { + return + } suite.Run(t, &DaoTestSuite{}) } diff --git a/src/pkg/usergroup/dao/mysql_dao.go b/src/pkg/usergroup/dao/mysql_dao.go new file mode 100644 index 000000000000..278833e363d1 --- /dev/null +++ b/src/pkg/usergroup/dao/mysql_dao.go @@ -0,0 +1,67 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dao + +import ( + "context" + "time" + + "github.com/goharbor/harbor/src/common" + "github.com/goharbor/harbor/src/common/utils" + "github.com/goharbor/harbor/src/lib/orm" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/usergroup/model" +) + +// NewMysqlDao ... 
+func NewMysqlDao() DAO {
+	return &mysqlDao{}
+}
+
+type mysqlDao struct {
+	*dao
+}
+
+// Add - Add User Group
+func (d *mysqlDao) Add(ctx context.Context, userGroup model.UserGroup) (int, error) {
+	query := q.New(q.KeyWords{"GroupName": userGroup.GroupName, "GroupType": common.HTTPGroupType})
+	userGroupList, err := d.Query(ctx, query)
+	if err != nil {
+		return 0, err
+	}
+	if len(userGroupList) > 0 {
+		return 0, ErrGroupNameDup
+	}
+	o, err := orm.FromContext(ctx)
+	if err != nil {
+		return 0, err
+	}
+
+	var id int
+	now := time.Now()
+	sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?)"
+
+	res, err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).Exec()
+	if err != nil {
+		return 0, err
+	}
+	insertID, err := res.LastInsertId()
+	if err != nil {
+		return 0, err
+	}
+	id = int(insertID)
+
+	return id, nil
+}
diff --git a/src/pkg/usergroup/dao/mysql_dao_test.go b/src/pkg/usergroup/dao/mysql_dao_test.go
new file mode 100644
index 000000000000..34e0c9e750d0
--- /dev/null
+++ b/src/pkg/usergroup/dao/mysql_dao_test.go
@@ -0,0 +1,73 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package dao + +import ( + "github.com/goharbor/harbor/src/common/utils" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/goharbor/harbor/src/pkg/usergroup/model" + htesting "github.com/goharbor/harbor/src/testing" +) + +type MysqlDaoTestSuite struct { + htesting.Suite + dao DAO +} + +func (s *MysqlDaoTestSuite) SetupSuite() { + s.Suite.SetupSuite() + s.Suite.ClearTables = []string{"user_group"} + s.dao = NewMysqlDao() +} + +func (s *MysqlDaoTestSuite) TestCRUDUsergroup() { + ctx := s.Context() + userGroup := model.UserGroup{ + GroupName: "harbor_dev", + GroupType: 1, + LdapGroupDN: "cn=harbor_dev,ou=groups,dc=example,dc=com", + } + id, err := s.dao.Add(ctx, userGroup) + s.Nil(err) + s.True(id > 0) + + ug, err2 := s.dao.Get(ctx, id) + s.Nil(err2) + s.Equal("harbor_dev", ug.GroupName) + s.Equal("cn=harbor_dev,ou=groups,dc=example,dc=com", ug.LdapGroupDN) + s.Equal(1, ug.GroupType) + + err3 := s.dao.UpdateName(ctx, id, "my_harbor_dev") + s.Nil(err3) + + ug2, err4 := s.dao.Get(ctx, id) + s.Nil(err4) + s.Equal("my_harbor_dev", ug2.GroupName) + s.Equal("cn=harbor_dev,ou=groups,dc=example,dc=com", ug2.LdapGroupDN) + s.Equal(1, ug2.GroupType) + + err5 := s.dao.Delete(ctx, id) + s.Nil(err5) +} + +func TestMysqlDaoTestSuite(t *testing.T) { + if !utils.IsDBMysql() { + return + } + suite.Run(t, &MysqlDaoTestSuite{}) +} diff --git a/src/pkg/usergroup/manager.go b/src/pkg/usergroup/manager.go index 458ed0ff6a1b..f6f3b4c6d589 100644 --- a/src/pkg/usergroup/manager.go +++ b/src/pkg/usergroup/manager.go @@ -58,6 +58,13 @@ type manager struct { } func newManager() Manager { + switch { + case utils.IsDBPostgresql(): + return &manager{dao: dao.New()} + case utils.IsDBMysql(): + return &manager{dao: dao.NewMysqlDao()} + } + return &manager{dao: dao.New()} } diff --git a/src/server/middleware/immutable/pushmf_test.go b/src/server/middleware/immutable/pushmf_test.go index 44def9304e9a..dbe7d0ed64d5 100644 --- a/src/server/middleware/immutable/pushmf_test.go +++ 
b/src/server/middleware/immutable/pushmf_test.go @@ -177,7 +177,7 @@ func (suite *HandlerSuite) TestPutDeleteManifestCreated() { } func TestMain(m *testing.M) { - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() if result := m.Run(); result != 0 { os.Exit(result) diff --git a/src/testing/suite.go b/src/testing/suite.go index 31cd1e0d7a98..d2c0bce9c13b 100644 --- a/src/testing/suite.go +++ b/src/testing/suite.go @@ -55,7 +55,7 @@ type Suite struct { func (suite *Suite) SetupSuite() { once.Do(func() { config.Init() - dao.PrepareTestForPostgresSQL() + dao.PrepareTestForDB() }) } diff --git a/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/README.md b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/README.md new file mode 100644 index 000000000000..c65c1107df5d --- /dev/null +++ b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/README.md @@ -0,0 +1,55 @@ +# MySQL + +`mysql://user:password@tcp(host:port)/dbname?query` + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `x-no-lock` | `NoLock` | Set to `true` to skip `GET_LOCK`/`RELEASE_LOCK` statements. Useful for [multi-master MySQL flavors](https://www.percona.com/doc/percona-xtradb-cluster/LATEST/features/pxc-strict-mode.html#explicit-table-locking). Only run migrations from one host when this is enabled. | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. | +| `port` | | The port to bind to. | +| `tls` | | TLS / SSL encrypted connection parameter; see [go-sql-driver](https://github.com/go-sql-driver/mysql#tls). Use any name (e.g. `migrate`) if you want to use a custom TLS config (`x-tls-` queries). | +| `x-tls-ca` | | The location of the CA (certificate authority) file. 
|
+| `x-tls-cert` | | The location of the client certificate file. Must be used with `x-tls-key`. |
+| `x-tls-key` | | The location of the private key file. Must be used with `x-tls-cert`. |
+| `x-tls-insecure-skip-verify` | | Whether or not to use SSL (true\|false) |
+
+## Use with existing client
+
+If you use the MySQL driver with existing database client, you must create the client with parameter `multiStatements=true`:
+
+```go
+package main
+
+import (
+	"database/sql"
+
+	_ "github.com/go-sql-driver/mysql"
+	"github.com/golang-migrate/migrate"
+	"github.com/golang-migrate/migrate/database/mysql"
+	_ "github.com/golang-migrate/migrate/source/file"
+)
+
+func main() {
+	db, _ := sql.Open("mysql", "user:password@tcp(host:port)/dbname?multiStatements=true")
+	driver, _ := mysql.WithInstance(db, &mysql.Config{})
+	m, _ := migrate.NewWithDatabaseInstance(
+		"file:///migrations",
+		"mysql",
+		driver,
+	)
+
+	m.Steps(2)
+}
+```
+
+## Upgrading from v1
+
+1. Write down the current migration version from schema_migrations
+2. `DROP TABLE schema_migrations`
+3. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration.
+4. Download and install the latest migrate version.
+5. Force the current migration version with `migrate force <current_version>`.
diff --git a/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/mysql.go b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/mysql.go new file mode 100644 index 000000000000..14b15390e29f --- /dev/null +++ b/src/vendor/github.com/golang-migrate/migrate/v4/database/mysql/mysql.go @@ -0,0 +1,494 @@ +//go:build go1.9 +// +build go1.9 + +package mysql + +import ( + "context" + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "go.uber.org/atomic" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "strings" + + "github.com/go-sql-driver/mysql" + "github.com/golang-migrate/migrate/v4/database" + "github.com/hashicorp/go-multierror" +) + +var _ database.Driver = (*Mysql)(nil) // explicit compile time type check + +func init() { + database.Register("mysql", &Mysql{}) +} + +var DefaultMigrationsTable = "schema_migrations" + +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") + ErrTLSCertKeyConfig = fmt.Errorf("To use TLS client authentication, both x-tls-cert and x-tls-key must not be empty") +) + +type Config struct { + MigrationsTable string + DatabaseName string + NoLock bool +} + +type Mysql struct { + // mysql RELEASE_LOCK must be called from the same conn, so + // just do everything over a single conn anyway. 
+ conn *sql.Conn + db *sql.DB + isLocked atomic.Bool + + config *Config +} + +// connection instance must have `multiStatements` set to true +func WithConnection(ctx context.Context, conn *sql.Conn, config *Config) (*Mysql, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := conn.PingContext(ctx); err != nil { + return nil, err + } + + mx := &Mysql{ + conn: conn, + db: nil, + config: config, + } + + if config.DatabaseName == "" { + query := `SELECT DATABASE()` + var databaseName sql.NullString + if err := conn.QueryRowContext(ctx, query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName.String) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName.String + } + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + + return mx, nil +} + +// instance must have `multiStatements` set to true +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + ctx := context.Background() + + if err := instance.Ping(); err != nil { + return nil, err + } + + conn, err := instance.Conn(ctx) + if err != nil { + return nil, err + } + + mx, err := WithConnection(ctx, conn, config) + if err != nil { + return nil, err + } + + mx.db = instance + + return mx, nil +} + +// extractCustomQueryParams extracts the custom query params (ones that start with "x-") from +// mysql.Config.Params (connection parameters) as to not interfere with connecting to MySQL +func extractCustomQueryParams(c *mysql.Config) (map[string]string, error) { + if c == nil { + return nil, ErrNilConfig + } + customQueryParams := map[string]string{} + + for k, v := range c.Params { + if strings.HasPrefix(k, "x-") { + customQueryParams[k] = v + delete(c.Params, k) + } + } + return customQueryParams, nil +} + +func urlToMySQLConfig(url string) (*mysql.Config, 
error) { + // Need to parse out custom TLS parameters and call + // mysql.RegisterTLSConfig() before mysql.ParseDSN() is called + // which consumes the registered tls.Config + // Fixes: https://github.com/golang-migrate/migrate/issues/411 + // + // Can't use url.Parse() since it fails to parse MySQL DSNs + // mysql.ParseDSN() also searches for "?" to find query parameters: + // https://github.com/go-sql-driver/mysql/blob/46351a8/dsn.go#L344 + if idx := strings.LastIndex(url, "?"); idx > 0 { + rawParams := url[idx+1:] + parsedParams, err := nurl.ParseQuery(rawParams) + if err != nil { + return nil, err + } + + ctls := parsedParams.Get("tls") + if len(ctls) > 0 { + if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" { + rootCertPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(parsedParams.Get("x-tls-ca")) + if err != nil { + return nil, err + } + + if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { + return nil, ErrAppendPEM + } + + clientCert := make([]tls.Certificate, 0, 1) + if ccert, ckey := parsedParams.Get("x-tls-cert"), parsedParams.Get("x-tls-key"); ccert != "" || ckey != "" { + if ccert == "" || ckey == "" { + return nil, ErrTLSCertKeyConfig + } + certs, err := tls.LoadX509KeyPair(ccert, ckey) + if err != nil { + return nil, err + } + clientCert = append(clientCert, certs) + } + + insecureSkipVerify := false + insecureSkipVerifyStr := parsedParams.Get("x-tls-insecure-skip-verify") + if len(insecureSkipVerifyStr) > 0 { + x, err := strconv.ParseBool(insecureSkipVerifyStr) + if err != nil { + return nil, err + } + insecureSkipVerify = x + } + + err = mysql.RegisterTLSConfig(ctls, &tls.Config{ + RootCAs: rootCertPool, + Certificates: clientCert, + InsecureSkipVerify: insecureSkipVerify, + }) + if err != nil { + return nil, err + } + } + } + } + + config, err := mysql.ParseDSN(strings.TrimPrefix(url, "mysql://")) + if err != nil { + return nil, err + } + + config.MultiStatements = true + + // Keep backwards compatibility 
from when we used net/url.Parse() to parse the DSN. + // net/url.Parse() would automatically unescape it for us. + // See: https://play.golang.org/p/q9j1io-YICQ + user, err := nurl.QueryUnescape(config.User) + if err != nil { + return nil, err + } + config.User = user + + password, err := nurl.QueryUnescape(config.Passwd) + if err != nil { + return nil, err + } + config.Passwd = password + + return config, nil +} + +func (m *Mysql) Open(url string) (database.Driver, error) { + config, err := urlToMySQLConfig(url) + if err != nil { + return nil, err + } + + customParams, err := extractCustomQueryParams(config) + if err != nil { + return nil, err + } + + noLockParam, noLock := customParams["x-no-lock"], false + if noLockParam != "" { + noLock, err = strconv.ParseBool(noLockParam) + if err != nil { + return nil, fmt.Errorf("could not parse x-no-lock as bool: %w", err) + } + } + + db, err := sql.Open("mysql", config.FormatDSN()) + if err != nil { + return nil, err + } + + mx, err := WithInstance(db, &Config{ + DatabaseName: config.DBName, + MigrationsTable: customParams["x-migrations-table"], + NoLock: noLock, + }) + if err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Close() error { + connErr := m.conn.Close() + var dbErr error + if m.db != nil { + dbErr = m.db.Close() + } + + if connErr != nil || dbErr != nil { + return fmt.Errorf("conn: %v, db: %v", connErr, dbErr) + } + return nil +} + +func (m *Mysql) Lock() error { + return database.CasRestoreOnErr(&m.isLocked, false, true, database.ErrLocked, func() error { + if m.config.NoLock { + return nil + } + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := "SELECT GET_LOCK(?, 10)" + var success bool + if err := m.conn.QueryRowContext(context.Background(), query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + 
} + + if !success { + return database.ErrLocked + } + + return nil + }) +} + +func (m *Mysql) Unlock() error { + return database.CasRestoreOnErr(&m.isLocked, true, false, database.ErrNotLocked, func() error { + if m.config.NoLock { + return nil + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := `SELECT RELEASE_LOCK(?)` + if _, err := m.conn.ExecContext(context.Background(), query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + // NOTE: RELEASE_LOCK could return NULL or (or 0 if the code is changed), + // in which case isLocked should be true until the timeout expires -- synchronizing + // these states is likely not worth trying to do; reconsider the necessity of isLocked. + + return nil + }) +} + +func (m *Mysql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + query := string(migr[:]) + if _, err := m.conn.ExecContext(context.Background(), query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (m *Mysql) SetVersion(version int, dirty bool) error { + tx, err := m.conn.BeginTx(context.Background(), &sql.TxOptions{}) + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE `" + m.config.MigrationsTable + "`" + if _, err := tx.ExecContext(context.Background(), query); err != nil { + if errRollback := tx.Rollback(); errRollback != nil { + err = multierror.Append(err, errRollback) + } + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + // Also re-write the schema version for nil dirty versions to prevent + // empty schema version for failed down migration on the first migration + // See: https://github.com/golang-migrate/migrate/issues/330 + if version >= 0 || (version == database.NilVersion && 
dirty) { + query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)" + if _, err := tx.ExecContext(context.Background(), query, version, dirty); err != nil { + if errRollback := tx.Rollback(); errRollback != nil { + err = multierror.Append(err, errRollback) + } + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Mysql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1" + err = m.conn.QueryRowContext(context.Background(), query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*mysql.MySQLError); ok { + if e.Number == 0 { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (m *Mysql) Drop() (err error) { + // select all tables + query := `SHOW TABLES LIKE '%'` + tables, err := m.conn.QueryContext(context.Background(), query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer func() { + if errClose := tables.Close(); errClose != nil { + err = multierror.Append(err, errClose) + } + }() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + if err := tables.Err(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(tableNames) > 0 { + // disable checking foreign key constraints until finished + query = `SET foreign_key_checks = 0` + if _, err := m.conn.ExecContext(context.Background(), query); err != 
nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + defer func() { + // enable foreign key checks + _, _ = m.conn.ExecContext(context.Background(), `SET foreign_key_checks = 1`) + }() + + // delete one by one ... + for _, t := range tableNames { + query = "DROP TABLE IF EXISTS `" + t + "`" + if _, err := m.conn.ExecContext(context.Background(), query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + } + + return nil +} + +// ensureVersionTable checks if versions table exists and, if not, creates it. +// Note that this function locks the database, which deviates from the usual +// convention of "caller locks" in the Mysql type. +func (m *Mysql) ensureVersionTable() (err error) { + if err = m.Lock(); err != nil { + return err + } + + defer func() { + if e := m.Unlock(); e != nil { + if err == nil { + err = e + } else { + err = multierror.Append(err, e) + } + } + }() + + // check if migration table exists + var result string + query := `SHOW TABLES LIKE '` + m.config.MigrationsTable + `'` + if err := m.conn.QueryRowContext(context.Background(), query).Scan(&result); err != nil { + if err != sql.ErrNoRows { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + + // if not, create the empty migration table + query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)" + if _, err := m.conn.ExecContext(context.Background(), query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71 +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} diff --git a/tests/ci/ut_run.sh b/tests/ci/ut_run.sh index 3c3ad3375306..cc437a0ee2c4 100755 --- a/tests/ci/ut_run.sh +++ b/tests/ci/ut_run.sh @@ -17,5 +17,5 @@ docker ps DIR="$(cd "$(dirname "$0")" && pwd)" go test -race -i ./src/core ./src/jobservice -sudo -E env "PATH=$PATH" "POSTGRES_MIGRATION_SCRIPTS_PATH=$DIR/../../make/migrations/postgresql/" ./tests/coverage4gotest.sh +sudo -E env "PATH=$PATH" "POSTGRES_MIGRATION_SCRIPTS_PATH=$DIR/../../make/migrations/postgresql/" "MYSQL_MIGRATION_SCRIPTS_PATH=$DIR/../../make/migrations/mysql/" ./tests/coverage4gotest.sh #goveralls -coverprofile=profile.cov -service=github || true \ No newline at end of file diff --git a/tests/docker-compose.test.yml b/tests/docker-compose.test.yml index f8303df3c64c..c6a15e86620b 100644 --- a/tests/docker-compose.test.yml +++ b/tests/docker-compose.test.yml @@ -27,3 +27,30 @@ services: - /data/redis:/var/lib/redis ports: - 6379:6379 + mariadb: + image: mariadb:10.5.9 + command: [ + '--collation-server=utf8mb4_general_ci', + ] + restart: always + volumes: + - /data/mariadb/database:/var/lib/mysql:z + environment: + MYSQL_ROOT_PASSWORD: root123 + MYSQL_DATABASE: registry + ports: + - 3306:3306 + mysql: + image: mysql:8.0 + command: [ + '--default-authentication-plugin=mysql_native_password', + '--collation-server=utf8mb4_general_ci', + ] + restart: always + volumes: + - /data/mysql/database:/var/lib/mysql:z + environment: + MYSQL_ROOT_PASSWORD: root123 + MYSQL_DATABASE: registry + ports: + - 3308:3306