diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f4e25b4..c7e8256 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,43 +16,81 @@ jobs: only-latest-golang: false run-codeql: true run-test: false + run-build: false secrets: inherit test: runs-on: ubuntu-latest + + env: + DBPASSWORD: ${{ github.sha }} + services: postgres: - image: postgres + image: postgres:11-alpine env: - POSTGRES_USER: user - POSTGRES_PASSWORD: password + POSTGRES_PASSWORD: ${{ env.DBPASSWORD }} ports: - - 5432:5432 + - 5432/tcp options: >- - --health-cmd pg_isready + --health-cmd "pg_isready -d postgres -U postgres" --health-interval 10s --health-timeout 5s --health-retries 5 + mysql: image: mysql:5.7 env: - MYSQL_USER: user - MYSQL_PASSWORD: password - MYSQL_DATABASE: test - MYSQL_ROOT_PASSWORD: password + MYSQL_ROOT_PASSWORD: ${{ env.DBPASSWORD }} ports: - - 3306:3306 - options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=3 + - 3306/tcp + options: >- + --health-cmd="mysqladmin ping" + --health-interval=10s + --health-timeout=5s + --health-retries=5 + + mariadb: + image: mariadb:10.2 + env: + MARIADB_ROOT_PASSWORD: ${{ env.DBPASSWORD }} + ports: + - 3306/tcp + options: >- + --health-cmd="mysqladmin ping" + --health-interval=10s + --health-timeout=5s + --health-retries=5 + strategy: matrix: go: [ 'stable', 'oldstable' ] + steps: - - name: Checkout - uses: actions/checkout@v3 - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} check-latest: true - - name: Test - run: V=1 make ci-test + + - name: Install gotestsum + run: go install gotest.tools/gotestsum@v1.11.0 + + - name: Checkout + uses: actions/checkout@v4 + + - name: Run Test Suite + run: | + gotestsum -- -timeout 10m -race -coverpkg=./... -coverprofile=coverprofile -covermode=atomic ./... 
+ env: + GOTESTSUM_JSONFILE: gotestsum.json + + TEST_POSTGRES_DSN: postgres://postgres:${{ env.DBPASSWORD }}@localhost:${{ job.services.postgres.ports['5432'] }}/nosql_test_${{ matrix.go }}?sslmode=disable&connect_timeout=5 + TEST_MYSQL_DSN: root:${{ env.DBPASSWORD }}@tcp(localhost:${{ job.services.mysql.ports['3306'] }})/nosql_test_${{ matrix.go }}?timeout=5s + TEST_MARIADB_DSN: root:${{ env.DBPASSWORD }}@tcp(localhost:${{ job.services.mariadb.ports['3306'] }})/nosql_test_${{ matrix.go }}?timeout=5s + + - name: Annotate Test Suite Results + if: ${{ (success() || failure()) && hashFiles('gotestsum.json') != '' }} + uses: guyarb/golang-test-annotations@v0.7.0 + with: + test-results: gotestsum.json diff --git a/.gitignore b/.gitignore index 06dece2..4bf864c 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,7 @@ *.out # Vendor directory -vendor/* \ No newline at end of file +vendor/* + +# Ignore direnv files +.envrc diff --git a/Makefile b/Makefile deleted file mode 100644 index f7e2879..0000000 --- a/Makefile +++ /dev/null @@ -1,65 +0,0 @@ -# Set V to 1 for verbose output from the Makefile -Q=$(if $V,,@) -PREFIX?= -SRC=$(shell find . -type f -name '*.go' -not -path "./vendor/*") -GOOS_OVERRIDE ?= -OUTPUT_ROOT=output/ - -all: test lint - -ci: ci-test - -.PHONY: all - -######################################### -# Build -######################################### - -build: ; - -######################################### -# Bootstrapping -######################################### - -bootstrap: - $Q curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$(go env GOPATH)/bin latest - $Q go install golang.org/x/vuln/cmd/govulncheck@latest - $Q go install gotest.tools/gotestsum@latest - -.PHONY: bootstrap - -######################################### -# Test -######################################### -test: - $Q $(GOFLAGS) go test -short -coverprofile=coverage.out ./... 
- -ci-test: - $Q $(GOFLAGS) CI=1 go test -short -coverprofile=coverage.out ./... - -.PHONY: test ci-test - -######################################### -# Linting -######################################### - -fmt: - $Q goimports -local github.com/golangci/golangci-lint -l -w $(SRC) - -lint: SHELL:=/bin/bash -lint: - $Q LOG_LEVEL=error golangci-lint run --config <(curl -s https://raw.githubusercontent.com/smallstep/workflows/master/.golangci.yml) --timeout=30m - $Q govulncheck ./... - -.PHONY: fmt lint - -######################################### -# Clean -######################################### - -clean: -ifneq ($(BINNAME),"") - $Q rm -f bin/$(BINNAME) -endif - -.PHONY: clean diff --git a/README.md b/README.md index 7fb5ffe..7b504a9 100644 --- a/README.md +++ b/README.md @@ -4,15 +4,12 @@ NoSQL is an abstraction layer for data persistence. This project is in development, the API is not stable. -# Implementations +## Supported data stores -The current version comes with a few implementations inlcuding Mysql, Badger, -and BoltDB, but implementations are on the roadmap. +- [x] [BoltDB](https://github.com/etcd-io/bbolt) +- [x] [Badger](https://github.com/dgraph-io/badger) (`v1`, `v2`, `v3` & `v4`) +- [x] [MariaDB](https://mariadb.org/) (`v10.2`+) +- [x] [MySQL](https://www.mysql.com/) (`v5.7`+) +- [x] [PostgreSQL](https://www.postgresql.org/) (`v11`+) -- [ ] Memory -- [x] [BoltDB](https://github.com/etcd-io/bbolt) etcd fork. -- [x] Badger -- [x] MariaDB/MySQL -- [x] PostgreSQL -- [ ] Cassandra -- [ ] ... +More implementations are on the way. 
\ No newline at end of file diff --git a/badger/v1/badger.go b/badger/v1/badger.go deleted file mode 100644 index b268432..0000000 --- a/badger/v1/badger.go +++ /dev/null @@ -1,407 +0,0 @@ -//go:build !nobadger && !nobadgerv1 -// +build !nobadger,!nobadgerv1 - -package badger - -import ( - "bytes" - "encoding/binary" - "strings" - - "github.com/dgraph-io/badger" - "github.com/dgraph-io/badger/options" - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" -) - -// DB is a wrapper over *badger.DB, -type DB struct { - db *badger.DB -} - -// Open opens or creates a BoltDB database in the given path. -func (db *DB) Open(dir string, opt ...database.Option) (err error) { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - - bo := badger.DefaultOptions(dir) - - // Set the Table and Value LoadingMode - default is MemoryMap. Low memory/RAM - // systems may want to use FileIO. - switch strings.ToLower(opts.BadgerFileLoadingMode) { - case "", database.BadgerMemoryMap, "memorymap": - bo.TableLoadingMode = options.MemoryMap - bo.ValueLogLoadingMode = options.MemoryMap - case database.BadgerFileIO: - bo.TableLoadingMode = options.FileIO - bo.ValueLogLoadingMode = options.FileIO - default: - return badger.ErrInvalidLoadingMode - } - - if opts.ValueDir != "" { - bo.ValueDir = opts.ValueDir - } else { - bo.ValueDir = dir - } - - db.db, err = badger.Open(bo) - return errors.Wrap(err, "error opening Badger database") -} - -// Close closes the DB database. -func (db *DB) Close() error { - return errors.Wrap(db.db.Close(), "error closing Badger database") -} - -// CreateTable creates a token element with the 'bucket' prefix so that such -// that their appears to be a table. 
-func (db *DB) CreateTable(bucket []byte) error { - bk, err := badgerEncode(bucket) - if err != nil { - return err - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Set(bk, []byte{}), "failed to create %s/", bucket) - }) -} - -// DeleteTable deletes a root or embedded bucket. Returns an error if the -// bucket cannot be found or if the key represents a non-bucket value. -func (db *DB) DeleteTable(bucket []byte) error { - var tableExists bool - prefix, err := badgerEncode(bucket) - if err != nil { - return err - } - deleteKeys := func(keysForDelete [][]byte) error { - if err := db.db.Update(func(txn *badger.Txn) error { - for _, key := range keysForDelete { - tableExists = true - if err := txn.Delete(key); err != nil { - return errors.Wrapf(err, "error deleting key %s", key) - } - } - return nil - }); err != nil { - return errors.Wrapf(err, "update failed") - } - return nil - } - - collectSize := 1000 - err = db.db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.AllVersions = false - opts.PrefetchValues = false - it := txn.NewIterator(opts) - defer it.Close() - - keysForDelete := make([][]byte, collectSize) - keysCollected := 0 - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - key := it.Item().KeyCopy(nil) - keysForDelete[keysCollected] = key - keysCollected++ - if keysCollected == collectSize { - if err := deleteKeys(keysForDelete); err != nil { - return err - } - keysCollected = 0 - } - } - if keysCollected > 0 { - if err := deleteKeys(keysForDelete[:keysCollected]); err != nil { - return err - } - } - if !tableExists { - return errors.Wrapf(database.ErrNotFound, "table %s does not exist", bucket) - } - - return nil - }) - return err -} - -// Compact triggers a value log garbage collection. -func (db *DB) Compact(discardRatio float64) error { - return db.db.RunValueLogGC(discardRatio) -} - -// badgerGet is a helper for the Get method. 
-func badgerGet(txn *badger.Txn, key []byte) ([]byte, error) { - item, err := txn.Get(key) - switch { - case errors.Is(err, badger.ErrKeyNotFound): - return nil, errors.Wrapf(database.ErrNotFound, "key %s not found", key) - case err != nil: - return nil, errors.Wrapf(err, "failed to get key %s", key) - default: - val, err := item.ValueCopy(nil) - if err != nil { - return nil, errors.Wrap(err, "error accessing value returned by database") - } - return val, nil - } -} - -// Get returns the value stored in the given bucked and key. -func (db *DB) Get(bucket, key []byte) (ret []byte, err error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - err = db.db.View(func(txn *badger.Txn) error { - ret, err = badgerGet(txn, bk) - return err - }) - return -} - -// Set stores the given value on bucket and key. -func (db *DB) Set(bucket, key, value []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Set(bk, value), "failed to set %s/%s", bucket, key) - }) -} - -// Del deletes the value stored in the given bucked and key. -func (db *DB) Del(bucket, key []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Delete(bk), "failed to delete %s/%s", bucket, key) - }) -} - -// List returns the full list of entries in a bucket. 
-func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - var ( - entries []*database.Entry - tableExists bool - ) - err := db.db.View(func(txn *badger.Txn) error { - it := txn.NewIterator(badger.DefaultIteratorOptions) - defer it.Close() - prefix, err := badgerEncode(bucket) - if err != nil { - return err - } - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - tableExists = true - item := it.Item() - bk := item.KeyCopy(nil) - if isBadgerTable(bk) { - continue - } - _bucket, key, err := fromBadgerKey(bk) - if err != nil { - return errors.Wrapf(err, "error converting from badgerKey %s", bk) - } - if !bytes.Equal(_bucket, bucket) { - return errors.Errorf("bucket names do not match; want %v, but got %v", - bucket, _bucket) - } - v, err := item.ValueCopy(nil) - if err != nil { - return errors.Wrap(err, "error retrieving contents from database value") - } - entries = append(entries, &database.Entry{ - Bucket: _bucket, - Key: key, - Value: v, - }) - } - if !tableExists { - return errors.Wrapf(database.ErrNotFound, "bucket %s not found", bucket) - } - return nil - }) - return entries, err -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. 
-func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, false, err - } - - badgerTxn := db.db.NewTransaction(true) - defer badgerTxn.Discard() - - val, swapped, err := cmpAndSwap(badgerTxn, bk, oldValue, newValue) - switch { - case err != nil: - return nil, false, err - case swapped: - if err := badgerTxn.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit badger transaction") - } - return val, swapped, nil - default: - return val, swapped, err - } -} - -func cmpAndSwap(badgerTxn *badger.Txn, bk, oldValue, newValue []byte) ([]byte, bool, error) { - current, err := badgerGet(badgerTxn, bk) - // If value does not exist but expected is not nil, then return w/out swapping. - if err != nil && !database.IsErrNotFound(err) { - return nil, false, err - } - if !bytes.Equal(current, oldValue) { - return current, false, nil - } - - if err := badgerTxn.Set(bk, newValue); err != nil { - return current, false, errors.Wrapf(err, "failed to set %s", bk) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. 
-func (db *DB) Update(txn *database.Tx) error { - return db.db.Update(func(badgerTxn *badger.Txn) (err error) { - for _, q := range txn.Operations { - switch q.Cmd { - case database.CreateTable: - if err := db.CreateTable(q.Bucket); err != nil { - return err - } - continue - case database.DeleteTable: - if err := db.DeleteTable(q.Bucket); err != nil { - return err - } - continue - } - bk, err := toBadgerKey(q.Bucket, q.Key) - if err != nil { - return err - } - switch q.Cmd { - case database.Get: - if q.Result, err = badgerGet(badgerTxn, bk); err != nil { - return errors.Wrapf(err, "failed to get %s/%s", q.Bucket, q.Key) - } - case database.Set: - if err := badgerTxn.Set(bk, q.Value); err != nil { - return errors.Wrapf(err, "failed to set %s/%s", q.Bucket, q.Key) - } - case database.Delete: - if err = badgerTxn.Delete(bk); err != nil { - return errors.Wrapf(err, "failed to delete %s/%s", q.Bucket, q.Key) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwap(badgerTxn, bk, q.CmpValue, q.Value) - if err != nil { - return errors.Wrapf(err, "failed to CmpAndSwap %s/%s", q.Bucket, q.Key) - } - case database.CmpOrRollback: - return database.ErrOpNotSupported - default: - return database.ErrOpNotSupported - } - } - return nil - }) -} - -// toBadgerKey returns the Badger database key using the following algorithm: -// First 2 bytes are the length of the bucket/table name in little endian format, -// followed by the bucket/table name, -// followed by 2 bytes representing the length of the key in little endian format, -// followed by the key. -func toBadgerKey(bucket, key []byte) ([]byte, error) { - first, err := badgerEncode(bucket) - if err != nil { - return nil, err - } - second, err := badgerEncode(key) - if err != nil { - return nil, err - } - return append(first, second...), nil -} - -// isBadgerTable returns True if the slice is a badgerTable token, false otherwise. 
-// badgerTable means that the slice contains only the [size|value] of one section -// of a badgerKey and no remainder. A badgerKey is [buket|key], while a badgerTable -// is only the bucket section. -func isBadgerTable(bk []byte) bool { - if k, rest := parseBadgerEncode(bk); len(k) > 0 && len(rest) == 0 { - return true - } - return false -} - -// fromBadgerKey returns the bucket and key encoded in a BadgerKey. -// See documentation for toBadgerKey. -func fromBadgerKey(bk []byte) ([]byte, []byte, error) { - bucket, rest := parseBadgerEncode(bk) - if len(bucket) == 0 || len(rest) == 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - key, rest2 := parseBadgerEncode(rest) - if len(key) == 0 || len(rest2) != 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - return bucket, key, nil -} - -// badgerEncode encodes a byte slice into a section of a BadgerKey. -// See documentation for toBadgerKey. -func badgerEncode(val []byte) ([]byte, error) { - l := len(val) - switch { - case l == 0: - return nil, errors.Errorf("input cannot be empty") - case l > 65535: - return nil, errors.Errorf("length of input cannot be greater than 65535") - default: - lb := new(bytes.Buffer) - if err := binary.Write(lb, binary.LittleEndian, uint16(l)); err != nil { - return nil, errors.Wrap(err, "error doing binary Write") - } - return append(lb.Bytes(), val...), nil - } -} - -func parseBadgerEncode(bk []byte) (value, rest []byte) { - var ( - keyLen uint16 - start = uint16(2) - length = uint16(len(bk)) - ) - if uint16(len(bk)) < start { - return nil, bk - } - // First 2 bytes stores the length of the value. 
- if err := binary.Read(bytes.NewReader(bk[:2]), binary.LittleEndian, &keyLen); err != nil { - return nil, bk - } - end := start + keyLen - switch { - case length < end: - return nil, bk - case length == end: - return bk[start:end], nil - default: - return bk[start:end], bk[end:] - } -} diff --git a/badger/v1/badger_test.go b/badger/v1/badger_test.go deleted file mode 100644 index 8c0886a..0000000 --- a/badger/v1/badger_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package badger - -import ( - "errors" - "testing" - - "github.com/smallstep/assert" -) - -func Test_badgerEncode(t *testing.T) { - type args struct { - val []byte - } - tests := []struct { - name string - args args - want []byte - err error - }{ - { - name: "fail/input-too-long", - args: args{make([]byte, 65536)}, - err: errors.New("length of input cannot be greater than 65535"), - }, - { - name: "fail/input-empty", - args: args{nil}, - err: errors.New("input cannot be empty"), - }, - { - name: "ok", - args: args{[]byte("hello")}, - want: []byte{5, 0, 104, 101, 108, 108, 111}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := badgerEncode(tt.args.val) - if err != nil { - if assert.NotNil(t, tt.err) { - assert.HasPrefix(t, err.Error(), tt.err.Error()) - } - } else { - if assert.Nil(t, tt.err) && assert.NotNil(t, got) && assert.NotNil(t, tt.want) { - assert.Equals(t, got, tt.want) - } - } - }) - } -} - -func Test_toBadgerKey(t *testing.T) { - type args struct { - bucket []byte - key []byte - } - tests := []struct { - name string - args args - want []byte - err error - }{ - { - name: "fail/bucket-too-long", - args: args{make([]byte, 65536), []byte("goodbye")}, - err: errors.New("length of input cannot be greater than 65535"), - }, - { - name: "fail/key-empty", - args: args{[]byte("hello"), nil}, - err: errors.New("input cannot be empty"), - }, - { - name: "ok", - args: args{[]byte("hello"), []byte("goodbye")}, - want: []byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 
111, 100, 98, 121, 101}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := toBadgerKey(tt.args.bucket, tt.args.key) - if err != nil { - if assert.NotNil(t, tt.err) { - assert.HasPrefix(t, err.Error(), tt.err.Error()) - } - } else { - if assert.Nil(t, tt.err) && assert.NotNil(t, got) && assert.NotNil(t, tt.want) { - assert.Equals(t, got, tt.want) - } - } - }) - } -} - -func Test_fromBadgerKey(t *testing.T) { - type args struct { - bk []byte - } - type ret struct { - bucket []byte - key []byte - } - tests := []struct { - name string - args args - want ret - err error - }{ - { - name: "fail/input-too-short/no-bucket-length", - args: args{[]byte{5}}, - err: errors.New("invalid badger key: [5]"), - }, - { - name: "fail/input-too-short/no-key-length", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111}}, - err: errors.New("invalid badger key: [5 0 104 101 108 108 111]"), - }, - { - name: "fail/input-too-short/invalid-key", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103}}, - err: errors.New("invalid badger key: [5 0 104 101 108 108 111 7 0 103]"), - }, - { - name: "ok", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 111, 100, 98, 121, 101}}, - want: ret{[]byte{104, 101, 108, 108, 111}, []byte{103, 111, 111, 100, 98, 121, 101}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bucket, key, err := fromBadgerKey(tt.args.bk) - if err != nil { - if assert.NotNil(t, tt.err) { - assert.HasPrefix(t, err.Error(), tt.err.Error()) - } - } else { - if assert.Nil(t, tt.err) && assert.NotNil(t, bucket) && assert.NotNil(t, key) { - assert.Equals(t, bucket, tt.want.bucket) - assert.Equals(t, key, tt.want.key) - } - } - }) - } -} - -func Test_parseBadgerEncode(t *testing.T) { - type args struct { - bk []byte - } - type ret struct { - bucket []byte - key []byte - } - tests := []struct { - name string - args args - want ret - }{ - { - name: "fail/keylen-too-short", - args: args{[]byte{5}}, 
- want: ret{nil, []byte{5}}, - }, - { - name: "fail/key-too-short", - args: args{[]byte{5, 0, 111, 111}}, - want: ret{nil, []byte{5, 0, 111, 111}}, - }, - { - name: "ok/exact-length", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111}}, - want: ret{[]byte{104, 101, 108, 108, 111}, nil}, - }, - { - name: "ok/longer", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 111, 100, 98, 121, 101}}, - want: ret{[]byte{104, 101, 108, 108, 111}, []byte{7, 0, 103, 111, 111, 100, 98, 121, 101}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bucket, key := parseBadgerEncode(tt.args.bk) - assert.Equals(t, bucket, tt.want.bucket) - assert.Equals(t, key, tt.want.key) - }) - } -} - -func Test_isBadgerTable(t *testing.T) { - type args struct { - bk []byte - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "false/keylen-too-short", - args: args{[]byte{5}}, - want: false, - }, - { - name: "false/key-too-short", - args: args{[]byte{5, 0, 111, 111}}, - want: false, - }, - { - name: "ok", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111}}, - want: true, - }, - { - name: "false/key-too-long", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 111, 100, 98, 121, 101}}, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equals(t, isBadgerTable(tt.args.bk), tt.want) - }) - } -} diff --git a/badger/v1/nobadger.go b/badger/v1/nobadger.go deleted file mode 100644 index 4e7703e..0000000 --- a/badger/v1/nobadger.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build nobadger || nobadgerv1 -// +build nobadger nobadgerv1 - -package badger - -import "github.com/smallstep/nosql/database" - -type DB = database.NotSupportedDB diff --git a/badger/v2/badger.go b/badger/v2/badger.go deleted file mode 100644 index 247912e..0000000 --- a/badger/v2/badger.go +++ /dev/null @@ -1,412 +0,0 @@ -//go:build !nobadger && !nobadgerv2 -// +build !nobadger,!nobadgerv2 - -package badger 
- -import ( - "bytes" - "encoding/binary" - "strings" - - "github.com/dgraph-io/badger/v2" - "github.com/dgraph-io/badger/v2/options" - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" -) - -// DB is a wrapper over *badger/v2.DB, -type DB struct { - db *badger.DB -} - -// Open opens or creates a BoltDB database in the given path. -func (db *DB) Open(dir string, opt ...database.Option) (err error) { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - - bo := badger.DefaultOptions(dir) - if opts.ValueDir != "" { - bo.ValueDir = opts.ValueDir - } - - // Set the ValueLogLoadingMode - default is MemoryMap. Low memory/RAM - // systems may want to use FileIO. - switch strings.ToLower(opts.BadgerFileLoadingMode) { - case "", database.BadgerMemoryMap, "memorymap": - bo.ValueLogLoadingMode = options.MemoryMap - case database.BadgerFileIO: - bo.ValueLogLoadingMode = options.FileIO - default: - return badger.ErrInvalidLoadingMode - } - - db.db, err = badger.Open(bo) - return errors.Wrap(err, "error opening Badger database") -} - -// Close closes the DB database. -func (db *DB) Close() error { - return errors.Wrap(db.db.Close(), "error closing Badger database") -} - -// CreateTable creates a token element with the 'bucket' prefix so that such -// that their appears to be a table. -func (db *DB) CreateTable(bucket []byte) error { - bk, err := badgerEncode(bucket) - if err != nil { - return err - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Set(bk, []byte{}), "failed to create %s/", bucket) - }) -} - -// DeleteTable deletes a root or embedded bucket. Returns an error if the -// bucket cannot be found or if the key represents a non-bucket value. 
-func (db *DB) DeleteTable(bucket []byte) error { - var tableExists bool - prefix, err := badgerEncode(bucket) - if err != nil { - return err - } - deleteKeys := func(keysForDelete [][]byte) error { - if err := db.db.Update(func(txn *badger.Txn) error { - for _, key := range keysForDelete { - tableExists = true - if err := txn.Delete(key); err != nil { - return errors.Wrapf(err, "error deleting key %s", key) - } - } - return nil - }); err != nil { - return errors.Wrapf(err, "update failed") - } - return nil - } - - collectSize := 1000 - err = db.db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.AllVersions = false - opts.PrefetchValues = false - it := txn.NewIterator(opts) - defer it.Close() - - keysForDelete := make([][]byte, collectSize) - keysCollected := 0 - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - key := it.Item().KeyCopy(nil) - keysForDelete[keysCollected] = key - keysCollected++ - if keysCollected == collectSize { - if err := deleteKeys(keysForDelete); err != nil { - return err - } - keysCollected = 0 - } - } - if keysCollected > 0 { - if err := deleteKeys(keysForDelete[:keysCollected]); err != nil { - return err - } - } - if !tableExists { - return errors.Wrapf(database.ErrNotFound, "table %s does not exist", bucket) - } - - return nil - }) - return err -} - -// badgerGetV2 is a helper for the Get method. -func badgerGetV2(txn *badger.Txn, key []byte) ([]byte, error) { - item, err := txn.Get(key) - switch { - case errors.Is(err, badger.ErrKeyNotFound): - return nil, errors.Wrapf(database.ErrNotFound, "key %s not found", key) - case err != nil: - return nil, errors.Wrapf(err, "failed to get key %s", key) - default: - val, err := item.ValueCopy(nil) - if err != nil { - return nil, errors.Wrap(err, "error accessing value returned by database") - } - - // Make sure to return a copy as val is only valid during the - // transaction. 
- return cloneBytes(val), nil - } -} - -// Get returns the value stored in the given bucked and key. -func (db *DB) Get(bucket, key []byte) (ret []byte, err error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - err = db.db.View(func(txn *badger.Txn) error { - ret, err = badgerGetV2(txn, bk) - return err - }) - return -} - -// Set stores the given value on bucket and key. -func (db *DB) Set(bucket, key, value []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Set(bk, value), "failed to set %s/%s", bucket, key) - }) -} - -// Del deletes the value stored in the given bucked and key. -func (db *DB) Del(bucket, key []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Delete(bk), "failed to delete %s/%s", bucket, key) - }) -} - -// List returns the full list of entries in a bucket. 
-func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - var ( - entries []*database.Entry - tableExists bool - ) - err := db.db.View(func(txn *badger.Txn) error { - it := txn.NewIterator(badger.DefaultIteratorOptions) - defer it.Close() - prefix, err := badgerEncode(bucket) - if err != nil { - return err - } - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - tableExists = true - item := it.Item() - bk := item.KeyCopy(nil) - if isBadgerTable(bk) { - continue - } - _bucket, key, err := fromBadgerKey(bk) - if err != nil { - return errors.Wrapf(err, "error converting from badgerKey %s", bk) - } - if !bytes.Equal(_bucket, bucket) { - return errors.Errorf("bucket names do not match; want %v, but got %v", - bucket, _bucket) - } - v, err := item.ValueCopy(nil) - if err != nil { - return errors.Wrap(err, "error retrieving contents from database value") - } - entries = append(entries, &database.Entry{ - Bucket: _bucket, - Key: key, - Value: cloneBytes(v), - }) - } - if !tableExists { - return errors.Wrapf(database.ErrNotFound, "bucket %s not found", bucket) - } - return nil - }) - return entries, err -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. 
-func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, false, err - } - - badgerTxn := db.db.NewTransaction(true) - defer badgerTxn.Discard() - - val, swapped, err := cmpAndSwapV2(badgerTxn, bk, oldValue, newValue) - switch { - case err != nil: - return nil, false, err - case swapped: - if err := badgerTxn.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit badger transaction") - } - return val, swapped, nil - default: - return val, swapped, err - } -} - -func cmpAndSwapV2(badgerTxn *badger.Txn, bk, oldValue, newValue []byte) ([]byte, bool, error) { - current, err := badgerGetV2(badgerTxn, bk) - // If value does not exist but expected is not nil, then return w/out swapping. - if err != nil && !database.IsErrNotFound(err) { - return nil, false, err - } - if !bytes.Equal(current, oldValue) { - return current, false, nil - } - - if err := badgerTxn.Set(bk, newValue); err != nil { - return current, false, errors.Wrapf(err, "failed to set %s", bk) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. 
-func (db *DB) Update(txn *database.Tx) error { - return db.db.Update(func(badgerTxn *badger.Txn) (err error) { - for _, q := range txn.Operations { - switch q.Cmd { - case database.CreateTable: - if err := db.CreateTable(q.Bucket); err != nil { - return err - } - continue - case database.DeleteTable: - if err := db.DeleteTable(q.Bucket); err != nil { - return err - } - continue - } - bk, err := toBadgerKey(q.Bucket, q.Key) - if err != nil { - return err - } - switch q.Cmd { - case database.Get: - if q.Result, err = badgerGetV2(badgerTxn, bk); err != nil { - return errors.Wrapf(err, "failed to get %s/%s", q.Bucket, q.Key) - } - case database.Set: - if err := badgerTxn.Set(bk, q.Value); err != nil { - return errors.Wrapf(err, "failed to set %s/%s", q.Bucket, q.Key) - } - case database.Delete: - if err = badgerTxn.Delete(bk); err != nil { - return errors.Wrapf(err, "failed to delete %s/%s", q.Bucket, q.Key) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwapV2(badgerTxn, bk, q.CmpValue, q.Value) - if err != nil { - return errors.Wrapf(err, "failed to CmpAndSwap %s/%s", q.Bucket, q.Key) - } - case database.CmpOrRollback: - return database.ErrOpNotSupported - default: - return database.ErrOpNotSupported - } - } - return nil - }) -} - -// Compact triggers a value log garbage collection. -func (db *DB) Compact(discardRatio float64) error { - return db.db.RunValueLogGC(discardRatio) -} - -// toBadgerKey returns the Badger database key using the following algorithm: -// First 2 bytes are the length of the bucket/table name in little endian format, -// followed by the bucket/table name, -// followed by 2 bytes representing the length of the key in little endian format, -// followed by the key. 
-func toBadgerKey(bucket, key []byte) ([]byte, error) { - first, err := badgerEncode(bucket) - if err != nil { - return nil, err - } - second, err := badgerEncode(key) - if err != nil { - return nil, err - } - return append(first, second...), nil -} - -// isBadgerTable returns True if the slice is a badgerTable token, false otherwise. -// badgerTable means that the slice contains only the [size|value] of one section -// of a badgerKey and no remainder. A badgerKey is [buket|key], while a badgerTable -// is only the bucket section. -func isBadgerTable(bk []byte) bool { - if k, rest := parseBadgerEncode(bk); len(k) > 0 && len(rest) == 0 { - return true - } - return false -} - -// fromBadgerKey returns the bucket and key encoded in a BadgerKey. -// See documentation for toBadgerKey. -func fromBadgerKey(bk []byte) ([]byte, []byte, error) { - bucket, rest := parseBadgerEncode(bk) - if len(bucket) == 0 || len(rest) == 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - key, rest2 := parseBadgerEncode(rest) - if len(key) == 0 || len(rest2) != 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - return bucket, key, nil -} - -// badgerEncode encodes a byte slice into a section of a BadgerKey. -// See documentation for toBadgerKey. -func badgerEncode(val []byte) ([]byte, error) { - l := len(val) - switch { - case l == 0: - return nil, errors.Errorf("input cannot be empty") - case l > 65535: - return nil, errors.Errorf("length of input cannot be greater than 65535") - default: - lb := new(bytes.Buffer) - if err := binary.Write(lb, binary.LittleEndian, uint16(l)); err != nil { - return nil, errors.Wrap(err, "error doing binary Write") - } - return append(lb.Bytes(), val...), nil - } -} - -func parseBadgerEncode(bk []byte) (value, rest []byte) { - var ( - keyLen uint16 - start = uint16(2) - length = uint16(len(bk)) - ) - if uint16(len(bk)) < start { - return nil, bk - } - // First 2 bytes stores the length of the value. 
- if err := binary.Read(bytes.NewReader(bk[:2]), binary.LittleEndian, &keyLen); err != nil { - return nil, bk - } - end := start + keyLen - switch { - case length < end: - return nil, bk - case length == end: - return bk[start:end], nil - default: - return bk[start:end], bk[end:] - } -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/badger/v2/badger_test.go b/badger/v2/badger_test.go deleted file mode 100644 index 8c0886a..0000000 --- a/badger/v2/badger_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package badger - -import ( - "errors" - "testing" - - "github.com/smallstep/assert" -) - -func Test_badgerEncode(t *testing.T) { - type args struct { - val []byte - } - tests := []struct { - name string - args args - want []byte - err error - }{ - { - name: "fail/input-too-long", - args: args{make([]byte, 65536)}, - err: errors.New("length of input cannot be greater than 65535"), - }, - { - name: "fail/input-empty", - args: args{nil}, - err: errors.New("input cannot be empty"), - }, - { - name: "ok", - args: args{[]byte("hello")}, - want: []byte{5, 0, 104, 101, 108, 108, 111}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := badgerEncode(tt.args.val) - if err != nil { - if assert.NotNil(t, tt.err) { - assert.HasPrefix(t, err.Error(), tt.err.Error()) - } - } else { - if assert.Nil(t, tt.err) && assert.NotNil(t, got) && assert.NotNil(t, tt.want) { - assert.Equals(t, got, tt.want) - } - } - }) - } -} - -func Test_toBadgerKey(t *testing.T) { - type args struct { - bucket []byte - key []byte - } - tests := []struct { - name string - args args - want []byte - err error - }{ - { - name: "fail/bucket-too-long", - args: args{make([]byte, 65536), []byte("goodbye")}, - err: errors.New("length of input cannot be greater than 65535"), - }, - { - name: "fail/key-empty", - args: args{[]byte("hello"), nil}, - err: errors.New("input 
cannot be empty"), - }, - { - name: "ok", - args: args{[]byte("hello"), []byte("goodbye")}, - want: []byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 111, 100, 98, 121, 101}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := toBadgerKey(tt.args.bucket, tt.args.key) - if err != nil { - if assert.NotNil(t, tt.err) { - assert.HasPrefix(t, err.Error(), tt.err.Error()) - } - } else { - if assert.Nil(t, tt.err) && assert.NotNil(t, got) && assert.NotNil(t, tt.want) { - assert.Equals(t, got, tt.want) - } - } - }) - } -} - -func Test_fromBadgerKey(t *testing.T) { - type args struct { - bk []byte - } - type ret struct { - bucket []byte - key []byte - } - tests := []struct { - name string - args args - want ret - err error - }{ - { - name: "fail/input-too-short/no-bucket-length", - args: args{[]byte{5}}, - err: errors.New("invalid badger key: [5]"), - }, - { - name: "fail/input-too-short/no-key-length", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111}}, - err: errors.New("invalid badger key: [5 0 104 101 108 108 111]"), - }, - { - name: "fail/input-too-short/invalid-key", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103}}, - err: errors.New("invalid badger key: [5 0 104 101 108 108 111 7 0 103]"), - }, - { - name: "ok", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 111, 100, 98, 121, 101}}, - want: ret{[]byte{104, 101, 108, 108, 111}, []byte{103, 111, 111, 100, 98, 121, 101}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bucket, key, err := fromBadgerKey(tt.args.bk) - if err != nil { - if assert.NotNil(t, tt.err) { - assert.HasPrefix(t, err.Error(), tt.err.Error()) - } - } else { - if assert.Nil(t, tt.err) && assert.NotNil(t, bucket) && assert.NotNil(t, key) { - assert.Equals(t, bucket, tt.want.bucket) - assert.Equals(t, key, tt.want.key) - } - } - }) - } -} - -func Test_parseBadgerEncode(t *testing.T) { - type args struct { - bk []byte - } - type ret struct { - 
bucket []byte - key []byte - } - tests := []struct { - name string - args args - want ret - }{ - { - name: "fail/keylen-too-short", - args: args{[]byte{5}}, - want: ret{nil, []byte{5}}, - }, - { - name: "fail/key-too-short", - args: args{[]byte{5, 0, 111, 111}}, - want: ret{nil, []byte{5, 0, 111, 111}}, - }, - { - name: "ok/exact-length", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111}}, - want: ret{[]byte{104, 101, 108, 108, 111}, nil}, - }, - { - name: "ok/longer", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 111, 100, 98, 121, 101}}, - want: ret{[]byte{104, 101, 108, 108, 111}, []byte{7, 0, 103, 111, 111, 100, 98, 121, 101}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bucket, key := parseBadgerEncode(tt.args.bk) - assert.Equals(t, bucket, tt.want.bucket) - assert.Equals(t, key, tt.want.key) - }) - } -} - -func Test_isBadgerTable(t *testing.T) { - type args struct { - bk []byte - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "false/keylen-too-short", - args: args{[]byte{5}}, - want: false, - }, - { - name: "false/key-too-short", - args: args{[]byte{5, 0, 111, 111}}, - want: false, - }, - { - name: "ok", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111}}, - want: true, - }, - { - name: "false/key-too-long", - args: args{[]byte{5, 0, 104, 101, 108, 108, 111, 7, 0, 103, 111, 111, 100, 98, 121, 101}}, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equals(t, isBadgerTable(tt.args.bk), tt.want) - }) - } -} diff --git a/badger/v2/nobadger.go b/badger/v2/nobadger.go deleted file mode 100644 index b450f62..0000000 --- a/badger/v2/nobadger.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build nobadger || nobadgerv2 -// +build nobadger nobadgerv2 - -package badger - -import "github.com/smallstep/nosql/database" - -type DB = database.NotSupportedDB diff --git a/bolt/bbolt.go b/bolt/bbolt.go deleted file mode 100644 index 9f667f4..0000000 
--- a/bolt/bbolt.go +++ /dev/null @@ -1,275 +0,0 @@ -//go:build !nobbolt -// +build !nobbolt - -package bolt - -import ( - "bytes" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" - bolt "go.etcd.io/bbolt" -) - -var boltDBSep = []byte("/") - -// DB is a wrapper over bolt.DB, -type DB struct { - db *bolt.DB -} - -type boltBucket interface { - Bucket(name []byte) *bolt.Bucket - CreateBucket(name []byte) (*bolt.Bucket, error) - CreateBucketIfNotExists(name []byte) (*bolt.Bucket, error) - DeleteBucket(name []byte) error -} - -// Open opens or creates a DB database in the given path. -func (db *DB) Open(dataSourceName string, opt ...database.Option) (err error) { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - db.db, err = bolt.Open(dataSourceName, 0600, &bolt.Options{Timeout: 5 * time.Second}) - return errors.WithStack(err) -} - -// Close closes the DB database. -func (db *DB) Close() error { - return errors.WithStack(db.db.Close()) -} - -// CreateTable creates a bucket or an embedded bucket if it does not exists. -func (db *DB) CreateTable(bucket []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - return db.createBucket(tx, bucket) - }) -} - -// DeleteTable deletes a root or embedded bucket. Returns an error if the -// bucket cannot be found or if the key represents a non-bucket value. -func (db *DB) DeleteTable(bucket []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - return db.deleteBucket(tx, bucket) - }) -} - -// Get returns the value stored in the given bucked and key. -func (db *DB) Get(bucket, key []byte) (ret []byte, err error) { - err = db.db.View(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return err - } - ret = b.Get(key) - if ret == nil { - return database.ErrNotFound - } - // Make sure to return a copy as ret is only valid during the - // transaction. 
- ret = cloneBytes(ret) - return nil - }) - return -} - -// Set stores the given value on bucket and key. -func (db *DB) Set(bucket, key, value []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return err - } - return errors.WithStack(b.Put(key, value)) - }) -} - -// Del deletes the value stored in the given bucked and key. -func (db *DB) Del(bucket, key []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return err - } - return errors.WithStack(b.Delete(key)) - }) -} - -// List returns the full list of entries in a bucket. -func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - var entries []*database.Entry - err := db.db.View(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return errors.Wrap(err, "getBucket failed") - } - - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - entries = append(entries, &database.Entry{ - Bucket: bucket, - Key: cloneBytes(k), - Value: cloneBytes(v), - }) - } - return nil - }) - return entries, err -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. 
-func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - boltTx, err := db.db.Begin(true) - if err != nil { - return nil, false, errors.Wrap(err, "error creating Bolt transaction") - } - - boltBucket := boltTx.Bucket(bucket) - if boltBucket == nil { - return nil, false, errors.Errorf("failed to get bucket %s", bucket) - } - - val, swapped, err := cmpAndSwap(boltBucket, key, oldValue, newValue) - switch { - case err != nil: - if err := boltTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to execute CmpAndSwap transaction on %s/%s and failed to rollback transaction", bucket, key) - } - return nil, false, err - case swapped: - if err := boltTx.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit badger transaction") - } - return val, swapped, nil - default: - if err := boltTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to rollback read-only CmpAndSwap transaction on %s/%s", bucket, key) - } - return val, swapped, err - } -} - -func cmpAndSwap(boltBucket *bolt.Bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - current := boltBucket.Get(key) - if !bytes.Equal(current, oldValue) { - return cloneBytes(current), false, nil - } - - if err := boltBucket.Put(key, newValue); err != nil { - return nil, false, errors.Wrapf(err, "failed to set key %s", key) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. 
-func (db *DB) Update(tx *database.Tx) error { - return db.db.Update(func(boltTx *bolt.Tx) (err error) { - var b *bolt.Bucket - for _, q := range tx.Operations { - // create or delete buckets - switch q.Cmd { - case database.CreateTable: - err = db.createBucket(boltTx, q.Bucket) - if err != nil { - return err - } - continue - case database.DeleteTable: - err = db.deleteBucket(boltTx, q.Bucket) - if err != nil { - return err - } - continue - } - - // For other operations, get bucket and perform operation - b = boltTx.Bucket(q.Bucket) - - switch q.Cmd { - case database.Get: - ret := b.Get(q.Key) - if ret == nil { - return errors.WithStack(database.ErrNotFound) - } - q.Result = cloneBytes(ret) - case database.Set: - if err = b.Put(q.Key, q.Value); err != nil { - return errors.WithStack(err) - } - case database.Delete: - if err = b.Delete(q.Key); err != nil { - return errors.WithStack(err) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwap(b, q.Key, q.CmpValue, q.Value) - if err != nil { - return errors.Wrapf(err, "failed to execute CmpAndSwap on %s/%s", q.Bucket, q.Key) - } - case database.CmpOrRollback: - return errors.Errorf("operation '%s' is not yet implemented", q.Cmd) - default: - return errors.Errorf("operation '%s' is not supported", q.Cmd) - } - } - return nil - }) -} - -// getBucket returns the bucket supporting nested buckets, nested buckets are -// bucket names separated by '/'. -func (db *DB) getBucket(tx *bolt.Tx, name []byte) (b *bolt.Bucket, err error) { - buckets := bytes.Split(name, boltDBSep) - for i, n := range buckets { - if i == 0 { - b = tx.Bucket(n) - } else { - b = b.Bucket(n) - } - if b == nil { - return nil, database.ErrNotFound - } - } - return -} - -// createBucket creates a bucket or a nested bucket in the given transaction. 
-func (db *DB) createBucket(tx *bolt.Tx, name []byte) (err error) { - b := boltBucket(tx) - buckets := bytes.Split(name, boltDBSep) - for _, name := range buckets { - b, err = b.CreateBucketIfNotExists(name) - if err != nil { - return errors.WithStack(err) - } - } - return -} - -// deleteBucket deletes a bucket or a nested bucked in the given transaction. -func (db *DB) deleteBucket(tx *bolt.Tx, name []byte) (err error) { - b := boltBucket(tx) - buckets := bytes.Split(name, boltDBSep) - last := len(buckets) - 1 - for i := 0; i < last; i++ { - if buck := b.Bucket(buckets[i]); buck == nil { - return errors.Wrapf(database.ErrNotFound, "bucket %s does not exist", bytes.Join(buckets[0:i+1], boltDBSep)) - } - } - err = b.DeleteBucket(buckets[last]) - if errors.Is(err, bolt.ErrBucketNotFound) { - return errors.Wrapf(database.ErrNotFound, "bucket %s does not exist", name) - } - return -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/bolt/nobbolt.go b/bolt/nobbolt.go deleted file mode 100644 index 7d1f163..0000000 --- a/bolt/nobbolt.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build nobbolt -// +build nobbolt - -package bolt - -import "github.com/smallstep/nosql/database" - -type DB = database.NotSupportedDB diff --git a/constrained.go b/constrained.go new file mode 100644 index 0000000..d1f72ee --- /dev/null +++ b/constrained.go @@ -0,0 +1,280 @@ +package nosql + +import ( + "context" + "unicode" + "unicode/utf8" +) + +// Constrain wraps the provided [DB] implementation with all applicable constraints. 
+func Constrain(db DB) DB { + if _, ok := db.(interface{ constrained() }); ok { + return db // we've already wrapped this database with applicable constraints + } + + if compactor, ok := db.(CompactedByFactor); ok { + return &compactedByFactorConstrained{ + DB: &constrained{DB: db}, + fn: compactor.CompactByFactor, + } + } + + return &constrained{DB: db} +} + +type compactedByFactorConstrained struct { + DB + fn func(context.Context, float64) error +} + +func (c *compactedByFactorConstrained) CompactByFactor(ctx context.Context, factor float64) error { + return c.fn(ctx, factor) +} + +type constrained struct { + DB +} + +func (*constrained) constrained() {} + +func (c *constrained) Close(ctx context.Context) error { + return c.DB.Close(ctx) +} + +func (c *constrained) CreateBucket(ctx context.Context, bucket []byte) (err error) { + if err = validateBucket(bucket); err == nil { + err = c.DB.CreateBucket(ctx, bucket) + } + + return +} + +func (c *constrained) DeleteBucket(ctx context.Context, bucket []byte) (err error) { + if err = validateBucket(bucket); err == nil { + err = c.DB.DeleteBucket(ctx, bucket) + } + + return +} + +func (c *constrained) Get(ctx context.Context, bucket, key []byte) (value []byte, err error) { + if err = validateID(bucket, key); err == nil { + value, err = c.DB.Get(ctx, bucket, key) + } + + return +} + +func (c *constrained) Put(ctx context.Context, bucket, key, value []byte) (err error) { + if err = validateRecord(bucket, key, value); err == nil { + err = c.DB.Put(ctx, bucket, key, value) + } + + return +} + +func (c *constrained) PutMany(ctx context.Context, records ...Record) (err error) { + if err = validateRecords(records...); err == nil { + err = c.DB.PutMany(ctx, records...) 
+ } + + return +} + +func (c *constrained) Delete(ctx context.Context, bucket, key []byte) (err error) { + if err = validateID(bucket, key); err == nil { + err = c.DB.Delete(ctx, bucket, key) + } + + return +} + +func (c *constrained) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) (err error) { + if err = validateRecord(bucket, key, oldValue); err == nil { + if err = validateValue(newValue); err == nil { + err = c.DB.CompareAndSwap(ctx, bucket, key, oldValue, newValue) + } + } + + return +} + +func (c *constrained) List(ctx context.Context, bucket []byte) (records []Record, err error) { + if err = validateBucket(bucket); err == nil { + records, err = c.DB.List(ctx, bucket) + } + + return +} + +func (c *constrained) View(ctx context.Context, fn func(Viewer) error) error { + return c.DB.View(ctx, func(v Viewer) error { + return fn(&constrainedViewer{v}) + }) +} + +func (c *constrained) Mutate(ctx context.Context, fn func(Mutator) error) error { + return c.DB.Mutate(ctx, func(m Mutator) error { + return fn(&constrainedMutator{mut: m}) + }) +} + +type constrainedViewer struct { + v Viewer +} + +func (cv *constrainedViewer) Get(ctx context.Context, bucket, key []byte) (value []byte, err error) { + if err = validateID(bucket, key); err == nil { + value, err = cv.v.Get(ctx, bucket, key) + } + + return +} + +func (cv *constrainedViewer) List(ctx context.Context, bucket []byte) (records []Record, err error) { + if err = validateBucket(bucket); err == nil { + records, err = cv.v.List(ctx, bucket) + } + + return +} + +type constrainedMutator struct { + mut Mutator +} + +func (cm *constrainedMutator) Get(ctx context.Context, bucket, key []byte) (value []byte, err error) { + if err = validateID(bucket, key); err == nil { + value, err = cm.mut.Get(ctx, bucket, key) + } + + return +} + +func (cm *constrainedMutator) List(ctx context.Context, bucket []byte) (records []Record, err error) { + if err = validateBucket(bucket); err == nil { + records, err = 
cm.mut.List(ctx, bucket) + } + + return +} + +func (cm *constrainedMutator) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) (err error) { + if err = validateRecord(bucket, key, oldValue); err == nil { + if err = validateValue(newValue); err == nil { + err = cm.mut.CompareAndSwap(ctx, bucket, key, oldValue, newValue) + } + } + + return +} + +func (cm *constrainedMutator) Put(ctx context.Context, bucket, key, value []byte) (err error) { + if err = validateRecord(bucket, key, value); err == nil { + err = cm.mut.Put(ctx, bucket, key, value) + } + + return +} + +func (cm *constrainedMutator) PutMany(ctx context.Context, records ...Record) (err error) { + if err = validateRecords(records...); err == nil { + err = cm.mut.PutMany(ctx, records...) + } + + return +} + +func (cm *constrainedMutator) Delete(ctx context.Context, bucket, key []byte) (err error) { + if err = validateID(bucket, key); err == nil { + err = cm.mut.Delete(ctx, bucket, key) + } + + return +} + +func validateBucket(bucket []byte) error { + if bucket == nil { + return ErrNilBucket + } + + if l := len(bucket); l == 0 { + return ErrEmptyBucket + } else if l > MaxBucketSize { + return ErrBucketTooLong + } + + // MySQL does not allow for invalid UTF-8 and both postgres & mysql do not allow zeroes even in + // quoted identifiers. + // + // MySQL also does not allow identifiers names to end with space characters, and contain UTF-8 + // supplementary runes (U+10000 and above). 
+ // + // Ref: https://dev.mysql.com/doc/refman/8.3/en/identifiers.html + + for { + r, s := utf8.DecodeRune(bucket) + switch { + case r == utf8.RuneError && s == 1: + return ErrInvalidBucket // contains invalid UTF-8 sequences + case r < 1, r > '\U0000FFFF': + return ErrInvalidBucket // contains runes outside the defined range + } + + bucket = bucket[s:] + if len(bucket) == 0 { + if unicode.IsSpace(r) { + return ErrInvalidBucket // ends with a space character + } + + return nil + } + } +} + +func validateKey(key []byte) error { + if key == nil { + return ErrNilKey + } + + if l := len(key); l == 0 { + return ErrEmptyKey + } else if l > MaxKeySize { + return ErrKeyTooLong + } + + return nil +} + +func validateID(bucket, key []byte) (err error) { + if err = validateBucket(bucket); err == nil { + err = validateKey(key) + } + return +} + +func validateValue(value []byte) error { + if value == nil { + return ErrNilValue + } + if l := len(value); l > MaxValueSize { + return ErrValueTooLong + } + return nil +} + +func validateRecord(bucket, key, value []byte) (err error) { + if err = validateID(bucket, key); err == nil { + err = validateValue(value) + } + return +} + +func validateRecords(records ...Record) (err error) { + for _, r := range records { + if err = validateRecord(r.Bucket, r.Key, r.Value); err != nil { + break + } + } + return +} diff --git a/database/database.go b/database/database.go deleted file mode 100644 index abdb5ce..0000000 --- a/database/database.go +++ /dev/null @@ -1,233 +0,0 @@ -package database - -import ( - "fmt" - - "errors" -) - -var ( - // ErrNotFound is the type returned on DB implementations if an item does not - // exist. - ErrNotFound = errors.New("not found") - // ErrOpNotSupported is the type returned on DB implementations if an operation - // is not supported. - ErrOpNotSupported = errors.New("operation not supported") -) - -// IsErrNotFound returns true if the cause of the given error is ErrNotFound. 
-func IsErrNotFound(err error) bool { - return errors.Is(err, ErrNotFound) -} - -// IsErrOpNotSupported returns true if the cause of the given error is ErrOpNotSupported. -func IsErrOpNotSupported(err error) bool { - return errors.Is(err, ErrOpNotSupported) -} - -// Options are configuration options for the database. -type Options struct { - Database string - ValueDir string - BadgerFileLoadingMode string -} - -// Option is the modifier type over Options. -type Option func(o *Options) error - -// WithValueDir is a modifier that sets the ValueDir attribute of Options. -func WithValueDir(path string) Option { - return func(o *Options) error { - o.ValueDir = path - return nil - } -} - -// WithDatabase is a modifier that sets the Database attribute of Options. -func WithDatabase(db string) Option { - return func(o *Options) error { - o.Database = db - return nil - } -} - -// WithBadgerFileLoadingMode is a modifier that sets the ValueLogLoadingMode -// of Badger db. -func WithBadgerFileLoadingMode(mode string) Option { - return func(o *Options) error { - o.BadgerFileLoadingMode = mode - return nil - } -} - -// DB is a interface to be implemented by the databases. -type DB interface { - // Open opens the database available with the given options. - Open(dataSourceName string, opt ...Option) error - // Close closes the current database. - Close() error - // Get returns the value stored in the given table/bucket and key. - Get(bucket, key []byte) (ret []byte, err error) - // Set sets the given value in the given table/bucket and key. - Set(bucket, key, value []byte) error - // CmpAndSwap swaps the value at the given bucket and key if the current - // value is equivalent to the oldValue input. Returns 'true' if the - // swap was successful and 'false' otherwise. - CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) - // Del deletes the data in the given table/bucket and key. 
- Del(bucket, key []byte) error - // List returns a list of all the entries in a given table/bucket. - List(bucket []byte) ([]*Entry, error) - // Update performs a transaction with multiple read-write commands. - Update(tx *Tx) error - // CreateTable creates a table or a bucket in the database. - CreateTable(bucket []byte) error - // DeleteTable deletes a table or a bucket in the database. - DeleteTable(bucket []byte) error -} - -// Badger FileLoadingMode constants. -const ( - BadgerMemoryMap = "mmap" - BadgerFileIO = "fileio" -) - -// TxCmd is the type used to represent database command and operations. -type TxCmd int - -const ( - // CreateTable on a TxEntry will represent the creation of a table or - // bucket on the database. - CreateTable TxCmd = iota - // DeleteTable on a TxEntry will represent the deletion of a table or - // bucket on the database. - DeleteTable - // Get on a TxEntry will represent a command to retrieve data from the - // database. - Get - // Set on a TxEntry will represent a command to write data on the - // database. - Set - // Delete on a TxEntry represent a command to delete data on the database. - Delete - // CmpAndSwap on a TxEntry will represent a compare and swap operation on - // the database. It will compare the value read and change it if it's - // different. The TxEntry will contain the value read. - CmpAndSwap - // CmpOrRollback on a TxEntry will represent a read transaction that will - // compare the values will the ones passed, and if they don't match the - // transaction will fail - CmpOrRollback -) - -// String implements the fmt.Stringer interface on TxCmd. 
-func (o TxCmd) String() string { - switch o { - case CreateTable: - return "create-table" - case DeleteTable: - return "delete-table" - case Get: - return "read" - case Set: - return "write" - case Delete: - return "delete" - case CmpAndSwap: - return "compare-and-swap" - case CmpOrRollback: - return "compare-and-rollback" - default: - return fmt.Sprintf("unknown(%d)", o) - } -} - -// Tx represents a transaction and it's list of multiple TxEntry. Each TxEntry -// represents a read or write operation on the database. -type Tx struct { - Operations []*TxEntry -} - -// CreateTable adds a new create query to the transaction. -func (tx *Tx) CreateTable(bucket []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Cmd: CreateTable, - }) -} - -// DeleteTable adds a new create query to the transaction. -func (tx *Tx) DeleteTable(bucket []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Cmd: DeleteTable, - }) -} - -// Get adds a new read query to the transaction. -func (tx *Tx) Get(bucket, key []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Cmd: Get, - }) -} - -// Set adds a new write query to the transaction. -func (tx *Tx) Set(bucket, key, value []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Value: value, - Cmd: Set, - }) -} - -// Del adds a new delete query to the transaction. -func (tx *Tx) Del(bucket, key []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Cmd: Delete, - }) -} - -// Cas adds a new compare-and-swap query to the transaction. -func (tx *Tx) Cas(bucket, key, value []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Value: value, - Cmd: CmpAndSwap, - }) -} - -// Cmp adds a new compare-or-rollback query to the transaction. 
-func (tx *Tx) Cmp(bucket, key, value []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Value: value, - Cmd: CmpOrRollback, - }) -} - -// TxEntry is the base elements for the transactions, a TxEntry is a read or -// write operation on the database. -type TxEntry struct { - Bucket []byte - Key []byte - Value []byte - CmpValue []byte - // Where the result of Get or CmpAndSwap txns is stored. - Result []byte - Cmd TxCmd - Swapped bool -} - -// Entry is the return value for list commands. -type Entry struct { - Bucket []byte - Key []byte - Value []byte -} diff --git a/database/notsupported.go b/database/notsupported.go deleted file mode 100644 index 3a6d835..0000000 --- a/database/notsupported.go +++ /dev/null @@ -1,46 +0,0 @@ -//nolint:revive // ignore mocked methods for unsupported DB type -package database - -// NotSupportedDB is a db implementation used on database drivers when the -// no tags are used. -type NotSupportedDB struct{} - -func (*NotSupportedDB) Open(dataSourceName string, opt ...Option) error { - return ErrOpNotSupported -} - -func (*NotSupportedDB) Close() error { - return ErrOpNotSupported -} - -func (*NotSupportedDB) Get(bucket, key []byte) (ret []byte, err error) { - return nil, ErrOpNotSupported -} - -func (*NotSupportedDB) Set(bucket, key, value []byte) error { - return ErrOpNotSupported -} - -func (*NotSupportedDB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - return nil, false, ErrOpNotSupported -} - -func (*NotSupportedDB) Del(bucket, key []byte) error { - return ErrOpNotSupported -} - -func (*NotSupportedDB) List(bucket []byte) ([]*Entry, error) { - return nil, ErrOpNotSupported -} - -func (*NotSupportedDB) Update(tx *Tx) error { - return ErrOpNotSupported -} - -func (*NotSupportedDB) CreateTable(bucket []byte) error { - return ErrOpNotSupported -} - -func (*NotSupportedDB) DeleteTable(bucket []byte) error { - return ErrOpNotSupported -} diff --git a/dbtest/dbtest.go 
b/dbtest/dbtest.go new file mode 100644 index 0000000..b744ff9 --- /dev/null +++ b/dbtest/dbtest.go @@ -0,0 +1,95 @@ +// Package dbtest implements a test suite for [nosql.DB] implementations. +package dbtest + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/smallstep/nosql" +) + +// Test tests the provided [nosql.DB]. +// +// The provided [nosql.DB] will be closed before Run returns. +// +// The given races flag denotes whether the given [nosql.DB] implementation should be tested +// for race-y conditions. +func Test(t *testing.T, db nosql.DB) { + t.Helper() + + t.Cleanup(func() { + assert.NoError(t, db.Close(newContext(t))) + }) + + s := &suite{ + db: db, + generated: map[string]map[string][]byte{}, + } + + tests := map[string]func(*testing.T){ + "CreateBucketValidations": s.testCreateBucketValidations, + "CreateBucket": s.testCreateBucket, + "DeleteBucketValidations": s.testDeleteBucketValidations, + "DeleteBucket": s.testDeleteBucket, + + "GetValidations": s.testGetValidations, + "Get": s.testGet, + "ViewerGetValidations": s.testViewerGetValidations, + "ViewerGet": s.testViewerGet, + "MutatorGetValidations": s.testMutatorGetValidations, + "MutatorGet": s.testMutatorGet, + + "DeleteConstraints": s.testDeleteValidations, + "Delete": s.testDelete, + "MutatorDeleteConstraints": s.testMutatorDeleteValidations, + "MutatorDelete": s.testMutatorDelete, + + "PutValidations": s.testPutValidations, + "Put": s.testPut, + "MutatorPutValidations": s.testMutatorPutValidations, + "MutatorPut": s.testMutatorPut, + + "CompareAndSwapValidations": s.testCompareAndSwapValidations, + "MutatorCompareAndSwapValidations": s.testMutatorCompareAndSwapValidations, + "CompareAndSwap": s.testCompareAndSwap, + "MutatorCompareAndSwap": s.testMutatorCompareAndSwap, + + "PutManyValidations": s.testPutManyValidations, + "MutatorPutManyValidations": s.testMutatorPutManyValidations, + "PutMany": s.testPutMany, + "MutatorPutMany": 
s.testMutatorPutMany, + "PutManyError": s.testPutManyError, + "MutatorPutManyError": s.testMutatorPutManyError, + + "ListValidations": s.testListValidations, + "List": s.testList, + "ViewerListValidations": s.testViewerListValidations, + "ViewerList": s.testViewerList, + "MutatorListValidations": s.testMutatorListValidations, + "MutatorList": s.testMutatorList, + + "CompoundViewer": s.testCompoundViewer, + "CompoundMutator": s.testCompoundMutator, + } + + for name, test := range tests { + t.Run(name, test) + } +} + +func newContext(t *testing.T) (ctx context.Context) { + t.Helper() + + var cancel context.CancelFunc + if dl, ok := t.Deadline(); ok { + ctx, cancel = context.WithDeadline(context.Background(), dl) + } else { + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + } + t.Cleanup(cancel) + + return +} diff --git a/dbtest/suite.go b/dbtest/suite.go new file mode 100644 index 0000000..a5dd50c --- /dev/null +++ b/dbtest/suite.go @@ -0,0 +1,1391 @@ +package dbtest + +import ( + "bytes" + "context" + "crypto/rand" + "errors" + mand "math/rand" + "slices" + "strconv" + "sync" + "testing" + "unicode" + "unicode/utf8" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smallstep/nosql" + "github.com/smallstep/nosql/internal/token" +) + +// suite implements state management for the test suite. 
+type suite struct { + db nosql.DB + + generatedMu sync.Mutex // protects generated + generated map[string]map[string][]byte // bucket -> key -> value +} + +func (s *suite) testCreateBucketValidations(t *testing.T) { + t.Parallel() + + s.assertBucketError(t, func(t *testing.T, bucket []byte) error { + return s.db.CreateBucket(newContext(t), bucket) + }) +} + +func (s *suite) testCreateBucket(t *testing.T) { + t.Parallel() + + cases := []struct { + bucket []byte // input bucket + err error // expected error + }{ + 0: {s.existingBucket(t), nil}, + 1: {s.newBucket(t), nil}, + } + + for caseIndex := range cases { + kase := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + if err := s.db.CreateBucket(newContext(t), kase.bucket); kase.err == nil { + assert.NoError(t, err) + } else { + assert.ErrorIs(t, kase.err, err) + } + }) + } +} + +func (s *suite) testDeleteBucketValidations(t *testing.T) { + t.Parallel() + + s.assertBucketError(t, func(t *testing.T, bucket []byte) error { + return s.db.DeleteBucket(newContext(t), bucket) + }) +} + +func (s *suite) testDeleteBucket(t *testing.T) { + t.Parallel() + + // create a bucket with a few records + nonEmpty := s.existingBucket(t) + var keys [][]byte + for i := 0; i < 5+mand.Intn(5); i++ { //nolint:gosec // not a sensitive op + key, _ := s.existingKey(t, nonEmpty) + keys = append(keys, key) + } + + cases := []struct { + bucket []byte + keys [][]byte + err error + }{ + 0: {s.newBucket(t), nil, nosql.ErrBucketNotFound}, + 1: {s.existingBucket(t), nil, nil}, + 2: {nonEmpty, keys, nil}, + } + + for caseIndex := range cases { + kase := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + ctx := newContext(t) + + if err := s.db.DeleteBucket(ctx, kase.bucket); kase.err != nil { + assert.ErrorIs(t, err, kase.err) + + return + } else if !assert.NoError(t, err) { + return + } + + // deleting a deleted bucket should yield [ErrBucketNotFound] + 
require.ErrorIs(t, s.db.DeleteBucket(ctx, kase.bucket), nosql.ErrBucketNotFound) + + // bucket should have deleted all of its keys as well when booted + for _, key := range kase.keys { + assert.ErrorIs(t, s.db.Delete(ctx, kase.bucket, key), nosql.ErrBucketNotFound) + } + }) + } +} + +func (s *suite) testGetValidations(t *testing.T) { + t.Parallel() + + s.testGetFuncValidations(t, s.db.Get) +} + +func (s *suite) testViewerGetValidations(t *testing.T) { + t.Parallel() + + s.testGetFuncValidations(t, func(ctx context.Context, bucket, key []byte) (value []byte, err error) { + err = s.db.View(ctx, func(v nosql.Viewer) (err error) { + value, err = v.Get(ctx, bucket, key) + + return + }) + + return + }) +} + +func (s *suite) testMutatorGetValidations(t *testing.T) { + t.Parallel() + + s.testGetFuncValidations(t, func(ctx context.Context, bucket, key []byte) (value []byte, err error) { + err = s.db.Mutate(ctx, func(m nosql.Mutator) (err error) { + value, err = m.Get(ctx, bucket, key) + + return + }) + + return + }) +} + +type getFunc func(ctx context.Context, bucket, key []byte) ([]byte, error) + +func (s *suite) testGetFuncValidations(t *testing.T, fn getFunc) { + t.Helper() + + s.assertIDError(t, func(t *testing.T, bucket, key []byte) (err error) { + _, err = fn(newContext(t), bucket, key) + return + }) +} + +func (s *suite) testGet(t *testing.T) { + t.Parallel() + + s.testGetFunc(t, s.db.Get) +} + +func (s *suite) testViewerGet(t *testing.T) { + t.Parallel() + + s.testGetFunc(t, func(ctx context.Context, bucket, key []byte) (value []byte, err error) { + err = s.db.View(ctx, func(v nosql.Viewer) (err error) { + value, err = v.Get(ctx, bucket, key) + + return + }) + return + }) +} + +func (s *suite) testMutatorGet(t *testing.T) { + t.Parallel() + + s.testGetFunc(t, func(ctx context.Context, bucket, key []byte) (value []byte, err error) { + err = s.db.Mutate(ctx, func(m nosql.Mutator) (err error) { + value, err = m.Get(ctx, bucket, key) + + return + }) + return + }) +} + 
+func (s *suite) testGetFunc(t *testing.T, get getFunc) { + t.Helper() + + cases := []func() ([]byte, []byte, []byte, error){ + 0: func() (bucket, key, value []byte, err error) { + // when the bucket does not exist, we expect ErrBucketNotFound + bucket = s.newBucket(t) + key = s.anyKey(t) + + err = nosql.ErrBucketNotFound + + return + }, + 1: func() (bucket, key, value []byte, err error) { + // when the bucket exists but the key does not, we expect nosql.ErrKeyNotFound + bucket = s.existingBucket(t) + key = s.anyKey(t) + + err = nosql.ErrKeyNotFound + + return + }, + 2: func() (bucket, key, value []byte, err error) { + // when the key exists in the bucket, we expect the value the key points to + bucket = s.existingBucket(t) + key, value = s.existingKey(t, bucket) + + return + }, + } + + for caseIndex := range cases { + setup := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + var ( + ctx = newContext(t) + bucket, key, expValue, expError = setup() + ) + + got, err := get(ctx, bucket, key) + if expError != nil { + assert.Same(t, expError, err) + assert.Nil(t, got) + + return + } + + // ensure we retrieved what we expected to retrieve + assert.NoError(t, err) + assert.Equal(t, expValue, got) + }) + } +} + +func (s *suite) testDeleteValidations(t *testing.T) { + t.Parallel() + + s.testDeleteFuncValidations(t, s.db.Delete) +} + +func (s *suite) testMutatorDeleteValidations(t *testing.T) { + t.Parallel() + + s.testDeleteFuncValidations(t, func(ctx context.Context, bucket, key []byte) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.Delete(ctx, bucket, key) + }) + }) +} + +func (s *suite) testDeleteFuncValidations(t *testing.T, del deleteFunc) { + t.Helper() + + s.assertIDError(t, func(t *testing.T, bucket, key []byte) (err error) { + return del(newContext(t), bucket, key) + }) +} + +func (s *suite) testDelete(t *testing.T) { + t.Parallel() + + s.testDeleteFunc(t, s.db.Delete) +} + +func (s *suite) 
testMutatorDelete(t *testing.T) { + t.Parallel() + + s.testDeleteFunc(t, func(ctx context.Context, bucket, key []byte) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.Delete(ctx, bucket, key) + }) + }) +} + +type deleteFunc func(ctx context.Context, bucket, key []byte) error + +func (s *suite) testDeleteFunc(t *testing.T, del deleteFunc) { + t.Helper() + + cases := []func() (bucket, key []byte, err error){ + 0: func() (bucket, key []byte, err error) { + // when the bucket does not exist, we expect ErrBucketNotFound + bucket = s.newBucket(t) + key = s.anyKey(t) + + err = nosql.ErrBucketNotFound + + return + }, + 1: func() (bucket, key []byte, err error) { + // when the bucket exists but the key does not, we expect no error + bucket = s.existingBucket(t) + key = s.anyKey(t) + + return + }, + 2: func() (bucket, key []byte, err error) { + // when the key exists, we expect no error + bucket = s.existingBucket(t) + key, _ = s.existingKey(t, bucket) + + return + }, + } + + for caseIndex := range cases { + setup := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + var ( + ctx = newContext(t) + bucket, key, exp = setup() + ) + + err := del(ctx, bucket, key) + if exp != nil { + assert.Same(t, exp, err) + + return + } + + assert.NoError(t, err) + + // ensure the key is not there any more + _, err = s.db.Get(ctx, bucket, key) + assert.Same(t, nosql.ErrKeyNotFound, err) + }) + } +} + +func (s *suite) testPutValidations(t *testing.T) { + t.Parallel() + + s.testPutFuncValidations(t, s.db.Put) +} + +func (s *suite) testMutatorPutValidations(t *testing.T) { + t.Parallel() + + s.testPutFuncValidations(t, func(ctx context.Context, bucket, key, value []byte) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.Put(ctx, bucket, key, value) + }) + }) +} + +func (s *suite) testPutFuncValidations(t *testing.T, put putFunc) { + t.Helper() + + s.assertRecordError(t, func(t *testing.T, bucket, key, 
value []byte) error { + return put(newContext(t), bucket, key, value) + }) +} + +type putFunc func(ctx context.Context, bucket, key, value []byte) error + +func (s *suite) testPut(t *testing.T) { + t.Parallel() + + s.testPutFunc(t, s.db.Put) +} + +func (s *suite) testMutatorPut(t *testing.T) { + t.Parallel() + + s.testPutFunc(t, func(ctx context.Context, bucket, key, value []byte) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.Put(ctx, bucket, key, value) + }) + }) +} + +func (s *suite) testPutFunc(t *testing.T, put putFunc) { + t.Helper() + + cases := []func() ([]byte, []byte, []byte, error){ + 0: func() (bucket, key, value []byte, err error) { + // when the bucket does not exist, we expect [nosql.ErrBucketNotFound] + bucket = s.newBucket(t) + key = s.anyKey(t) + value = s.anyValue(t) + + err = nosql.ErrBucketNotFound + + return + }, + 1: func() (bucket, key, value []byte, err error) { + // when the bucket exists but the key does not, we expect the key to be created + bucket = s.existingBucket(t) + key = s.newKey(t, bucket) + value = s.anyValue(t) + + return + }, + 2: func() (bucket, key, value []byte, err error) { + // when the key exists in the bucket, and the new value equals the old one, we expect + // a noop + bucket = s.existingBucket(t) + key, value = s.existingKey(t, bucket) + + return + }, + 3: func() (bucket, key, value []byte, err error) { + // when the key exists in the bucket, and the new value differs from the old one, we + // expect the value to be ovewritten + bucket = s.existingBucket(t) + key, value = s.existingKey(t, bucket) + value = s.differentValue(t, value) + + return + }, + } + + for caseIndex := range cases { + setup := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + var ( + ctx = newContext(t) + bucket, key, value, exp = setup() + ) + + if err := put(ctx, bucket, key, value); exp != nil { + assert.ErrorIs(t, exp, err) + + return + } else if !assert.NoError(t, err) { + 
return + } + + // ensure we can now read what we wrote + if v, err := s.db.Get(ctx, bucket, key); assert.NoError(t, err) { + assert.Equal(t, value, v) + } + }) + } +} + +func (s *suite) testCompareAndSwapValidations(t *testing.T) { + t.Parallel() + + s.testCompareAndSwapFuncValidations(t, s.db.CompareAndSwap) +} + +func (s *suite) testMutatorCompareAndSwapValidations(t *testing.T) { + t.Parallel() + + s.testCompareAndSwapFuncValidations(t, func(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.CompareAndSwap(ctx, bucket, key, oldValue, newValue) + }) + }) +} + +func (s *suite) testCompareAndSwapFuncValidations(t *testing.T, cas compareAndSwapFunc) { + t.Helper() + + cases := []struct { + bucket []byte + key []byte + oldValue []byte + newValue []byte + }{ + 0: {nil, s.anyKey(t), s.anyValue(t), s.anyValue(t)}, + 1: {[]byte{}, s.anyKey(t), s.anyValue(t), s.anyValue(t)}, + 2: {s.longBucket(t), s.anyKey(t), s.anyValue(t), s.anyValue(t)}, + 3: {s.invalidBucket(t), s.anyKey(t), s.anyValue(t), s.anyValue(t)}, + + 4: {s.newBucket(t), nil, s.anyValue(t), s.anyValue(t)}, + 5: {s.newBucket(t), []byte{}, s.anyValue(t), s.anyValue(t)}, + 6: {s.newBucket(t), s.longKey(t), s.anyValue(t), s.anyValue(t)}, + + 7: {s.newBucket(t), s.anyKey(t), nil, s.anyValue(t)}, + 8: {s.newBucket(t), s.anyKey(t), s.longValue(t), s.anyValue(t)}, + + 9: {s.newBucket(t), s.anyKey(t), s.anyValue(t), nil}, + 10: {s.newBucket(t), s.anyKey(t), s.anyValue(t), s.longValue(t)}, + } + + for caseIndex := range cases { + kase := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + exp := s.recordError(kase.bucket, kase.key, kase.oldValue) + if exp == nil { + exp = s.valueError(kase.newValue) + } + err := cas(newContext(t), kase.bucket, kase.key, kase.newValue, kase.oldValue) + + assert.ErrorIs(t, err, exp) + }) + } +} + +type compareAndSwapFunc func(ctx context.Context, bucket, key, 
oldValue, newValue []byte) error + +func (s *suite) testCompareAndSwap(t *testing.T) { + t.Parallel() + + s.testCompareAndSwapFunc(t, s.db.CompareAndSwap) +} + +func (s *suite) testMutatorCompareAndSwap(t *testing.T) { + t.Parallel() + + s.testCompareAndSwapFunc(t, func(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.CompareAndSwap(ctx, bucket, key, oldValue, newValue) + }) + }) +} + +func (s *suite) testCompareAndSwapFunc(t *testing.T, cas compareAndSwapFunc) { + t.Helper() + + cases := []func() (bucket, key, oldValue, newValue, foundValue []byte, err error){ + 0: func() (bucket, key, oldValue, newValue, _ []byte, err error) { + // when the bucket does not exist, we expect ErrBucketNotFound + bucket = s.newBucket(t) + key = s.anyKey(t) + oldValue = s.anyValue(t) + newValue = s.anyValue(t) + + err = nosql.ErrBucketNotFound + + return + }, + 1: func() (bucket, key, oldValue, newValue, _ []byte, err error) { + // when the key does not exist, we expect ErrKeyNotFound + bucket = s.existingBucket(t) + key = s.anyKey(t) + oldValue = s.anyValue(t) + newValue = s.anyValue(t) + + err = nosql.ErrKeyNotFound + + return + }, + 2: func() (bucket, key, oldValue, newValue, cmpValue []byte, _ error) { + // when current != old, we expect a [ComparisonError] + bucket = s.existingBucket(t) + key, cmpValue = s.existingKey(t, bucket) + + oldValue = s.differentValue(t, cmpValue) + newValue = s.anyValue(t) + + return + }, + 3: func() (bucket, key, oldValue, newValue, cmpValue []byte, _ error) { + // when current != old AND current == newValue, we expect a [ComparisonError] + bucket = s.existingBucket(t) + key, cmpValue = s.existingKey(t, bucket) + + oldValue = s.differentValue(t, cmpValue) + newValue = slices.Clone(cmpValue) + + return + }, + 4: func() (bucket, key, oldValue, newValue, _ []byte, _ error) { + // when current == old AND old == new, we expect no error + bucket = s.existingBucket(t) + 
key, oldValue = s.existingKey(t, bucket) + newValue = slices.Clone(oldValue) + + return + }, + 5: func() (bucket, key, oldValue, newValue, _ []byte, _ error) { + // when current == old AND old != new, we expect no error + bucket = s.existingBucket(t) + key, oldValue = s.existingKey(t, bucket) + newValue = s.differentValue(t, oldValue) + + return + }, + } + + for caseIndex := range cases { + setup := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + var ( + ctx = newContext(t) + bucket, key, oldValue, newValue, cmpValue, expErr = setup() + ) + + switch err := cas(ctx, bucket, key, oldValue, newValue); { + case expErr == nil && cmpValue == nil: + if !assert.NoError(t, err) { + return + } + + // ensure the change actually happened + if got, err := s.db.Get(ctx, bucket, key); assert.NoError(t, err) { + assert.Equal(t, newValue, got) + } + case cmpValue != nil && expErr == nil: + var ce *nosql.ComparisonError + require.ErrorAs(t, err, &ce) + + assert.Equal(t, cmpValue, ce.Value) + case expErr != nil && cmpValue == nil: + assert.ErrorIs(t, err, expErr) + default: + t.Fatal("invalid setup") + } + }) + } +} + +func (s *suite) testPutManyValidations(t *testing.T) { + t.Parallel() + + s.testPutManyFuncValidations(t, s.db.PutMany) +} + +func (s *suite) testMutatorPutManyValidations(t *testing.T) { + t.Parallel() + + s.testPutManyFuncValidations(t, func(ctx context.Context, records ...nosql.Record) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.PutMany(ctx, records...) + }) + }) +} + +func (s *suite) testPutManyFuncValidations(t *testing.T, putMany putManyFunc) { + t.Helper() + + s.assertRecordsError(t, func(t *testing.T, records ...nosql.Record) error { + return putMany(newContext(t), records...) 
+ }) +} + +type putManyFunc func(ctx context.Context, records ...nosql.Record) error + +func (s *suite) testPutMany(t *testing.T) { + t.Parallel() + + s.testPutManyFunc(t, s.db.PutMany) +} + +func (s *suite) testMutatorPutMany(t *testing.T) { + t.Parallel() + + s.testPutManyFunc(t, func(ctx context.Context, records ...nosql.Record) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.PutMany(ctx, records...) + }) + }) +} + +func (s *suite) testPutManyFunc(t *testing.T, putMany putManyFunc) { + t.Helper() + + // we'll create a few records that already exist + exp := s.existingRecords(t, 2, 3, 3, 4) + existing := len(exp) + + // and a few that do not (but for buckets that do) + for i := 0; i < 5+mand.Intn(5); i++ { //nolint:gosec // not a sensitive op + bucket := exp[mand.Intn(existing)].Bucket //nolint:gosec // not a sensitive op + + exp = append(exp, nosql.Record{ + Bucket: bucket, + Key: s.newKey(t, bucket), + Value: s.anyValue(t), + }) + } + + // for some of the existing records, we'll indicate we want a new value + for i := 0; i < existing; i++ { + if mand.Intn(2) == 1 { //nolint:gosec // not a sensitive op + exp[i].Value = s.differentValue(t, exp[i].Value) + } + } + + // and, finally, we'll add a secondary update for one of each records + di := mand.Intn(len(exp)) //nolint:gosec // not a sensitive op + dup := exp[di].Clone() + dup.Value = s.differentValue(t, dup.Value) + exp = append(exp, dup) + + ctx := newContext(t) + + // then we'll put them in the database, ensuring we get no error + require.NoError(t, putMany(ctx, exp...)) + + // and retrieve them to compare what we read is what we expect + for i, r := range exp { + v, err := s.db.Get(ctx, r.Bucket, r.Key) + if assert.NoError(t, err) { + if i == di { + // for this record, we expect the value to equal the one we set last + assert.Equal(t, exp[len(exp)-1].Value, v) + } else { + assert.Equal(t, r.Value, v) + } + } + } +} + +func (s *suite) testPutManyError(t *testing.T) { + t.Parallel() 
+ + s.testPutManyFuncError(t, s.db.PutMany) +} + +func (s *suite) testMutatorPutManyError(t *testing.T) { + t.Parallel() + + s.testPutManyFuncError(t, func(ctx context.Context, records ...nosql.Record) error { + return s.db.Mutate(ctx, func(m nosql.Mutator) error { + return m.PutMany(ctx, records...) + }) + }) +} + +func (s *suite) testPutManyFuncError(t *testing.T, putMany putManyFunc) { + t.Helper() + + var ( + exp = s.existingRecords(t, 2, 3, 2, 4) + src = make([]nosql.Record, 0, len(exp)) + ) + + // we'll instruct the database to change the values of about half of the records we created + // earlier + for _, r := range exp { + rr := r.Clone() + + if mand.Intn(2) == 1 { //nolint:gosec // not a sensitive op + rr.Value = s.differentValue(t, rr.Value) + } + src = append(src, rr) + } + + // we'll also ask the database to save the value of a key in a bucket that doesn't exist + src = append(src, nosql.Record{ + Bucket: s.newBucket(t), + Key: s.anyKey(t), + Value: s.anyValue(t), + }) + + ctx := newContext(t) + + // then we'll put them in the database and expect the command to fail + require.ErrorIs(t, putMany(ctx, src...), nosql.ErrBucketNotFound) + + // then we'll read back the original existing records and expect no changes to them + var got []nosql.Record + for _, r := range exp { + v, err := s.db.Get(ctx, r.Bucket, r.Key) + require.NoError(t, err) + + rr := r.Clone() + rr.Value = v + got = append(got, rr) + } + + assert.ElementsMatch(t, exp, got) +} + +func (s *suite) testListValidations(t *testing.T) { + t.Parallel() + + s.testListFuncValidations(t, s.db.List) +} + +func (s *suite) testViewerListValidations(t *testing.T) { + t.Parallel() + + s.testListFuncValidations(t, func(ctx context.Context, bucket []byte) (records []nosql.Record, err error) { + err = s.db.View(ctx, func(v nosql.Viewer) (err error) { + records, err = v.List(ctx, bucket) + + return + }) + + return + }) +} + +func (s *suite) testMutatorListValidations(t *testing.T) { + t.Parallel() + + 
s.testListFuncValidations(t, func(ctx context.Context, bucket []byte) (records []nosql.Record, err error) { + err = s.db.Mutate(ctx, func(m nosql.Mutator) (err error) { + records, err = m.List(ctx, bucket) + + return + }) + + return + }) +} + +type listFunc func(ctx context.Context, bucket []byte) ([]nosql.Record, error) + +func (s *suite) testListFuncValidations(t *testing.T, list listFunc) { + t.Helper() + + s.assertBucketError(t, func(t *testing.T, bucket []byte) error { + _, err := list(newContext(t), bucket) + return err + }) +} + +func (s *suite) testList(t *testing.T) { + t.Parallel() + + s.testListFunc(t, s.db.List) +} + +func (s *suite) testViewerList(t *testing.T) { + t.Parallel() + + s.testListFunc(t, func(ctx context.Context, bucket []byte) (records []nosql.Record, err error) { + err = s.db.View(ctx, func(v nosql.Viewer) (err error) { + records, err = v.List(ctx, bucket) + + return + }) + return + }) +} + +func (s *suite) testMutatorList(t *testing.T) { + t.Parallel() + + s.testListFunc(t, func(ctx context.Context, bucket []byte) (records []nosql.Record, err error) { + err = s.db.Mutate(ctx, func(m nosql.Mutator) (err error) { + records, err = m.List(ctx, bucket) + + return + }) + return + }) +} + +func (s *suite) testCompoundViewer(t *testing.T) { + t.Parallel() + + var ( + ctx = newContext(t) + + bucket = s.existingBucket(t) + key, expValue = s.existingKey(t, bucket) + ) + + var gotValue []byte + err := s.db.View(ctx, func(v nosql.Viewer) (err error) { + var ( + b1 = s.newBucket(t) // bucke that does not exist + k1 = s.anyKey(t) + ) + + if _, err = v.Get(ctx, b1, k1); !errors.Is(err, nosql.ErrBucketNotFound) { + panic(err) + } + + if _, err = v.List(ctx, b1); !errors.Is(err, nosql.ErrBucketNotFound) { + panic(err) + } + + gotValue, err = v.Get(ctx, bucket, key) + + return + }) + require.NoError(t, err) + require.Equal(t, expValue, gotValue) +} + +func (s *suite) testCompoundMutator(t *testing.T) { + t.Parallel() + + var ( + ctx = newContext(t) + + 
bucket = s.existingBucket(t) + key = s.newKey(t, bucket) + value = s.anyValue(t) + expValue = s.differentValue(t, value) + ) + + err := s.db.Mutate(ctx, func(m nosql.Mutator) error { + var ( + b1 = s.newBucket(t) // bucke that does not exist + k1 = s.anyKey(t) + v1 = s.anyValue(t) + ) + if err := m.Put(ctx, b1, k1, v1); !errors.Is(err, nosql.ErrBucketNotFound) { + panic(err) + } + + if _, err := m.Get(ctx, b1, k1); !errors.Is(err, nosql.ErrBucketNotFound) { + panic(err) + } + + if err := m.CompareAndSwap(ctx, b1, k1, v1, s.differentValue(t, v1)); !errors.Is(err, nosql.ErrBucketNotFound) { + panic(err) + } + + if _, err := m.List(ctx, b1); !errors.Is(err, nosql.ErrBucketNotFound) { + panic(err) + } + + if err := m.Delete(ctx, b1, k1); !errors.Is(err, nosql.ErrBucketNotFound) { + panic(err) + } + + // the above errors shouldn't stop the transaction from going through + return m.PutMany(ctx, + nosql.Record{Bucket: bucket, Key: key, Value: value}, + nosql.Record{Bucket: bucket, Key: key, Value: expValue}, + ) + }) + require.NoError(t, err) + + got, err := s.db.Get(ctx, bucket, key) + require.NoError(t, err) + require.Equal(t, expValue, got) +} + +func (s *suite) testListFunc(t *testing.T, list listFunc) { + t.Helper() + + var ( + exp = s.existingRecords(t, 2, 3, 2, 3) + buckets = map[string][]nosql.Record{} + ) + for _, r := range exp { + bucket := string(r.Bucket) + + buckets[bucket] = append(buckets[bucket], r) + } + + for _, bucket := range buckets { + slices.SortFunc(bucket, compareRecords) + } + buckets[string(s.newBucket(t))] = nil // a bucket that doesn't exist + + ctx := newContext(t) + + for bucket, exp := range buckets { + got, err := list(ctx, []byte(bucket)) + if exp == nil { + assert.ErrorIs(t, err, nosql.ErrBucketNotFound) + assert.Nil(t, got) + + return + } + + require.NoError(t, err) + require.Len(t, got, len(exp)) + require.Equal(t, exp, got) + } +} + +func (s *suite) existingRecords(t *testing.T, minBuckets, maxBuckets, minPerBucket, maxPerBucket int) 
(records []nosql.Record) { + t.Helper() + + for i := 0; i < minBuckets+mand.Intn(maxBuckets-minBuckets); i++ { //nolint:gosec // not a sensitive op + bucket := s.existingBucket(t) + + for j := 0; j < minPerBucket+mand.Intn(maxPerBucket-minPerBucket); j++ { //nolint:gosec // not a sensitive op + var r nosql.Record + + r.Bucket = slices.Clone(bucket) + r.Key, r.Value = s.existingKey(t, bucket) + + records = append(records, r) + } + } + + slices.SortFunc(records, compareRecords) + + return +} + +// longBucket returns a bucket that's at least [nosql.MaxBucketSize] + 1 bytes long. +func (*suite) longBucket(t *testing.T) []byte { + t.Helper() + + return token.New(t, nosql.MaxBucketSize+1, nosql.MaxBucketSize+2, true) +} + +// invalidBucket returns a bucket that's invalid. +func (*suite) invalidBucket(t *testing.T) (bucket []byte) { + t.Helper() + + l := nosql.MinBucketSize + mand.Intn(nosql.MaxBucketSize-nosql.MinBucketSize) //nolint:gosec // not a sensitive op + bucket = make([]byte, l) + + for { + _, err := rand.Read(bucket) + require.NoError(t, err) + + if bytes.IndexByte(bucket, 0) > -1 || !utf8.Valid(bucket) { + return + } else if r, _ := utf8.DecodeLastRune(bucket); unicode.IsSpace(r) { + return + } + } +} + +// existingBucket adds a new bucket to the database and returns it. +func (s *suite) existingBucket(t *testing.T) (bucket []byte) { + t.Helper() + + bucket = s.newBucket(t) + require.NoError(t, s.db.CreateBucket(newContext(t), bucket)) + + s.generatedMu.Lock() + s.generated[string(bucket)] = map[string][]byte{} + s.generatedMu.Unlock() + + return bucket +} + +// newBucket returns a bucket that's not in the database. 
+func (s *suite) newBucket(t *testing.T) (bucket []byte) { + t.Helper() + + for { + m := nosql.MinBucketSize + if mand.Intn(100) < 10 { //nolint:gosec // not a sensitive op + // ensure 10% of the generated buckets have the longest possible size + m = nosql.MaxBucketSize + } + + bucket = token.New(t, m, nosql.MaxBucketSize, true) + + s.generatedMu.Lock() + if _, ok := s.generated[string(bucket)]; ok { + s.generatedMu.Unlock() + + continue + } + + s.generated[string(bucket)] = nil + s.generatedMu.Unlock() + + return + } +} + +// newKey returns a key that's not in the provided bucket. The provided bucket must already exist. +func (s *suite) newKey(t *testing.T, bucket []byte) (key []byte) { + t.Helper() + + for { + key = s.anyKey(t) + + s.generatedMu.Lock() + b := s.generated[string(bucket)] + if b == nil { + s.generatedMu.Unlock() + + panic("bucket does not exist") + } + + if _, ok := b[string(key)]; ok { + s.generatedMu.Unlock() + + continue // duplicate key + } + + s.generated[string(bucket)][string(key)] = nil + s.generatedMu.Unlock() + + return + } +} + +// anyKey returns a random key +func (*suite) anyKey(t *testing.T) []byte { + t.Helper() + + m := nosql.MinKeySize + if mand.Intn(100) < 10 { //nolint:gosec // not a sensitive op + // ensure that 10% of the generated keys are as long as possible + m = nosql.MaxKeySize + } + + return token.New(t, m, nosql.MaxKeySize, false) +} + +// longKey returns a key that's at least [nosql.MaxKeySize] + 1 bytes long. +func (*suite) longKey(t *testing.T) []byte { + t.Helper() + + return token.New(t, nosql.MaxKeySize+1, nosql.MaxKeySize+2, false) +} + +// existingKey adds a key (pointing to a random value) to the provided bucket and returns it +// along with the value it generated for it. 
+func (s *suite) existingKey(t *testing.T, bucket []byte) (key, value []byte) { + t.Helper() + + key = s.newKey(t, bucket) + value = s.anyValue(t) + + require.NoError(t, s.db.Put(newContext(t), bucket, key, value)) + + s.generatedMu.Lock() + s.generated[string(bucket)][string(key)] = slices.Clone(value) + s.generatedMu.Unlock() + + return key, value +} + +// anyValue returns a random value +func (*suite) anyValue(t *testing.T) []byte { + t.Helper() + + m := 1 + if mand.Intn(100) < 10 { //nolint:gosec // not a sensitive op + // ensure that 10% of the generated values are as long as possible + m = nosql.MaxValueSize + } + + return token.New(t, m, nosql.MaxValueSize, false) +} + +// longValue returns a value that's at least [nosql.MaxValueSize] + 1 bytes long. +func (*suite) longValue(t *testing.T) []byte { + t.Helper() + + return token.New(t, nosql.MaxValueSize+1, nosql.MaxValueSize+2, false) +} + +// anyValue returns a random value different to the given one +func (s *suite) differentValue(t *testing.T, current []byte) (value []byte) { + t.Helper() + + for { + if value = s.anyValue(t); !bytes.Equal(current, value) { + return + } + } +} + +func (s *suite) assertBucketError(t *testing.T, fn func(*testing.T, []byte) error) { + t.Helper() + + cases := [][]byte{ + 0: nil, + 1: {}, + 2: s.invalidBucket(t), + 3: s.longBucket(t), + } + + for caseIndex := range [][]byte{} { + bucket := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + exp := s.bucketError(bucket) + err := fn(t, bucket) + + assert.ErrorIs(t, err, exp) + }) + } +} + +func (s *suite) assertIDError(t *testing.T, fn func(t *testing.T, bucket, key []byte) error) { + t.Helper() + + cases := []struct { + bucket []byte + key []byte + }{ + 0: {nil, s.anyKey(t)}, + 1: {[]byte{}, s.anyKey(t)}, + 2: {s.longBucket(t), s.anyKey(t)}, + 3: {s.invalidBucket(t), s.anyKey(t)}, + + 4: {s.newBucket(t), nil}, + 5: {s.newBucket(t), []byte{}}, + 6: {s.newBucket(t), s.longKey(t)}, + } + + for 
caseIndex := range cases { + kase := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + exp := s.idError(kase.bucket, kase.key) + err := fn(t, kase.bucket, kase.key) + + assert.ErrorIs(t, err, exp) + }) + } +} + +func (s *suite) assertRecordError(t *testing.T, fn func(t *testing.T, bucket, key, value []byte) error) { + t.Helper() + + cases := []struct { + bucket, key, value []byte + }{ + 0: {nil, s.anyKey(t), s.anyValue(t)}, + 1: {[]byte{}, s.anyKey(t), s.anyValue(t)}, + 2: {s.longBucket(t), s.anyKey(t), s.anyValue(t)}, + 3: {s.invalidBucket(t), s.anyKey(t), s.anyValue(t)}, + + 4: {s.newBucket(t), nil, s.anyValue(t)}, + 5: {s.newBucket(t), []byte{}, s.anyValue(t)}, + 6: {s.newBucket(t), s.longKey(t), s.anyValue(t)}, + + 7: {s.newBucket(t), s.anyKey(t), nil}, + 8: {s.newBucket(t), s.anyKey(t), s.longValue(t)}, + } + + for caseIndex := range cases { + kase := cases[caseIndex] + + t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + t.Parallel() + + exp := s.recordError(kase.bucket, kase.key, kase.value) + got := fn(t, kase.bucket, kase.key, kase.value) + assert.ErrorIs(t, got, exp) + }) + } +} + +func (s *suite) assertRecordsError(t *testing.T, fn func(*testing.T, ...nosql.Record) error) { + t.Helper() + + cases := []nosql.Record{ + 0: {Bucket: nil, Key: s.anyKey(t), Value: s.anyValue(t)}, + 1: {Bucket: []byte{}, Key: s.anyKey(t), Value: s.anyValue(t)}, + 2: {Bucket: s.longBucket(t), Key: s.anyKey(t), Value: s.anyValue(t)}, + 3: {Bucket: s.invalidBucket(t), Key: s.anyKey(t), Value: s.anyValue(t)}, + + 4: {Bucket: s.newBucket(t), Key: nil, Value: s.anyValue(t)}, + 5: {Bucket: s.newBucket(t), Key: []byte{}, Value: s.anyValue(t)}, + 6: {Bucket: s.newBucket(t), Key: s.longKey(t), Value: s.anyValue(t)}, + + 7: {Bucket: s.newBucket(t), Key: s.anyKey(t), Value: nil}, + 8: {Bucket: s.newBucket(t), Key: s.anyKey(t), Value: s.longValue(t)}, + } + + for caseIndex := range cases { + kase := cases[caseIndex] + + 
t.Run(strconv.Itoa(caseIndex), func(t *testing.T) { + records := []nosql.Record{kase} + for len(records) < 10+mand.Intn(90) { //nolint:gosec // not a sensitive op + records = append(records, nosql.Record{ + Bucket: s.newBucket(t), + Key: s.anyKey(t), + Value: s.anyValue(t), + }) + } + shuffleRecords(records) + + exp := s.recordsError(records...) + err := fn(t, records...) + + assert.ErrorIs(t, err, exp) + }) + } +} + +// recordsError returns the value we expect for the given records. +func (s *suite) recordsError(records ...nosql.Record) (err error) { + for _, r := range records { + if err = s.recordError(r.Bucket, r.Key, r.Value); err != nil { + break + } + } + + return +} + +// recordError returns the value we expect for the given Record particulars. +func (s *suite) recordError(bucket, key, value []byte) (err error) { + if err = s.idError(bucket, key); err == nil { + err = s.valueError(value) + } + return +} + +// idError returns the error we expect for the given ID particulars. +func (s *suite) idError(bucket, key []byte) (err error) { + if err = s.bucketError(bucket); err == nil { + err = s.keyError(key) + } + return +} + +// bucketError returns the error we expect for the given bucket. +func (*suite) bucketError(bucket []byte) error { + if bucket == nil { + return nosql.ErrNilBucket + } + if l := len(bucket); l == 0 { + return nosql.ErrEmptyBucket + } else if l > nosql.MaxBucketSize { + return nosql.ErrBucketTooLong + } + + for { + r, s := utf8.DecodeRune(bucket) + switch { + case r == utf8.RuneError && s == 1: + return nosql.ErrInvalidBucket + case r < 1, r > '\U0000FFFF': + return nosql.ErrInvalidBucket + } + + if bucket = bucket[s:]; len(bucket) == 0 { + if unicode.IsSpace(r) { + return nosql.ErrInvalidBucket + } + + return nil + } + } +} + +// keyError returns the error we expect for the given key. 
+func (*suite) keyError(key []byte) error { + if key == nil { + return nosql.ErrNilKey + } + if l := len(key); l == 0 { + return nosql.ErrEmptyKey + } else if l > nosql.MaxKeySize { + return nosql.ErrKeyTooLong + } + return nil +} + +// valueError returns the error we expect for the given value. +func (*suite) valueError(value []byte) error { + if value == nil { + return nosql.ErrNilValue + } else if l := len(value); l > nosql.MaxValueSize { + return nosql.ErrValueTooLong + } + return nil +} + +func compareRecords(a, b nosql.Record) (comparison int) { + if comparison = bytes.Compare(a.Bucket, b.Bucket); comparison == 0 { + if comparison = bytes.Compare(a.Key, b.Key); comparison == 0 { + comparison = bytes.Compare(a.Value, b.Value) + } + } + + return +} + +func shuffleRecords(records []nosql.Record) { + mand.Shuffle(len(records), func(i, j int) { + records[i], records[j] = records[j], records[i] + }) +} diff --git a/driver/badger/driver.go b/driver/badger/driver.go new file mode 100644 index 0000000..02794b9 --- /dev/null +++ b/driver/badger/driver.go @@ -0,0 +1,452 @@ +// Package badger implements a [nosql.Driver] for badger databases. +package badger + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "path/filepath" + "slices" + + badgerv1 "github.com/dgraph-io/badger" + badgerv2 "github.com/dgraph-io/badger/v2" + badgerv3 "github.com/dgraph-io/badger/v3" + + "github.com/smallstep/nosql" + "github.com/smallstep/nosql/internal/each" +) + +func init() { + nosql.Register("badger", Open) +} + +// ErrNoRewrite is returned when CompactByFactor doesn't result in any rewrites. +var ErrNoRewrite = errors.New("nosql/badger: CompactByFactor didn't result in any cleanup") + +// Open implements a [nosql.Driver] for badger databases. In case of an existing database, [Open] +// will use either [OpenV1] or [OpenV2] depending on the version contained in the database's +// MANIFEST file. 
+//
+// When creating new databases, [Open] is a passthrough call to [OpenV4].
+func Open(ctx context.Context, dir string) (nosql.DB, error) {
+	return determineOpener(dir)(ctx, dir)
+}
+
+// determineOpener replays the database's MANIFEST file with each badger
+// version's decoder, from oldest to newest, to determine which version wrote
+// it. When no MANIFEST can be opened (e.g. the database does not exist yet)
+// or detection fails, the latest version (V4) is used.
+func determineOpener(dir string) (opener func(context.Context, string) (nosql.DB, error)) {
+	opener = OpenV4 // use the latest as the default
+
+	f, err := os.Open(filepath.Join(dir, "MANIFEST"))
+	if err != nil {
+		return // either no database exists or another error took place; use the default
+	}
+	defer f.Close()
+
+	if _, _, err = badgerv1.ReplayManifestFile(f); err == nil {
+		opener = OpenV1
+
+		return
+	} else if _, err = f.Seek(0, io.SeekStart); err != nil {
+		return // can't rewind for the next attempt; use the default
+	}
+
+	if _, _, err = badgerv2.ReplayManifestFile(f); err == nil {
+		opener = OpenV2
+
+		return
+	} else if _, err = f.Seek(0, io.SeekStart); err != nil {
+		// the file must be rewound before the V3 replay attempt; if the seek
+		// fails we fall back to the default instead of replaying from a
+		// mis-positioned reader
+		return
+	}
+
+	if _, _, err = badgerv3.ReplayManifestFile(f); err == nil {
+		opener = OpenV3
+	}
+
+	return
+}
+
+// item defines the item generic constraint.
+type item interface {
+	Key() []byte
+	KeyCopy([]byte) []byte
+	Value(func([]byte) error) error
+	ValueCopy([]byte) ([]byte, error)
+}
+
+// iterator defines the iterator generic constraint.
+type iterator[KV item] interface {
+	Close()
+	Item() KV
+	Seek([]byte)
+	ValidForPrefix([]byte) bool
+	Next()
+}
+
+// tx defines the transaction generic constraint.
+type tx[IO any, KV item, I iterator[KV]] interface {
+	Delete(key []byte) error
+	Set(key, value []byte) error
+	Get(key []byte) (KV, error)
+	NewIterator(opt IO) I
+}
+
+// db defines the database generic constraint.
+type db[IO any, KV item, I iterator[KV], T tx[IO, KV, I]] interface { + Close() error + View(func(T) error) error + Update(func(T) error) error + RunValueLogGC(float64) error +} + +type wrapper[IO any, KV item, I iterator[KV], TX tx[IO, KV, I]] struct { + db db[IO, KV, I, TX] + isKeyNotFound func(error) bool + isNoRewrite func(error) bool + keysOnlyIteratorOptions func(prefix []byte) IO + keysAndValuesIteratorOptions func(prefix []byte) IO +} + +func (w *wrapper[_, _, _, _]) CompactByFactor(_ context.Context, factor float64) (err error) { + if err = w.db.RunValueLogGC(factor); err != nil && w.isNoRewrite(err) { + err = ErrNoRewrite + } + return +} + +func (w *wrapper[_, _, _, _]) Close(_ context.Context) error { + return w.db.Close() +} + +func (w *wrapper[_, _, _, TX]) CreateBucket(_ context.Context, bucket []byte) error { + id := encode(nil, bucket) + + return w.db.Update(func(tx TX) error { + return tx.Set(id, []byte{}) + }) +} + +func (w *wrapper[_, _, _, TX]) DeleteBucket(_ context.Context, bucket []byte) error { + prefix := encode(nil, bucket) + + return w.db.Update(func(tx TX) (err error) { + it := tx.NewIterator(w.keysOnlyIteratorOptions(prefix)) + defer it.Close() + + var found bool + for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { + id := it.Item().Key() + if bytes.Equal(prefix, id) { + found = true + } + + if err = tx.Delete(slices.Clone(id)); err != nil { + return + } + } + + if !found { + err = nosql.ErrBucketNotFound + } + + return + }) +} + +func (w *wrapper[_, _, _, _]) Get(ctx context.Context, bucket, key []byte) (value []byte, err error) { + err = w.View(ctx, func(v nosql.Viewer) (err error) { + value, err = v.Get(ctx, bucket, key) + + return + }) + + return +} + +func (w *wrapper[_, _, _, _]) Put(ctx context.Context, bucket, key, value []byte) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.Put(ctx, bucket, key, value) + }) +} + +func (w *wrapper[_, _, _, _]) PutMany(ctx context.Context, records 
...nosql.Record) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.PutMany(ctx, records...) + }) +} + +func (w *wrapper[_, _, _, _]) Delete(ctx context.Context, bucket, key []byte) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.Delete(ctx, bucket, key) + }) +} + +func (w *wrapper[_, _, _, _]) List(ctx context.Context, bucket []byte) (records []nosql.Record, err error) { + err = w.View(ctx, func(v nosql.Viewer) (err error) { + records, err = v.List(ctx, bucket) + + return + }) + + return +} + +func (w *wrapper[_, _, _, _]) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.CompareAndSwap(ctx, bucket, key, oldValue, newValue) + }) +} + +func (w *wrapper[IO, KV, S, TX]) View(_ context.Context, fn func(nosql.Viewer) error) error { + return w.db.View(func(tx TX) error { + return fn(&viewer[IO, KV, S, TX]{ + tx: tx, + isKeyNotFound: w.isKeyNotFound, + keysAndValuesIteratorOptions: w.keysAndValuesIteratorOptions, + }) + }) +} + +func (w *wrapper[IO, KV, S, TX]) Mutate(_ context.Context, fn func(nosql.Mutator) error) error { + return w.db.Update(func(tx TX) error { + return fn(&mutator[IO, KV, S, TX]{ + tx: tx, + isKeyNotFound: w.isKeyNotFound, + keysAndValuesIteratorOptions: w.keysAndValuesIteratorOptions, + KeysOnlyIteratorOptions: w.keysOnlyIteratorOptions, + }) + }) +} + +type viewer[IO any, KV item, S iterator[KV], TX tx[IO, KV, S]] struct { + tx TX + isKeyNotFound func(error) bool + keysAndValuesIteratorOptions func([]byte) IO +} + +func (v *viewer[_, _, _, _]) Get(_ context.Context, bucket, key []byte) ([]byte, error) { + return get(v.tx, bucket, key, v.isKeyNotFound) +} + +func (v *viewer[IO, KV, S, _]) List(_ context.Context, bucket []byte) ([]nosql.Record, error) { + prefix := encode(nil, bucket) + + it := v.tx.NewIterator(v.keysAndValuesIteratorOptions(prefix)) + defer it.Close() + + return list[IO, KV, S](it, prefix) +} 
+ +type mutator[IO any, KV item, S iterator[KV], TX tx[IO, KV, S]] struct { + tx TX + isKeyNotFound func(error) bool + KeysOnlyIteratorOptions func([]byte) IO + keysAndValuesIteratorOptions func([]byte) IO +} + +func (m *mutator[_, _, _, _]) Get(_ context.Context, bucket, key []byte) ([]byte, error) { + return get(m.tx, bucket, key, m.isKeyNotFound) +} + +func (m *mutator[IO, KV, S, _]) List(_ context.Context, bucket []byte) ([]nosql.Record, error) { + prefix := encode(nil, bucket) + + it := m.tx.NewIterator(m.keysAndValuesIteratorOptions(prefix)) + defer it.Close() + + return list[IO, KV, S](it, prefix) +} + +func (m *mutator[_, _, _, _]) CompareAndSwap(_ context.Context, bucket, key, oldValue, newValue []byte) error { + id := encode(nil, bucket, key) + + if err := checkPrefix(m.tx, id[:2+len(bucket)], m.isKeyNotFound); err != nil { + return err + } + + item, err := m.tx.Get(id) + if err != nil { + if m.isKeyNotFound(err) { + err = nosql.ErrKeyNotFound + } + + return err + } + + if err := item.Value(func(current []byte) error { + if !bytes.Equal(current, oldValue) { + return &nosql.ComparisonError{ + Value: slices.Clone(current), + } + } + + return nil + }); err != nil { + return err + } + + return m.tx.Set(id, newValue) +} + +func (m *mutator[_, _, _, _]) Delete(_ context.Context, bucket, key []byte) (err error) { + id := encode(nil, bucket, key) + + if err = checkPrefix(m.tx, id[:2+len(bucket)], m.isKeyNotFound); err == nil { + err = m.tx.Delete(id) + } + + return +} + +func (m *mutator[_, _, _, _]) Put(_ context.Context, bucket, key, value []byte) (err error) { + id := encode(nil, bucket, key) + + if err = checkPrefix(m.tx, id[:2+len(bucket)], m.isKeyNotFound); err == nil { + err = m.tx.Set(id, value) + } + + return +} + +func (m *mutator[_, _, _, _]) PutMany(_ context.Context, records ...nosql.Record) error { + if len(records) == 0 { + return nil + } + + prefix := make([]byte, 0, 2+nosql.MaxBucketSize) + + return each.Bucket(records, func(bucket []byte, rex 
[]*nosql.Record) (err error) { + prefix = encode(prefix[:0], bucket) + + if err = checkPrefix(m.tx, prefix, m.isKeyNotFound); err != nil { + return + } + + for _, r := range rex { + id := encode(prefix[:len(prefix):len(prefix)], r.Key) + + if err = m.tx.Set(id, r.Value); err != nil { + break + } + } + + return + }) +} + +func get[IO any, KV item, S iterator[KV], TX tx[IO, KV, S]](tx TX, bucket, key []byte, isKeyNotFound func(error) bool) ([]byte, error) { + id := encode(nil, bucket, key) + + if err := checkPrefix(tx, id[:2+len(bucket)], isKeyNotFound); err != nil { + return nil, err + } + + switch itm, err := tx.Get(id); { + case err == nil: + return itm.ValueCopy(nil) + case isKeyNotFound(err): + return nil, nosql.ErrKeyNotFound + default: + return nil, err + } +} + +func list[IO any, KV item, S iterator[KV]](it S, prefix []byte) ([]nosql.Record, error) { + skip := len(prefix) + 2 + + var records []nosql.Record + var foundBucket bool + for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { + item := it.Item() + if bytes.Equal(prefix, item.Key()) { + foundBucket = true + + continue // found the bucket + } + + value, err := item.ValueCopy(nil) + if err != nil { + return nil, err + } + + records = append(records, nosql.Record{ + Bucket: slices.Clone(prefix[2:]), + Key: slices.Clone(item.Key()[skip:]), + Value: value, + }) + } + + if !foundBucket { + return nil, nosql.ErrBucketNotFound + } + + // we're delimiting the tokens so a sort is required. + slices.SortFunc(records, func(a, b nosql.Record) (v int) { + return bytes.Compare(a.Key, b.Key) + }) + + return records, nil +} + +// checkPrefix checks whether the provided prefix (which is an encoded bucket) exists. 
+func checkPrefix[IO any, KV item, S iterator[KV], TX tx[IO, KV, S]](tx TX, prefix []byte, isKeyNotFound func(error) bool) (err error) { + if _, err = tx.Get(prefix); err != nil && isKeyNotFound(err) { + err = nosql.ErrBucketNotFound + } + return +} + +// encode appends the encoded representation of the given tokens to the given destination buffer +// and returns the result. +func encode(dst []byte, tokens ...[]byte) []byte { + for _, tok := range tokens { + if l := len(tok); l > math.MaxUint16 { + panic(fmt.Errorf("token is too long (%d)", l)) + } + } + + for _, tok := range tokens { + dst = binary.LittleEndian.AppendUint16(dst, uint16(len(tok))) + dst = append(dst, tok...) + } + + return dst +} + +// ContextWithOptions returns a copy of the provided [context.Context] that carries the provided +// [Options]. +func ContextWithOptions(ctx context.Context, opts Options) context.Context { + return context.WithValue(ctx, optionsContextKeyType{}, opts) +} + +// OptionsFromContext reports the [Options] the provided [context.Context] carries or sensible +// defaults. +func OptionsFromContext(ctx context.Context) (opts Options) { + opts, _ = ctx.Value(optionsContextKeyType{}).(Options) + + return +} + +type optionsContextKeyType struct{} + +// Options wraps the set of configuration for badger databases. +type Options struct { + // ValueDir specifies the directory to use for values. If empty, + // the database directory will be used in its place. + ValueDir string + + // Logger specifies the logger to use. If nil, a default one + // will be used in its place. + Logger Logger +} + +// Logger wraps the set of badger loggers. 
+type Logger interface { + Errorf(string, ...any) + Warningf(string, ...any) + Infof(string, ...any) + Debugf(string, ...any) +} diff --git a/driver/badger/driver_test.go b/driver/badger/driver_test.go new file mode 100644 index 0000000..ab33d5f --- /dev/null +++ b/driver/badger/driver_test.go @@ -0,0 +1,192 @@ +package badger + +import ( + "bytes" + "context" + "fmt" + "io" + "testing" + + badgerv1 "github.com/dgraph-io/badger" + badgerv2 "github.com/dgraph-io/badger/v2" + badgerv3 "github.com/dgraph-io/badger/v3" + badgerv4 "github.com/dgraph-io/badger/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smallstep/nosql" + "github.com/smallstep/nosql/dbtest" +) + +var versions = map[string]nosql.Driver{ + "Default": Open, + "V1": OpenV1, + "V2": OpenV2, + "V3": OpenV3, + "V4": OpenV4, +} + +func Test(t *testing.T) { + t.Parallel() + + for v := range versions { + open := versions[v] + + t.Run(v, func(t *testing.T) { + t.Parallel() + + db, err := open(context.Background(), t.TempDir()) + require.NoError(t, err) + + dbtest.Test(t, db) + }) + } +} + +func TestCompactByFactor(t *testing.T) { + t.Parallel() + + for v := range versions { + open := versions[v] + + t.Run(v, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + db, err := open(ctx, t.TempDir()) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, db.Close(ctx)) }) + + assert.ErrorIs(t, db.(nosql.CompactedByFactor).CompactByFactor(ctx, 0.7), ErrNoRewrite) + }) + } +} + +func TestOpenExisting(t *testing.T) { + t.Parallel() + + cases := map[string]string{} + + dir1 := t.TempDir() + db1, err := badgerv1.Open(badgerv1.DefaultOptions(dir1)) + require.NoError(t, err) + require.NoError(t, db1.Close()) + cases["v1"] = dir1 + + dir2 := t.TempDir() + db2, err := badgerv2.Open(badgerv2.DefaultOptions(dir2)) + require.NoError(t, err) + require.NoError(t, db2.Close()) + cases["v2"] = dir2 + + dir3 := t.TempDir() + db3, err := 
badgerv3.Open(badgerv3.DefaultOptions(dir3)) + require.NoError(t, err) + require.NoError(t, db3.Close()) + cases["v3"] = dir3 + + dir4 := t.TempDir() + db4, err := badgerv4.Open(badgerv4.DefaultOptions(dir4)) + require.NoError(t, err) + require.NoError(t, db4.Close()) + cases["v4"] = dir4 + + for v := range cases { + dir := cases[v] + + t.Run(v, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + db, err := Open(ctx, dir) + require.NoError(t, err) + require.NoError(t, db.Close(ctx)) + }) + } +} + +func TestOpenNew(t *testing.T) { + var ( + ctx = context.Background() + dir = t.TempDir() + ) + + db, err := Open(ctx, dir) + require.NoError(t, err) + initDB(ctx, t, db) + require.NoError(t, db.Close(ctx)) + + db, err = OpenV4(ctx, dir) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, db.Close(ctx)) }) + + checkDB(ctx, t, db) +} + +func initDB(ctx context.Context, t *testing.T, db nosql.DB) { + t.Helper() + + require.NoError(t, db.CreateBucket(ctx, []byte{1})) + require.NoError(t, db.Put(ctx, []byte{1}, []byte{2}, []byte{3})) +} + +func checkDB(ctx context.Context, t *testing.T, db nosql.DB) { + t.Helper() + + got, err := db.Get(ctx, []byte{1}, []byte{2}) + require.NoError(t, err) + require.Equal(t, []byte{3}, got) +} + +func TestLogger(t *testing.T) { + t.Parallel() + + for v := range versions { + open := versions[v] + + t.Run(v, func(t *testing.T) { + t.Parallel() + + l, b := newLogger(t) + + ctx := ContextWithOptions(context.Background(), Options{ + Logger: l, + }) + db, err := open(ctx, t.TempDir()) + require.NoError(t, err) + require.NoError(t, db.Close(ctx)) + + assert.NotEmpty(t, b.String()) + }) + } +} + +func newLogger(t *testing.T) (l Logger, b *bytes.Buffer) { + t.Helper() + + b = new(bytes.Buffer) + l = &testLogger{b} + + return +} + +type testLogger struct { + w io.Writer +} + +func (l *testLogger) Errorf(format string, v ...any) { + _, _ = fmt.Fprintf(l.w, format, v...) 
+} + +func (l *testLogger) Warningf(format string, v ...any) { + _, _ = fmt.Fprintf(l.w, format, v...) +} + +func (l *testLogger) Infof(format string, v ...any) { + _, _ = fmt.Fprintf(l.w, format, v...) +} + +func (l *testLogger) Debugf(format string, v ...any) { + _, _ = fmt.Fprintf(l.w, format, v...) +} diff --git a/driver/badger/v1.go b/driver/badger/v1.go new file mode 100644 index 0000000..70278c3 --- /dev/null +++ b/driver/badger/v1.go @@ -0,0 +1,61 @@ +package badger + +import ( + "context" + "errors" + + badgerv1 "github.com/dgraph-io/badger" + + "github.com/smallstep/nosql" +) + +func init() { + nosql.Register("badgerv1", OpenV1) +} + +// OpenV1 implements a [nosql.Driver] for badger V1 databases. +func OpenV1(ctx context.Context, dir string) (nosql.DB, error) { + opts := OptionsFromContext(ctx) + o, err := opts.toV1(dir) + if err != nil { + return nil, err + } + + db, err := badgerv1.Open(o) + if err != nil { + return nil, err + } + + w := &wrapper[badgerv1.IteratorOptions, *badgerv1.Item, *badgerv1.Iterator, *badgerv1.Txn]{ + db: db, + isKeyNotFound: func(err error) bool { return errors.Is(err, badgerv1.ErrKeyNotFound) }, + isNoRewrite: func(err error) bool { return errors.Is(err, badgerv1.ErrNoRewrite) }, + keysOnlyIteratorOptions: func(prefix []byte) badgerv1.IteratorOptions { + return badgerv1.IteratorOptions{ + Prefix: prefix, + } + }, + keysAndValuesIteratorOptions: func(prefix []byte) badgerv1.IteratorOptions { + return badgerv1.IteratorOptions{ + PrefetchValues: true, + PrefetchSize: 100, + Prefix: prefix, + } + }, + } + + return nosql.Constrain(w), nil +} + +func (o *Options) toV1(dir string) (opts badgerv1.Options, err error) { + opts = badgerv1.DefaultOptions(dir) + + if o.ValueDir != "" { + opts.ValueDir = o.ValueDir + } + if o.Logger != nil { + opts.Logger = o.Logger + } + + return +} diff --git a/driver/badger/v2.go b/driver/badger/v2.go new file mode 100644 index 0000000..03710cb --- /dev/null +++ b/driver/badger/v2.go @@ -0,0 +1,57 @@ 
+package badger + +import ( + "context" + "errors" + + badgerv2 "github.com/dgraph-io/badger/v2" + + "github.com/smallstep/nosql" +) + +func init() { + nosql.Register("badgerv2", OpenV2) +} + +// OpenV2 implements a [nosql.Driver] for badger V2 databases. +func OpenV2(ctx context.Context, dir string) (nosql.DB, error) { + opts := OptionsFromContext(ctx) + + db, err := badgerv2.Open(opts.toV2(dir)) + if err != nil { + return nil, err + } + + w := &wrapper[badgerv2.IteratorOptions, *badgerv2.Item, *badgerv2.Iterator, *badgerv2.Txn]{ + db: db, + isKeyNotFound: func(err error) bool { return errors.Is(err, badgerv2.ErrKeyNotFound) }, + isNoRewrite: func(err error) bool { return errors.Is(err, badgerv2.ErrNoRewrite) }, + keysOnlyIteratorOptions: func(prefix []byte) badgerv2.IteratorOptions { + return badgerv2.IteratorOptions{ + Prefix: prefix, + } + }, + keysAndValuesIteratorOptions: func(prefix []byte) badgerv2.IteratorOptions { + return badgerv2.IteratorOptions{ + PrefetchValues: true, + PrefetchSize: 100, + Prefix: prefix, + } + }, + } + + return nosql.Constrain(w), nil +} + +func (o *Options) toV2(dir string) (opts badgerv2.Options) { + opts = badgerv2.DefaultOptions(dir) + + if o.ValueDir != "" { + opts.ValueDir = o.ValueDir + } + if o.Logger != nil { + opts.Logger = o.Logger + } + + return +} diff --git a/driver/badger/v3.go b/driver/badger/v3.go new file mode 100644 index 0000000..8c841cb --- /dev/null +++ b/driver/badger/v3.go @@ -0,0 +1,57 @@ +package badger + +import ( + "context" + "errors" + + badgerv3 "github.com/dgraph-io/badger/v3" + + "github.com/smallstep/nosql" +) + +func init() { + nosql.Register("badgerv3", OpenV3) +} + +// OpenV3 implements a [nosql.Driver] for badger V3 databases. 
+func OpenV3(ctx context.Context, dir string) (nosql.DB, error) { + opts := OptionsFromContext(ctx) + + db, err := badgerv3.Open(opts.toV3(dir)) + if err != nil { + return nil, err + } + + w := &wrapper[badgerv3.IteratorOptions, *badgerv3.Item, *badgerv3.Iterator, *badgerv3.Txn]{ + db: db, + isKeyNotFound: func(err error) bool { return errors.Is(err, badgerv3.ErrKeyNotFound) }, + isNoRewrite: func(err error) bool { return errors.Is(err, badgerv3.ErrNoRewrite) }, + keysOnlyIteratorOptions: func(prefix []byte) badgerv3.IteratorOptions { + return badgerv3.IteratorOptions{ + Prefix: prefix, + } + }, + keysAndValuesIteratorOptions: func(prefix []byte) badgerv3.IteratorOptions { + return badgerv3.IteratorOptions{ + PrefetchValues: true, + PrefetchSize: 100, + Prefix: prefix, + } + }, + } + + return nosql.Constrain(w), nil +} + +func (o *Options) toV3(dir string) (opts badgerv3.Options) { + opts = badgerv3.DefaultOptions(dir) + + if o.ValueDir != "" { + opts.ValueDir = o.ValueDir + } + if o.Logger != nil { + opts.Logger = o.Logger + } + + return +} diff --git a/driver/badger/v4.go b/driver/badger/v4.go new file mode 100644 index 0000000..16c7a52 --- /dev/null +++ b/driver/badger/v4.go @@ -0,0 +1,57 @@ +package badger + +import ( + "context" + "errors" + + badgerv4 "github.com/dgraph-io/badger/v4" + + "github.com/smallstep/nosql" +) + +func init() { + nosql.Register("badgerv4", OpenV4) +} + +// OpenV4 implements a [nosql.Driver] for badger V4 databases. 
+func OpenV4(ctx context.Context, dir string) (nosql.DB, error) { + opts := OptionsFromContext(ctx) + + db, err := badgerv4.Open(opts.toV4(dir)) + if err != nil { + return nil, err + } + + w := &wrapper[badgerv4.IteratorOptions, *badgerv4.Item, *badgerv4.Iterator, *badgerv4.Txn]{ + db: db, + isKeyNotFound: func(err error) bool { return errors.Is(err, badgerv4.ErrKeyNotFound) }, + isNoRewrite: func(err error) bool { return errors.Is(err, badgerv4.ErrNoRewrite) }, + keysOnlyIteratorOptions: func(prefix []byte) badgerv4.IteratorOptions { + return badgerv4.IteratorOptions{ + Prefix: prefix, + } + }, + keysAndValuesIteratorOptions: func(prefix []byte) badgerv4.IteratorOptions { + return badgerv4.IteratorOptions{ + PrefetchValues: true, + PrefetchSize: 100, + Prefix: prefix, + } + }, + } + + return nosql.Constrain(w), nil +} + +func (o *Options) toV4(dir string) (opts badgerv4.Options) { + opts = badgerv4.DefaultOptions(dir) + + if o.ValueDir != "" { + opts.ValueDir = o.ValueDir + } + if o.Logger != nil { + opts.Logger = o.Logger + } + + return +} diff --git a/driver/bolt/driver.go b/driver/bolt/driver.go new file mode 100644 index 0000000..ffa0472 --- /dev/null +++ b/driver/bolt/driver.go @@ -0,0 +1,274 @@ +// Package bolt implements a [nosql.Driver] for bolt databases. +package bolt + +import ( + "bytes" + "context" + "errors" + "os" + "slices" + "time" + + "go.etcd.io/bbolt" + + "github.com/smallstep/nosql" + "github.com/smallstep/nosql/internal/each" +) + +func init() { + nosql.Register("bolt", Open) + nosql.Register("bbolt", Open) // to keep compatibility with earlier versions +} + +// Open implements a [nosql.Driver] for boltdb databases. 
+func Open(ctx context.Context, path string) (nosql.DB, error) { + var ( + opts = OptionsFromContext(ctx) + fm = FileModeFromContext(ctx) + ) + + db, err := bbolt.Open(path, fm, opts) + if err != nil { + return nil, err + } + + return nosql.Constrain(&wrapper{ + db: db, + }), nil +} + +type wrapper struct { + db *bbolt.DB +} + +func (w *wrapper) Close(context.Context) error { + return w.db.Close() +} + +func (w *wrapper) CreateBucket(_ context.Context, bucket []byte) error { + return w.db.Update(func(tx *bbolt.Tx) (err error) { + _, err = tx.CreateBucketIfNotExists(bucket) + + return + }) +} + +func (w *wrapper) DeleteBucket(_ context.Context, bucket []byte) error { + return w.db.Update(func(tx *bbolt.Tx) (err error) { + switch err = tx.DeleteBucket(bucket); { + case err == nil: + break + case errors.Is(err, bbolt.ErrBucketNotFound): + err = nosql.ErrBucketNotFound + } + + return + }) +} + +func (w *wrapper) Get(ctx context.Context, bucket, key []byte) (value []byte, err error) { + err = w.View(ctx, func(v nosql.Viewer) (err error) { + value, err = v.Get(ctx, bucket, key) + + return + }) + + return +} + +func (w *wrapper) Put(ctx context.Context, bucket, key, value []byte) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.Put(ctx, bucket, key, value) + }) +} + +func (w *wrapper) PutMany(ctx context.Context, records ...nosql.Record) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.PutMany(ctx, records...) 
+ }) +} + +func (w *wrapper) Delete(ctx context.Context, bucket, key []byte) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.Delete(ctx, bucket, key) + }) +} + +func (w *wrapper) List(ctx context.Context, bucket []byte) (records []nosql.Record, err error) { + err = w.View(ctx, func(v nosql.Viewer) (err error) { + records, err = v.List(ctx, bucket) + + return + }) + + return +} + +func (w *wrapper) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + return w.Mutate(ctx, func(m nosql.Mutator) error { + return m.CompareAndSwap(ctx, bucket, key, oldValue, newValue) + }) +} + +func (w *wrapper) View(_ context.Context, fn func(nosql.Viewer) error) error { + return w.db.View(func(tx *bbolt.Tx) error { + return fn(&viewer{tx}) + }) +} + +func (w *wrapper) Mutate(_ context.Context, fn func(nosql.Mutator) error) error { + return w.db.Update(func(tx *bbolt.Tx) error { + return fn(&mutator{tx}) + }) +} + +type viewer struct { + tx *bbolt.Tx +} + +func (v *viewer) Get(_ context.Context, bucket, key []byte) ([]byte, error) { + return get(v.tx, bucket, key) +} + +func (v *viewer) List(_ context.Context, bucket []byte) ([]nosql.Record, error) { + return list(v.tx, bucket) +} + +type mutator struct { + tx *bbolt.Tx +} + +func (m *mutator) Get(_ context.Context, bucket, key []byte) ([]byte, error) { + return get(m.tx, bucket, key) +} + +func (m *mutator) List(_ context.Context, bucket []byte) ([]nosql.Record, error) { + return list(m.tx, bucket) +} + +func (m *mutator) CompareAndSwap(_ context.Context, bucket, key, oldValue, newValue []byte) error { + b := m.tx.Bucket(bucket) + if b == nil { + return nosql.ErrBucketNotFound + } + + val := b.Get(key) + if val == nil { + return nosql.ErrKeyNotFound + } + + if !bytes.Equal(oldValue, val) { + return &nosql.ComparisonError{ + Value: slices.Clone(val), + } + } + + return b.Put(key, newValue) +} + +func (m *mutator) Delete(_ context.Context, bucket, key []byte) error { + b := 
m.tx.Bucket(bucket) + if b == nil { + return nosql.ErrBucketNotFound + } + + return b.Delete(key) +} + +func (m *mutator) Put(_ context.Context, bucket, key, value []byte) error { + b := m.tx.Bucket(bucket) + if b == nil { + return nosql.ErrBucketNotFound + } + + return b.Put(key, value) +} + +func (m *mutator) PutMany(_ context.Context, records ...nosql.Record) error { + if len(records) == 0 { + return nil + } + + return each.Bucket(records, func(bucket []byte, rex []*nosql.Record) error { + b := m.tx.Bucket(bucket) + if b == nil { + return nosql.ErrBucketNotFound + } + + for _, r := range rex { + if err := b.Put(r.Key, r.Value); err != nil { + return err + } + } + + return nil + }) +} + +func get(tx *bbolt.Tx, bucket, key []byte) (value []byte, err error) { + if b := tx.Bucket(bucket); b == nil { + err = nosql.ErrBucketNotFound + } else if value = b.Get(key); value == nil { + err = nosql.ErrKeyNotFound + } + + return +} + +func list(tx *bbolt.Tx, bucket []byte) (records []nosql.Record, err error) { + b := tx.Bucket(bucket) + if b == nil { + err = nosql.ErrBucketNotFound + + return + } + + err = b.ForEach(func(key, value []byte) error { + records = append(records, nosql.Record{ + Bucket: slices.Clone(bucket), + Key: slices.Clone(key), + Value: slices.Clone(value), + }) + + return nil + }) + + return +} + +// ContextWithOptions returns a [context.Context] that carries the provided [bbolt.Options]. +func ContextWithOptions(ctx context.Context, opts *bbolt.Options) context.Context { + return context.WithValue(ctx, optionsKey{}, opts) +} + +// OptionsFromContext reports the [bbolt.Options] the given [context.Context] carries or sensible +// defaults. 
+func OptionsFromContext(ctx context.Context) (opts *bbolt.Options) { + var ok bool + if opts, ok = ctx.Value(optionsKey{}).(*bbolt.Options); !ok || opts == nil { + opts = &bbolt.Options{ + Timeout: 5 * time.Second, + FreelistType: bbolt.FreelistArrayType, + } + } + + return opts +} + +type optionsKey struct{} + +// ContextWithFileMode returns a [context.Context] that carries the provided [bbolt.Options]. +func ContextWithFileMode(ctx context.Context, fm os.FileMode) context.Context { + return context.WithValue(ctx, fileModeKey{}, fm) +} + +// FileModeFromContext reports the [os.FileMode] the given [context.Context] carries or 0600. +func FileModeFromContext(ctx context.Context) (fm os.FileMode) { + var ok bool + if fm, ok = ctx.Value(fileModeKey{}).(os.FileMode); !ok { + fm = 0600 + } + return +} + +type fileModeKey struct{} diff --git a/driver/bolt/driver_test.go b/driver/bolt/driver_test.go new file mode 100644 index 0000000..253d14f --- /dev/null +++ b/driver/bolt/driver_test.go @@ -0,0 +1,71 @@ +package bolt + +import ( + "context" + mand "math/rand" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" + + "github.com/smallstep/nosql/dbtest" +) + +func Test(t *testing.T) { + db, err := Open(newContext(t), filepath.Join(t.TempDir(), "bolt.db")) + require.NoError(t, err) + + dbtest.Test(t, db) +} + +func TestContextWithOptions(t *testing.T) { + var ( + exp = new(bbolt.Options) + got = OptionsFromContext(ContextWithOptions(context.Background(), exp)) + ) + + assert.Same(t, exp, got) +} + +func TestContextWithOptionsOnContextWithoutOptions(t *testing.T) { + var ( + exp = &bbolt.Options{ + Timeout: 5 * time.Second, + NoGrowSync: false, + FreelistType: bbolt.FreelistArrayType, + } + got = OptionsFromContext(context.Background()) + ) + + assert.Equal(t, exp, got) +} + +func TestContextWithFileMode(t *testing.T) { + var ( + exp = os.FileMode(mand.Uint32()) + got = 
FileModeFromContext(ContextWithFileMode(context.Background(), exp)) + ) + + assert.Equal(t, exp, got) +} + +func TestContextWithFileModeOnContextWithoutFileMode(t *testing.T) { + var ( + exp os.FileMode = 0600 + got = FileModeFromContext(context.Background()) + ) + + assert.Equal(t, exp, got) +} + +func newContext(t *testing.T) context.Context { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + t.Cleanup(cancel) + return ctx +} diff --git a/driver/mysql/driver.go b/driver/mysql/driver.go new file mode 100644 index 0000000..ac0f30d --- /dev/null +++ b/driver/mysql/driver.go @@ -0,0 +1,431 @@ +// Package mysql implements a [nosql.Driver] for MySQL databases. +package mysql + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "slices" + "strings" + + "github.com/go-sql-driver/mysql" + + "github.com/smallstep/nosql" + "github.com/smallstep/nosql/internal/each" +) + +// Open implements a [nosql.Driver] for MySQL databases. +func Open(ctx context.Context, dsn string) (nosql.DB, error) { + cfg, err := mysql.ParseDSN(dsn) + if err != nil { + return nil, err + } + + if err := setup(ctx, cfg); err != nil { + return nil, err + } + + mdb, err := sql.Open("mysql", dsn) + if err != nil { + return nil, err + } + + return nosql.Constrain(&db{ + pool: mdb, + }), nil +} + +// setup ensures that the database exists +func setup(ctx context.Context, cfg *mysql.Config) (err error) { + cfg = cfg.Clone() // work on a clone of the config + + dbName := cfg.DBName + cfg.DBName = "" + + var db *sql.DB + if db, err = sql.Open("mysql", cfg.FormatDSN()); err != nil { + return err + } + defer func() { + if e := db.Close(); err == nil { + err = e + } + }() + + const checkSQL = /* sql */ ` + SELECT TRUE + FROM INFORMATION_SCHEMA.SCHEMATA + WHERE SCHEMA_NAME = ? 
+	`
+
+	var exists bool
+	switch err = db.QueryRowContext(ctx, checkSQL, dbName).Scan(&exists); {
+	case err == nil:
+		return // database exists
+	case isNoRows(err):
+		// database does not exist; create it
+		createSQL := fmt.Sprintf( /* sql */ `
+			CREATE DATABASE IF NOT EXISTS %s;
+		`, quote(dbName))
+
+		_, err = db.ExecContext(ctx, createSQL)
+	}
+
+	return
+}
+
+type db struct {
+	pool *sql.DB
+}
+
+func (db *db) Close(context.Context) error {
+	return db.pool.Close()
+}
+
+func (db *db) tx(ctx context.Context, opts *sql.TxOptions, fn func(tx *sql.Tx) error) (err error) {
+	var conn *sql.Conn
+	if conn, err = db.pool.Conn(ctx); err != nil {
+		return
+	}
+	defer conn.Close()
+
+	var tx *sql.Tx
+	if tx, err = conn.BeginTx(ctx, opts); err != nil {
+		return
+	}
+
+	if err = fn(tx); err != nil {
+		_ = tx.Rollback()
+	} else {
+		err = tx.Commit()
+	}
+
+	return
+}
+
+func (db *db) CreateBucket(ctx context.Context, bucket []byte) error {
+	table := quote(bucket)
+
+	const checkQuery = /* sql */ `
+		SELECT TRUE
+		FROM INFORMATION_SCHEMA.TABLES
+		WHERE TABLE_SCHEMA = DATABASE() AND table_name = ?;
+	`
+
+	var exists bool
+	if err := db.pool.QueryRowContext(ctx, checkQuery, bucket).Scan(&exists); !isNoRows(err) {
+		return err // either nil (which means the table is there) or another error
+	}
+
+	createQuery := fmt.Sprintf( /* sql */ `
+		CREATE TABLE IF NOT EXISTS %s (
+			nkey VARBINARY(%d) PRIMARY KEY NOT NULL CHECK ( octet_length(nkey) >= %d ),
+			nvalue BLOB NOT NULL CHECK ( octet_length(nvalue) <= %d )
+		);
+	`, table, nosql.MaxKeySize, nosql.MinKeySize, nosql.MaxValueSize)
+
+	_, err := db.pool.ExecContext(ctx, createQuery)
+
+	return err
+}
+
+func (db *db) DeleteBucket(ctx context.Context, bucket []byte) (err error) {
+	query := fmt.Sprintf( /* sql */ `
+		DROP TABLE %s;
+	`, quote(bucket))
+
+	switch _, err = db.pool.ExecContext(ctx, query); {
+	case err == nil:
+		break
+	case isTableNotFound(err):
+		err = nosql.ErrBucketNotFound
+	}
+
+	return
+}
+
+func (db *db) 
Delete(ctx context.Context, bucket, key []byte) error { + return del(ctx, db.pool, bucket, key) +} + +func (db *db) PutMany(ctx context.Context, records ...nosql.Record) error { + return db.Mutate(ctx, func(m nosql.Mutator) error { + return m.PutMany(ctx, records...) + }) +} + +func (db *db) Put(ctx context.Context, bucket, key, value []byte) error { + return put(ctx, db.pool, bucket, key, value) +} + +func (db *db) Get(ctx context.Context, bucket, key []byte) ([]byte, error) { + return get(ctx, db.pool, bucket, key) +} + +func (db *db) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + return db.Mutate(ctx, func(m nosql.Mutator) error { + return m.CompareAndSwap(ctx, bucket, key, oldValue, newValue) + }) +} + +func (db *db) List(ctx context.Context, bucket []byte) ([]nosql.Record, error) { + return list(ctx, db.pool, bucket) +} + +var viewOpts = sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, +} + +func (db *db) View(ctx context.Context, fn func(nosql.Viewer) error) error { + return db.tx(ctx, &viewOpts, func(tx *sql.Tx) error { + return fn(&wrapper{db, tx}) + }) +} + +var mutationOpts = sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: false, +} + +func (db *db) Mutate(ctx context.Context, fn func(nosql.Mutator) error) error { + return db.tx(ctx, &mutationOpts, func(tx *sql.Tx) error { + return fn(&wrapper{db, tx}) + }) +} + +type wrapper struct { + db *db + tx *sql.Tx +} + +func (w *wrapper) Get(ctx context.Context, bucket, key []byte) ([]byte, error) { + return get(ctx, w.tx, bucket, key) +} + +func (w *wrapper) Put(ctx context.Context, bucket, key, value []byte) error { + return put(ctx, w.tx, bucket, key, value) +} + +func (w *wrapper) Delete(ctx context.Context, bucket, key []byte) (err error) { + return del(ctx, w.tx, bucket, key) +} + +func (w *wrapper) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + var ( + table = quote(bucket) + + query = 
fmt.Sprintf( /* sql */ ` + UPDATE %s SET + nvalue = ? + WHERE nkey = ? AND nvalue = ?; + `, table) + ) + + if ret, err := w.tx.ExecContext(ctx, query, newValue, key, oldValue); err != nil { + if isTableNotFound(err) { + err = nosql.ErrBucketNotFound + } + + return err + } else if rowsAffected, err := ret.RowsAffected(); err != nil { + return err + } else if rowsAffected > 0 { + return nil // the row was updated + } + + // the update didn't happen; grab the rows earlier version (if any) + + query = fmt.Sprintf( /* sql */ ` + SELECT nvalue + FROM %s + WHERE nkey = ?; + `, table) + + var current []byte + if err := w.tx.QueryRowContext(ctx, query, key).Scan(¤t); err != nil { + if isNoRows(err) { + err = nosql.ErrKeyNotFound + } + + return err + } else if bytes.Equal(current, oldValue) { + // the update didn't happen because the old value is the same with the new value + return nil + } + + return &nosql.ComparisonError{ + Value: current, + } +} + +func (w *wrapper) PutMany(ctx context.Context, records ...nosql.Record) error { + if len(records) == 0 { + return nil // save the round trip + } + + var keysAndVals []any // reusable keys/value buffer + + return each.Bucket(records, func(bucket []byte, rex []*nosql.Record) error { + for _, r := range rex { + keysAndVals = append(keysAndVals, r.Key, r.Value) + } + + var ( + suffix = strings.TrimSuffix(strings.Repeat("(?, ?), ", len(keysAndVals)>>1), ", ") + + query = fmt.Sprintf( /* sql */ ` + INSERT INTO %s (nkey, nvalue) + VALUES %s + ON DUPLICATE KEY UPDATE nvalue = VALUES(nvalue); + `, quote(bucket), suffix) + ) + + if _, err := w.tx.ExecContext(ctx, query, keysAndVals...); err != nil { + if isTableNotFound(err) { + err = nosql.ErrBucketNotFound + } + + return err + } + + keysAndVals = keysAndVals[:0] + + return nil + }) +} + +func (w *wrapper) List(ctx context.Context, bucket []byte) ([]nosql.Record, error) { + return list(ctx, w.tx, bucket) +} + +// --- helpers + +// generic constraints +// generic constraints +type ( + 
executor interface { + ExecContext(context.Context, string, ...any) (sql.Result, error) + } + + querier interface { + QueryContext(context.Context, string, ...any) (*sql.Rows, error) + } + + rowQuerier interface { + QueryRowContext(context.Context, string, ...any) *sql.Row + } +) + +func get[RQ rowQuerier](ctx context.Context, rq RQ, bucket, key []byte) (value []byte, err error) { + query := fmt.Sprintf( /* sql */ ` + SELECT nvalue + FROM %s + WHERE nkey = ?; + `, quote(bucket)) + + switch err = rq.QueryRowContext(ctx, query, key).Scan(&value); { + case isNoRows(err): + err = nosql.ErrKeyNotFound + case isTableNotFound(err): + err = nosql.ErrBucketNotFound + } + + return +} + +func list[Q querier](ctx context.Context, q Q, bucket []byte) ([]nosql.Record, error) { + query := fmt.Sprintf( /* sql */ ` + SELECT nkey, nvalue + FROM %s + ORDER BY nkey; + `, quote(bucket)) + + rows, err := q.QueryContext(ctx, query) + if err != nil { + if isTableNotFound(err) { + err = nosql.ErrBucketNotFound + } + + return nil, err + } + defer rows.Close() + + var records []nosql.Record + var key, value sql.RawBytes + for rows.Next() { + if err := rows.Scan(&key, &value); err != nil { + return nil, err + } + + records = append(records, nosql.Record{ + Bucket: slices.Clone(bucket), + Key: slices.Clone(key), + Value: slices.Clone(value), + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return records, nil +} + +func del[E executor](ctx context.Context, e E, bucket, key []byte) (err error) { + query := fmt.Sprintf( /* sql */ ` + DELETE FROM %s + WHERE nkey = ?; + `, quote(bucket)) + + if _, err = e.ExecContext(ctx, query, key); err != nil && isTableNotFound(err) { + err = nosql.ErrBucketNotFound + } + + return +} + +func put[E executor](ctx context.Context, e E, bucket, key, value []byte) (err error) { + query := fmt.Sprintf( /* sql */ ` + INSERT INTO %s ( nkey, nvalue ) + VALUES ( ?, ? 
) + ON DUPLICATE KEY UPDATE nvalue = VALUES(nvalue); + `, quote(bucket)) + + if _, err = e.ExecContext(ctx, query, key, value); err != nil && isTableNotFound(err) { + err = nosql.ErrBucketNotFound + } + + return +} + +func quote[T ~string | ~[]byte](id T) string { + var sb strings.Builder + sb.Grow(2*len(id) + 2) + + sb.WriteByte('`') + + for i := 0; i < len(id); i++ { + c := id[i] + + if c == '`' { + sb.WriteByte(c) + } + sb.WriteByte(c) + } + + sb.WriteByte('`') + + return sb.String() +} + +func isNoRows(err error) bool { + return errors.Is(err, sql.ErrNoRows) +} + +func isTableNotFound(err error) bool { + var me *mysql.MySQLError + return errors.As(err, &me) && + (me.Number == 1051 || me.Number == 1146) +} diff --git a/driver/mysql/driver_test.go b/driver/mysql/driver_test.go new file mode 100644 index 0000000..f98987a --- /dev/null +++ b/driver/mysql/driver_test.go @@ -0,0 +1,67 @@ +package mysql + +import ( + "context" + "database/sql" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/go-sql-driver/mysql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smallstep/nosql/dbtest" +) + +func Test(t *testing.T) { + t.Parallel() + + for _, name := range []string{"MySQL", "MariaDB"} { + name := name + + t.Run(name, func(t *testing.T) { + t.Parallel() + + key := fmt.Sprintf("TEST_%s_DSN", strings.ToUpper(name)) + dsn := os.Getenv(key) + if dsn == "" { + t.Skipf("$%s is missing or empty; test skipped", key) + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + dropTestDatabase(ctx, t, dsn) + + db, err := Open(ctx, dsn) + require.NoError(t, err) + + dbtest.Test(t, db) + }) + } +} + +func dropTestDatabase(ctx context.Context, t *testing.T, dsn string) { + t.Helper() + + cfg, err := mysql.ParseDSN(dsn) + require.NoError(t, err) + + cfg = cfg.Clone() + dbName := cfg.DBName + cfg.DBName = "" + + db, err := sql.Open("mysql", cfg.FormatDSN()) + require.NoError(t, 
err) + t.Cleanup(func() { assert.NoError(t, db.Close()) }) + + query := fmt.Sprintf( /* sql */ ` + DROP DATABASE IF EXISTS %s; + `, quote(dbName)) + + _, err = db.ExecContext(ctx, query) + assert.NoError(t, err) +} diff --git a/driver/postgresql/driver.go b/driver/postgresql/driver.go new file mode 100644 index 0000000..512769e --- /dev/null +++ b/driver/postgresql/driver.go @@ -0,0 +1,490 @@ +// Package postgresql implements a [nosql.Driver] for Postgres databases. +package postgresql + +import ( + "context" + "errors" + "fmt" + "hash/maphash" + "os" + "os/user" + "slices" + "strings" + + "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" + + "github.com/smallstep/nosql" + "github.com/smallstep/nosql/internal/each" +) + +func init() { + nosql.Register("postgresql", Open) +} + +// Open implements a [nosql.Driver] for Postgres databases. +func Open(ctx context.Context, dsn string) (nosql.DB, error) { + pool, err := pgxpool.New(ctx, dsn) + if err != nil { + return nil, err + } else if err := setup(ctx, pool.Config().ConnConfig); err != nil { + pool.Close() + + return nil, err + } + + return nosql.Constrain(&db{ + pool: pool, + }), nil +} + +func setup(ctx context.Context, cfg *pgx.ConnConfig) (err error) { + db := determineDatabaseName(cfg) + cfg.Database = "postgres" + + var conn *pgx.Conn + if conn, err = pgx.ConnectConfig(ctx, cfg); err != nil { + return + } + defer conn.Close(ctx) + + // check if the database already exists + const checkSQL = /* sql */ ` + SELECT TRUE + FROM pg_catalog.pg_database + WHERE datname = $1; + ` + var exists bool + switch err = conn.QueryRow(ctx, checkSQL, db).Scan(&exists); { + case err == nil: + return // database exists + case isNoRows(err): + break // database does not exist; proceed with creating it + default: + return // another error occurred + } + + createSQL := fmt.Sprintf( /* sql */ ` + CREATE DATABASE %s; + `, quote(db)) + + if _, err = 
conn.Exec(ctx, createSQL); isPostgresErrorCode(err, pgerrcode.DuplicateDatabase) {
+		err = nil // the database was created while we were also trying to create it
+	}
+
+	return err
+}
+
+func determineDatabaseName(cfg *pgx.ConnConfig) (db string) {
+	if db = cfg.Database; db == "" {
+		db = os.Getenv("PGDATABASE")
+	}
+	if db == "" {
+		db = cfg.User
+	}
+	if db == "" {
+		if u, err := user.Current(); err == nil {
+			db = u.Username
+		}
+	}
+	return
+}
+
+type db struct {
+	pool *pgxpool.Pool
+}
+
+func (db *db) Close(context.Context) error {
+	db.pool.Close()
+	return nil
+}
+
+func (db *db) CreateBucket(ctx context.Context, bucket []byte) error {
+	return pgx.BeginTxFunc(ctx, db.pool, readWriteOpts, func(tx pgx.Tx) error {
+		// we avoid CREATE TABLE IF NOT EXISTS in case the table is there but
+		// the permissions are not
+
+		const checkSQL = /* sql */ `
+			SELECT EXISTS (
+				SELECT FROM pg_tables
+				WHERE schemaname = CURRENT_SCHEMA AND tablename = $1
+			);
+		`
+
+		var exists bool
+		if err := tx.QueryRow(ctx, checkSQL, bucket).Scan(&exists); err != nil {
+			return err
+		} else if exists {
+			return nil
+		}
+
+		createSQL := fmt.Sprintf( /* sql */ `
+			CREATE TABLE IF NOT EXISTS %s (
+				nkey BYTEA NOT NULL CHECK ( octet_length(nkey) BETWEEN %d AND %d ),
+				nvalue BYTEA NOT NULL CHECK ( octet_length(nvalue) <= %d ),
+
+				PRIMARY KEY (nkey)
+			);
+		`, quote(bucket), nosql.MinKeySize, nosql.MaxKeySize, nosql.MaxValueSize)
+
+		_, err := tx.Exec(ctx, createSQL)
+		return err
+	})
+}
+
+func (db *db) DeleteBucket(ctx context.Context, bucket []byte) (err error) {
+	sql := fmt.Sprintf( /* sql */ `
+		DROP TABLE %s;
+	`, quote(bucket))
+
+	if _, err = db.pool.Exec(ctx, sql); err != nil && isUndefinedTable(err) {
+		err = nosql.ErrBucketNotFound
+	}
+
+	return
+}
+
+func (db *db) Delete(ctx context.Context, bucket, key []byte) error {
+	return del(ctx, db.pool, bucket, key)
+}
+
+func (db *db) PutMany(ctx context.Context, records ...nosql.Record) error {
+	if len(records) == 0 {
+		
return nil // save the round trip + } + + return putMany(ctx, db.pool, records...) +} + +func (db *db) Put(ctx context.Context, bucket, key, value []byte) error { + return put(ctx, db.pool, bucket, key, value) +} + +func (db *db) Get(ctx context.Context, bucket, key []byte) ([]byte, error) { + return get(ctx, db.pool, bucket, key) +} + +func (db *db) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + return cas(ctx, db.pool, bucket, key, oldValue, newValue) +} + +func (db *db) List(ctx context.Context, bucket []byte) ([]nosql.Record, error) { + return list(ctx, db.pool, bucket) +} + +var readOnlyOpts = pgx.TxOptions{ + IsoLevel: pgx.RepeatableRead, + AccessMode: pgx.ReadOnly, +} + +func (db *db) View(ctx context.Context, fn func(nosql.Viewer) error) error { + return pgx.BeginTxFunc(ctx, db.pool, readOnlyOpts, func(tx pgx.Tx) error { + return fn(&wrapper{tx}) + }) +} + +var readWriteOpts = pgx.TxOptions{ + IsoLevel: pgx.RepeatableRead, + AccessMode: pgx.ReadWrite, +} + +func (db *db) Mutate(ctx context.Context, fn func(nosql.Mutator) error) error { + return pgx.BeginTxFunc(ctx, db.pool, readWriteOpts, func(tx pgx.Tx) error { + return fn(&wrapper{tx}) + }) +} + +type wrapper struct { + tx pgx.Tx +} + +func (w *wrapper) Get(ctx context.Context, bucket, key []byte) (value []byte, err error) { + // we're using a savepoint as if the table is not defined, the whole + // of the transaction will be aborted instead of carrying on + + err = w.do(ctx, func(tx pgx.Tx) (err error) { + value, err = get(ctx, tx, bucket, key) + + return + }) + + return +} + +func (w *wrapper) Put(ctx context.Context, bucket, key, value []byte) error { + // we're using a savepoint as if the table is not defined, the whole + // of the transaction will be aborted instead of carrying on + + return w.do(ctx, func(tx pgx.Tx) error { + return put(ctx, tx, bucket, key, value) + }) +} + +func (w *wrapper) Delete(ctx context.Context, bucket, key []byte) error { + // we're 
using a savepoint as if the table is not defined, the whole + // of the transaction will be aborted instead of carrying on + + return w.do(ctx, func(tx pgx.Tx) error { + return del(ctx, tx, bucket, key) + }) +} + +func (w *wrapper) CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) error { + // we're using a savepoint as if the table is not defined, the whole + // of the transaction will be aborted instead of carrying on + + return w.do(ctx, func(tx pgx.Tx) error { + return cas(ctx, tx, bucket, key, oldValue, newValue) + }) +} + +func (w *wrapper) PutMany(ctx context.Context, records ...nosql.Record) error { + if len(records) == 0 { + return nil // save the round trip + } + + // we're using a savepoint as if the table is not defined, the whole + // of the transaction will be aborted instead of carrying on + + return w.do(ctx, func(tx pgx.Tx) error { + return putMany(ctx, tx, records...) + }) +} + +func (w *wrapper) List(ctx context.Context, bucket []byte) (records []nosql.Record, err error) { + // we're using a savepoint as if the table is not defined, the whole + // of the transaction will be aborted instead of carrying on + + err = w.do(ctx, func(tx pgx.Tx) (err error) { + records, err = list(ctx, tx, bucket) + + return + }) + + return +} + +func (w *wrapper) do(ctx context.Context, fn func(pgx.Tx) error) error { + return pgx.BeginFunc(ctx, w.tx, fn) +} + +// --- helpers + +// generic constraints +type ( + executor interface { + Exec(context.Context, string, ...any) (pgconn.CommandTag, error) + } + + querier interface { + Query(context.Context, string, ...any) (pgx.Rows, error) + } + + rowQuerier interface { + QueryRow(context.Context, string, ...any) pgx.Row + } + + batcher interface { + SendBatch(context.Context, *pgx.Batch) pgx.BatchResults + } +) + +func get[RQ rowQuerier](ctx context.Context, rq RQ, bucket, key []byte) (value []byte, err error) { + sql := fmt.Sprintf( /* sql */ ` + SELECT nvalue + FROM %s + WHERE nkey = $1; + `, 
quote(bucket)) + + switch err = rq.QueryRow(ctx, sql, key).Scan(&value); { + case isNoRows(err): + err = nosql.ErrKeyNotFound + case isUndefinedTable(err): + err = nosql.ErrBucketNotFound + } + + return +} + +func put[E executor](ctx context.Context, e E, bucket, key, value []byte) (err error) { + sql := fmt.Sprintf( /* sql */ ` + INSERT INTO %s (nkey, nvalue) + VALUES ($1, $2) + ON CONFLICT(nkey) DO UPDATE SET nvalue = EXCLUDED.nvalue; + `, quote(bucket)) + + if _, err = e.Exec(ctx, sql, key, value); isUndefinedTable(err) { + err = nosql.ErrBucketNotFound + } + + return +} + +func list[QR querier](ctx context.Context, qr QR, bucket []byte) (records []nosql.Record, err error) { + sql := fmt.Sprintf( /* sql */ ` + SELECT nkey AS Key, nvalue AS Value + FROM %s + ORDER BY nkey; + `, quote(bucket)) + + var rows pgx.Rows + if rows, err = qr.Query(ctx, sql); err == nil { + if records, err = pgx.CollectRows(rows, pgx.RowToStructByNameLax[nosql.Record]); err == nil { + for i := range records { + records[i].Bucket = slices.Clone(bucket) + } + } + } else if isUndefinedTable(err) { + err = nosql.ErrBucketNotFound + } + + return +} + +func del[E executor](ctx context.Context, e E, bucket, key []byte) (err error) { + sql := fmt.Sprintf( /* sql */ ` + DELETE FROM %s + WHERE nkey = $1; + `, quote(bucket)) + + if _, err = e.Exec(ctx, sql, key); err != nil && isUndefinedTable(err) { + err = nosql.ErrBucketNotFound + } + + return +} + +func putMany[B batcher](ctx context.Context, b B, records ...nosql.Record) error { + var ( + seed = maphash.MakeSeed() + km = map[uint64][]byte{} // keys map, sum(key) -> key + vm = map[uint64][]byte{} // values map, sum(key) -> value + + keys [][]byte // reusable keys buffer + vals [][]byte // reusable values buffer + + batch pgx.Batch + ) + + _ = each.Bucket(records, func(bucket []byte, rex []*nosql.Record) (_ error) { + // we can't INSERT ... 
ON CONFLICT DO UPDATE for the same key more than once so we'll + // ensure that the last key is the only one sent per bucket + for _, r := range rex { + id := maphash.Bytes(seed, r.Key) + km[id] = r.Key + vm[id] = r.Value + } + + for id, key := range km { + keys = append(keys, key) + vals = append(vals, vm[id]) + } + + sql := fmt.Sprintf( /* sql */ ` + INSERT INTO %s (nkey, nvalue) + SELECT * + FROM unnest($1::BYTEA[], $2::BYTEA[]) + ON CONFLICT(nkey) DO UPDATE SET nvalue = EXCLUDED.nvalue; + `, quote(bucket)) + + // we have to pass clones of the keys and the values + _ = batch.Queue(sql, slices.Clone(keys), slices.Clone(vals)) + + // clear the buffers before the next iteration + clear(km) + clear(vm) + keys = keys[:0] + vals = vals[:0] + + return + }) + + err := b.SendBatch(ctx, &batch).Close() + if isUndefinedTable(err) { + err = nosql.ErrBucketNotFound + } + return err +} + +func cas[RQ rowQuerier](ctx context.Context, rq RQ, bucket, key, oldValue, newValue []byte) (err error) { + table := quote(bucket) + + sql := fmt.Sprintf( /* sql */ ` + WITH current AS ( + SELECT + nvalue + FROM %s + WHERE nkey = $1 + ), updated AS ( + UPDATE %s SET + nvalue = $3 + WHERE nkey = $1 AND nvalue = $2 + RETURNING TRUE AS updated + ) + SELECT + COALESCE(u.updated, FALSE), + c.nvalue + FROM current c + LEFT JOIN updated u ON TRUE; + `, table, table) + + var updated bool + var current []byte + switch err = rq.QueryRow(ctx, sql, key, oldValue, newValue).Scan(&updated, ¤t); { + case err == nil: + if !updated { + err = &nosql.ComparisonError{ + Value: current, + } + } + case isNoRows(err): + err = nosql.ErrKeyNotFound + case isUndefinedTable(err): + err = nosql.ErrBucketNotFound + } + + return +} + +func quote[T ~string | ~[]byte](id T) string { + var sb strings.Builder + sb.Grow(2*len(id) + 2) + + sb.WriteByte('"') + + for i := 0; i < len(id); i++ { + c := id[i] + + if c == '"' { + sb.WriteByte(c) + } + sb.WriteByte(c) + } + + sb.WriteByte('"') + + return sb.String() +} + +func 
isNoRows(err error) bool { + return errors.Is(err, pgx.ErrNoRows) +} + +func isUndefinedTable(err error) bool { + return isPostgresErrorCode(err, pgerrcode.UndefinedTable) +} + +func isPostgresErrorCode(err error, codes ...string) (is bool) { + var pe *pgconn.PgError + if is = errors.As(err, &pe); is { + for _, code := range codes { + if is = pe.Code == code; is { + break + } + } + } + + return +} diff --git a/driver/postgresql/driver_test.go b/driver/postgresql/driver_test.go new file mode 100644 index 0000000..000dee6 --- /dev/null +++ b/driver/postgresql/driver_test.go @@ -0,0 +1,56 @@ +package postgresql + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smallstep/nosql/dbtest" +) + +func Test(t *testing.T) { + dsn := os.Getenv("TEST_POSTGRES_DSN") + if dsn == "" { + t.Skip("$TEST_POSTGRES_DSN is missing or empty; test skipped") + } + + // tear down the test database if it already exists + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + dropTestDatabase(ctx, t, dsn) + + db, err := Open(ctx, dsn) + require.NoError(t, err) + + dbtest.Test(t, db) +} + +func dropTestDatabase(ctx context.Context, t *testing.T, dsn string) { + t.Helper() + + cfg, err := pgxpool.ParseConfig(dsn) + require.NoError(t, err) + + connConfig := cfg.ConnConfig.Copy() + dbName := determineDatabaseName(connConfig) + connConfig.Database = "postgres" + + conn, err := pgx.ConnectConfig(ctx, connConfig) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, conn.Close(ctx)) }) + + sql := fmt.Sprintf( /* sql */ ` + DROP DATABASE IF EXISTS %s; + `, quote([]byte(dbName))) + + _, err = conn.Exec(ctx, sql) + require.NoError(t, err) +} diff --git a/go.mod b/go.mod index fe994fe..737df8c 100644 --- a/go.mod +++ b/go.mod @@ -1,36 +1,46 @@ module github.com/smallstep/nosql -go 1.18 +go 
1.21 require ( github.com/dgraph-io/badger v1.6.2 github.com/dgraph-io/badger/v2 v2.2007.4 + github.com/dgraph-io/badger/v3 v3.2103.5 + github.com/dgraph-io/badger/v4 v4.2.0 github.com/go-sql-driver/mysql v1.7.1 - github.com/jackc/pgx/v4 v4.18.1 - github.com/pkg/errors v0.9.1 - github.com/smallstep/assert v0.0.0-20180720014142-de77670473b5 + github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa + github.com/jackc/pgx/v5 v5.5.4 + github.com/stretchr/testify v1.8.4 go.etcd.io/bbolt v1.3.9 ) require ( github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/dustin/go-humanize v1.0.0 // indirect - github.com/golang/protobuf v1.4.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.0.0 // indirect + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.3 // indirect - github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.14.0 // indirect - github.com/jackc/pgio v1.0.0 // indirect + github.com/google/flatbuffers v1.12.1 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.3.2 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/klauspost/compress v1.12.3 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + 
github.com/rogpeppe/go-internal v1.12.0 // indirect + go.opencensus.io v0.22.5 // indirect golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.17.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/protobuf v1.25.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index a2f6100..351ce6e 100644 --- a/go.sum +++ b/go.sum @@ -2,24 +2,19 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -27,117 +22,66 @@ github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= +github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= +github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= 
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gofrs/uuid v4.0.0+incompatible 
h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf 
v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod 
h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= -github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q= -github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 
v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= -github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= -github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod 
h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= -github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8= +github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 
h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -146,20 +90,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smallstep/assert v0.0.0-20180720014142-de77670473b5 h1:lX6ybsQW9Agn3qK/W1Z39Z4a6RyEMGem/gXUYW0axYk= -github.com/smallstep/assert v0.0.0-20180720014142-de77670473b5/go.mod h1:TC9A4+RjIOS+HyTH7wG17/gSqVv95uDw2J64dQZx7RE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -170,124 +103,72 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zenazn/goji v0.9.0/go.mod 
h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -296,30 +177,19 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/internal/each/each.go b/internal/each/each.go new file mode 100644 index 0000000..81528f1 --- /dev/null +++ b/internal/each/each.go @@ -0,0 +1,48 @@ +// Package each implements functionality relating to iterators. +package each + +import ( + "bytes" + "hash/maphash" + + "github.com/smallstep/nosql" +) + +// Bucket calls fn with the pointers to the records belonging to each bucket in the provided +// [nosql.Record] slice. +// +// The provided function must not retain access (or mutate) its given arguments. +func Bucket(records []nosql.Record, fn func(bucket []byte, rex []*nosql.Record) error) (err error) { + var ( + seed = maphash.MakeSeed() + bm = map[uint64]struct{}{} // buckets map, sum(bucket) -> presence + rex []*nosql.Record // reusable records buffer + ) + + for i := range records { + r := &records[i] + + id := maphash.Bytes(seed, r.Bucket) + if _, ok := bm[id]; ok { + continue // bucket already processed + } + bm[id] = struct{}{} + + rex = append(rex, r) + + for j := range records[i+1:] { + if rr := &records[1+i+j]; bytes.Equal(r.Bucket, rr.Bucket) { + rex = append(rex, rr) + } + } + + if err = fn(r.Bucket, rex); err != nil { + break + } + + clear(rex) + rex = rex[:0] + } + + return +} diff --git a/internal/each/each_test.go b/internal/each/each_test.go new file mode 100644 index 0000000..8805672 --- /dev/null +++ b/internal/each/each_test.go @@ -0,0 +1,52 @@ +package each + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smallstep/nosql" +) + 
+func TestBucket(t *testing.T) { + var ( + b = func(v string) []byte { + return []byte(v) + } + + src = []nosql.Record{ + {Bucket: b("1"), Key: b("1"), Value: b("1")}, + {Bucket: b("2"), Key: b("1"), Value: b("1")}, + {Bucket: b("3"), Key: b("1"), Value: b("1")}, + {Bucket: b("1"), Key: b("2"), Value: b("1")}, + {Bucket: b("1"), Key: b("3"), Value: b("1")}, + {Bucket: b("3"), Key: b("2"), Value: b("2")}, + {Bucket: b("2"), Key: b("2"), Value: b("2")}, + {Bucket: b("1"), Key: b("1"), Value: b("2")}, + } + + exp = []nosql.Record{ + {Bucket: b("1"), Key: b("1"), Value: b("1")}, + {Bucket: b("1"), Key: b("2"), Value: b("1")}, + {Bucket: b("1"), Key: b("3"), Value: b("1")}, + {Bucket: b("1"), Key: b("1"), Value: b("2")}, + {Bucket: b("2"), Key: b("1"), Value: b("1")}, + {Bucket: b("2"), Key: b("2"), Value: b("2")}, + {Bucket: b("3"), Key: b("1"), Value: b("1")}, + {Bucket: b("3"), Key: b("2"), Value: b("2")}, + } + ) + + var got []nosql.Record + err := Bucket(src, func(bucket []byte, rex []*nosql.Record) error { + for _, r := range rex { + got = append(got, *r) + } + + return nil + }) + require.NoError(t, err) + + assert.Equal(t, exp, got) +} diff --git a/internal/token/token.go b/internal/token/token.go new file mode 100644 index 0000000..6b90f41 --- /dev/null +++ b/internal/token/token.go @@ -0,0 +1,69 @@ +package token + +import ( + "crypto/rand" + "io" + mand "math/rand" + "testing" + "unicode" + "unicode/utf8" + + "github.com/stretchr/testify/require" +) + +// New returns a random token with length in the [minSize, maxSize] interval. +// +// If bucket is set, then the returned value will be valid to be used as a bucket. 
+func New(t *testing.T, minSize, maxSize int, bucket bool) (tok []byte) {
+	if minSize == maxSize {
+		tok = make([]byte, maxSize)
+	} else {
+		tok = make([]byte, minSize+mand.Intn(1+maxSize-minSize)) //nolint:gosec // not a sensitive op
+	}
+
+	src := rand.Reader
+	if bucket {
+		src = allowedRuneReader{}
+	}
+
+	for {
+		_, err := io.ReadFull(src, tok)
+		require.NoError(t, err)
+
+		if !bucket {
+			break
+		} else if r, _ := utf8.DecodeLastRune(tok); !unicode.IsSpace(r) {
+			break
+		}
+	}
+
+	return
+}
+
+// allowedRuneReader implements a reader that reads runes valid for buckets.
+type allowedRuneReader struct{}
+
+// Read implements io.Reader for [allowedRuneReader].
+func (allowedRuneReader) Read(buf []byte) (n int, _ error) {
+	for len(buf) > 0 {
+		var r rune
+		var s int
+		for {
+			const (
+				runeStart = '\U00000001' // inclusive
+				runeStop  = '\U00010000' // not inclusive
+			)
+
+			// determine a random rune up to the allowed remaining buffer size
+			r = runeStart + mand.Int31n(runeStop-runeStart) //nolint:gosec // not a sensitive op
+			if s = utf8.RuneLen(r); s != -1 && s <= len(buf) {
+				break
+			}
+		}
+
+		n += utf8.EncodeRune(buf, r)
+		buf = buf[s:]
+	}
+
+	return
+}
diff --git a/internal/token/token_test.go b/internal/token/token_test.go
new file mode 100644
index 0000000..27834ad
--- /dev/null
+++ b/internal/token/token_test.go
@@ -0,0 +1,61 @@
+package token
+
+import (
+	"bytes"
+	"hash/maphash"
+	"testing"
+	"unicode/utf8"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewCorrectness(t *testing.T) {
+	t.Parallel()
+
+	const (
+		minSize    = 5
+		maxSize    = 42
+		iterations = 1000
+	)
+
+	tokens := make([][]byte, 0, iterations)
+
+	for i := 0; i < iterations; i++ {
+		tok := New(t, minSize, maxSize, true)
+		tokens = append(tokens, tok)
+
+		assert.True(t, minSize <= len(tok) && len(tok) <= maxSize,
+			"wrong size (%d) for token %q", len(tok), tok)
+
+		assert.True(t, utf8.Valid(tok),
+			"invalid utf8 in token %q", tok)
+
+		assert.True(t, bytes.IndexByte(tok, 0) == -1,
"zero byte in token %q", tok) + + assert.False(t, bytes.HasSuffix(tok, []byte{' '}), + "token (%q) ends with a space", tok) + } + assert.Len(t, tokens, iterations) +} + +func TestNewUniqueness(t *testing.T) { + t.Parallel() + + var ( + seed = maphash.MakeSeed() + generated = map[uint64]struct{}{} + iterations int + ) + + for len(generated) < 100_000 { + iterations++ + + x := maphash.Bytes(seed, New(t, 32, 32, iterations%2 == 1)) + if _, ok := generated[x]; !ok { + generated[x] = struct{}{} + } + } + + assert.Len(t, generated, iterations) +} diff --git a/mysql/mysql.go b/mysql/mysql.go deleted file mode 100644 index 0273491..0000000 --- a/mysql/mysql.go +++ /dev/null @@ -1,282 +0,0 @@ -//go:build !nomysql -// +build !nomysql - -package mysql - -import ( - "bytes" - "database/sql" - "fmt" - "strings" - - "github.com/go-sql-driver/mysql" - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" -) - -// DB is a wrapper over *sql.DB, -type DB struct { - db *sql.DB -} - -// Open creates a Driver and connects to the database with the given address -// and access details. 
-func (db *DB) Open(dataSourceName string, opt ...database.Option) error { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - - parsedDSN, err := mysql.ParseDSN(dataSourceName) - if err != nil { - return errors.Wrap(err, "parse database from dataSource") - } - // Database name in DSN is ignored if explicitly set - if opts.Database == "" { - opts.Database = parsedDSN.DBName - } - - // First connect to no db to create it if it doesn't exist - parsedDSN.DBName = "" - _db, err := sql.Open("mysql", parsedDSN.FormatDSN()) - if err != nil { - return errors.Wrap(err, "error connecting to mysql") - } - _, err = _db.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`", opts.Database)) - if err != nil { - return errors.Wrapf(err, "error creating database %s (if not exists)", opts.Database) - } - parsedDSN.DBName = opts.Database - db.db, err = sql.Open("mysql", parsedDSN.FormatDSN()) - if err != nil { - return errors.Wrapf(err, "error connecting to mysql database") - } - - return nil -} - -// Close shutsdown the database driver. -func (db *DB) Close() error { - return errors.WithStack(db.db.Close()) -} - -func getQry(bucket []byte) string { - return fmt.Sprintf("SELECT nvalue FROM `%s` WHERE nkey = ?", bucket) -} - -func getQryForUpdate(bucket []byte) string { - return fmt.Sprintf("SELECT nvalue FROM `%s` WHERE nkey = ? FOR UPDATE", bucket) -} - -func insertUpdateQry(bucket []byte) string { - return fmt.Sprintf("INSERT INTO `%s`(nkey, nvalue) VALUES(?,?) 
ON DUPLICATE KEY UPDATE nvalue = ?", bucket) -} - -func delQry(bucket []byte) string { - return fmt.Sprintf("DELETE FROM `%s` WHERE nkey = ?", bucket) -} - -func createTableQry(bucket []byte) string { - return fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s`(nkey VARBINARY(255), nvalue BLOB, PRIMARY KEY (nkey));", bucket) -} - -func deleteTableQry(bucket []byte) string { - return fmt.Sprintf("DROP TABLE `%s`", bucket) -} - -// Get retrieves the column/row with given key. -func (db *DB) Get(bucket, key []byte) ([]byte, error) { - var val string - err := db.db.QueryRow(getQry(bucket), key).Scan(&val) - switch { - case err == sql.ErrNoRows: - return nil, errors.Wrapf(database.ErrNotFound, "%s/%s not found", bucket, key) - case err != nil: - return nil, errors.Wrapf(err, "failed to get %s/%s", bucket, key) - default: - return []byte(val), nil - } -} - -// Set inserts the key and value into the given bucket(column). -func (db *DB) Set(bucket, key, value []byte) error { - _, err := db.db.Exec(insertUpdateQry(bucket), key, value, value) - if err != nil { - return errors.Wrapf(err, "failed to set %s/%s", bucket, key) - } - return nil -} - -// Del deletes a row from the database. -func (db *DB) Del(bucket, key []byte) error { - _, err := db.db.Exec(delQry(bucket), key) - return errors.Wrapf(err, "failed to delete %s/%s", bucket, key) -} - -// List returns the full list of entries in a column. 
-func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - rows, err := db.db.Query(fmt.Sprintf("SELECT * FROM `%s`", bucket)) - if err != nil { - estr := err.Error() - if strings.HasPrefix(estr, "Error 1146") { - return nil, errors.Wrapf(database.ErrNotFound, estr) - } - return nil, errors.Wrapf(err, "error querying table %s", bucket) - } - defer rows.Close() - var ( - key, value string - entries []*database.Entry - ) - for rows.Next() { - err := rows.Scan(&key, &value) - if err != nil { - return nil, errors.Wrap(err, "error getting key and value from row") - } - entries = append(entries, &database.Entry{ - Bucket: bucket, - Key: []byte(key), - Value: []byte(value), - }) - } - err = rows.Err() - if err != nil { - return nil, errors.Wrap(err, "error accessing row") - } - return entries, nil -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. -func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - sqlTx, err := db.db.Begin() - if err != nil { - return nil, false, errors.WithStack(err) - } - - val, swapped, err := cmpAndSwap(sqlTx, bucket, key, oldValue, newValue) - switch { - case err != nil: - if err := sqlTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to execute CmpAndSwap transaction on %s/%s and failed to rollback transaction", bucket, key) - } - return nil, false, err - case swapped: - if err := sqlTx.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit MySQL transaction") - } - return val, swapped, nil - default: - if err := sqlTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to rollback read-only CmpAndSwap transaction on %s/%s", bucket, key) - } - return val, swapped, err - } -} - -func cmpAndSwap(sqlTx *sql.Tx, bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - var current []byte - err := sqlTx.QueryRow(getQryForUpdate(bucket), 
key).Scan(¤t) - - if err != nil && !errors.Is(err, sql.ErrNoRows) { - return nil, false, err - } - if !bytes.Equal(current, oldValue) { - return current, false, nil - } - - if _, err = sqlTx.Exec(insertUpdateQry(bucket), key, newValue, newValue); err != nil { - return nil, false, errors.Wrapf(err, "failed to set %s/%s", bucket, key) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. -func (db *DB) Update(tx *database.Tx) error { - sqlTx, err := db.db.Begin() - if err != nil { - return errors.WithStack(err) - } - rollback := func(err error) error { - if rollbackErr := sqlTx.Rollback(); rollbackErr != nil { - return errors.Wrap(err, "UPDATE failed, unable to rollback transaction") - } - return errors.Wrap(err, "UPDATE failed") - } - for _, q := range tx.Operations { - // create or delete buckets - switch q.Cmd { - case database.CreateTable: - _, err := sqlTx.Exec(createTableQry(q.Bucket)) - if err != nil { - return rollback(errors.Wrapf(err, "failed to create table %s", q.Bucket)) - } - case database.DeleteTable: - _, err := sqlTx.Exec(deleteTableQry(q.Bucket)) - if err != nil { - estr := err.Error() - if strings.HasPrefix(err.Error(), "Error 1051") { - return errors.Wrapf(database.ErrNotFound, estr) - } - return errors.Wrapf(err, "failed to delete table %s", q.Bucket) - } - case database.Get: - var val string - err := sqlTx.QueryRow(getQry(q.Bucket), q.Key).Scan(&val) - switch { - case err == sql.ErrNoRows: - return rollback(errors.Wrapf(database.ErrNotFound, "%s/%s not found", q.Bucket, q.Key)) - case err != nil: - return rollback(errors.Wrapf(err, "failed to get %s/%s", q.Bucket, q.Key)) - default: - q.Result = []byte(val) - } - case database.Set: - if _, err = sqlTx.Exec(insertUpdateQry(q.Bucket), q.Key, q.Value, q.Value); err != nil { - return rollback(errors.Wrapf(err, "failed to set %s/%s", q.Bucket, q.Key)) - } - case database.Delete: - if _, err = sqlTx.Exec(delQry(q.Bucket), q.Key); err != nil { - 
return rollback(errors.Wrapf(err, "failed to delete %s/%s", q.Bucket, q.Key)) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwap(sqlTx, q.Bucket, q.Key, q.CmpValue, q.Value) - if err != nil { - return rollback(errors.Wrapf(err, "failed to load-or-store %s/%s", q.Bucket, q.Key)) - } - case database.CmpOrRollback: - return database.ErrOpNotSupported - default: - return database.ErrOpNotSupported - } - } - - if err = errors.WithStack(sqlTx.Commit()); err != nil { - return rollback(err) - } - return nil -} - -// CreateTable creates a table in the database. -func (db *DB) CreateTable(bucket []byte) error { - _, err := db.db.Exec(createTableQry(bucket)) - if err != nil { - return errors.Wrapf(err, "failed to create table %s", bucket) - } - return nil -} - -// DeleteTable deletes a table in the database. -func (db *DB) DeleteTable(bucket []byte) error { - _, err := db.db.Exec(deleteTableQry(bucket)) - if err != nil { - estr := err.Error() - if strings.HasPrefix(estr, "Error 1051") { - return errors.Wrapf(database.ErrNotFound, estr) - } - return errors.Wrapf(err, "failed to delete table %s", bucket) - } - return nil -} diff --git a/mysql/nomysql.go b/mysql/nomysql.go deleted file mode 100644 index ae07cae..0000000 --- a/mysql/nomysql.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build nomysql -// +build nomysql - -package mysql - -import "github.com/smallstep/nosql/database" - -type DB = database.NotSupportedDB diff --git a/nosql.go b/nosql.go index a72da2a..b4f017c 100644 --- a/nosql.go +++ b/nosql.go @@ -1,80 +1,276 @@ package nosql import ( - "strings" - - "github.com/pkg/errors" - badgerV1 "github.com/smallstep/nosql/badger/v1" - badgerV2 "github.com/smallstep/nosql/badger/v2" - "github.com/smallstep/nosql/bolt" - "github.com/smallstep/nosql/database" - "github.com/smallstep/nosql/mysql" - "github.com/smallstep/nosql/postgresql" + "context" + "errors" + "fmt" + "slices" + "sort" + "sync" ) -// Option is just a wrapper over database.Option. 
-type Option = database.Option +var ( + // ErrNilBucket is contained in the chains of errors returned by functions that accept a bucket + // when the bucket is nil. + ErrNilBucket = errors.New(errPrefix + "bucket is nil") + + // ErrEmptyBucket is contained in the chains of errors returned by functions that accept a + // bucket when the bucket is empty. + ErrEmptyBucket = errors.New(errPrefix + "bucket is empty") + + // ErrBucketTooLong is contained in the chains of errors returned by functions that accept a + // bucket when the bucket's length exceeds [MaxBucketSize]. + ErrBucketTooLong = errors.New(errPrefix + "bucket is too long") + + // ErrInvalidBucket is contained in the chains of errors returned by functions that accept a + // bucket when the bucket contains the zero byte or an invalid UTF-8 sequence. + ErrInvalidBucket = errors.New(errPrefix + "bucket is invalid") + + // ErrBucketNotFound is contained in the chains of errors returned by functions that expect + // the presence of a bucket that does not exist. + ErrBucketNotFound = errors.New(errPrefix + "bucket not found") + + // ErrNilKey is contained in the chains of errors returned by functions that accept a key when + // the key is nil. + ErrNilKey = errors.New(errPrefix + "key is nil") + + // ErrEmptyKey is contained in the chains of errors returned by functions that accept a key + // when the key is empty. + ErrEmptyKey = errors.New(errPrefix + "key is empty") + + // ErrKeyTooLong is contained in the chains of errors returned by functions that accept a key + // when the key's length exceeds [MaxKeySize]. + ErrKeyTooLong = errors.New(errPrefix + "key is too long") + + // ErrKeyNotFound is contained in the chains of errors returned by functions that expect + // the presence of a key that does not exist. + ErrKeyNotFound = errors.New(errPrefix + "key not found") -// DB is just a wrapper over database.DB. 
-type DB = database.DB + // ErrNilValue is contained in the chains of errors returned by functions that accept a value + // when the value is nil. + ErrNilValue = errors.New(errPrefix + "value is nil") -// Compactor in an interface implemented by those databases that can run a value -// log garbage collector like badger. -type Compactor interface { - Compact(discardRatio float64) error + // ErrValueTooLong is contained in the chains of errors returned by functions that accept a + // value when the value's length exceeds [MaxValueSize]. + ErrValueTooLong = errors.New(errPrefix + "value is too long") +) + +// ComparisonError is a type of error contained in the chains of errors returned by CompareAndSwap +// functions when the swap fails due to the existing value being different than the expected one. +type ComparisonError struct { + // Value holds a copy of the value the key had at the time of the call. + Value []byte +} + +// Error implements [error] for [ComparisonError]. +func (e *ComparisonError) Error() string { + return errPrefix + "unexpected value" } +const ( + errPrefix = "nosql: " // error prefix + + // MinBucketSize denotes the minimum allowed byte size for buckets. + MinBucketSize = 1 + + // MaxBucketSize denotes the maximum allowed byte size for buckets. + MaxBucketSize = 50 + + // MinKeySize denotes the minimum allowed byte size for keys. + MinKeySize = 1 + + // MaxKeySize denotes the maximum allowed byte size for keys. + MaxKeySize = 200 + + // MaxValueSize denotes the maximum allowed byte size for values. + MaxValueSize = 1 << 14 +) + var ( - // WithValueDir is a wrapper over database.WithValueDir. - WithValueDir = database.WithValueDir - // WithDatabase is a wrapper over database.WithDatabase. - WithDatabase = database.WithDatabase - // WithBadgerFileLoadingMode is a wrapper over database.WithBadgerFileLoadingMode. - WithBadgerFileLoadingMode = database.WithBadgerFileLoadingMode - // IsErrNotFound is a wrapper over database.IsErrNotFound. 
- IsErrNotFound = database.IsErrNotFound - // IsErrOpNotSupported is a wrapper over database.IsErrOpNotSupported. - IsErrOpNotSupported = database.IsErrOpNotSupported - - // Available db driver types. // - - // BadgerDriver indicates the default Badger database - currently Badger V1. - BadgerDriver = "badger" - // BadgerV1Driver explicitly selects the Badger V1 driver. - BadgerV1Driver = "badgerv1" - // BadgerV2Driver explicitly selects the Badger V2 driver. - BadgerV2Driver = "badgerv2" - // BBoltDriver indicates the default BBolt database. - BBoltDriver = "bbolt" - // MySQLDriver indicates the default MySQL database. - MySQLDriver = "mysql" - // PostgreSQLDriver indicates the default PostgreSQL database. - PostgreSQLDriver = "postgresql" - - // Badger FileLoadingMode - - // BadgerMemoryMap indicates the MemoryMap FileLoadingMode option. - BadgerMemoryMap = database.BadgerMemoryMap - // BadgerFileIO indicates the FileIO FileLoadingMode option. - BadgerFileIO = database.BadgerFileIO + driversMu sync.RWMutex // protects drivers + drivers = make(map[string]Driver) ) -// New returns a database with the given driver. -func New(driver, dataSourceName string, opt ...Option) (db database.DB, err error) { - switch strings.ToLower(driver) { - case BadgerDriver, BadgerV1Driver: - db = &badgerV1.DB{} - case BadgerV2Driver: - db = &badgerV2.DB{} - case BBoltDriver: - db = &bolt.DB{} - case MySQLDriver: - db = &mysql.DB{} - case PostgreSQLDriver: - db = &postgresql.DB{} - default: - return nil, errors.Errorf("%s database not supported", driver) +// Driver wraps the set of database drivers. +type Driver func(ctx context.Context, dsn string) (DB, error) + +// Register registers the named driver. 
+func Register(name string, driver Driver) {
+	if driver == nil {
+		panic(errPrefix + "nil driver")
+	}
+
+	driversMu.Lock()
+	defer driversMu.Unlock()
+
+	if _, dup := drivers[name]; dup {
+		panic(fmt.Sprintf(errPrefix+"driver %q is already registered", name))
+	}
+
+	drivers[name] = driver
+}
+
+// Open opens the database the provided DSN describes, via the named driver.
+func Open(ctx context.Context, driverName, dsn string) (DB, error) {
+	driversMu.Lock()
+	driver := drivers[driverName]
+	driversMu.Unlock()
+
+	if driver == nil {
+		return nil, fmt.Errorf(errPrefix+"driver %q is not registered", driverName)
+	}
+
+	db, err := driver(ctx, dsn)
+	if err == nil {
+		db = Constrain(db)
+	}
+	return db, err
+}
+
+// Drivers returns a sorted list of the registered drivers.
+func Drivers() []string {
+	driversMu.RLock()
+	defer driversMu.RUnlock()
+
+	l := make([]string, 0, len(drivers))
+	for driver := range drivers {
+		l = append(l, driver)
+	}
+	sort.Strings(l)
+
+	return l
+}
+
+// DB wraps functionality exported by compatible data stores.
+//
+// Implementations of this interface returned by [Open] enforce constraints for the 3 types of
+// tokens (buckets, keys and values) as follows:
+//
+//   - For any bucket to be considered valid, it must be a non-nil, non-empty, valid UTF-8 encoded
+//     byte slice that does not contain the zero byte and that is up to [MaxBucketSize] bytes long.
+//   - Respectively, for any key to be considered valid, it must be a non-nil, non-empty byte slice
+//     that is up to [MaxKeySize] bytes long.
+//   - And, finally, for any value to be considered valid, it must be a non-nil byte slice that is
+//     up to [MaxValueSize] bytes long.
+//
+// When a token violates the above constraints, one of the sentinel errors this package declares
+// (e.g. [ErrNilBucket], [ErrValueTooLong], etc.) will be returned to the caller.
+type DB interface {
+	Mutator
+
+	// Close closes the underlying connection[s] to the database.
+	Close(context.Context) error
+
+	// CreateBucket creates the given bucket. It returns no error when the given bucket already
+	// exists.
+	//
+	// See [DB] for the validations CreateBucket performs on the given bucket.
+	CreateBucket(ctx context.Context, bucket []byte) error
+
+	// DeleteBucket deletes the given bucket. It returns an error that contains [ErrBucketNotFound]
+	// in its chain when the bucket does not exist.
+	//
+	// See [DB] for the validations DeleteBucket performs on the given bucket.
+	DeleteBucket(ctx context.Context, bucket []byte) error
+
+	// View runs the given read-only transaction against the database and returns its error.
+	View(context.Context, func(Viewer) error) error
+
+	// Mutate runs the given read-write transaction against the database and returns its error.
+	Mutate(context.Context, func(Mutator) error) error
+}
+
+// Viewer is the interface that database views implement.
+type Viewer interface {
+	// Get returns the value stored in the given bucket for the given key. It returns an error
+	// that contains
+	//
+	//   - [ErrBucketNotFound] in its chain when the given bucket does not exist.
+	//   - [ErrKeyNotFound] in its chain when the given key does not exist in the given bucket.
+	//
+	// See [DB] for the validations Get performs on the given bucket and key.
+	Get(ctx context.Context, bucket, key []byte) ([]byte, error)
+
+	// List returns the records of the given bucket, in lexicographically sorted order (by key). It
+	// returns an error containing [ErrBucketNotFound] in its chain when the given bucket does not
+	// exist.
+	//
+	// See [DB] for the validations List performs on the given bucket.
+	List(ctx context.Context, bucket []byte) ([]Record, error)
+}
+
+// Mutator is the interface that database mutators implement.
+type Mutator interface {
+	Viewer
+
+	// CompareAndSwap sets the value stored in the given bucket for the given key, to the given
+	// new one (newValue), if and only if the current value of the key equals the given one
+	// (oldValue). It returns an error containing in its chain
+	//
+	//   - [ErrBucketNotFound] when the given bucket does not exist.
+	//   - [ErrKeyNotFound] when the given key does not exist in the given bucket.
+	//   - a reference to a [ComparisonError] when the value for the key at the time of the attempted
+	//     swap differed from the expected one (oldValue).
+	//
+	// See [DB] for the validations CompareAndSwap performs on the given bucket, key, oldValue and
+	// newValue tokens.
+	CompareAndSwap(ctx context.Context, bucket, key, oldValue, newValue []byte) error
+
+	// Delete removes any value the given bucket might hold for the given key. It returns an error
+	// containing [ErrBucketNotFound] in its chain when the bucket does not exist.
+	//
+	// See [DB] for the additional validations Delete performs on the provided bucket and key
+	// tokens.
+	Delete(ctx context.Context, bucket, key []byte) error
+
+	// Put stores the given value in the given bucket for the given key, overwriting any existing
+	// value that may already be present. It returns an error containing [ErrBucketNotFound] in its chain
+	// when the given bucket does not exist.
+	//
+	// See [DB] for the validations Put performs on the provided bucket, key and value tokens.
+	Put(ctx context.Context, bucket, key, value []byte) error
+
+	// PutMany stores the given records, overwriting the values of any existing ones. It returns an
+	// error containing [ErrBucketNotFound] in its chain, when the bucket of any given
+	// [Record] does not exist.
+	//
+	// See [DB] for the validations PutMany performs on the tokens of the given records.
+	PutMany(context.Context, ...Record) error
+}
+
+// Record wraps the set of database records.
+type Record struct {
+	// Bucket denotes the bucket the key/value pair belongs to.
+	Bucket []byte
+
+	// Key denotes the key/value pair's key.
+	Key []byte
+
+	// Value denotes the key/value pair's value.
+	Value []byte
+}
+
+// String implements fmt.Stringer for [Record].
+func (r *Record) String() string {
+	return fmt.Sprintf("(%q, %q, %q)", r.Bucket, r.Key, r.Value)
+}
+
+// GoString implements fmt.GoStringer for [Record].
+func (r *Record) GoString() string {
+	return fmt.Sprintf("nosql.Record{%q, %q, %q}", r.Bucket, r.Key, r.Value)
+}
+
+// CompactedByFactor is the interface instances of [DB] also implement in case they support being
+// compacted by a factor.
+type CompactedByFactor interface {
+	CompactByFactor(context.Context, float64) error
+}
+
+// Clone returns a copy of the [Record].
+func (r *Record) Clone() Record {
+	return Record{
+		Bucket: slices.Clone(r.Bucket),
+		Key:    slices.Clone(r.Key),
+		Value:  slices.Clone(r.Value),
+	}
-	err = db.Open(dataSourceName, opt...)
-	return
 }
diff --git a/nosql_test.go b/nosql_test.go
index 0d92622..eee269f 100644
--- a/nosql_test.go
+++ b/nosql_test.go
@@ -1,359 +1,92 @@
 package nosql
 
 import (
-	"encoding/json"
-	"fmt"
-	"os"
+	"context"
 	"testing"
 
-	"github.com/smallstep/assert"
-	"github.com/smallstep/nosql/database"
-)
-
-type testUser struct {
-	Fname, lname string
-	numPets      int
-}
-
-func run(t *testing.T, db database.DB) {
-	var boogers = []byte("boogers")
-
-	ub := []byte("testNoSQLUsers")
-	assert.True(t, IsErrNotFound(db.DeleteTable(ub)))
-	assert.Nil(t, db.CreateTable(ub))
-	// Verify that re-creating the table does not cause a "table already exists" error
-	assert.Nil(t, db.CreateTable(ub))
-
-	// Test that we can create tables with illegal/special characters (e.g.
`-`) - illName := []byte("test-special-char") - assert.Nil(t, db.CreateTable(illName)) - assert.Nil(t, db.DeleteTable(illName)) - _, err := db.List(illName) - assert.True(t, IsErrNotFound(err)) - - // List should be empty - entries, err := db.List(ub) - assert.Nil(t, err) - assert.Equals(t, len(entries), 0) - - // check for mike - should not exist - _, err = db.Get(ub, []byte("mike")) - assert.True(t, IsErrNotFound(err)) - - // add mike - assert.Nil(t, db.Set(ub, []byte("mike"), boogers)) - - // verify that mike is in db - res, err := db.Get(ub, []byte("mike")) - assert.FatalError(t, err) - assert.Equals(t, boogers, res) - - // overwrite mike - mike := testUser{"mike", "malone", 1} - mikeb, err := json.Marshal(mike) - assert.FatalError(t, err) - - assert.Nil(t, db.Set(ub, []byte("mike"), mikeb)) - // verify overwrite - res, err = db.Get(ub, []byte("mike")) - assert.FatalError(t, err) - assert.Equals(t, mikeb, res) - - var swapped bool - // CmpAndSwap should load since mike is not nil - res, swapped, err = db.CmpAndSwap(ub, []byte("mike"), nil, boogers) - assert.FatalError(t, err) - assert.Equals(t, mikeb, res) - assert.False(t, swapped) - assert.Nil(t, err) - - // delete mike - assert.FatalError(t, db.Del(ub, []byte("mike"))) - - // CmpAndSwap should overwrite mike since mike is nil - res, swapped, err = db.CmpAndSwap(ub, []byte("mike"), nil, boogers) - assert.FatalError(t, err) - assert.Equals(t, boogers, res) - assert.True(t, swapped) - assert.Nil(t, err) - - // delete mike - assert.FatalError(t, db.Del(ub, []byte("mike"))) - - // check for mike - should not exist - _, err = db.Get(ub, []byte("mike")) - assert.True(t, IsErrNotFound(err)) - - // CmpAndSwap should store since mike does not exist - res, swapped, err = db.CmpAndSwap(ub, []byte("mike"), nil, mikeb) - assert.FatalError(t, err) - assert.Equals(t, res, mikeb) - assert.True(t, swapped) - assert.Nil(t, err) - - // delete mike - assert.FatalError(t, db.Del(ub, []byte("mike"))) - - // Update // - - // create 
txns for update test - mariano := testUser{"mariano", "Cano", 2} - marianob, err := json.Marshal(mariano) - assert.FatalError(t, err) - seb := testUser{"sebastian", "tiedtke", 0} - sebb, err := json.Marshal(seb) - assert.FatalError(t, err) - gates := testUser{"bill", "gates", 2} - gatesb, err := json.Marshal(gates) - assert.FatalError(t, err) - - casGates := &database.TxEntry{ - Bucket: ub, - Key: []byte("bill"), - Value: gatesb, - CmpValue: nil, - Cmd: database.CmpAndSwap, - } - setMike := &database.TxEntry{ - Bucket: ub, - Key: []byte("mike"), - Value: mikeb, - Cmd: database.Set, - } - readMike := &database.TxEntry{ - Bucket: ub, - Key: []byte("mike"), - Cmd: database.Get, - } - setMariano := &database.TxEntry{ - Bucket: ub, - Key: []byte("mariano"), - Value: marianob, - Cmd: database.Set, - } - setSeb := &database.TxEntry{ - Bucket: ub, - Key: []byte("sebastian"), - Value: sebb, - Cmd: database.Set, - } - readSeb := &database.TxEntry{ - Bucket: ub, - Key: []byte("sebastian"), - Cmd: database.Get, - } - casGates2 := &database.TxEntry{ - Bucket: ub, - Key: []byte("bill"), - Value: boogers, - CmpValue: gatesb, - Cmd: database.CmpAndSwap, - } - casGates3 := &database.TxEntry{ - Bucket: ub, - Key: []byte("bill"), - Value: []byte("belly-button-lint"), - CmpValue: gatesb, - Cmd: database.CmpAndSwap, - } - - // update: read write multiple entries. 
- tx := &database.Tx{Operations: []*database.TxEntry{setMike, setMariano, readMike, setSeb, readSeb, casGates, casGates2, casGates3}} - assert.Nil(t, db.Update(tx)) - - // verify that mike is in db - res, err = db.Get(ub, []byte("mike")) - assert.FatalError(t, err) - assert.Equals(t, mikeb, res) - - // verify that mariano is in db - res, err = db.Get(ub, []byte("mariano")) - assert.FatalError(t, err) - assert.Equals(t, marianob, res) - - // verify that bill gates is in db - res, err = db.Get(ub, []byte("bill")) - assert.FatalError(t, err) - assert.Equals(t, boogers, res) - - // verify that seb is in db - res, err = db.Get(ub, []byte("sebastian")) - assert.FatalError(t, err) - assert.Equals(t, sebb, res) - - // check that the readMike update txn was successful - assert.Equals(t, readMike.Result, mikeb) + "github.com/stretchr/testify/assert" - // check that the readSeb update txn was successful - assert.Equals(t, readSeb.Result, sebb) - - // check that the casGates update txn was a successful write - assert.True(t, casGates.Swapped) - assert.Equals(t, casGates.Result, gatesb) - - // check that the casGates2 update txn was successful - assert.True(t, casGates2.Swapped) - assert.Equals(t, casGates2.Result, boogers) - - // check that the casGates3 update txn was did not update. 
- assert.False(t, casGates3.Swapped) - assert.Equals(t, casGates3.Result, boogers) - - // List // - - _, err = db.List([]byte("clever")) - assert.True(t, IsErrNotFound(err)) - - entries, err = db.List(ub) - assert.FatalError(t, err) - assert.Equals(t, len(entries), 4) - - // Update Again // - - // create txns for update test - max := testUser{"max", "furman", 6} - maxb, err := json.Marshal(max) - assert.FatalError(t, err) - maxey := testUser{"mike", "maxey", 3} - maxeyb, err := json.Marshal(maxey) - assert.FatalError(t, err) - delMike := &database.TxEntry{ - Bucket: ub, - Key: []byte("mike"), - Cmd: database.Delete, - } - setMax := &database.TxEntry{ - Bucket: ub, - Key: []byte("max"), - Value: maxb, - Cmd: database.Set, - } - setMaxey := &database.TxEntry{ - Bucket: ub, - Key: []byte("maxey"), - Value: maxeyb, - Cmd: database.Set, - } - delMaxey := &database.TxEntry{ - Bucket: ub, - Key: []byte("maxey"), - Cmd: database.Delete, - } - delSeb := &database.TxEntry{ - Bucket: ub, - Key: []byte("sebastian"), - Cmd: database.Delete, - } - - // update: read write multiple entries. 
- tx = &database.Tx{Operations: []*database.TxEntry{ - delMike, setMax, setMaxey, delMaxey, delSeb, - }} - assert.Nil(t, db.Update(tx)) - - entries, err = db.List(ub) - assert.FatalError(t, err) - assert.Equals(t, len(entries), 3) - - // verify that max and mariano are in the db - res, err = db.Get(ub, []byte("max")) - assert.FatalError(t, err) - assert.Equals(t, maxb, res) - res, err = db.Get(ub, []byte("mariano")) - assert.FatalError(t, err) - assert.Equals(t, marianob, res) - - assert.Nil(t, db.DeleteTable(ub)) - _, err = db.List(ub) - assert.True(t, IsErrNotFound(err)) -} - -func TestMain(m *testing.M) { - - // setup - path := "./tmp" - if _, err := os.Stat(path); os.IsNotExist(err) { - os.Mkdir(path, 0755) - } - - // run - ret := m.Run() - - // teardown - os.RemoveAll(path) + "github.com/smallstep/nosql/internal/token" +) - os.Exit(ret) +func TestRegisterPanics(t *testing.T) { + assert.PanicsWithValue(t, "nosql: nil driver", func() { + Register("driver", nil) + }) } -func TestMySQL(t *testing.T) { +func TestRegister(t *testing.T) { var ( - uname = "user" - pwd = "password" - proto = "tcp" - addr = "127.0.0.1:3306" - //path = "/tmp/mysql.sock" - testDB = "test" + gotCtx context.Context + gotDSN string ) + drv := func(ctx context.Context, dsn string) (DB, error) { + gotCtx = ctx + gotDSN = dsn - isCITest := os.Getenv("CI") - if isCITest == "" { - fmt.Printf("Not running MySql integration tests\n") - return + return nil, assert.AnError } - db, err := New("mysql", - fmt.Sprintf("%s:%s@%s(%s)/", uname, pwd, proto, addr), - WithDatabase(testDB)) - assert.FatalError(t, err) - defer db.Close() - - run(t, db) -} - -func TestPostgreSQL(t *testing.T) { - var ( - uname = "user" - pwd = "password" - addr = "127.0.0.1:5432" - //path = "/tmp/postgresql.sock" - testDB = "test" + const ( + name = "testdriver" + dsn = "some random dsn" + val = "some context value" ) - isCITest := os.Getenv("CI") - if isCITest == "" { - fmt.Printf("Not running PostgreSQL integration tests\n") - 
return - } + Register(name, drv) + t.Cleanup(func() { unregister(name) }) - db, err := New("postgresql", - fmt.Sprintf("postgresql://%s:%s@%s/", uname, pwd, addr), - WithDatabase(testDB)) - assert.FatalError(t, err) - defer db.Close() + type contextKeyType struct{} + ctx := context.WithValue(context.Background(), contextKeyType{}, val) - run(t, db) + got, err := Open(ctx, name, dsn) + assert.Same(t, assert.AnError, err) + assert.Nil(t, got) + assert.Equal(t, val, gotCtx.Value(contextKeyType{}).(string)) + assert.Equal(t, dsn, gotDSN) } -func TestBadger(t *testing.T) { - path := "./tmp/badgerdb" - - if _, err := os.Stat(path); os.IsNotExist(err) { - assert.FatalError(t, os.Mkdir(path, 0755)) - } +func TestDrivers(t *testing.T) { + names := []string{ + "testdriver1", + "testdriver3", + "testdriver2", + } + t.Cleanup(func() { + for _, name := range names { + unregister(name) + } + }) + + for _, name := range names { + Register(name, func(context.Context, string) (DB, error) { + return nil, assert.AnError + }) + } + + assert.Equal(t, []string{ + "testdriver1", + "testdriver2", + "testdriver3", + }, Drivers()) +} - db, err := New("badger", path, WithValueDir(path)) - assert.FatalError(t, err) - defer db.Close() +func unregister(name string) { + driversMu.Lock() + defer driversMu.Unlock() - run(t, db) + delete(drivers, name) } -func TestBolt(t *testing.T) { - assert.FatalError(t, os.MkdirAll("./tmp", 0644)) - - db, err := New("bbolt", "./tmp/boltdb") - assert.FatalError(t, err) - defer db.Close() +func TestRecordClone(t *testing.T) { + var ( + r = Record{ + Bucket: token.New(t, 0, 50, false), + Key: token.New(t, 0, 100, false), + Value: token.New(t, 0, 200, false), + } + d = r.Clone() + ) - run(t, db) + assert.Equal(t, r, d) } diff --git a/postgresql/nopostgresql.go b/postgresql/nopostgresql.go deleted file mode 100644 index d68a28c..0000000 --- a/postgresql/nopostgresql.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build nopgx -// +build nopgx - -package postgresql - -import 
"github.com/smallstep/nosql/database" - -type DB = database.NotSupportedDB diff --git a/postgresql/postgresql.go b/postgresql/postgresql.go deleted file mode 100644 index 4109f99..0000000 --- a/postgresql/postgresql.go +++ /dev/null @@ -1,327 +0,0 @@ -//go:build !nopgx -// +build !nopgx - -package postgresql - -import ( - "bytes" - "context" - "database/sql" - "fmt" - "strings" - - "github.com/jackc/pgx/v4" - pgxstdlib "github.com/jackc/pgx/v4/stdlib" - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" -) - -// DB is a wrapper over *sql.DB, -type DB struct { - db *sql.DB -} - -func quoteIdentifier(identifier string) string { - parts := strings.Split(identifier, ".") - return pgx.Identifier(parts).Sanitize() -} - -func createDatabase(config *pgx.ConnConfig) error { - db := config.Database - if db == "" { - // If no explicit database name is given, PostgreSQL defaults to the - // database with the same name as the user. - db = config.User - if db == "" { - return errors.New("error creating database: database name is missing") - } - } - - // The database "template1" is the default template for all new databases, - // so it should always exist. - tempConfig := config.Copy() - tempConfig.Database = "template1" - - conn, err := pgx.ConnectConfig(context.Background(), tempConfig) - if err != nil { - return errors.Wrap(err, "error connecting to PostgreSQL") - } - defer conn.Close(context.Background()) - - _, err = conn.Exec(context.Background(), fmt.Sprintf("CREATE DATABASE %s", quoteIdentifier(db))) - if err != nil { - if !strings.Contains(err.Error(), "(SQLSTATE 42P04)") { - return errors.Wrapf(err, "error creating database %s (if not exists)", db) - } - } - - return nil -} - -// Open creates a Driver and connects to the database with the given address -// and access details. 
-func (db *DB) Open(dataSourceName string, opt ...database.Option) error { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - - config, err := pgx.ParseConfig(dataSourceName) - if err != nil { - return errors.Wrap(err, "error parsing PostgreSQL DSN") - } - // An explicit database name overrides one parsed from the DSN. - if opts.Database != "" { - config.Database = opts.Database - } - - // Attempt to open the database. - db.db = pgxstdlib.OpenDB(*config) - err = db.db.Ping() - if err != nil && strings.Contains(err.Error(), "(SQLSTATE 3D000)") { - // The database does not exist. Create it. - err = createDatabase(config) - if err != nil { - return err - } - - // Attempt to open the database again. - db.db = pgxstdlib.OpenDB(*config) - err = db.db.Ping() - } - if err != nil { - return errors.Wrapf(err, "error connecting to PostgreSQL database") - } - - return nil -} - -// Close shutsdown the database driver. -func (db *DB) Close() error { - return errors.WithStack(db.db.Close()) -} - -func getAllQry(bucket []byte) string { - return fmt.Sprintf("SELECT * FROM %s", quoteIdentifier(string(bucket))) -} - -func getQry(bucket []byte) string { - return fmt.Sprintf("SELECT nvalue FROM %s WHERE nkey = $1;", quoteIdentifier(string(bucket))) -} - -func getQryForUpdate(bucket []byte) string { - return fmt.Sprintf("SELECT nvalue FROM %s WHERE nkey = $1 FOR UPDATE;", quoteIdentifier(string(bucket))) -} - -func insertUpdateQry(bucket []byte) string { - return fmt.Sprintf("INSERT INTO %s (nkey, nvalue) VALUES ($1, $2) ON CONFLICT (nkey) DO UPDATE SET nvalue = excluded.nvalue;", quoteIdentifier(string(bucket))) -} - -func delQry(bucket []byte) string { - return fmt.Sprintf("DELETE FROM %s WHERE nkey = $1;", quoteIdentifier(string(bucket))) -} - -func createTableQry(bucket []byte) string { - return fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (nkey BYTEA CHECK (octet_length(nkey) <= 255), nvalue BYTEA, PRIMARY KEY (nkey));", 
quoteIdentifier(string(bucket))) -} - -func deleteTableQry(bucket []byte) string { - return fmt.Sprintf("DROP TABLE %s;", quoteIdentifier(string(bucket))) -} - -// Get retrieves the column/row with given key. -func (db *DB) Get(bucket, key []byte) ([]byte, error) { - var val string - err := db.db.QueryRow(getQry(bucket), key).Scan(&val) - switch { - case err == sql.ErrNoRows: - return nil, errors.Wrapf(database.ErrNotFound, "%s/%s not found", bucket, key) - case err != nil: - return nil, errors.Wrapf(err, "failed to get %s/%s", bucket, key) - default: - return []byte(val), nil - } -} - -// Set inserts the key and value into the given bucket(column). -func (db *DB) Set(bucket, key, value []byte) error { - _, err := db.db.Exec(insertUpdateQry(bucket), key, value) - if err != nil { - return errors.Wrapf(err, "failed to set %s/%s", bucket, key) - } - return nil -} - -// Del deletes a row from the database. -func (db *DB) Del(bucket, key []byte) error { - _, err := db.db.Exec(delQry(bucket), key) - return errors.Wrapf(err, "failed to delete %s/%s", bucket, key) -} - -// List returns the full list of entries in a column. 
-func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - rows, err := db.db.Query(getAllQry(bucket)) - if err != nil { - estr := err.Error() - if strings.Contains(estr, "(SQLSTATE 42P01)") { - return nil, errors.Wrapf(database.ErrNotFound, estr) - } - return nil, errors.Wrapf(err, "error querying table %s", bucket) - } - defer rows.Close() - var ( - key, value string - entries []*database.Entry - ) - for rows.Next() { - err := rows.Scan(&key, &value) - if err != nil { - return nil, errors.Wrap(err, "error getting key and value from row") - } - entries = append(entries, &database.Entry{ - Bucket: bucket, - Key: []byte(key), - Value: []byte(value), - }) - } - err = rows.Err() - if err != nil { - return nil, errors.Wrap(err, "error accessing row") - } - return entries, nil -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. -func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - sqlTx, err := db.db.Begin() - if err != nil { - return nil, false, errors.WithStack(err) - } - - val, swapped, err := cmpAndSwap(sqlTx, bucket, key, oldValue, newValue) - switch { - case err != nil: - if err := sqlTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to execute CmpAndSwap transaction on %s/%s and failed to rollback transaction", bucket, key) - } - return nil, false, err - case swapped: - if err := sqlTx.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit PostgreSQL transaction") - } - return val, swapped, nil - default: - if err := sqlTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to rollback read-only CmpAndSwap transaction on %s/%s", bucket, key) - } - return val, swapped, err - } -} - -func cmpAndSwap(sqlTx *sql.Tx, bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - var current []byte - err := sqlTx.QueryRow(getQryForUpdate(bucket), key).Scan(¤t) - - if 
err != nil && !errors.Is(err, sql.ErrNoRows) { - return nil, false, err - } - if !bytes.Equal(current, oldValue) { - return current, false, nil - } - - if _, err = sqlTx.Exec(insertUpdateQry(bucket), key, newValue); err != nil { - return nil, false, errors.Wrapf(err, "failed to set %s/%s", bucket, key) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. -func (db *DB) Update(tx *database.Tx) error { - sqlTx, err := db.db.Begin() - if err != nil { - return errors.WithStack(err) - } - rollback := func(err error) error { - if rollbackErr := sqlTx.Rollback(); rollbackErr != nil { - return errors.Wrap(err, "UPDATE failed, unable to rollback transaction") - } - return errors.Wrap(err, "UPDATE failed") - } - for _, q := range tx.Operations { - // create or delete buckets - switch q.Cmd { - case database.CreateTable: - _, err := sqlTx.Exec(createTableQry(q.Bucket)) - if err != nil { - return rollback(errors.Wrapf(err, "failed to create table %s", q.Bucket)) - } - case database.DeleteTable: - _, err := sqlTx.Exec(deleteTableQry(q.Bucket)) - if err != nil { - estr := err.Error() - if strings.Contains(estr, "(SQLSTATE 42P01)") { - return errors.Wrapf(database.ErrNotFound, estr) - } - return errors.Wrapf(err, "failed to delete table %s", q.Bucket) - } - case database.Get: - var val string - err := sqlTx.QueryRow(getQry(q.Bucket), q.Key).Scan(&val) - switch { - case err == sql.ErrNoRows: - return rollback(errors.Wrapf(database.ErrNotFound, "%s/%s not found", q.Bucket, q.Key)) - case err != nil: - return rollback(errors.Wrapf(err, "failed to get %s/%s", q.Bucket, q.Key)) - default: - q.Result = []byte(val) - } - case database.Set: - if _, err = sqlTx.Exec(insertUpdateQry(q.Bucket), q.Key, q.Value); err != nil { - return rollback(errors.Wrapf(err, "failed to set %s/%s", q.Bucket, q.Key)) - } - case database.Delete: - if _, err = sqlTx.Exec(delQry(q.Bucket), q.Key); err != nil { - return rollback(errors.Wrapf(err, "failed to 
delete %s/%s", q.Bucket, q.Key)) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwap(sqlTx, q.Bucket, q.Key, q.CmpValue, q.Value) - if err != nil { - return rollback(errors.Wrapf(err, "failed to load-or-store %s/%s", q.Bucket, q.Key)) - } - case database.CmpOrRollback: - return database.ErrOpNotSupported - default: - return database.ErrOpNotSupported - } - } - - if err = errors.WithStack(sqlTx.Commit()); err != nil { - return rollback(err) - } - return nil -} - -// CreateTable creates a table in the database. -func (db *DB) CreateTable(bucket []byte) error { - _, err := db.db.Exec(createTableQry(bucket)) - if err != nil { - return errors.Wrapf(err, "failed to create table %s", bucket) - } - return nil -} - -// DeleteTable deletes a table in the database. -func (db *DB) DeleteTable(bucket []byte) error { - _, err := db.db.Exec(deleteTableQry(bucket)) - if err != nil { - estr := err.Error() - if strings.Contains(estr, "(SQLSTATE 42P01)") { - return errors.Wrapf(database.ErrNotFound, estr) - } - return errors.Wrapf(err, "failed to delete table %s", bucket) - } - return nil -}