From 14dc24caf9073e6161b6eb82257c5a313d8c3515 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Tue, 5 Mar 2024 14:25:47 -0500 Subject: [PATCH 01/12] Handle prefixes when listing blocks from S3 fixes #3465 --- CHANGELOG.md | 1 + tempodb/backend/s3/s3.go | 2 +- tempodb/backend/s3/s3_test.go | 109 ++++++++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 298950a195e..64b92252707 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ * [BUGFIX] Fix metrics query results when filtering and rating on the same attribute [#3428](https://github.com/grafana/tempo/issues/3428) (@mdisibio) * [BUGFIX] Fix metrics query results when series contain empty strings or nil values [#3429](https://github.com/grafana/tempo/issues/3429) (@mdisibio) * [BUGFIX] Return unfiltered results when a bad TraceQL query is provided in autocomplete. [#3426](https://github.com/grafana/tempo/pull/3426) (@mapno) +* [BUGFIX] Fix compaction/retention in AWS S3 when a prefix is configured. [#3465](https://github.com/grafana/tempo/issues/3465) (@bpfoster) ## v2.4.0 diff --git a/tempodb/backend/s3/s3.go b/tempodb/backend/s3/s3.go index 709a844660c..56d03306fa5 100644 --- a/tempodb/backend/s3/s3.go +++ b/tempodb/backend/s3/s3.go @@ -334,7 +334,7 @@ func (rw *readerWriter) ListBlocks( for _, c := range res.Contents { // i.e: /meta - parts := strings.Split(c.Key, "/") + parts := strings.Split(strings.TrimPrefix(c.Key, rw.cfg.Prefix), "/") if len(parts) != 3 { continue } diff --git a/tempodb/backend/s3/s3_test.go b/tempodb/backend/s3/s3_test.go index f4f460972c0..d86d4fb45c2 100644 --- a/tempodb/backend/s3/s3_test.go +++ b/tempodb/backend/s3/s3_test.go @@ -449,6 +449,115 @@ func TestObjectWithPrefix(t *testing.T) { } } +func TestListBlocksWithPrefix(t *testing.T) { + tests := []struct { + name string + prefix string + objectName string + keyPath backend.KeyPath + httpHandler func(t *testing.T) http.HandlerFunc + }{ + { + name: "with prefix", + prefix: "a/b/c/", + keyPath: backend.KeyPath{"test"}, + httpHandler: func(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method == getMethod { + _, _ = w.Write([]byte(` + + blerg + a/b/c + + 2 + 100 + url + false + + a/b/c/single-tenant/00000000-0000-0000-0000-000000000000/meta.json + 2024-03-01T00:00:00.000Z + "d42a22ddd183f61924c661b1c026c1ef" + 398 + STANDARD + + + + a/b/c/single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json + 2024-03-01T00:00:00.000Z + "d42a22ddd183f61924c661b1c026c1ef" + 398 + STANDARD + + `)) + return + } + } + }, + }, + { + name: "without prefix", + prefix: "", + keyPath: backend.KeyPath{"test"}, + httpHandler: func(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method == getMethod { + _, _ = w.Write([]byte(` + + blerg + + + 2 + 100 + url + false + + single-tenant/00000000-0000-0000-0000-000000000000/meta.json + 2024-03-01T00:00:00.000Z + "d42a22ddd183f61924c661b1c026c1ef" + 398 + STANDARD + + + + single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json + 2024-03-01T00:00:00.000Z + "d42a22ddd183f61924c661b1c026c1ef" + 398 + STANDARD + + `)) + return + } + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := testServer(t, tc.httpHandler(t)) + r, _, _, err := New(&Config{ + Region: "blerg", + AccessKey: "test", + SecretKey: flagext.SecretWithValue("test"), + Bucket: "blerg", + Prefix: tc.prefix, + Insecure: true, + Endpoint: 
server.URL[7:], + ListBlocksConcurrency: 1, + }) + require.NoError(t, err) + + ctx := context.Background() + blockIDs, compactedBlockIDs, err := r.ListBlocks(ctx, "single-tenant") + assert.NoError(t, err) + + assert.Equal(t, 1, len(blockIDs)) + assert.Equal(t, 1, len(compactedBlockIDs)) + }) + } +} + func TestObjectStorageClass(t *testing.T) { tests := []struct { name string From 9f3bc439d0a4c7407f9b8ffa0c02ddf2bb349717 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Wed, 6 Mar 2024 08:25:30 -0500 Subject: [PATCH 02/12] Handle prefixes when listing blocks from GCS --- CHANGELOG.md | 2 +- tempodb/backend/gcs/gcs.go | 2 +- tempodb/backend/gcs/gcs_test.go | 104 ++++++++++++++++++++++++++++++++ 3 files changed, 106 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64b92252707..bbadba4d098 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ * [BUGFIX] Fix metrics query results when filtering and rating on the same attribute [#3428](https://github.com/grafana/tempo/issues/3428) (@mdisibio) * [BUGFIX] Fix metrics query results when series contain empty strings or nil values [#3429](https://github.com/grafana/tempo/issues/3429) (@mdisibio) * [BUGFIX] Return unfiltered results when a bad TraceQL query is provided in autocomplete. [#3426](https://github.com/grafana/tempo/pull/3426) (@mapno) -* [BUGFIX] Fix compaction/retention in AWS S3 when a prefix is configured. [#3465](https://github.com/grafana/tempo/issues/3465) (@bpfoster) +* [BUGFIX] Fix compaction/retention in AWS S3 and GCS when a prefix is configured. [#3465](https://github.com/grafana/tempo/issues/3465) (@bpfoster) ## v2.4.0 diff --git a/tempodb/backend/gcs/gcs.go b/tempodb/backend/gcs/gcs.go index c864daae3be..587296fb506 100644 --- a/tempodb/backend/gcs/gcs.go +++ b/tempodb/backend/gcs/gcs.go @@ -253,7 +253,7 @@ func (rw *readerWriter) ListBlocks(ctx context.Context, tenant string) ([]uuid.U return } - parts = strings.Split(attrs.Name, "/") + parts = strings.Split(strings.TrimPrefix(attrs.Name, rw.cfg.Prefix), "/") // ie: //meta.json if len(parts) != 3 { continue diff --git a/tempodb/backend/gcs/gcs_test.go b/tempodb/backend/gcs/gcs_test.go index 96face1846a..0021cb394eb 100644 --- a/tempodb/backend/gcs/gcs_test.go +++ b/tempodb/backend/gcs/gcs_test.go @@ -299,6 +299,110 @@ func TestObjectWithPrefix(t *testing.T) { } } +func TestListBlocksWithPrefix(t *testing.T) { + tests := []struct { + name string + prefix string + objectName string + keyPath backend.KeyPath + httpHandler func(t *testing.T) http.HandlerFunc + }{ + { + name: "with prefix", + prefix: "a/b/c/", + keyPath: backend.KeyPath{"test"}, + httpHandler: func(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + _, _ = w.Write([]byte(` + { + "kind": "storage#objects", + "items": [{ + "kind": "storage#object", + "id": "1", + "name": "a/b/c/single-tenant/00000000-0000-0000-0000-000000000000/meta.json", + "bucket": "blerg", + "storageClass": "STANDARD", + "size": "1024", + "timeCreated": "2024-03-01T00:00:00.000Z", + "updated": "2024-03-01T00:00:00.000Z" + }, { + "kind": "storage#object", + "id": "2", + "name": "a/b/c/single-tenant/00000000-0000-0000-0000-000000000000/meta.compacted.json", + "bucket": "blerg", + "storageClass": "STANDARD", + "size": "1024", + "timeCreated": "2024-03-01T00:00:00.000Z", + "updated": "2024-03-01T00:00:00.000Z" + }] + } + `)) + return + } + } + }, + }, + { + name: "without prefix", + prefix: "", + keyPath: backend.KeyPath{"test"}, + httpHandler: func(t 
*testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + _, _ = w.Write([]byte(` + { + "kind": "storage#objects", + "items": [{ + "kind": "storage#object", + "id": "1", + "name": "single-tenant/00000000-0000-0000-0000-000000000000/meta.json", + "bucket": "blerg", + "storageClass": "STANDARD", + "size": "1024", + "timeCreated": "2024-03-01T00:00:00.000Z", + "updated": "2024-03-01T00:00:00.000Z" + }, { + "kind": "storage#object", + "id": "2", + "name": "single-tenant/00000000-0000-0000-0000-000000000000/meta.compacted.json", + "bucket": "blerg", + "storageClass": "STANDARD", + "size": "1024", + "timeCreated": "2024-03-01T00:00:00.000Z", + "updated": "2024-03-01T00:00:00.000Z" + }] + } + `)) + return + } + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := testServer(t, tc.httpHandler(t)) + r, _, _, err := New(&Config{ + BucketName: "blerg", + Endpoint: server.URL, + Insecure: true, + Prefix: tc.prefix, + ListBlocksConcurrency: 1, + }) + require.NoError(t, err) + + ctx := context.Background() + blockIDs, compactedBlockIDs, err := r.ListBlocks(ctx, "single-tenant") + assert.NoError(t, err) + + assert.Equal(t, 1, len(blockIDs)) + assert.Equal(t, 1, len(compactedBlockIDs)) + }) + } +} + func testServer(t *testing.T, httpHandler http.HandlerFunc) *httptest.Server { t.Helper() assert.NotNil(t, httpHandler) From 2c69f28cb52f89c008cdd50ec287261b4adfd484 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Wed, 6 Mar 2024 08:46:22 -0500 Subject: [PATCH 03/12] Add test for prefixes when listing blocks from Azure --- tempodb/backend/azure/azure_test.go | 144 ++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) diff --git a/tempodb/backend/azure/azure_test.go b/tempodb/backend/azure/azure_test.go index 8e028de00af..93dd9579a0a 100644 --- a/tempodb/backend/azure/azure_test.go +++ b/tempodb/backend/azure/azure_test.go @@ -252,6 +252,150 @@ func TestObjectWithPrefix(t *testing.T) { } } +func TestListBlocksWithPrefix(t *testing.T) { + tests := []struct { + name string + prefix string + objectName string + keyPath backend.KeyPath + httpHandler func(t *testing.T) http.HandlerFunc + }{ + { + name: "with prefix", + prefix: "a/b/c/", + keyPath: backend.KeyPath{"test"}, + httpHandler: func(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + _, _ = w.Write([]byte(` + + + a/b/c/ + 100 + + + a/b/c/single-tenant/00000000-0000-0000-0000-000000000000/meta.json + https://myaccount.blob.core.windows.net/mycontainer/a/b/c/single-tenant/00000000-0000-0000-0000-000000000000/meta.json + + Fri, 01 Mar 2024 00:00:00 GMT + 0x8CBFF45D8A29A19 + 100 + text/html + + en-US + + no-cache + BlockBlob + unlocked + + + + + a/b/c/single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json + https://myaccount.blob.core.windows.net/mycontainer/a/b/c/single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json + + Fri, 01 Mar 2024 00:00:00 GMT + 0x8CBFF45D8A29A19 + 100 + text/html + + en-US + + no-cache + BlockBlob + unlocked + + + + + + `)) + return + } + } + }, + }, + { + name: "without prefix", + prefix: "", + keyPath: backend.KeyPath{"test"}, + httpHandler: func(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method == "GET" { + _, _ = w.Write([]byte(` + + + + 100 + + + single-tenant/00000000-0000-0000-0000-000000000000/meta.json + 
https://myaccount.blob.core.windows.net/mycontainer/single-tenant/00000000-0000-0000-0000-000000000000/meta.json + + Fri, 01 Mar 2024 00:00:00 GMT + 0x8CBFF45D8A29A19 + 100 + text/html + + en-US + + no-cache + BlockBlob + unlocked + + + + + single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json + https://myaccount.blob.core.windows.net/mycontainer/single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json + + Fri, 01 Mar 2024 00:00:00 GMT + 0x8CBFF45D8A29A19 + 100 + text/html + + en-US + + no-cache + BlockBlob + unlocked + + + + + + `)) + return + } + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := testServer(t, tc.httpHandler(t)) + r, _, _, err := New(&config.Config{ + StorageAccountName: "testing_account", + StorageAccountKey: flagext.SecretWithValue("YQo="), + MaxBuffers: 3, + BufferSize: 1000, + ContainerName: "blerg", + Prefix: tc.prefix, + Endpoint: server.URL[7:], // [7:] -> strip http://, + }) + require.NoError(t, err) + + ctx := context.Background() + blockIDs, compactedBlockIDs, err2 := r.ListBlocks(ctx, "single-tenant") + assert.NoError(t, err2) + + assert.Equal(t, 1, len(blockIDs)) + assert.Equal(t, 1, len(compactedBlockIDs)) + }) + } +} + func testServer(t *testing.T, httpHandler http.HandlerFunc) *httptest.Server { t.Helper() assert.NotNil(t, httpHandler) From 22b1b6b2df99b512f20794e823c351d22fb4fca0 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Tue, 12 Mar 2024 11:55:09 -0400 Subject: [PATCH 04/12] Update unit tests to check for actual block IDs instead of just length of the slices Cleanup unit tests --- tempodb/backend/azure/azure_test.go | 39 ++++++++++++++++---------- tempodb/backend/gcs/gcs_test.go | 43 +++++++++++++++++------------ tempodb/backend/s3/s3_test.go | 40 +++++++++++++++++---------- 3 files changed, 75 insertions(+), 47 deletions(-) diff --git a/tempodb/backend/azure/azure_test.go b/tempodb/backend/azure/azure_test.go index 93dd9579a0a..c923259a03e 100644 --- a/tempodb/backend/azure/azure_test.go +++ b/tempodb/backend/azure/azure_test.go @@ -254,19 +254,24 @@ func TestObjectWithPrefix(t *testing.T) { func TestListBlocksWithPrefix(t *testing.T) { tests := []struct { - name string - prefix string - objectName string - keyPath backend.KeyPath - httpHandler func(t *testing.T) http.HandlerFunc + name string + prefix string + liveBlockIDs []uuid.UUID + compactedBlockIDs []uuid.UUID + tenant string + httpHandler func(t *testing.T) http.HandlerFunc }{ { - name: "with prefix", - prefix: "a/b/c/", - keyPath: backend.KeyPath{"test"}, + name: "with prefix", + prefix: "a/b/c/", + tenant: "single-tenant", + liveBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000000")}, + compactedBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000001")}, httpHandler: func(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { + assert.Equal(t, "a/b/c/single-tenant/", r.URL.Query().Get("prefix")) + _, _ = w.Write([]byte(` @@ -316,12 +321,16 @@ func TestListBlocksWithPrefix(t *testing.T) { }, }, { - name: "without prefix", - prefix: "", - keyPath: backend.KeyPath{"test"}, + name: "without prefix", + prefix: "", + tenant: "single-tenant", + liveBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000000")}, + compactedBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000001")}, httpHandler: func(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if 
r.Method == "GET" { + assert.Equal(t, "single-tenant/", r.URL.Query().Get("prefix")) + _, _ = w.Write([]byte(` @@ -375,7 +384,7 @@ func TestListBlocksWithPrefix(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { server := testServer(t, tc.httpHandler(t)) - r, _, _, err := New(&config.Config{ + r, _, _, err := NewNoConfirm(&config.Config{ StorageAccountName: "testing_account", StorageAccountKey: flagext.SecretWithValue("YQo="), MaxBuffers: 3, @@ -387,11 +396,11 @@ func TestListBlocksWithPrefix(t *testing.T) { require.NoError(t, err) ctx := context.Background() - blockIDs, compactedBlockIDs, err2 := r.ListBlocks(ctx, "single-tenant") + blockIDs, compactedBlockIDs, err2 := r.ListBlocks(ctx, tc.tenant) assert.NoError(t, err2) - assert.Equal(t, 1, len(blockIDs)) - assert.Equal(t, 1, len(compactedBlockIDs)) + assert.ElementsMatchf(t, tc.liveBlockIDs, blockIDs, "Block IDs did not match") + assert.ElementsMatchf(t, tc.compactedBlockIDs, compactedBlockIDs, "Compacted block IDs did not match") }) } } diff --git a/tempodb/backend/gcs/gcs_test.go b/tempodb/backend/gcs/gcs_test.go index 0021cb394eb..e0153eced1e 100644 --- a/tempodb/backend/gcs/gcs_test.go +++ b/tempodb/backend/gcs/gcs_test.go @@ -301,19 +301,24 @@ func TestObjectWithPrefix(t *testing.T) { func TestListBlocksWithPrefix(t *testing.T) { tests := []struct { - name string - prefix string - objectName string - keyPath backend.KeyPath - httpHandler func(t *testing.T) http.HandlerFunc + name string + prefix string + tenant string + liveBlockIDs []uuid.UUID + compactedBlockIDs []uuid.UUID + httpHandler func(t *testing.T) http.HandlerFunc }{ { - name: "with prefix", - prefix: "a/b/c/", - keyPath: backend.KeyPath{"test"}, + name: "with prefix", + prefix: "a/b/c/", + tenant: "single-tenant", + liveBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000000")}, + compactedBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000001")}, httpHandler: func(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { + assert.Equal(t, "a/b/c/single-tenant/", r.URL.Query().Get("prefix")) + _, _ = w.Write([]byte(` { "kind": "storage#objects", @@ -329,7 +334,7 @@ func TestListBlocksWithPrefix(t *testing.T) { }, { "kind": "storage#object", "id": "2", - "name": "a/b/c/single-tenant/00000000-0000-0000-0000-000000000000/meta.compacted.json", + "name": "a/b/c/single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json", "bucket": "blerg", "storageClass": "STANDARD", "size": "1024", @@ -344,12 +349,16 @@ func TestListBlocksWithPrefix(t *testing.T) { }, }, { - name: "without prefix", - prefix: "", - keyPath: backend.KeyPath{"test"}, + name: "without prefix", + prefix: "", + tenant: "single-tenant", + liveBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000000")}, + compactedBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000001")}, httpHandler: func(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { + assert.Equal(t, "single-tenant/", r.URL.Query().Get("prefix")) + _, _ = w.Write([]byte(` { "kind": "storage#objects", @@ -365,7 +374,7 @@ func TestListBlocksWithPrefix(t *testing.T) { }, { "kind": "storage#object", "id": "2", - "name": "single-tenant/00000000-0000-0000-0000-000000000000/meta.compacted.json", + "name": "single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json", "bucket": "blerg", "storageClass": "STANDARD", "size": "1024", 
@@ -384,7 +393,7 @@ func TestListBlocksWithPrefix(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { server := testServer(t, tc.httpHandler(t)) - r, _, _, err := New(&Config{ + r, _, _, err := NewNoConfirm(&Config{ BucketName: "blerg", Endpoint: server.URL, Insecure: true, @@ -394,11 +403,11 @@ func TestListBlocksWithPrefix(t *testing.T) { require.NoError(t, err) ctx := context.Background() - blockIDs, compactedBlockIDs, err := r.ListBlocks(ctx, "single-tenant") + blockIDs, compactedBlockIDs, err := r.ListBlocks(ctx, tc.tenant) assert.NoError(t, err) - assert.Equal(t, 1, len(blockIDs)) - assert.Equal(t, 1, len(compactedBlockIDs)) + assert.ElementsMatchf(t, tc.liveBlockIDs, blockIDs, "Block IDs did not match") + assert.ElementsMatchf(t, tc.compactedBlockIDs, compactedBlockIDs, "Compacted block IDs did not match") }) } } diff --git a/tempodb/backend/s3/s3_test.go b/tempodb/backend/s3/s3_test.go index d86d4fb45c2..dceface4c37 100644 --- a/tempodb/backend/s3/s3_test.go +++ b/tempodb/backend/s3/s3_test.go @@ -7,6 +7,7 @@ import ( "encoding/json" "encoding/xml" "fmt" + "github.com/google/uuid" "math/rand" "net/http" "net/http/httptest" @@ -451,19 +452,24 @@ func TestObjectWithPrefix(t *testing.T) { func TestListBlocksWithPrefix(t *testing.T) { tests := []struct { - name string - prefix string - objectName string - keyPath backend.KeyPath - httpHandler func(t *testing.T) http.HandlerFunc + name string + prefix string + tenant string + liveBlockIDs []uuid.UUID + compactedBlockIDs []uuid.UUID + httpHandler func(t *testing.T) http.HandlerFunc }{ { - name: "with prefix", - prefix: "a/b/c/", - keyPath: backend.KeyPath{"test"}, + name: "with prefix", + prefix: "a/b/c/", + tenant: "single-tenant", + liveBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000000")}, + compactedBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000001")}, httpHandler: func(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.Method == getMethod { + assert.Equal(t, "a/b/c/single-tenant/", r.URL.Query().Get("prefix")) + _, _ = w.Write([]byte(` blerg @@ -495,12 +501,16 @@ func TestListBlocksWithPrefix(t *testing.T) { }, }, { - name: "without prefix", - prefix: "", - keyPath: backend.KeyPath{"test"}, + name: "without prefix", + prefix: "", + liveBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000000")}, + compactedBlockIDs: []uuid.UUID{uuid.MustParse("00000000-0000-0000-0000-000000000001")}, + tenant: "single-tenant", httpHandler: func(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.Method == getMethod { + assert.Equal(t, "single-tenant/", r.URL.Query().Get("prefix")) + _, _ = w.Write([]byte(` blerg @@ -536,7 +546,7 @@ func TestListBlocksWithPrefix(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { server := testServer(t, tc.httpHandler(t)) - r, _, _, err := New(&Config{ + r, _, _, err := NewNoConfirm(&Config{ Region: "blerg", AccessKey: "test", SecretKey: flagext.SecretWithValue("test"), @@ -549,11 +559,11 @@ func TestListBlocksWithPrefix(t *testing.T) { require.NoError(t, err) ctx := context.Background() - blockIDs, compactedBlockIDs, err := r.ListBlocks(ctx, "single-tenant") + blockIDs, compactedBlockIDs, err := r.ListBlocks(ctx, tc.tenant) assert.NoError(t, err) - assert.Equal(t, 1, len(blockIDs)) - assert.Equal(t, 1, len(compactedBlockIDs)) + assert.ElementsMatchf(t, tc.liveBlockIDs, blockIDs, "Block IDs did not match") + 
assert.ElementsMatchf(t, tc.compactedBlockIDs, compactedBlockIDs, "Compacted block IDs did not match") }) } } From b994d2db2c1d2674be13d8f1efa31feef15d3b92 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Tue, 12 Mar 2024 13:40:49 -0400 Subject: [PATCH 05/12] Further refine S3/GCS backend for ListBlocks Brings logic more in line with Azure object parsing. Also has the benefit of handling prefixes without a trailing slash. --- tempodb/backend/gcs/gcs.go | 12 ++++++------ tempodb/backend/s3/s3.go | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tempodb/backend/gcs/gcs.go b/tempodb/backend/gcs/gcs.go index 587296fb506..1d4b346e68b 100644 --- a/tempodb/backend/gcs/gcs.go +++ b/tempodb/backend/gcs/gcs.go @@ -253,20 +253,20 @@ func (rw *readerWriter) ListBlocks(ctx context.Context, tenant string) ([]uuid.U return } - parts = strings.Split(strings.TrimPrefix(attrs.Name, rw.cfg.Prefix), "/") - // ie: //meta.json - if len(parts) != 3 { + parts = strings.Split(strings.TrimPrefix(attrs.Name, prefix), "/") + // ie: /meta.json + if len(parts) != 2 { continue } - switch parts[2] { + switch parts[1] { case backend.MetaName: case backend.CompactedMetaName: default: continue } - id, err = uuid.Parse(parts[1]) + id, err = uuid.Parse(parts[0]) if err != nil { continue } @@ -283,7 +283,7 @@ func (rw *readerWriter) ListBlocks(ctx context.Context, tenant string) ([]uuid.U } mtx.Lock() - switch parts[2] { + switch parts[1] { case backend.MetaName: blockIDs = append(blockIDs, id) case backend.CompactedMetaName: diff --git a/tempodb/backend/s3/s3.go b/tempodb/backend/s3/s3.go index 56d03306fa5..dd037112e68 100644 --- a/tempodb/backend/s3/s3.go +++ b/tempodb/backend/s3/s3.go @@ -333,20 +333,20 @@ func (rw *readerWriter) ListBlocks( } for _, c := range res.Contents { - // i.e: /meta - parts := strings.Split(strings.TrimPrefix(c.Key, rw.cfg.Prefix), "/") - if len(parts) != 3 { + // i.e: /meta + parts := strings.Split(strings.TrimPrefix(c.Key, prefix), "/") + if len(parts) != 2 { continue } - switch parts[2] { + switch parts[1] { case backend.MetaName: case backend.CompactedMetaName: default: continue } - id, err := uuid.Parse(parts[1]) + id, err := uuid.Parse(parts[0]) if err != nil { continue } @@ -363,7 +363,7 @@ func (rw *readerWriter) ListBlocks( } mtx.Lock() - switch parts[2] { + switch parts[1] { case backend.MetaName: blockIDs = append(blockIDs, id) case backend.CompactedMetaName: From 9a26f0af264671305ebb322b336912eaf2e809ea Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Tue, 12 Mar 2024 13:41:19 -0400 Subject: [PATCH 06/12] Update poller integration test to exercise prefixes --- integration/poller/poller_test.go | 270 ++++++++++++++++-------------- 1 file changed, 148 insertions(+), 122 deletions(-) diff --git a/integration/poller/poller_test.go b/integration/poller/poller_test.go index 65af8b2541e..5d013455dae 100644 --- a/integration/poller/poller_test.go +++ b/integration/poller/poller_test.go @@ -61,132 +61,158 @@ func TestPollerOwnership(t *testing.T) { }, } + storageBackendTestPermutations := []struct { + name string + prefix string + }{ + { + name: "empty-string-prefix", + prefix: "", + }, + { + name: "no-prefix", + }, + { + name: "prefix", + prefix: "a/b/c/", + }, + { + name: "prefix-no-trailing-slash", + prefix: "a/b/c", + }, + } + logger := log.NewLogfmtLogger(os.Stdout) var hhh *e2e.HTTPService t.Parallel() for _, tc := range testCompactorOwnershipBackends { - t.Run(tc.name, func(t *testing.T) { - s, err := e2e.NewScenario("tempo-integration") - require.NoError(t, 
err) - defer s.Close() - - // set up the backend - cfg := app.Config{} - buff, err := os.ReadFile(tc.configFile) - require.NoError(t, err) - err = yaml.UnmarshalStrict(buff, &cfg) - require.NoError(t, err) - hhh, err = e2eBackend.New(s, cfg) - require.NoError(t, err) - - err = hhh.WaitReady() - require.NoError(t, err) - - err = hhh.Ready() - require.NoError(t, err) - - // Give some time for startup - time.Sleep(1 * time.Second) - - t.Logf("backend: %s", hhh.Endpoint(hhh.HTTPPort())) - - require.NoError(t, util.CopyFileToSharedDir(s, tc.configFile, "config.yaml")) - - var rr backend.RawReader - var ww backend.RawWriter - var cc backend.Compactor - - concurrency := 3 - - e := hhh.Endpoint(hhh.HTTPPort()) - switch tc.name { - case "s3": - cfg.StorageConfig.Trace.S3.ListBlocksConcurrency = concurrency - cfg.StorageConfig.Trace.S3.Endpoint = e - cfg.Overrides.UserConfigurableOverridesConfig.Client.S3.Endpoint = e - rr, ww, cc, err = s3.New(cfg.StorageConfig.Trace.S3) - case "gcs": - cfg.StorageConfig.Trace.GCS.ListBlocksConcurrency = concurrency - cfg.StorageConfig.Trace.GCS.Endpoint = e - cfg.Overrides.UserConfigurableOverridesConfig.Client.GCS.Endpoint = e - rr, ww, cc, err = gcs.New(cfg.StorageConfig.Trace.GCS) - case "azure": - cfg.StorageConfig.Trace.Azure.Endpoint = e - cfg.Overrides.UserConfigurableOverridesConfig.Client.Azure.Endpoint = e - rr, ww, cc, err = azure.New(cfg.StorageConfig.Trace.Azure) - } - require.NoError(t, err) - - r := backend.NewReader(rr) - w := backend.NewWriter(ww) - - blocklistPoller := blocklist.NewPoller(&blocklist.PollerConfig{ - PollConcurrency: 3, - TenantIndexBuilders: 1, - }, OwnsEverythingSharder, r, cc, w, logger) - - // Use the block boundaries in the GCS and S3 implementation - bb := blockboundary.CreateBlockBoundaries(concurrency) - // Pick a boundary to use for this test - base := bb[1] - expected := []uuid.UUID{} - - expected = append(expected, uuid.MustParse("00000000-0000-0000-0000-000000000000")) - expected = append(expected, uuid.MustParse("ffffffff-ffff-ffff-ffff-ffffffffffff")) - - // Grab the one before the boundary - decrementUUIDBytes(base) - expected = append(expected, uuid.UUID(base)) - - incrementUUIDBytes(base) - expected = append(expected, uuid.UUID(base)) - - incrementUUIDBytes(base) - expected = append(expected, uuid.UUID(base)) - - incrementUUIDBytes(base) - expected = append(expected, uuid.UUID(base)) - - writeTenantBlocks(t, w, tenant, expected) - - sort.Slice(expected, func(i, j int) bool { return expected[i].String() < expected[j].String() }) - t.Logf("expected: %v", expected) - - mmResults, cmResults, err := rr.ListBlocks(context.Background(), tenant) - require.NoError(t, err) - sort.Slice(mmResults, func(i, j int) bool { return mmResults[i].String() < mmResults[j].String() }) - t.Logf("mmResults: %s", mmResults) - t.Logf("cmResults: %s", cmResults) - - assert.Equal(t, expected, mmResults) - assert.Equal(t, len(expected), len(mmResults)) - assert.Equal(t, 0, len(cmResults)) - - l := blocklist.New() - mm, cm, err := blocklistPoller.Do(l) - require.NoError(t, err) - t.Logf("mm: %v", mm) - t.Logf("cm: %v", cm) - - l.ApplyPollResults(mm, cm) - - metas := l.Metas(tenant) - - actual := []uuid.UUID{} - for _, m := range metas { - actual = append(actual, m.BlockID) - } - - sort.Slice(actual, func(i, j int) bool { return actual[i].String() < actual[j].String() }) - - assert.Equal(t, expected, actual) - assert.Equal(t, len(expected), len(metas)) - t.Logf("actual: %v", actual) - - for _, e := range expected { - assert.True(t, found(e, 
metas)) - } - }) + for _, pc := range storageBackendTestPermutations { + t.Run(tc.name+"-"+pc.name, func(t *testing.T) { + s, err := e2e.NewScenario("tempo-integration") + require.NoError(t, err) + defer s.Close() + + // set up the backend + cfg := app.Config{} + buff, err := os.ReadFile(tc.configFile) + require.NoError(t, err) + err = yaml.UnmarshalStrict(buff, &cfg) + require.NoError(t, err) + hhh, err = e2eBackend.New(s, cfg) + require.NoError(t, err) + + err = hhh.WaitReady() + require.NoError(t, err) + + err = hhh.Ready() + require.NoError(t, err) + + // Give some time for startup + time.Sleep(1 * time.Second) + + t.Logf("backend: %s", hhh.Endpoint(hhh.HTTPPort())) + + require.NoError(t, util.CopyFileToSharedDir(s, tc.configFile, "config.yaml")) + + var rr backend.RawReader + var ww backend.RawWriter + var cc backend.Compactor + + concurrency := 3 + + e := hhh.Endpoint(hhh.HTTPPort()) + switch tc.name { + case "s3": + cfg.StorageConfig.Trace.S3.ListBlocksConcurrency = concurrency + cfg.StorageConfig.Trace.S3.Endpoint = e + cfg.StorageConfig.Trace.S3.Prefix = pc.prefix + cfg.Overrides.UserConfigurableOverridesConfig.Client.S3.Endpoint = e + rr, ww, cc, err = s3.New(cfg.StorageConfig.Trace.S3) + case "gcs": + cfg.StorageConfig.Trace.GCS.ListBlocksConcurrency = concurrency + cfg.StorageConfig.Trace.GCS.Endpoint = e + cfg.StorageConfig.Trace.GCS.Prefix = pc.prefix + cfg.Overrides.UserConfigurableOverridesConfig.Client.GCS.Endpoint = e + rr, ww, cc, err = gcs.New(cfg.StorageConfig.Trace.GCS) + case "azure": + cfg.StorageConfig.Trace.Azure.Endpoint = e + cfg.StorageConfig.Trace.Azure.Prefix = pc.prefix + cfg.Overrides.UserConfigurableOverridesConfig.Client.Azure.Endpoint = e + rr, ww, cc, err = azure.New(cfg.StorageConfig.Trace.Azure) + } + require.NoError(t, err) + + r := backend.NewReader(rr) + w := backend.NewWriter(ww) + + blocklistPoller := blocklist.NewPoller(&blocklist.PollerConfig{ + PollConcurrency: 3, + TenantIndexBuilders: 1, + }, OwnsEverythingSharder, r, cc, w, logger) + + // Use the block boundaries in the GCS and S3 implementation + bb := blockboundary.CreateBlockBoundaries(concurrency) + // Pick a boundary to use for this test + base := bb[1] + expected := []uuid.UUID{} + + expected = append(expected, uuid.MustParse("00000000-0000-0000-0000-000000000000")) + expected = append(expected, uuid.MustParse("ffffffff-ffff-ffff-ffff-ffffffffffff")) + + // Grab the one before the boundary + decrementUUIDBytes(base) + expected = append(expected, uuid.UUID(base)) + + incrementUUIDBytes(base) + expected = append(expected, uuid.UUID(base)) + + incrementUUIDBytes(base) + expected = append(expected, uuid.UUID(base)) + + incrementUUIDBytes(base) + expected = append(expected, uuid.UUID(base)) + + writeTenantBlocks(t, w, tenant, expected) + + sort.Slice(expected, func(i, j int) bool { return expected[i].String() < expected[j].String() }) + t.Logf("expected: %v", expected) + + mmResults, cmResults, err := rr.ListBlocks(context.Background(), tenant) + require.NoError(t, err) + sort.Slice(mmResults, func(i, j int) bool { return mmResults[i].String() < mmResults[j].String() }) + t.Logf("mmResults: %s", mmResults) + t.Logf("cmResults: %s", cmResults) + + assert.Equal(t, expected, mmResults) + assert.Equal(t, len(expected), len(mmResults)) + assert.Equal(t, 0, len(cmResults)) + + l := blocklist.New() + mm, cm, err := blocklistPoller.Do(l) + require.NoError(t, err) + t.Logf("mm: %v", mm) + t.Logf("cm: %v", cm) + + l.ApplyPollResults(mm, cm) + + metas := l.Metas(tenant) + + actual := []uuid.UUID{} + 
for _, m := range metas { + actual = append(actual, m.BlockID) + } + + sort.Slice(actual, func(i, j int) bool { return actual[i].String() < actual[j].String() }) + + assert.Equal(t, expected, actual) + assert.Equal(t, len(expected), len(metas)) + t.Logf("actual: %v", actual) + + for _, e := range expected { + assert.True(t, found(e, metas)) + } + }) + } } } From b5c5941ae62d348f9f05af0fac75e1c5741e55a3 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Tue, 12 Mar 2024 14:18:07 -0400 Subject: [PATCH 07/12] Update e2e test to exercise prefixes --- .../e2e/config-all-in-one-azurite.yaml | 1 + integration/e2e/config-all-in-one-gcs.yaml | 1 + integration/e2e/config-all-in-one-s3.yaml | 1 + integration/e2e/e2e_test.go | 139 +++++++++++------- 4 files changed, 86 insertions(+), 56 deletions(-) diff --git a/integration/e2e/config-all-in-one-azurite.yaml b/integration/e2e/config-all-in-one-azurite.yaml index 0eadfcd8866..3def0ff6f1b 100644 --- a/integration/e2e/config-all-in-one-azurite.yaml +++ b/integration/e2e/config-all-in-one-azurite.yaml @@ -37,6 +37,7 @@ storage: endpoint_suffix: tempo_e2e-azurite:10000 storage_account_name: "devstoreaccount1" storage_account_key: "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + prefix: {{ .Prefix }} pool: max_workers: 10 queue_depth: 100 diff --git a/integration/e2e/config-all-in-one-gcs.yaml b/integration/e2e/config-all-in-one-gcs.yaml index 06970879de3..efffae085c9 100644 --- a/integration/e2e/config-all-in-one-gcs.yaml +++ b/integration/e2e/config-all-in-one-gcs.yaml @@ -36,6 +36,7 @@ storage: bucket_name: tempo endpoint: https://tempo_e2e-gcs:4443/storage/v1/ insecure: true + prefix: {{ .Prefix }} pool: max_workers: 10 queue_depth: 1000 diff --git a/integration/e2e/config-all-in-one-s3.yaml b/integration/e2e/config-all-in-one-s3.yaml index ab54c060987..a429b183628 100644 --- a/integration/e2e/config-all-in-one-s3.yaml +++ b/integration/e2e/config-all-in-one-s3.yaml @@ -38,6 +38,7 @@ storage: access_key: Cheescake # TODO: use cortex_e2e.MinioAccessKey secret_key: supersecret # TODO: use cortex_e2e.MinioSecretKey insecure: true + prefix: {{ .Prefix }} pool: max_workers: 10 queue_depth: 100 diff --git a/integration/e2e/e2e_test.go b/integration/e2e/e2e_test.go index 5fe1bff8349..bfa6e6f5aad 100644 --- a/integration/e2e/e2e_test.go +++ b/integration/e2e/e2e_test.go @@ -58,83 +58,110 @@ func TestAllInOne(t *testing.T) { }, } + storageBackendTestPermutations := []struct { + name string + prefix string + }{ + { + name: "empty-string-prefix", + prefix: "", + }, + { + name: "no-prefix", + }, + { + name: "prefix", + prefix: "a/b/c/", + }, + { + name: "prefix-no-trailing-slash", + prefix: "a/b/c", + }, + } + for _, tc := range testBackends { - t.Run(tc.name, func(t *testing.T) { - s, err := e2e.NewScenario("tempo_e2e") - require.NoError(t, err) - defer s.Close() + for _, pc := range storageBackendTestPermutations { + t.Run(tc.name+"-"+pc.name, func(t *testing.T) { + s, err := e2e.NewScenario("tempo_e2e") + require.NoError(t, err) + defer s.Close() - // set up the backend - cfg := app.Config{} - buff, err := os.ReadFile(tc.configFile) - require.NoError(t, err) - err = yaml.UnmarshalStrict(buff, &cfg) - require.NoError(t, err) - _, err = backend.New(s, cfg) - require.NoError(t, err) + // copy config template to shared directory and expand template variables + tmplConfig := map[string]any{"Prefix": pc.prefix} + configFile, err := util.CopyTemplateToSharedDir(s, tc.configFile, "config.yaml", tmplConfig) + require.NoError(t, 
err) - require.NoError(t, util.CopyFileToSharedDir(s, tc.configFile, "config.yaml")) - tempo := util.NewTempoAllInOne() - require.NoError(t, s.StartAndWaitReady(tempo)) + // set up the backend + cfg := app.Config{} + buff, err := os.ReadFile(configFile) + require.NoError(t, err) + err = yaml.UnmarshalStrict(buff, &cfg) + require.NoError(t, err) + _, err = backend.New(s, cfg) + require.NoError(t, err) - // Get port for the Jaeger gRPC receiver endpoint - c, err := util.NewJaegerGRPCClient(tempo.Endpoint(14250)) - require.NoError(t, err) - require.NotNil(t, c) + tempo := util.NewTempoAllInOne() + require.NoError(t, s.StartAndWaitReady(tempo)) - info := tempoUtil.NewTraceInfo(time.Now(), "") - require.NoError(t, info.EmitAllBatches(c)) + // Get port for the Jaeger gRPC receiver endpoint + c, err := util.NewJaegerGRPCClient(tempo.Endpoint(14250)) + require.NoError(t, err) + require.NotNil(t, c) - expected, err := info.ConstructTraceFromEpoch() - require.NoError(t, err) + info := tempoUtil.NewTraceInfo(time.Now(), "") + require.NoError(t, info.EmitAllBatches(c)) - // test metrics - require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(spanCount(expected)), "tempo_distributor_spans_received_total")) + expected, err := info.ConstructTraceFromEpoch() + require.NoError(t, err) - // test echo - assertEcho(t, "http://"+tempo.Endpoint(3200)+"/api/echo") + // test metrics + require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(spanCount(expected)), "tempo_distributor_spans_received_total")) - apiClient := httpclient.New("http://"+tempo.Endpoint(3200), "") + // test echo + assertEcho(t, "http://"+tempo.Endpoint(3200)+"/api/echo") - // query an in-memory trace - queryAndAssertTrace(t, apiClient, info) + apiClient := httpclient.New("http://"+tempo.Endpoint(3200), "") - // wait trace_idle_time and ensure trace is created in ingester - require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Less(3), []string{"tempo_ingester_traces_created_total"}, e2e.WaitMissingMetrics)) + // query an in-memory trace + queryAndAssertTrace(t, apiClient, info) - // flush trace to backend - callFlush(t, tempo) + // wait trace_idle_time and ensure trace is created in ingester + require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Less(3), []string{"tempo_ingester_traces_created_total"}, e2e.WaitMissingMetrics)) - // search for trace in backend - util.SearchAndAssertTrace(t, apiClient, info) - util.SearchTraceQLAndAssertTrace(t, apiClient, info) + // flush trace to backend + callFlush(t, tempo) - // sleep - time.Sleep(10 * time.Second) + // search for trace in backend + util.SearchAndAssertTrace(t, apiClient, info) + util.SearchTraceQLAndAssertTrace(t, apiClient, info) - // force clear completed block - callFlush(t, tempo) + // sleep + time.Sleep(10 * time.Second) - // test metrics - require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(1), "tempo_ingester_blocks_flushed_total")) - require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"tempodb_blocklist_length"}, e2e.WaitMissingMetrics)) - require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(3), "tempo_query_frontend_queries_total")) + // force clear completed block + callFlush(t, tempo) - // query trace - should fetch from backend - queryAndAssertTrace(t, apiClient, info) + // test metrics + require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(1), "tempo_ingester_blocks_flushed_total")) + require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"tempodb_blocklist_length"}, e2e.WaitMissingMetrics)) + require.NoError(t, 
tempo.WaitSumMetrics(e2e.Equals(3), "tempo_query_frontend_queries_total")) - // search the backend. this works b/c we're passing a start/end AND setting query ingesters within min/max to 0 - now := time.Now() - util.SearchAndAssertTraceBackend(t, apiClient, info, now.Add(-20*time.Minute).Unix(), now.Unix()) + // query trace - should fetch from backend + queryAndAssertTrace(t, apiClient, info) - util.SearchAndAsserTagsBackend(t, apiClient, now.Add(-20*time.Minute).Unix(), now.Unix()) + // search the backend. this works b/c we're passing a start/end AND setting query ingesters within min/max to 0 + now := time.Now() + util.SearchAndAssertTraceBackend(t, apiClient, info, now.Add(-20*time.Minute).Unix(), now.Unix()) - // find the trace with streaming. using the http server b/c that's what Grafana will do - grpcClient, err := util.NewSearchGRPCClient(context.Background(), tempo.Endpoint(3200)) - require.NoError(t, err) + util.SearchAndAsserTagsBackend(t, apiClient, now.Add(-20*time.Minute).Unix(), now.Unix()) - util.SearchStreamAndAssertTrace(t, context.Background(), grpcClient, info, now.Add(-20*time.Minute).Unix(), now.Unix()) - }) + // find the trace with streaming. using the http server b/c that's what Grafana will do + grpcClient, err := util.NewSearchGRPCClient(context.Background(), tempo.Endpoint(3200)) + require.NoError(t, err) + + util.SearchStreamAndAssertTrace(t, context.Background(), grpcClient, info, now.Add(-20*time.Minute).Unix(), now.Unix()) + }) + } } } From e46d97b66f333d7330b94a6402b8233d7e491c0e Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Wed, 13 Mar 2024 10:44:49 -0400 Subject: [PATCH 08/12] Fix format check error --- tempodb/backend/s3/s3_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tempodb/backend/s3/s3_test.go b/tempodb/backend/s3/s3_test.go index dceface4c37..bcccda6f39e 100644 --- a/tempodb/backend/s3/s3_test.go +++ b/tempodb/backend/s3/s3_test.go @@ -7,7 +7,6 @@ import ( "encoding/json" "encoding/xml" "fmt" - "github.com/google/uuid" "math/rand" "net/http" "net/http/httptest" @@ -19,6 +18,8 @@ import ( "testing" "time" + "github.com/google/uuid" + "github.com/aws/aws-sdk-go/service/s3" "github.com/grafana/dskit/flagext" "github.com/minio/minio-go/v7" From fbf0ca7761d573e841d6605cc9e85642f9071699 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Wed, 13 Mar 2024 11:24:49 -0400 Subject: [PATCH 09/12] Fix failing e2e tests --- integration/e2e/e2e_test.go | 9 +++++++-- integration/e2e/overrides_test.go | 9 +++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/integration/e2e/e2e_test.go b/integration/e2e/e2e_test.go index bfa6e6f5aad..0ea52f48b86 100644 --- a/integration/e2e/e2e_test.go +++ b/integration/e2e/e2e_test.go @@ -344,16 +344,21 @@ func TestShutdownDelay(t *testing.T) { require.NoError(t, err) defer s.Close() + // copy config template to shared directory and expand template variables + tmplConfig := map[string]any{"Prefix": ""} + configFile, err := util.CopyTemplateToSharedDir(s, configAllInOneS3, "config.yaml", tmplConfig) + require.NoError(t, err) + // set up the backend cfg := app.Config{} - buff, err := os.ReadFile(configAllInOneS3) + buff, err := os.ReadFile(configFile) require.NoError(t, err) err = yaml.UnmarshalStrict(buff, &cfg) require.NoError(t, err) _, err = backend.New(s, cfg) require.NoError(t, err) - require.NoError(t, util.CopyFileToSharedDir(s, configAllInOneS3, "config.yaml")) + require.NoError(t, util.CopyFileToSharedDir(s, configFile, "config.yaml")) tempo := 
util.NewTempoAllInOne("-shutdown-delay=5s") // this line tests confirms that the readiness flag is up diff --git a/integration/e2e/overrides_test.go b/integration/e2e/overrides_test.go index 98cf0b7fbfa..dcfec803a45 100644 --- a/integration/e2e/overrides_test.go +++ b/integration/e2e/overrides_test.go @@ -49,16 +49,21 @@ func TestOverrides(t *testing.T) { require.NoError(t, err) defer s.Close() + // copy config template to shared directory and expand template variables + tmplConfig := map[string]any{"Prefix": ""} + configFile, err := util.CopyTemplateToSharedDir(s, tc.configFile, "config.yaml", tmplConfig) + require.NoError(t, err) + // set up the backend cfg := app.Config{} - buff, err := os.ReadFile(tc.configFile) + buff, err := os.ReadFile(configFile) require.NoError(t, err) err = yaml.UnmarshalStrict(buff, &cfg) require.NoError(t, err) _, err = backend.New(s, cfg) require.NoError(t, err) - require.NoError(t, util.CopyFileToSharedDir(s, tc.configFile, "config.yaml")) + require.NoError(t, util.CopyFileToSharedDir(s, configFile, "config.yaml")) tempo := util.NewTempoAllInOne() require.NoError(t, s.StartAndWaitReady(tempo)) From aa497c75c775354ee60f1f9537ff34dacf56d16a Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Wed, 13 Mar 2024 12:22:31 -0400 Subject: [PATCH 10/12] Remove unnecessary prefix permutations from e2e test --- integration/e2e/e2e_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/integration/e2e/e2e_test.go b/integration/e2e/e2e_test.go index 0ea52f48b86..d4ad2b64aa9 100644 --- a/integration/e2e/e2e_test.go +++ b/integration/e2e/e2e_test.go @@ -62,10 +62,6 @@ func TestAllInOne(t *testing.T) { name string prefix string }{ - { - name: "empty-string-prefix", - prefix: "", - }, { name: "no-prefix", }, @@ -73,10 +69,6 @@ func TestAllInOne(t *testing.T) { name: "prefix", prefix: "a/b/c/", }, - { - name: "prefix-no-trailing-slash", - prefix: "a/b/c", - }, } for _, tc := range testBackends { From 5fffc1cf6bcb2700318a2e7021ff9cb16c0c9c77 Mon Sep 17 00:00:00 2001 From: Ben Foster Date: Thu, 14 Mar 2024 08:29:25 -0400 Subject: [PATCH 11/12] Remove unnecessary test config file copy --- integration/e2e/e2e_test.go | 1 - integration/e2e/overrides_test.go | 1 - 2 files changed, 2 deletions(-) diff --git a/integration/e2e/e2e_test.go b/integration/e2e/e2e_test.go index d4ad2b64aa9..36e1863ac3e 100644 --- a/integration/e2e/e2e_test.go +++ b/integration/e2e/e2e_test.go @@ -350,7 +350,6 @@ func TestShutdownDelay(t *testing.T) { _, err = backend.New(s, cfg) require.NoError(t, err) - require.NoError(t, util.CopyFileToSharedDir(s, configFile, "config.yaml")) tempo := util.NewTempoAllInOne("-shutdown-delay=5s") // this line tests confirms that the readiness flag is up diff --git a/integration/e2e/overrides_test.go b/integration/e2e/overrides_test.go index dcfec803a45..2e49dbbf476 100644 --- a/integration/e2e/overrides_test.go +++ b/integration/e2e/overrides_test.go @@ -63,7 +63,6 @@ func TestOverrides(t *testing.T) { _, err = backend.New(s, cfg) require.NoError(t, err) - require.NoError(t, util.CopyFileToSharedDir(s, configFile, "config.yaml")) tempo := util.NewTempoAllInOne() require.NoError(t, s.StartAndWaitReady(tempo)) From f93ed67fb9500be33dd49dfa54b394a83d64bfdd Mon Sep 17 00:00:00 2001 From: Zach Leslie Date: Thu, 14 Mar 2024 17:57:49 +0000 Subject: [PATCH 12/12] Ignore lint --- integration/e2e/e2e_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/integration/e2e/e2e_test.go b/integration/e2e/e2e_test.go index 36e1863ac3e..81ecb2832f9 100644 --- 
a/integration/e2e/e2e_test.go +++ b/integration/e2e/e2e_test.go @@ -110,6 +110,7 @@ func TestAllInOne(t *testing.T) { require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(spanCount(expected)), "tempo_distributor_spans_received_total")) // test echo + // nolint:goconst assertEcho(t, "http://"+tempo.Endpoint(3200)+"/api/echo") apiClient := httpclient.New("http://"+tempo.Endpoint(3200), "")
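
A minimal, runnable Go sketch (separate from the patches above) of the key-parsing approach that patches 01, 02, and 05 converge on: objects are listed under "<prefix><tenant>/", that listing prefix is trimmed from each returned key, and only keys whose remainder is "<blockID>/meta.json" or "<blockID>/meta.compacted.json" are treated as blocks. The helper name parseBlockKey and the use of path.Join to normalize a configured prefix with or without a trailing slash are assumptions made for illustration; they are not identifiers or code taken verbatim from the Tempo backends.

// parse_block_key_sketch.go — illustrative only, not part of the patch series.
package main

import (
	"fmt"
	"path"
	"strings"

	"github.com/google/uuid"
)

const (
	// Meta object names referenced by the diffs (backend.MetaName /
	// backend.CompactedMetaName in the Tempo code).
	metaName          = "meta.json"
	compactedMetaName = "meta.compacted.json"
)

// parseBlockKey trims the listing prefix (configured prefix + tenant) from an
// object key. If the remainder is "<blockID>/<meta file>", it returns the
// parsed block ID and whether the block is compacted.
func parseBlockKey(key, cfgPrefix, tenant string) (id uuid.UUID, compacted bool, ok bool) {
	// path.Join normalizes a prefix with or without a trailing slash, which is
	// the behavior patch 05 calls out; how Tempo builds this prefix internally
	// differs, so this line is an assumption of the sketch.
	listPrefix := path.Join(cfgPrefix, tenant) + "/"

	// i.e. "<blockID>/meta.json" after the prefix is trimmed.
	parts := strings.Split(strings.TrimPrefix(key, listPrefix), "/")
	if len(parts) != 2 {
		return uuid.UUID{}, false, false
	}

	switch parts[1] {
	case metaName, compactedMetaName:
	default:
		return uuid.UUID{}, false, false
	}

	id, err := uuid.Parse(parts[0])
	if err != nil {
		return uuid.UUID{}, false, false
	}
	return id, parts[1] == compactedMetaName, true
}

func main() {
	keys := []string{
		"a/b/c/single-tenant/00000000-0000-0000-0000-000000000000/meta.json",
		"a/b/c/single-tenant/00000000-0000-0000-0000-000000000001/meta.compacted.json",
		"a/b/c/single-tenant/00000000-0000-0000-0000-000000000002/data.parquet",
	}
	for _, k := range keys {
		id, compacted, ok := parseBlockKey(k, "a/b/c", "single-tenant")
		fmt.Println(k, "->", id, "compacted:", compacted, "ok:", ok)
	}
}

Under these assumptions, an empty prefix, "a/b/c", and "a/b/c/" all resolve to the same listing prefix, which is the trailing-slash permutation the poller and e2e integration tests above exercise.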